Merge branch 'bc/asciidoctor-tab-width'
author Junio C Hamano <gitster@pobox.com>
Wed, 23 May 2018 05:38:25 +0000 (14:38 +0900)
committer Junio C Hamano <gitster@pobox.com>
Wed, 23 May 2018 05:38:25 +0000 (14:38 +0900)
Asciidoctor gives a reasonable imitation of AsciiDoc, but by default it
does not render illustrations in literal blocks correctly when they are
indented with HT.  The problem is fixed by forcing 8-space tabs.
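
A rough sketch of the idea, not necessarily the exact change made on this
branch (which is presumably applied via Documentation/Makefile): Asciidoctor
expands tabs according to its `tabsize` attribute, so building a page with
8-space tabs amounts to something like the following. The input file and
backend below are only examples.

------------------------------------------------
# Hypothetical direct invocation illustrating the tabsize attribute.
$ asciidoctor -a tabsize=8 -b html5 Documentation/revisions.txt
------------------------------------------------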

* bc/asciidoctor-tab-width:
Documentation: render revisions correctly under Asciidoctor
Documentation: use 8-space tabs with Asciidoctor

584 files changed:
.gitattributes
.gitignore
.mailmap
Documentation/Makefile
Documentation/RelNotes/2.18.0.txt [new file with mode: 0644]
Documentation/SubmittingPatches
Documentation/config.txt
Documentation/diff-options.txt
Documentation/fetch-options.txt
Documentation/git-add.txt
Documentation/git-apply.txt
Documentation/git-bisect.txt
Documentation/git-branch.txt
Documentation/git-bundle.txt
Documentation/git-clone.txt
Documentation/git-commit-graph.txt [new file with mode: 0644]
Documentation/git-config.txt
Documentation/git-cvsserver.txt
Documentation/git-diff-index.txt
Documentation/git-diff-tree.txt
Documentation/git-diff.txt
Documentation/git-fast-export.txt
Documentation/git-fast-import.txt
Documentation/git-fetch-pack.txt
Documentation/git-filter-branch.txt
Documentation/git-fmt-merge-msg.txt
Documentation/git-for-each-ref.txt
Documentation/git-format-patch.txt
Documentation/git-gc.txt
Documentation/git-grep.txt
Documentation/git-http-fetch.txt
Documentation/git-http-push.txt
Documentation/git-imap-send.txt
Documentation/git-index-pack.txt
Documentation/git-log.txt
Documentation/git-ls-files.txt
Documentation/git-ls-remote.txt
Documentation/git-mktree.txt
Documentation/git-name-rev.txt
Documentation/git-p4.txt
Documentation/git-pack-objects.txt
Documentation/git-prune.txt
Documentation/git-pull.txt
Documentation/git-push.txt
Documentation/git-read-tree.txt
Documentation/git-rebase.txt
Documentation/git-receive-pack.txt
Documentation/git-remote-ext.txt
Documentation/git-remote.txt
Documentation/git-repack.txt
Documentation/git-replace.txt
Documentation/git-request-pull.txt
Documentation/git-send-email.txt
Documentation/git-send-pack.txt
Documentation/git-shell.txt
Documentation/git-shortlog.txt
Documentation/git-show-branch.txt
Documentation/git-show-ref.txt
Documentation/git-show.txt
Documentation/git-status.txt
Documentation/git-submodule.txt
Documentation/git-svn.txt
Documentation/git-update-index.txt
Documentation/git-update-ref.txt
Documentation/git-var.txt
Documentation/git-web--browse.txt
Documentation/git-worktree.txt
Documentation/git.txt
Documentation/gitattributes.txt
Documentation/githooks.txt
Documentation/gitk.txt
Documentation/gitremote-helpers.txt
Documentation/gitrepository-layout.txt
Documentation/glossary-content.txt
Documentation/howto/recover-corrupted-object-harder.txt
Documentation/revisions.txt
Documentation/technical/api-config.txt
Documentation/technical/api-directory-listing.txt
Documentation/technical/api-object-access.txt
Documentation/technical/api-submodule-config.txt
Documentation/technical/commit-graph-format.txt [new file with mode: 0644]
Documentation/technical/commit-graph.txt [new file with mode: 0644]
Documentation/technical/hash-function-transition.txt
Documentation/technical/pack-format.txt
Documentation/technical/protocol-v2.txt [new file with mode: 0644]
Documentation/technical/shallow.txt
GIT-VERSION-GEN
Makefile
RelNotes
advice.c
advice.h
alloc.c
apply.c
archive-tar.c
archive-zip.c
archive.c
archive.h
argv-array.c
argv-array.h
attr.c
bisect.c
blame.c
builtin.h
builtin/add.c
builtin/am.c
builtin/blame.c
builtin/branch.c
builtin/cat-file.c
builtin/checkout.c
builtin/clone.c
builtin/column.c
builtin/commit-graph.c [new file with mode: 0644]
builtin/commit-tree.c
builtin/commit.c
builtin/config.c
builtin/count-objects.c
builtin/describe.c
builtin/diff.c
builtin/difftool.c
builtin/fast-export.c
builtin/fetch-pack.c
builtin/fetch.c
builtin/fmt-merge-msg.c
builtin/fsck.c
builtin/gc.c
builtin/grep.c
builtin/hash-object.c
builtin/help.c
builtin/index-pack.c
builtin/init-db.c
builtin/log.c
builtin/ls-files.c
builtin/ls-remote.c
builtin/ls-tree.c
builtin/merge-tree.c
builtin/merge.c
builtin/mktag.c
builtin/mktree.c
builtin/mv.c
builtin/name-rev.c
builtin/notes.c
builtin/pack-objects.c
builtin/pack-redundant.c
builtin/pack-refs.c
builtin/prune.c
builtin/pull.c
builtin/push.c
builtin/rebase--helper.c
builtin/receive-pack.c
builtin/reflog.c
builtin/remote.c
builtin/repack.c
builtin/replace.c
builtin/reset.c
builtin/rev-list.c
builtin/rev-parse.c
builtin/rm.c
builtin/send-pack.c
builtin/serve.c [new file with mode: 0644]
builtin/shortlog.c
builtin/show-branch.c
builtin/show-ref.c
builtin/submodule--helper.c
builtin/tag.c
builtin/unpack-file.c
builtin/unpack-objects.c
builtin/update-index.c
builtin/upload-pack.c [new file with mode: 0644]
builtin/verify-commit.c
builtin/verify-tag.c
builtin/worktree.c
builtin/write-tree.c
bulk-checkin.c
bulk-checkin.h
bundle.c
cache-tree.c
cache-tree.h
cache.h
chdir-notify.c [new file with mode: 0644]
chdir-notify.h [new file with mode: 0644]
ci/run-build-and-tests.sh
color.c
color.h
combine-diff.c
command-list.txt
commit-graph.c [new file with mode: 0644]
commit-graph.h [new file with mode: 0644]
commit.c
commit.h
common-main.c
compat/mingw.c
config.c
config.h
config.mak.dev [new file with mode: 0644]
config.mak.uname
configure.ac
connect.c
connect.h
contrib/coccinelle/commit.cocci [new file with mode: 0644]
contrib/completion/git-completion.bash
contrib/convert-grafts-to-replace-refs.sh [deleted file]
contrib/diff-highlight/DiffHighlight.pm
contrib/diff-highlight/t/t9400-diff-highlight.sh
contrib/emacs/.gitignore [deleted file]
contrib/emacs/Makefile [deleted file]
contrib/emacs/README
contrib/emacs/git-blame.el
contrib/emacs/git.el
contrib/examples/README
contrib/examples/builtin-fetch--tool.c [deleted file]
contrib/examples/git-am.sh [deleted file]
contrib/examples/git-checkout.sh [deleted file]
contrib/examples/git-clean.sh [deleted file]
contrib/examples/git-clone.sh [deleted file]
contrib/examples/git-commit.sh [deleted file]
contrib/examples/git-difftool.perl [deleted file]
contrib/examples/git-fetch.sh [deleted file]
contrib/examples/git-gc.sh [deleted file]
contrib/examples/git-log.sh [deleted file]
contrib/examples/git-ls-remote.sh [deleted file]
contrib/examples/git-merge-ours.sh [deleted file]
contrib/examples/git-merge.sh [deleted file]
contrib/examples/git-notes.sh [deleted file]
contrib/examples/git-pull.sh [deleted file]
contrib/examples/git-remote.perl [deleted file]
contrib/examples/git-repack.sh [deleted file]
contrib/examples/git-rerere.perl [deleted file]
contrib/examples/git-reset.sh [deleted file]
contrib/examples/git-resolve.sh [deleted file]
contrib/examples/git-revert.sh [deleted file]
contrib/examples/git-svnimport.perl [deleted file]
contrib/examples/git-svnimport.txt [deleted file]
contrib/examples/git-tag.sh [deleted file]
contrib/examples/git-verify-tag.sh [deleted file]
contrib/examples/git-whatchanged.sh [deleted file]
contrib/mw-to-git/Makefile
contrib/update-unicode/README
contrib/update-unicode/update_unicode.sh
convert.c
convert.h
credential.c
csum-file.c
csum-file.h
daemon.c
detect-compiler [new file with mode: 0755]
diff.c
dir.c
dir.h
entry.c
environment.c
exec-cmd.c [new file with mode: 0644]
exec-cmd.h [new file with mode: 0644]
exec_cmd.c [deleted file]
exec_cmd.h [deleted file]
fast-import.c
fetch-pack.c
fetch-pack.h
fsck.c
fsmonitor.c
gettext.c
git-compat-util.h
git-filter-branch.sh
git-gui/git-gui.sh
git-gui/lib/sshkey.tcl
git-gui/lib/themed.tcl
git-rebase--am.sh
git-rebase--interactive.sh
git-rebase--merge.sh
git-rebase.sh
git-send-email.perl
git-stash.sh
git-svn.perl
git.c
gpg-interface.c
gpg-interface.h
grep.c
help.c
http-backend.c
http-fetch.c
http-push.c
http-walker.c
http.c
http.h
imap-send.c
line-log.c
list-objects-filter.c
list-objects.c
log-tree.c
log-tree.h
ls-refs.c [new file with mode: 0644]
ls-refs.h [new file with mode: 0644]
mailmap.c
match-trees.c
mem-pool.c [new file with mode: 0644]
mem-pool.h [new file with mode: 0644]
merge-blobs.c
merge-recursive.c
merge-recursive.h
mergetools/guiffy [new file with mode: 0644]
notes-cache.c
notes-merge.c
notes.c
object-store.h [new file with mode: 0644]
object.c
object.h
pack-bitmap-write.c
pack-bitmap.c
pack-bitmap.h
pack-check.c
pack-objects.c
pack-objects.h
pack-revindex.c
pack-write.c
packfile.c
packfile.h
pager.c
parse-options-cb.c
parse-options.c
parse-options.h
path.c
perl/Git.pm
perl/Git/I18N.pm
perl/Git/SVN.pm
perl/header_templates/fixed_prefix.template.pl [new file with mode: 0644]
perl/header_templates/runtime_prefix.template.pl [new file with mode: 0644]
pkt-line.c
pkt-line.h
pretty.c
protocol.c
protocol.h
reachable.c
read-cache.c
ref-filter.c
ref-filter.h
refs.c
refs.h
refs/files-backend.c
refs/packed-backend.c
remote-curl.c
remote-testsvn.c
remote.c
remote.h
replace-object.c [new file with mode: 0644]
replace-object.h [new file with mode: 0644]
replace_object.c [deleted file]
repository.c
repository.h
rerere.c
resolve-undo.c
resolve-undo.h
revision.c
run-command.c
send-pack.c
sequencer.c
sequencer.h
serve.c [new file with mode: 0644]
serve.h [new file with mode: 0644]
server-info.c
setup.c
sha1-file.c [new file with mode: 0644]
sha1-name.c [new file with mode: 0644]
sha1_file.c [deleted file]
sha1_name.c [deleted file]
shell.c
sideband.c
strbuf.c
strbuf.h
streaming.c
streaming.h
submodule-config.c
submodule-config.h
submodule.c
submodule.h
t/README
t/helper/test-chmtime.c
t/helper/test-config.c
t/helper/test-ctype.c
t/helper/test-date.c
t/helper/test-delta.c
t/helper/test-drop-caches.c
t/helper/test-dump-cache-tree.c
t/helper/test-dump-split-index.c
t/helper/test-example-decorate.c
t/helper/test-genrandom.c
t/helper/test-hashmap.c
t/helper/test-index-version.c
t/helper/test-lazy-init-name-hash.c
t/helper/test-match-trees.c
t/helper/test-mergesort.c
t/helper/test-mktemp.c
t/helper/test-online-cpus.c
t/helper/test-path-utils.c
t/helper/test-pkt-line.c [new file with mode: 0644]
t/helper/test-prio-queue.c
t/helper/test-read-cache.c
t/helper/test-ref-store.c
t/helper/test-regex.c
t/helper/test-revision-walking.c
t/helper/test-run-command.c
t/helper/test-scrap-cache-tree.c
t/helper/test-sha1-array.c
t/helper/test-sha1.c
t/helper/test-sha1.sh
t/helper/test-sigchain.c
t/helper/test-strcmp-offset.c
t/helper/test-string-list.c
t/helper/test-submodule-config.c
t/helper/test-subprocess.c
t/helper/test-tool.c [new file with mode: 0644]
t/helper/test-tool.h [new file with mode: 0644]
t/helper/test-urlmatch-normalization.c
t/helper/test-wildmatch.c
t/helper/test-write-cache.c
t/lib-git-p4.sh
t/lib-git-svn.sh
t/lib-pack.sh
t/perf/aggregate.perl
t/perf/bisect_regression [new file with mode: 0755]
t/perf/bisect_run_script [new file with mode: 0755]
t/perf/p0002-read-cache.sh
t/perf/p0004-lazy-init-name-hash.sh
t/perf/p0007-write-cache.sh
t/perf/p0071-sort.sh
t/perf/p7519-fsmonitor.sh
t/perf/run
t/t0005-signals.sh
t/t0006-date.sh
t/t0009-prio-queue.sh
t/t0011-hashmap.sh
t/t0013-sha1dc.sh
t/t0021-conversion.sh
t/t0028-working-tree-encoding.sh [new file with mode: 0755]
t/t0040-parse-options.sh
t/t0041-usage.sh [new file with mode: 0755]
t/t0060-path-utils.sh
t/t0061-run-command.sh
t/t0062-revision-walking.sh
t/t0063-string-list.sh
t/t0064-sha1-array.sh
t/t0065-strcmp-offset.sh
t/t0070-fundamental.sh
t/t0090-cache-tree.sh
t/t0110-urlmatch-normalization.sh
t/t1006-cat-file.sh
t/t1011-read-tree-sparse-checkout.sh
t/t1050-large.sh
t/t1300-config.sh [new file with mode: 0755]
t/t1300-repo-config.sh [deleted file]
t/t1304-default-acl.sh
t/t1305-config-include.sh
t/t1308-config-set.sh
t/t1309-early-config.sh
t/t1310-config-default.sh [new file with mode: 0755]
t/t1405-main-ref-store.sh
t/t1406-submodule-ref-store.sh
t/t1407-worktree-ref-store.sh
t/t1411-reflog-show.sh
t/t1501-work-tree.sh
t/t1507-rev-parse-upstream.sh
t/t1510-repo-setup.sh
t/t1600-index.sh
t/t1700-split-index.sh
t/t2020-checkout-detach.sh
t/t2022-checkout-paths.sh
t/t2025-worktree-add.sh
t/t2026-worktree-prune.sh
t/t2028-worktree-move.sh
t/t2101-update-index-reupdate.sh
t/t2104-update-index-skip-worktree.sh
t/t2107-update-index-basic.sh
t/t3008-ls-files-lazy-init-name-hash.sh
t/t3070-wildmatch.sh
t/t3200-branch.sh
t/t3306-notes-prune.sh
t/t3404-rebase-interactive.sh
t/t3418-rebase-continue.sh
t/t3421-rebase-topology-linear.sh
t/t3428-rebase-signoff.sh
t/t3430-rebase-merges.sh [new file with mode: 0755]
t/t3501-revert-cherry-pick.sh
t/t3510-cherry-pick-sequence.sh
t/t3600-rm.sh
t/t3700-add.sh
t/t3905-stash-include-untracked.sh
t/t4001-diff-rename.sh
t/t4011-diff-symlink.sh
t/t4013-diff-various.sh
t/t4014-format-patch.sh
t/t4035-diff-quiet.sh
t/t4151-am-abort.sh
t/t4200-rerere.sh
t/t4201-shortlog.sh
t/t5000-tar-tree.sh
t/t5300-pack-object.sh
t/t5301-sliding-window.sh
t/t5302-pack-index.sh
t/t5303-pack-corruption-resilience.sh
t/t5304-prune.sh
t/t5310-pack-bitmaps.sh
t/t5313-pack-bounds-checks.sh
t/t5314-pack-cycle-detection.sh
t/t5316-pack-delta-depth.sh
t/t5318-commit-graph.sh [new file with mode: 0755]
t/t5400-send-pack.sh
t/t5404-tracking-branches.sh
t/t5510-fetch.sh
t/t5512-ls-remote.sh
t/t5516-fetch-push.sh
t/t5541-http-push-smart.sh
t/t5546-receive-limits.sh
t/t5547-push-quarantine.sh
t/t5550-http-fetch-dumb.sh
t/t5561-http-backend.sh
t/t5608-clone-2gb.sh
t/t5701-git-serve.sh [new file with mode: 0755]
t/t5702-protocol-v2.sh [new file with mode: 0755]
t/t6001-rev-list-graft.sh
t/t6022-merge-rename.sh
t/t6043-merge-rename-directories.sh [new file with mode: 0755]
t/t6046-merge-skip-unneeded-updates.sh [new file with mode: 0755]
t/t6050-replace.sh
t/t6500-gc.sh
t/t6501-freshen-objects.sh
t/t7001-mv.sh
t/t7003-filter-branch.sh
t/t7004-tag.sh
t/t7005-editor.sh
t/t7400-submodule-basic.sh
t/t7411-submodule-config.sh
t/t7501-commit.sh
t/t7508-status.sh
t/t7607-merge-overwrite.sh
t/t7700-repack.sh
t/t7701-repack-unpack-unreachable.sh
t/t7812-grep-icase-non-ascii.sh
t/t9004-example.sh
t/t9100-git-svn-basic.sh
t/t9104-git-svn-follow-parent.sh
t/t9108-git-svn-glob.sh
t/t9109-git-svn-multi-glob.sh
t/t9110-git-svn-use-svm-props.sh
t/t9111-git-svn-use-svnsync-props.sh
t/t9114-git-svn-dcommit-merge.sh
t/t9130-git-svn-authors-file.sh
t/t9138-git-svn-authors-prog.sh
t/t9153-git-svn-rewrite-uuid.sh
t/t9168-git-svn-partially-globbed-names.sh
t/t9300-fast-import.sh
t/t9350-fast-export.sh
t/t9802-git-p4-filetype.sh
t/t9803-git-p4-shell-metachars.sh
t/t9813-git-p4-preserve-users.sh
t/t9820-git-p4-editor-handling.sh
t/t9902-completion.sh
t/test-lib-functions.sh
t/test-lib.sh
tag.c
tmp-objdir.c
trace.c
trace.h
transport-helper.c
transport-internal.h
transport.c
transport.h
tree-walk.c
tree-walk.h
tree.c
tree.h
unicode-width.h [new file with mode: 0644]
unicode_width.h [deleted file]
unpack-trees.c
unpack-trees.h
upload-pack.c
upload-pack.h [new file with mode: 0644]
utf8.c
utf8.h
walker.c
walker.h
wrap-for-bin.sh
write-or-die.c [new file with mode: 0644]
write_or_die.c [deleted file]
wt-status.c
wt-status.h
xdiff-interface.c
index 8ce9c6b8888fe6c12949d30e3e8b461cb67bb43f..1bdc91e282c5393c527b3902a208227c19971b84 100644 (file)
@@ -1,8 +1,10 @@
 * whitespace=!indent,trail,space
 *.[ch] whitespace=indent,trail,space diff=cpp
 *.sh whitespace=indent,trail,space eol=lf
-*.perl eol=lf
-*.pm eol=lf
+*.perl eol=lf diff=perl
+*.pl eof=lf diff=perl
+*.pm eol=lf diff=perl
+*.py eol=lf diff=python
 /Documentation/git-*.txt eol=lf
 /command-list.txt eol=lf
 /GIT-VERSION-GEN eol=lf
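
The hunk above assigns `diff=perl`/`diff=python` drivers and LF line endings
to more file types; a quick way to confirm which attributes apply to a path
is `git check-attr`. The paths below are only examples.

------------------------------------------------
$ git check-attr diff eol -- git-p4.py perl/Git.pm
------------------------------------------------
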
index 833ef3b0b783b8180d0dad1ce336713bddf09b26..b2a1ae4a1d6293004b10d14c33bb64fd9d14fe8b 100644 (file)
@@ -3,6 +3,7 @@
 /GIT-LDFLAGS
 /GIT-PREFIX
 /GIT-PERL-DEFINES
+/GIT-PERL-HEADER
 /GIT-PYTHON-VARS
 /GIT-SCRIPT-DEFINES
 /GIT-USER-AGENT
@@ -34,6 +35,7 @@
 /git-clone
 /git-column
 /git-commit
+/git-commit-graph
 /git-commit-tree
 /git-config
 /git-count-objects
 /git-rm
 /git-send-email
 /git-send-pack
+/git-serve
 /git-sh-i18n
 /git-sh-i18n--envsubst
 /git-sh-setup
index 7c71e88ea51c52d453b0d6c08a3415f4c03de22b..df7cf6313c7dd0c5c065e448fd7c725ff537a08b 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -25,8 +25,8 @@ Ben Walton <bdwalton@gmail.com> <bwalton@artsci.utoronto.ca>
 Benoit Sigoure <tsunanet@gmail.com> <tsuna@lrde.epita.fr>
 Bernt Hansen <bernt@norang.ca> <bernt@alumni.uwaterloo.ca>
 Brandon Casey <drafnel@gmail.com> <casey@nrlssc.navy.mil>
-brian m. carlson <sandals@crustytoothpaste.ath.cx> Brian M. Carlson <sandals@crustytoothpaste.ath.cx>
-brian m. carlson <sandals@crustytoothpaste.ath.cx> <sandals@crustytoothpaste.net>
+brian m. carlson <sandals@crustytoothpaste.net> Brian M. Carlson <sandals@crustytoothpaste.ath.cx>
+brian m. carlson <sandals@crustytoothpaste.net> <sandals@crustytoothpaste.ath.cx>
 Bryan Larsen <bryan@larsen.st> <bryan.larsen@gmail.com>
 Bryan Larsen <bryan@larsen.st> <bryanlarsen@yahoo.com>
 Cheng Renquan <crquan@gmail.com>
index bcd216d96c6643b92f3db01ce8a5d9b35186e750..d079d7c73aca1fd91ac18045aab46b2446f43a16 100644 (file)
@@ -78,6 +78,7 @@ TECH_DOCS += technical/pack-heuristics
 TECH_DOCS += technical/pack-protocol
 TECH_DOCS += technical/protocol-capabilities
 TECH_DOCS += technical/protocol-common
+TECH_DOCS += technical/protocol-v2
 TECH_DOCS += technical/racy-git
 TECH_DOCS += technical/send-pack-pipeline
 TECH_DOCS += technical/shallow
diff --git a/Documentation/RelNotes/2.18.0.txt b/Documentation/RelNotes/2.18.0.txt
new file mode 100644 (file)
index 0000000..fccc2f3
--- /dev/null
@@ -0,0 +1,312 @@
+Git 2.18 Release Notes
+======================
+
+Updates since v2.17
+-------------------
+
+UI, Workflows & Features
+
+ * The rename detection logic in the "diff" family that is used in
+   "merge" has learned to guess that, when all of x/a, x/b and x/c have
+   moved to z/a, z/b and z/c, x/d added in the meantime would likely
+   also want to move to z/d, by taking the hint that the entire
+   directory 'x' moved to 'z'.  A bug causing dirty files involved in
+   a rename to be overwritten during merge has also been fixed as part
+   of this work.
+
+ * "git filter-branch" learned to use a different exit code to allow
+   the callers to tell the case where there were no new commits to
+   rewrite from other error cases.
+
+ * When built with more recent cURL, GIT_SSL_VERSION can now specify
+   "tlsv1.3" as its value.
+
+ * "git gui" learned that "~/.ssh/id_ecdsa.pub" and
+   "~/.ssh/id_ed25519.pub" are also possible SSH key files.
+   (merge 2e2f0288ef bb/git-gui-ssh-key-files later to maint).
+
+ * "git gui" performs commit upon CTRL/CMD+ENTER but the
+   CTRL/CMD+KP_ENTER (i.e. enter key on the numpad) did not have the
+   same key binding.  It now does.
+   (merge 28a1d94a06 bp/git-gui-bind-kp-enter later to maint).
+
+ * "git gui" has been taught to work with old versions of tk (like
+   8.5.7) that do not support "ttk::style theme use" as a way to query
+   the current theme.
+   (merge 4891961105 cb/git-gui-ttk-style later to maint).
+
+ * "git rebase" has learned to honor "--signoff" option when using
+   backends other than "am" (but not "--preserve-merges").
+
+ * "git branch --list" during an interrupted "rebase -i" now lets
+   users distinguish the case where a detached HEAD is being rebased
+   from the case where a normal branch is being rebased.
+
+ * "git mergetools" learned talking to guiffy.
+
+ * The scripts in contrib/emacs/ have outlived their usefulness and
+   have been replaced with a stub that errors out and tells the user
+   there are replacements.
+
+ * The new "checkout-encoding" attribute can ask Git to convert the
+   contents to the specified encoding when checking out to the working
+   tree (and the other way around when checking in).
+
+ * The "git config" command uses separate options e.g. "--int",
+   "--bool", etc. to specify what type the caller wants the value to
+   be interpreted as.  A new "--type=<typename>" option has been
+   introduced, which would make it cleaner to define new types.
+
+ * "git config --get" learned the "--default" option, to help the
+   calling script.  Building on top of the above changes, the
+   "git config" command learns the "--type=color" type.  Taken together,
+   you can do things like "git config --get foo.color --default blue"
+   and get the ANSI color sequence for the color given to the foo.color
+   variable, or "blue" if the variable does not exist.
+
+ * "git ls-remote" learned an option to allow sorting its output based
+   on the refnames being shown.
+
+ * The command line completion (in contrib/) has been taught that "git
+   stash save" has been deprecated ("git stash push" is the preferred
+   spelling in the new world) and does not offer it as a possible
+   completion candidate when "git stash push" can be.
+
+ * "git gc --prune=nonsense" spent long time repacking and then
+   silently failed when underlying "git prune --expire=nonsense"
+   failed to parse its command line.  This has been corrected.
+
+ * Error messages from "git push" can be painted for more visibility.
+
+ * "git http-fetch" (deprecated) had an optional and experimental
+   "feature" to fetch only commits and/or trees, which nobody used.
+   This has been removed.
+
+
+Performance, Internal Implementation, Development Support etc.
+
+ * A "git fetch" from a repository with insane number of refs into a
+   repository that is already up-to-date still wasted too many cycles
+   making many lstat(2) calls to see if these objects at the tips
+   exist as loose objects locally.  These lstat(2) calls are optimized
+   away by enumerating all loose objects beforehand.
+   It is unknown if the new strategy negatively affects existing use
+   cases, fetching into a repository with many loose objects from a
+   repository with a small number of refs.
+
+ * Git can be built to use either v1 or v2 of the PCRE library, and so
+   far, the build-time configuration USE_LIBPCRE=YesPlease instructed
+   the build procedure to use v1, but now it means v2.  USE_LIBPCRE1
+   and USE_LIBPCRE2 can be used to explicitly choose which version to
+   use, as before.
+
+ * The build procedure learned to optionally use symbolic links
+   (instead of hardlinks and copies) to install "git-foo" for built-in
+   commands, whose binaries are all identical.
+
+ * Conversion from uchar[20] to struct object_id continues.
+
+ * The way "git worktree prune" worked internally has been simplified,
+   by assuming how "git worktree move" moves an existing worktree to a
+   different place.
+
+ * Code clean-up for the "repository" abstraction.
+   (merge 00a3da2a13 nd/remove-ignore-env-field later to maint).
+
+ * Code to find the length to uniquely abbreviate object names based
+   on packfile content, which is a relatively recent addition, has been
+   optimized to use the same fan-out table.
+
+ * The mechanism to use parse-options API to automate the command line
+   completion continues to get extended and polished.
+
+ * Copies of old scripted Porcelain commands in contrib/examples/ have
+   been removed.
+
+ * Some tests that rely on the exact hardcoded values of object names
+   have been updated in preparation for hash function migration.
+
+ * Perf-test update.
+
+ * Test helper update.
+
+ * The effort continues to refactor the internal global data structure
+   to make it possible to open multiple repositories, work with and
+   then close them.
+
+ * Small test-helper programs have been consolidated into a single
+   binary.
+
+ * API clean-up around ref-filter code.
+
+ * Shell completion (in contrib) that gives a list of paths has been
+   optimized somewhat.
+
+ * The index file is updated to record the fsmonitor section after a
+   full scan was made, to avoid wasting the effort that has already
+   been spent.
+
+ * Performance measuring framework in t/perf learned to help bisecting
+   performance regressions.
+
+ * Some multi-word source filenames are being renamed to separate
+   words with dashes instead of underscores.
+
+ * A reusable "memory pool" implementation has been extracted from
+   fast-import.c, which in turn has become the first user of the
+   mem-pool API.
+
+ * A build-time option has been added to allow Git to be told to refer
+   to its associated files relative to the main binary, in the same
+   way that has been possible on Windows for quite some time, for
+   Linux, BSDs and Darwin.
+
+ * Precompute and store information necessary for ancestry traversal
+   in a separate file to optimize graph walking.
+
+ * The effort to pass the repository in-core structure throughout the
+   API continues.  This round deals with the code that implements the
+   refs/replace/ mechanism.
+
+ * The build procedure "make DEVELOPER=YesPlease" learned to enable a
+   bit more warning options depending on the compiler used to help
+   developers more.  There also is "make DEVOPTS=tokens" knob
+   available now, for those who want to help fixing warnings we
+   usually ignore, for example.
+
+ * A new version of the transport protocol is being worked on.
+
+ * The code to interface to GPG has been restructured somewhat to make
+   it cleaner to integrate with other types of signature systems later.
+
+
+Also contains various documentation updates and code clean-ups.
+
+
+Fixes since v2.17
+-----------------
+
+ * "git shortlog cruft" aborted with a BUG message when run outside a
+   Git repository.  The command has been taught to complain about
+   extra and unwanted arguments on its command line instead in such a
+   case.
+   (merge 4aa0161e83 ma/shortlog-revparse later to maint).
+
+ * "git stash push -u -- <pathspec>" gave an unnecessary and confusing
+   error message when there were no tracked files that match the
+   <pathspec>, which has been fixed.
+   (merge 353278687e tg/stash-untracked-with-pathspec-fix later to maint).
+
+ * "git tag --contains no-such-commit" gave a full list of options
+   after giving an error message.
+   (merge 3bb0923f06 ps/contains-id-error-message later to maint).
+
+ * "diff-highlight" filter (in contrib/) learned to undertand "git log
+   --graph" output better.
+   (merge 4551fbba14 jk/diff-highlight-graph-fix later to maint).
+
+ * When refs that do not point at a committish are given, "git
+   filter-branch" gave misleading error messages.  This has been
+   corrected.
+   (merge f78ab355e7 yk/filter-branch-non-committish-refs later to maint).
+
+ * "git submodule status" misbehaved on a submodule that has been
+   removed from the working tree.
+   (merge 74b6bda32f rs/status-with-removed-submodule later to maint).
+
+ * When a credential helper exits very quickly without reading its
+   input, it used to cause Git to die with SIGPIPE, which has been
+   fixed.
+   (merge a0d51e8d0e eb/cred-helper-ignore-sigpipe later to maint).
+
+ * "git rebase --keep-empty" still removed an empty commit if the
+   other side contained an empty commit (due to the "does an
+   equivalent patch exist already?" check), which has been corrected.
+   (merge 3d946165e1 pw/rebase-keep-empty-fixes later to maint).
+
+ * Some codepaths, including the refs API, get and keep relative
+   paths, that go out of sync when the process does chdir(2).  The
+   chdir-notify API is introduced to let these codepaths adjust these
+   cached paths to the new current directory.
+   (merge fb9c2d2703 jk/relative-directory-fix later to maint).
+
+ * "cd sub/dir && git commit ../path" ought to record the changes to
+   the file "sub/path", but this regressed long time ago.
+   (merge 86238e07ef bw/commit-partial-from-subdirectory-fix later to maint).
+
+ * Recent introduction of "--log-destination" option to "git daemon"
+   did not work well when the daemon was run under "--inetd" mode.
+   (merge e67d906d73 lw/daemon-log-destination later to maint).
+
+ * Small fix to the autoconf build procedure.
+   (merge 249482daf0 es/fread-reads-dir-autoconf-fix later to maint).
+
+ * Fix an unexploitable (because the oversized contents are not under
+   attacker's control) buffer overflow.
+   (merge d8579accfa bp/fsmonitor-bufsize-fix later to maint).
+
+ * A recent simplification of the build procedure forgot a bit of a
+   tweak to the build procedure of contrib/mw-to-git/.
+   (merge d8698987f3 ab/simplify-perl-makefile later to maint).
+
+ * Moving a submodule that itself has a submodule in it with "git mv"
+   forgot to make the necessary adjustment to the nested sub-submodules;
+   now the codepath learned to recurse into the submodules.
+
+ * "git config --unset a.b", when "a.b" is the last variable in an
+   otherwise empty section "a", left an empty section "a" behind, and
+   worse yet, a subsequent "git config a.c value" did not reuse that
+   empty shell and instead created a new one.  These have been
+   (partially) corrected.
+   (merge c71d8bb38a js/empty-config-section-fix later to maint).
+
+ * "git worktree remove" learned that "-f" is a shorthand for
+   "--force" option, just like for "git worktree add".
+   (merge d228eea514 sb/worktree-remove-opt-force later to maint).
+
+ * The completion script (in contrib/) learned to clear the cached list of
+   command line options upon dot-sourcing it again in a more efficient
+   way.
+   (merge 94408dc71c sg/completion-clear-cached later to maint).
+
+ * "git svn" had a minor thinko/typo which has been fixed.
+   (merge 51db271587 ab/git-svn-get-record-typofix later to maint).
+
+ * During a "rebase -i" session, the code could give older timestamp
+   to commits created by later "pick" than an earlier "reword", which
+   has been corrected.
+   (merge 12f7babd6b js/ident-date-fix later to maint).
+
+ * "git submodule status" did not check the symbolic revision name it
+   computed for the submodule HEAD is not the NULL, and threw it at
+   printf routines, which has been corrected.
+   (merge 0b5e2ea7cf nd/submodule-status-fix later to maint).
+
+ * When fed input that already has In-Reply-To: and/or References:
+   headers and told to add the same information, "git send-email"
+   added these headers separately, instead of appending to an existing
+   one, which is a violation of the RFC.  This has been corrected.
+   (merge 256be1d3f0 sa/send-email-dedup-some-headers later to maint).
+
+ * "git fast-export" had a regression in v2.15.0 era where it skipped
+   some merge commits in certain cases, which has been corrected.
+   (merge be011bbe00 ma/fast-export-skip-merge-fix later to maint).
+
+ * Other minor doc, test and build updates and code cleanups.
+   (merge 248f66ed8e nd/trace-with-env later to maint).
+   (merge 14ced5562c ys/bisect-object-id-missing-conversion-fix later to maint).
+   (merge 5988eb631a ab/doc-hash-brokenness later to maint).
+   (merge a4d4e32a70 pk/test-avoid-pipe-hiding-exit-status later to maint).
+   (merge 05e293c1ac jk/flockfile-stdio later to maint).
+   (merge e9184b0789 jk/t5561-missing-curl later to maint).
+   (merge b1801b85a3 nd/worktree-move later to maint).
+   (merge bbd374dd20 ak/bisect-doc-typofix later to maint).
+   (merge 4855f06fb3 mn/send-email-credential-doc later to maint).
+   (merge 8523b1e355 en/doc-typoes later to maint).
+   (merge 43b44ccfe7 js/t5404-path-fix later to maint).
+   (merge decf711fc1 ps/test-chmtime-get later to maint).
+   (merge 22d11a6e8e es/worktree-docs later to maint).
+   (merge 92a5dbbc22 tg/use-git-contacts later to maint).
+   (merge adc887221f tq/t1510 later to maint).
+   (merge bed21a8ad6 sg/doc-gc-quote-mismatch-fix later to maint).
+   (merge 73364e4f10 tz/doc-git-urls-reference later to maint).
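
As a concrete illustration of the `--type`/`--default` behaviour noted in the
release notes above (the variable `foo.color` is the example used there, not a
real configuration key):

------------------------------------------------
# Prints the ANSI escape sequence for foo.color, or for "blue" if it is unset.
$ git config --type=color --default=blue --get foo.color
------------------------------------------------
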
index a1d0feca36fea36b84b587c1387eab53e3b870c6..945f8edb463ce9d2da90ceda53d8ec03f570047b 100644 (file)
@@ -260,8 +260,8 @@ that starts with `-----BEGIN PGP SIGNED MESSAGE-----`.  That is
 not a text/plain, it's something else.
 
 Send your patch with "To:" set to the mailing list, with "cc:" listing
-people who are involved in the area you are touching (the output from
-`git blame $path` and `git shortlog --no-merges $path` would help to
+people who are involved in the area you are touching (the `git
+contacts` command in `contrib/contacts/` can help to
 identify them), to solicit comments and reviews.
 
 :1: footnote:[The current maintainer: gitster@pobox.com]
index 4e0cff87f62f5d5c320e3dcea03f646416604bca..84e2891aed156e367a38692ee34998c8e06261d3 100644 (file)
@@ -530,6 +530,12 @@ core.autocrlf::
        This variable can be set to 'input',
        in which case no output conversion is performed.
 
+core.checkRoundtripEncoding::
+       A comma and/or whitespace separated list of encodings that Git
+       performs UTF-8 round trip checks on if they are used in a
+       `working-tree-encoding` attribute (see linkgit:gitattributes[5]).
+       The default value is `SHIFT-JIS`.
+
 core.symlinks::
        If false, symbolic links are checked out as small plain files that
        contain the link text. linkgit:git-update-index[1] and
@@ -898,6 +904,10 @@ core.notesRef::
 This setting defaults to "refs/notes/commits", and it can be overridden by
 the `GIT_NOTES_REF` environment variable.  See linkgit:git-notes[1].
 
+core.commitGraph::
+       Enable the git commit-graph feature. Allows reading from the
+       commit-graph file.
+
 core.sparseCheckout::
        Enable "sparse checkout" feature. See section "Sparse checkout" in
        linkgit:git-read-tree[1] for more information.
@@ -1058,6 +1068,10 @@ branch.<name>.rebase::
        "git pull" is run. See "pull.rebase" for doing this in a non
        branch-specific manner.
 +
+When `merges`, pass the `--rebase-merges` option to 'git rebase'
+so that the local merge commits are included in the rebase (see
+linkgit:git-rebase[1] for details).
++
 When preserve, also pass `--preserve-merges` along to 'git rebase'
 so that locally committed merge commits will not be flattened
 by running 'git pull'.
@@ -1088,6 +1102,16 @@ clean.requireForce::
        A boolean to make git-clean do nothing unless given -f,
        -i or -n.   Defaults to true.
 
+color.advice::
+       A boolean to enable/disable color in hints (e.g. when a push
+       failed, see `advice.*` for a list).  May be set to `always`,
+       `false` (or `never`) or `auto` (or `true`), in which case colors
+       are used only when the error output goes to a terminal. If
+       unset, then the value of `color.ui` is used (`auto` by default).
+
+color.advice.hint::
+       Use customized color for hints.
+
 color.branch::
        A boolean to enable/disable color in the output of
        linkgit:git-branch[1]. May be set to `always`,
@@ -1190,6 +1214,15 @@ color.pager::
        A boolean to enable/disable colored output when the pager is in
        use (default is true).
 
+color.push::
+       A boolean to enable/disable color in push errors. May be set to
+       `always`, `false` (or `never`) or `auto` (or `true`), in which
+       case colors are used only when the error output goes to a terminal.
+       If unset, then the value of `color.ui` is used (`auto` by default).
+
+color.push.error::
+       Use customized color for push errors.
+
 color.showBranch::
        A boolean to enable/disable color in the output of
        linkgit:git-show-branch[1]. May be set to `always`,
@@ -1218,6 +1251,15 @@ color.status.<slot>::
        status short-format), or
        `unmerged` (files which have unmerged changes).
 
+color.transport::
+       A boolean to enable/disable color when pushes are rejected. May be
+       set to `always`, `false` (or `never`) or `auto` (or `true`), in which
+       case colors are used only when the error output goes to a terminal.
+       If unset, then the value of `color.ui` is used (`auto` by default).
+
+color.transport.rejected::
+       Use customized color when a push was rejected.
+
 color.ui::
        This variable determines the default value for variables such
        as `color.diff` and `color.grep` that control the use of color
@@ -1558,6 +1600,18 @@ gc.autoDetach::
        Make `git gc --auto` return immediately and run in background
        if the system supports it. Default is true.
 
+gc.bigPackThreshold::
+       If non-zero, all packs larger than this limit are kept when
+       `git gc` is run. This is very similar to `--keep-base-pack`
+       except that all packs that meet the threshold are kept, not
+       just the base pack. Defaults to zero. Common unit suffixes of
+       'k', 'm', or 'g' are supported.
++
+Note that if the number of kept packs is more than gc.autoPackLimit,
+this configuration variable is ignored; all packs except the base pack
+will be repacked. After this, the number of packs should go below
+gc.autoPackLimit and gc.bigPackThreshold should be respected again.
+
 gc.logExpiry::
        If the file gc.log exists, then `git gc --auto` won't run
        unless that file is more than 'gc.logExpiry' old.  Default is
@@ -1957,6 +2011,7 @@ http.sslVersion::
        - tlsv1.0
        - tlsv1.1
        - tlsv1.2
+       - tlsv1.3
 
 +
 Can be overridden by the `GIT_SSL_VERSION` environment variable.
@@ -2421,6 +2476,7 @@ pack.window::
 pack.depth::
        The maximum delta depth used by linkgit:git-pack-objects[1] when no
        maximum depth is given on the command line. Defaults to 50.
+       Maximum value is 4095.
 
 pack.windowMemory::
        The maximum size of memory that is consumed by each thread
@@ -2457,7 +2513,8 @@ pack.deltaCacheLimit::
        The maximum size of a delta, that is cached in
        linkgit:git-pack-objects[1]. This cache is used to speed up the
        writing object phase by not having to recompute the final delta
-       result once the best match for all objects is found. Defaults to 1000.
+       result once the best match for all objects is found.
+       Defaults to 1000. Maximum value is 65535.
 
 pack.threads::
        Specifies the number of threads to spawn when searching for best
@@ -2616,6 +2673,10 @@ pull.rebase::
        pull" is run. See "branch.<name>.rebase" for setting this on a
        per-branch basis.
 +
+When `merges`, pass the `--rebase-merges` option to 'git rebase'
+so that the local merge commits are included in the rebase (see
+linkgit:git-rebase[1] for details).
++
 When preserve, also pass `--preserve-merges` along to 'git rebase'
 so that locally committed merge commits will not be flattened
 by running 'git pull'.
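
A short, hedged sketch of setting the configuration keys introduced in the
config.txt hunks above (the values shown are illustrative, not recommendations):

------------------------------------------------
$ git config core.commitGraph true      # allow reading the commit-graph file
$ git config color.push always          # colorize push errors
$ git config gc.bigPackThreshold 2g     # keep packs larger than 2g during gc
$ git config pull.rebase merges         # pass --rebase-merges to git rebase
------------------------------------------------
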
index e3a44f03cdcee92098287bfccc9801fde042ef2b..f466600972f86df57648eaab6dccf52289febda9 100644 (file)
@@ -568,7 +568,7 @@ the normal order.
 --
 +
 Patterns have the same syntax and semantics as patterns used for
-fnmantch(3) without the FNM_PATHNAME flag, except a pathname also
+fnmatch(3) without the FNM_PATHNAME flag, except a pathname also
 matches a pattern if removing any number of the final pathname
 components matches the pattern.  For example, the pattern "`foo*bar`"
 matches "`fooasdfbar`" and "`foo/bar/baz/asdf`" but not "`foobarx`".
@@ -592,7 +592,7 @@ endif::git-format-patch[]
        Treat all files as text.
 
 --ignore-cr-at-eol::
-       Ignore carrige-return at the end of line when doing a comparison.
+       Ignore carriage-return at the end of line when doing a comparison.
 
 --ignore-space-at-eol::
        Ignore changes in whitespace at EOL.
index 8631e365f437fd85058bed3dbd0cebde15756ccc..97d3217df9ac3f048073f62a0d5356c4546354ff 100644 (file)
@@ -188,6 +188,14 @@ endif::git-pull[]
        is specified. This flag forces progress status even if the
        standard error stream is not directed to a terminal.
 
+-o <option>::
+--server-option=<option>::
+       Transmit the given string to the server when communicating using
+       protocol version 2.  The given string must not contain a NUL or LF
+       character.
+       When multiple `--server-option=<option>` are given, they are all
+       sent to the other side in the order listed on the command line.
+
 -4::
 --ipv4::
        Use IPv4 addresses only, ignoring IPv6 addresses.
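
`--server-option` only has an effect when protocol version 2 is in use; a
minimal sketch, assuming the `protocol.version` configuration knob is available
to select it (the option string "foo=bar" is made up):

------------------------------------------------
$ git -c protocol.version=2 fetch --server-option=foo=bar origin
------------------------------------------------
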
index d50fa339dcc523158896fda8cdccd1a2784dfdd7..45652fe4a6a51c251f773776576956745701d677 100644 (file)
@@ -193,7 +193,7 @@ for "git add --no-all <pathspec>...", i.e. ignored removed files.
        for command-line options).
 
 
-Configuration
+CONFIGURATION
 -------------
 
 The optional configuration variable `core.excludesFile` indicates a path to a
@@ -226,7 +226,7 @@ Because this example lets the shell expand the asterisk (i.e. you are
 listing the files explicitly), it does not consider
 `subdir/git-foo.sh`.
 
-Interactive mode
+INTERACTIVE MODE
 ----------------
 When the command enters the interactive mode, it shows the
 output of the 'status' subcommand, and then goes into its
index 4ebc3d32719dfefa988d34b41871f0e9fb969471..67228494c00e1df676723072d0884f7705e532f1 100644 (file)
@@ -113,8 +113,10 @@ explained for the configuration variable `core.quotePath` (see
 linkgit:git-config[1]).
 
 -p<n>::
-       Remove <n> leading slashes from traditional diff paths. The
-       default is 1.
+       Remove <n> leading path components (separated by slashes) from
+       traditional diff paths. E.g., with `-p2`, a patch against
+       `a/dir/file` will be applied directly to `file`. The default is
+       1.
 
 -C<n>::
        Ensure at least <n> lines of surrounding context match before
@@ -240,7 +242,7 @@ When `git apply` is used as a "better GNU patch", the user can pass
 the `--unsafe-paths` option to override this safety check.  This option
 has no effect when `--index` or `--cached` is in use.
 
-Configuration
+CONFIGURATION
 -------------
 
 apply.ignoreWhitespace::
@@ -251,7 +253,7 @@ apply.whitespace::
        When no `--whitespace` flag is given from the command
        line, this configuration item is used as the default.
 
-Submodules
+SUBMODULES
 ----------
 If the patch contains any changes to submodules then 'git apply'
 treats these changes as follows.
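
Following the clarified `-p<n>` description above, a small example (the patch
file name and paths are hypothetical):

------------------------------------------------
# With -p2, a hunk recorded against a/dir/file is applied to "file".
$ git apply -p2 fix.patch
------------------------------------------------
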
index 4a1417bdcd7826d444dbfd4cbc438ec9ec2edf1b..4b45d837a7e7c590fe3aa5f575009c43342b833c 100644 (file)
@@ -165,8 +165,8 @@ To get a reminder of the currently used terms, use
 git bisect terms
 ------------------------------------------------
 
-You can get just the old (respectively new) term with `git bisect term
---term-old` or `git bisect term --term-good`.
+You can get just the old (respectively new) term with `git bisect terms
+--term-old` or `git bisect terms --term-good`.
 
 If you would like to use your own terms instead of "bad"/"good" or
 "new"/"old", you can choose any names you like (except existing bisect
index b3084c99c1cabdccc690e4cec5071b22774bf6c8..02eccbb931eb1ffe6e4c018927eebce8eb98f124 100644 (file)
@@ -287,7 +287,7 @@ CONFIGURATION
 `--list` is used or implied. The default is to use a pager.
 See linkgit:git-config[1].
 
-Examples
+EXAMPLES
 --------
 
 Start development from a known tag::
@@ -318,7 +318,7 @@ See linkgit:git-fetch[1].
 is currently checked out) does not have all commits from the test branch.
 
 
-Notes
+NOTES
 -----
 
 If you are creating a branch that you want to checkout immediately, it is
index 3a8120c3b3795784cb05211bcde959a224f0a867..7d6c9dcd177b6a1fa4a6184b230480aa08e07120 100644 (file)
@@ -92,8 +92,8 @@ It is okay to err on the side of caution, causing the bundle file
 to contain objects already in the destination, as these are ignored
 when unpacking at the destination.
 
-EXAMPLE
--------
+EXAMPLES
+--------
 
 Assume you want to transfer the history from a repository R1 on machine A
 to another repository R2 on machine B.
index 42ca7b50956aa8560b6aa58b5d7741913c145a43..a55536f0bfb2dfa3774e1b952ac4dee1eea6e505 100644 (file)
@@ -260,7 +260,7 @@ or `--mirror` is given)
 
 <repository>::
        The (possibly remote) repository to clone from.  See the
-       <<URLS,URLS>> section below for more information on specifying
+       <<URLS,GIT URLS>> section below for more information on specifying
        repositories.
 
 <directory>::
@@ -273,7 +273,7 @@ or `--mirror` is given)
 :git-clone: 1
 include::urls.txt[]
 
-Examples
+EXAMPLES
 --------
 
 * Clone from upstream:
diff --git a/Documentation/git-commit-graph.txt b/Documentation/git-commit-graph.txt
new file mode 100644 (file)
index 0000000..4c97b55
--- /dev/null
@@ -0,0 +1,94 @@
+git-commit-graph(1)
+===================
+
+NAME
+----
+git-commit-graph - Write and verify Git commit graph files
+
+
+SYNOPSIS
+--------
+[verse]
+'git commit-graph read' [--object-dir <dir>]
+'git commit-graph write' <options> [--object-dir <dir>]
+
+
+DESCRIPTION
+-----------
+
+Manage the serialized commit graph file.
+
+
+OPTIONS
+-------
+--object-dir::
+       Use given directory for the location of packfiles and commit graph
+       file. This parameter exists to specify the location of an alternate
+       that only has the objects directory, not a full .git directory. The
+       commit graph file is expected to be at <dir>/info/commit-graph and
+       the packfiles are expected to be in <dir>/pack.
+
+
+COMMANDS
+--------
+'write'::
+
+Write a commit graph file based on the commits found in packfiles.
++
+With the `--stdin-packs` option, generate the new commit graph by
+walking objects only in the specified pack-indexes. (Cannot be combined
+with --stdin-commits.)
++
+With the `--stdin-commits` option, generate the new commit graph by
+walking commits starting at the commits specified in stdin as a list
+of OIDs in hex, one OID per line. (Cannot be combined with
+--stdin-packs.)
++
+With the `--append` option, include all commits that are present in the
+existing commit-graph file.
+
+'read'::
+
+Read a graph file given by the commit-graph file and output basic
+details about the graph file. Used for debugging purposes.
+
+
+EXAMPLES
+--------
+
+* Write a commit graph file for the packed commits in your local .git folder.
++
+------------------------------------------------
+$ git commit-graph write
+------------------------------------------------
+
+* Write a graph file, extending the current graph file using commits
+  in <pack-index>.
++
+------------------------------------------------
+$ echo <pack-index> | git commit-graph write --stdin-packs
+------------------------------------------------
+
+* Write a graph file containing all reachable commits.
++
+------------------------------------------------
+$ git show-ref -s | git commit-graph write --stdin-commits
+------------------------------------------------
+
+* Write a graph file containing all commits in the current
+  commit-graph file along with those reachable from HEAD.
++
+------------------------------------------------
+$ git rev-parse HEAD | git commit-graph write --stdin-commits --append
+------------------------------------------------
+
+* Read basic information from the commit-graph file.
++
+------------------------------------------------
+$ git commit-graph read
+------------------------------------------------
+
+
+GIT
+---
+Part of the linkgit:git[1] suite
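
A short end-to-end sketch tying the new `core.commitGraph` configuration
(documented in config.txt above) to the commands described in this new page:

------------------------------------------------
$ git config core.commitGraph true   # let Git read the commit-graph file
$ git commit-graph write             # write a graph for the packed commits
$ git commit-graph read              # dump basic details, for debugging
------------------------------------------------
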
index e09ed5d7d5147d93039c479efc8ab450bf5ca8b4..18ddc78f42d69724cf5a04087a7dbd13c5ebf711 100644 (file)
@@ -9,13 +9,13 @@ git-config - Get and set repository or global options
 SYNOPSIS
 --------
 [verse]
-'git config' [<file-option>] [type] [--show-origin] [-z|--null] name [value [value_regex]]
-'git config' [<file-option>] [type] --add name value
-'git config' [<file-option>] [type] --replace-all name value [value_regex]
-'git config' [<file-option>] [type] [--show-origin] [-z|--null] --get name [value_regex]
-'git config' [<file-option>] [type] [--show-origin] [-z|--null] --get-all name [value_regex]
-'git config' [<file-option>] [type] [--show-origin] [-z|--null] [--name-only] --get-regexp name_regex [value_regex]
-'git config' [<file-option>] [type] [-z|--null] --get-urlmatch name URL
+'git config' [<file-option>] [--type=<type>] [--show-origin] [-z|--null] name [value [value_regex]]
+'git config' [<file-option>] [--type=<type>] --add name value
+'git config' [<file-option>] [--type=<type>] --replace-all name value [value_regex]
+'git config' [<file-option>] [--type=<type>] [--show-origin] [-z|--null] --get name [value_regex]
+'git config' [<file-option>] [--type=<type>] [--show-origin] [-z|--null] --get-all name [value_regex]
+'git config' [<file-option>] [--type=<type>] [--show-origin] [-z|--null] [--name-only] --get-regexp name_regex [value_regex]
+'git config' [<file-option>] [--type=<type>] [-z|--null] --get-urlmatch name URL
 'git config' [<file-option>] --unset name [value_regex]
 'git config' [<file-option>] --unset-all name [value_regex]
 'git config' [<file-option>] --rename-section old_name new_name
@@ -38,12 +38,10 @@ existing values that match the regexp are updated or unset.  If
 you want to handle the lines that do *not* match the regex, just
 prepend a single exclamation mark in front (see also <<EXAMPLES>>).
 
-The type specifier can be either `--int` or `--bool`, to make
-'git config' ensure that the variable(s) are of the given type and
-convert the value to the canonical form (simple decimal number for int,
-a "true" or "false" string for bool), or `--path`, which does some
-path expansion (see `--path` below).  If no type specifier is passed, no
-checks or transformations are performed on the value.
+The `--type=<type>` option instructs 'git config' to ensure that incoming and
+outgoing values are canonicalize-able under the given <type>.  If no
+`--type=<type>` is given, no canonicalization will be performed. Callers may
+unset an existing `--type` specifier with `--no-type`.
 
 When reading, the values are read from the system, global and
 repository local configuration files by default, and options
@@ -160,30 +158,43 @@ See also <<FILES>>.
 --list::
        List all variables set in config file, along with their values.
 
---bool::
-       'git config' will ensure that the output is "true" or "false"
+--type <type>::
+  'git config' will ensure that any input or output is valid under the given
+  type constraint(s), and will canonicalize outgoing values in `<type>`'s
+  canonical form.
++
+Valid `<type>`'s include:
++
+- 'bool': canonicalize values as either "true" or "false".
+- 'int': canonicalize values as simple decimal numbers. An optional suffix of
+  'k', 'm', or 'g' will cause the value to be multiplied by 1024, 1048576, or
+  1073741824 upon input.
+- 'bool-or-int': canonicalize according to either 'bool' or 'int', as described
+  above.
+- 'path': canonicalize by expanding a leading `~` to the value of `$HOME` and
+  `~user` to the home directory for the specified user. This specifier has no
+  effect when setting the value (but you can use `git config section.variable
+  ~/` from the command line to let your shell do the expansion.)
+- 'expiry-date': canonicalize by converting from a fixed or relative date-string
+  to a timestamp. This specifier has no effect when setting the value.
+- 'color': When getting a value, canonicalize by converting to an ANSI color
+  escape sequence. When setting a value, a sanity-check is performed to ensure
+  that the given value is canonicalize-able as an ANSI color, but it is written
+  as-is.
++
 
+--bool::
 --int::
-       'git config' will ensure that the output is a simple
-       decimal number.  An optional value suffix of 'k', 'm', or 'g'
-       in the config file will cause the value to be multiplied
-       by 1024, 1048576, or 1073741824 prior to output.
-
 --bool-or-int::
-       'git config' will ensure that the output matches the format of
-       either --bool or --int, as described above.
-
 --path::
-       `git config` will expand a leading `~` to the value of
-       `$HOME`, and `~user` to the home directory for the
-       specified user.  This option has no effect when setting the
-       value (but you can use `git config section.variable ~/`
-       from the command line to let your shell do the expansion).
-
 --expiry-date::
-       `git config` will ensure that the output is converted from
-       a fixed or relative date-string to a timestamp. This option
-       has no effect when setting the value.
+  Historical options for selecting a type specifier. Prefer instead `--type`
+  (see above).
+
+--no-type::
+  Un-sets the previously set type specifier (if one was previously set). This
+  option requests that 'git config' not canonicalize the retrieved variable.
+  `--no-type` has no effect without `--type=<type>` or `--<type>`.
 
 -z::
 --null::
@@ -221,6 +232,8 @@ See also <<FILES>>.
        output it as the ANSI color escape sequence to the standard
        output.  The optional `default` parameter is used instead, if
        there is no color configured for `name`.
++
+`--type=color [--default=<default>]` is preferred over `--get-color`.
 
 -e::
 --edit::
@@ -233,6 +246,10 @@ See also <<FILES>>.
        using `--file`, `--global`, etc) and `on` when searching all
        config files.
 
+--default <value>::
+  When using `--get`, and the requested variable is not found, behave as if
+  <value> were the value assigned to that variable.
+
 CONFIGURATION
 -------------
 `pager.config` is only respected when listing configuration, i.e., when
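
A sketch of the `--type` canonicalization described above; the variable
`foo.flag` is made up, while `core.bigFileThreshold` is used here only as a
convenient integer-valued key:

------------------------------------------------
$ git config --type=int --get core.bigFileThreshold     # a value of "512m" is printed as 536870912
$ git config --type=bool --default=false --get foo.flag
$ git config --no-type --get foo.flag                   # raw value, no canonicalization
------------------------------------------------
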
index ba90066f108cf525eb7781c1607d65358c617b0e..37b96c5453457dc2ab4750c3ef4490f8d55438d9 100644 (file)
@@ -207,7 +207,7 @@ allowing access over SSH.
 ------
 
 [[dbbackend]]
-Database Backend
+DATABASE BACKEND
 ----------------
 
 'git-cvsserver' uses one database per Git head (i.e. CVS module) to
@@ -321,7 +321,7 @@ git-cvsserver, as described above.
 When these environment variables are set, the corresponding
 command-line arguments may not be used.
 
-Eclipse CVS Client Notes
+ECLIPSE CVS CLIENT NOTES
 ------------------------
 
 To get a checkout with the Eclipse CVS client:
@@ -346,7 +346,7 @@ offer. In that case CVS_SERVER is ignored, and you will have to replace
 the cvs utility on the server with 'git-cvsserver' or manipulate your `.bashrc`
 so that calling 'cvs' effectively calls 'git-cvsserver'.
 
-Clients known to work
+CLIENTS KNOWN TO WORK
 ---------------------
 
 - CVS 1.12.9 on Debian
@@ -354,7 +354,7 @@ Clients known to work
 - Eclipse 3.0, 3.1.2 on MacOSX (see Eclipse CVS Client Notes)
 - TortoiseCVS
 
-Operations supported
+OPERATIONS SUPPORTED
 --------------------
 
 All the operations required for normal use are supported, including
@@ -424,7 +424,7 @@ For best consistency with 'cvs', it is probably best to override the
 defaults by setting `gitcvs.usecrlfattr` to true,
 and `gitcvs.allBinary` to "guess".
 
-Dependencies
+DEPENDENCIES
 ------------
 'git-cvsserver' depends on DBD::SQLite.
 
index b380677718ae129b4643814d7c1a16351ce4d779..f4bd8155c0a707308162e050d5d59b5dbd5ca7a6 100644 (file)
@@ -37,14 +37,14 @@ include::diff-options.txt[]
 
 include::diff-format.txt[]
 
-Operating Modes
+OPERATING MODES
 ---------------
 You can choose whether you want to trust the index file entirely
 (using the `--cached` flag) or ask the diff logic to show any files
 that don't match the stat state as being "tentatively changed".  Both
 of these operations are very useful indeed.
 
-Cached Mode
+CACHED MODE
 -----------
 If `--cached` is specified, it allows you to ask:
 
@@ -77,7 +77,7 @@ So doing a `git diff-index --cached` is basically very useful when you are
 asking yourself "what have I already marked for being committed, and
 what's the difference to a previous tree".
 
-Non-cached Mode
+NON-CACHED MODE
 ---------------
 The "non-cached" mode takes a different approach, and is potentially
 the more useful of the two in that what it does can't be emulated with
index 7870e175b7683ffcdbdc98d3f669998c615c2ffd..2319b2b19209467bf5796abf8df425c6118dadac 100644 (file)
@@ -116,7 +116,7 @@ include::pretty-options.txt[]
 include::pretty-formats.txt[]
 
 
-Limiting Output
+LIMITING OUTPUT
 ---------------
 If you're only interested in differences in a subset of files, for
 example some architecture-specific files, you might do:
index b0c1bb95c83b8e8e6cf6fa863e74108e3e5fc35f..7c2c4427001f851339e48284e811a438417a02e8 100644 (file)
@@ -13,7 +13,7 @@ SYNOPSIS
 'git diff' [options] --cached [<commit>] [--] [<path>...]
 'git diff' [options] <commit> <commit> [--] [<path>...]
 'git diff' [options] <blob> <blob>
-'git diff' [options] [--no-index] [--] <path> <path>
+'git diff' [options] --no-index [--] <path> <path>
 
 DESCRIPTION
 -----------
@@ -21,7 +21,7 @@ Show changes between the working tree and the index or a tree, changes
 between the index and a tree, changes between two trees, changes between
 two blob objects, or changes between two files on disk.
 
-'git diff' [--options] [--] [<path>...]::
+'git diff' [options] [--] [<path>...]::
 
        This form is to view the changes you made relative to
        the index (staging area for the next commit).  In other
@@ -29,7 +29,7 @@ two blob objects, or changes between two files on disk.
        further add to the index but you still haven't.  You can
        stage these changes by using linkgit:git-add[1].
 
-'git diff' --no-index [--options] [--] [<path>...]::
+'git diff' [options] --no-index [--] <path> <path>::
 
        This form is to compare the given two paths on the
        filesystem.  You can omit the `--no-index` option when
@@ -38,7 +38,7 @@ two blob objects, or changes between two files on disk.
        or when running the command outside a working tree
        controlled by Git.
 
-'git diff' [--options] --cached [<commit>] [--] [<path>...]::
+'git diff' [options] --cached [<commit>] [--] [<path>...]::
 
        This form is to view the changes you staged for the next
        commit relative to the named <commit>.  Typically you
@@ -48,7 +48,7 @@ two blob objects, or changes between two files on disk.
        <commit> is not given, it shows all staged changes.
        --staged is a synonym of --cached.
 
-'git diff' [--options] <commit> [--] [<path>...]::
+'git diff' [options] <commit> [--] [<path>...]::
 
        This form is to view the changes you have in your
        working tree relative to the named <commit>.  You can
@@ -56,18 +56,18 @@ two blob objects, or changes between two files on disk.
        branch name to compare with the tip of a different
        branch.
 
-'git diff' [--options] <commit> <commit> [--] [<path>...]::
+'git diff' [options] <commit> <commit> [--] [<path>...]::
 
        This is to view the changes between two arbitrary
        <commit>.
 
-'git diff' [--options] <commit>..<commit> [--] [<path>...]::
+'git diff' [options] <commit>..<commit> [--] [<path>...]::
 
        This is synonymous to the previous form.  If <commit> on
        one side is omitted, it will have the same effect as
        using HEAD instead.
 
-'git diff' [--options] <commit>\...<commit> [--] [<path>...]::
+'git diff' [options] <commit>\...<commit> [--] [<path>...]::
 
        This form is to view the changes on the branch containing
        and up to the second <commit>, starting at a common ancestor
index ed57c684dbc82c587ca1cf6b66ae46f3760ecfba..44098595dd6a2e7d303a2ee78f348207813186b8 100644 (file)
@@ -202,7 +202,7 @@ smaller output, and it is usually easy to quickly confirm that there is
 no private data in the stream.
 
 
-Limitations
+LIMITATIONS
 -----------
 
 Since 'git fast-import' cannot tag trees, you will not be
index 3d3d219e58e5cc05b44c9e0675628bdbd9f0b07f..cdf696ff7f6d9873d4d5c069022eae547911ede8 100644 (file)
@@ -139,7 +139,7 @@ Performance and Compression Tuning
 fastimport.unpackLimit::
        See linkgit:git-config[1]
 
-Performance
+PERFORMANCE
 -----------
 The design of fast-import allows it to import large projects in a minimum
 amount of memory usage and processing time.  Assuming the frontend
@@ -155,7 +155,7 @@ faster if the source data is stored on a different drive than the
 destination Git repository (due to less IO contention).
 
 
-Development Cost
+DEVELOPMENT COST
 ----------------
 A typical frontend for fast-import tends to weigh in at approximately 200
 lines of Perl/Python/Ruby code.  Most developers have been able to
@@ -165,7 +165,7 @@ an ideal situation, given that most conversion tools are throw-away
 (use once, and never look back).
 
 
-Parallel Operation
+PARALLEL OPERATION
 ------------------
 Like 'git push' or 'git fetch', imports handled by fast-import are safe to
 run alongside parallel `git repack -a -d` or `git gc` invocations,
@@ -186,7 +186,7 @@ this only be used on an otherwise quiet repository.  Using --force
 is not necessary for an initial import into an empty repository.
 
 
-Technical Discussion
+TECHNICAL DISCUSSION
 --------------------
 fast-import tracks a set of branches in memory.  Any branch can be created
 or modified at any point during the import process by sending a
@@ -204,7 +204,7 @@ directory also allows fast-import to run very quickly, as it does not
 need to perform any costly file update operations when switching
 between branches.
 
-Input Format
+INPUT FORMAT
 ------------
 With the exception of raw file data (which Git does not interpret)
 the fast-import input format is text (ASCII) based.  This text based
@@ -1131,7 +1131,7 @@ If the `--done` command-line option or `feature done` command is
 in use, the `done` command is mandatory and marks the end of the
 stream.
 
-Responses To Commands
+RESPONSES TO COMMANDS
 ---------------------
 New objects written by fast-import are not available immediately.
 Most fast-import commands have no visible effect until the next
@@ -1160,7 +1160,7 @@ To avoid deadlock, such frontends must completely consume any
 pending output from `progress`, `ls`, `get-mark`, and `cat-blob` before
 performing writes to fast-import that might block.
 
-Crash Reports
+CRASH REPORTS
 -------------
 If fast-import is supplied invalid input it will terminate with a
 non-zero exit status and create a crash report in the top level of
@@ -1247,7 +1247,7 @@ An example crash:
        END OF CRASH REPORT
 ====
 
-Tips and Tricks
+TIPS AND TRICKS
 ---------------
 The following tips and tricks have been collected from various
 users of fast-import, and are offered here as suggestions.
@@ -1349,7 +1349,7 @@ Your users will feel better knowing how much of the data stream
 has been processed.
 
 
-Packfile Optimization
+PACKFILE OPTIMIZATION
 ---------------------
 When packing a blob fast-import always attempts to deltify against the last
 blob written.  Unless specifically arranged for by the frontend,
@@ -1380,7 +1380,7 @@ to force recomputation of all deltas can significantly reduce the
 final packfile size (30-50% smaller can be quite typical).
 
 
-Memory Utilization
+MEMORY UTILIZATION
 ------------------
 There are a number of factors which affect how much memory fast-import
 requires to perform an import.  Like critical sections of core
@@ -1458,7 +1458,7 @@ and lazy loading of subtrees, allows fast-import to efficiently import
 projects with 2,000+ branches and 45,114+ files in a very limited
 memory footprint (less than 2.7 MiB per active branch).
 
-Signals
+SIGNALS
 -------
 Sending *SIGUSR1* to the 'git fast-import' process ends the current
 packfile early, simulating a `checkpoint` command.  The impatient
index f7ebe36a7b2c203e0b2f320309a5ce6250b12471..c9758847937e7db9fb3c4c3a498f5074e9e1f5d9 100644 (file)
@@ -88,7 +88,7 @@ be in a separate packet, and the list must end with a flush packet.
        infinite even if there is an ancestor-chain that long.
 
 --shallow-since=<date>::
-       Deepen or shorten the history of a shallow'repository to
+       Deepen or shorten the history of a shallow repository to
        include all reachable commits after <date>.
 
 --shallow-exclude=<revision>::
index 3a52e4dce39eeaf6eba896ccbf9e0505cebb3ec9..e6f08ab189489ec1631169d0ad0b190428883235 100644 (file)
@@ -222,7 +222,15 @@ this purpose, they are instead rewritten to point at the nearest ancestor that
 was not excluded.
 
 
-Examples
+EXIT STATUS
+-----------
+
+On success, the exit status is `0`.  If the filter can't find any commits to
+rewrite, the exit status is `2`.  On any other error, the exit status may be
+any other non-zero value.
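For example, a wrapper script might branch on these exit codes (a hedged sketch; the index-filter and the file name `secret.txt` are illustrative only, not part of this change):

------------
git filter-branch --index-filter \
	'git rm --cached --ignore-unmatch secret.txt' HEAD
case $? in
0) echo "history rewritten" ;;
2) echo "nothing to rewrite" ;;
*) echo "filter-branch failed" >&2 ;;
esac
------------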
+
+
+EXAMPLES
 --------
 
 Suppose you want to remove a file (containing confidential information
@@ -280,7 +288,7 @@ git filter-branch --parent-filter \
 or even simpler:
 
 -----------------------------------------------
-echo "$commit-id $graft-id" >> .git/info/grafts
+git replace --graft $commit-id $graft-id
 git filter-branch $graft-id..HEAD
 -----------------------------------------------
 
@@ -398,7 +406,7 @@ git filter-branch --index-filter \
 
 
 
-Checklist for Shrinking a Repository
+CHECKLIST FOR SHRINKING A REPOSITORY
 ------------------------------------
 
 git-filter-branch can be used to get rid of a subset of files,
@@ -437,7 +445,7 @@ warned.
   (or if your git-gc is not new enough to support arguments to
   `--prune`, use `git repack -ad; git prune` instead).
 
-Notes
+NOTES
 -----
 
 git-filter-branch allows you to make complex shell-scripted rewrites
index 44892c447e79f1d06435357b30f4ddbbb465e5f1..423b6e033ba512f8a8ed6fe3f0ee6ef158dd5234 100644 (file)
@@ -57,8 +57,8 @@ merge.summary::
        Synonym to `merge.log`; this is deprecated and will be removed in
        the future.
 
-EXAMPLE
--------
+EXAMPLES
+--------
 
 ---------
 $ git fetch origin master
index dffa14a7950e074bbff73ec79defdbbdcc9702be..085d177d976546a0b21891884e490c9ce7530913 100644 (file)
@@ -121,7 +121,7 @@ refname::
        stripping with positive <N>, or it becomes the full refname if
        stripping with negative <N>.  Neither is an error.
 +
-`strip` can be used as a synomym to `lstrip`.
+`strip` can be used as a synonym to `lstrip`.
 
 objecttype::
        The type of the object (`blob`, `tree`, `commit`, `tag`).
index 6cbe462a77467b05561938ff9cf8e9dcebd42efe..b41e1329a7d8439762790e663ac52ec5f487bc8b 100644 (file)
@@ -47,7 +47,7 @@ There are two ways to specify which commits to operate on.
 
 The first rule takes precedence in the case of a single <commit>.  To
 apply the second rule, i.e., format everything since the beginning of
-history up until <commit>, use the '\--root' option: `git format-patch
+history up until <commit>, use the `--root` option: `git format-patch
 --root <commit>`.  If you want to format only <commit> itself, you
 can do this with `git format-patch -1 <commit>`.
 
index 571b5a7e3c9dbc11aafc194b6e08dbbed5b2f7d3..24b2dd44fe445a66121fa957f0af8e2209a85676 100644 (file)
@@ -9,14 +9,15 @@ git-gc - Cleanup unnecessary files and optimize the local repository
 SYNOPSIS
 --------
 [verse]
-'git gc' [--aggressive] [--auto] [--quiet] [--prune=<date> | --no-prune] [--force]
+'git gc' [--aggressive] [--auto] [--quiet] [--prune=<date> | --no-prune] [--force] [--keep-largest-pack]
 
 DESCRIPTION
 -----------
 Runs a number of housekeeping tasks within the current repository,
 such as compressing file revisions (to reduce disk space and increase
-performance) and removing unreachable objects which may have been
-created from prior invocations of 'git add'.
+performance), removing unreachable objects which may have been
+created from prior invocations of 'git add', packing refs, pruning
+reflog, rerere metadata or stale working trees.
 
 Users are encouraged to run this task on a regular basis within
 each repository to maintain good disk space utilization and good
@@ -45,20 +46,31 @@ OPTIONS
        With this option, 'git gc' checks whether any housekeeping is
        required; if not, it exits without performing any work.
        Some git commands run `git gc --auto` after performing
-       operations that could create many loose objects.
+       operations that could create many loose objects. Housekeeping
+       is required if there are too many loose objects or too many
+       packs in the repository.
 +
-Housekeeping is required if there are too many loose objects or
-too many packs in the repository. If the number of loose objects
-exceeds the value of the `gc.auto` configuration variable, then
-all loose objects are combined into a single pack using
-`git repack -d -l`.  Setting the value of `gc.auto` to 0
-disables automatic packing of loose objects.
+If the number of loose objects exceeds the value of the `gc.auto`
+configuration variable, then all loose objects are combined into a
+single pack using `git repack -d -l`.  Setting the value of `gc.auto`
+to 0 disables automatic packing of loose objects.
 +
 If the number of packs exceeds the value of `gc.autoPackLimit`,
-then existing packs (except those marked with a `.keep` file)
+then existing packs (except those marked with a `.keep` file
+or over the `gc.bigPackThreshold` limit)
 are consolidated into a single pack by using the `-A` option of
-'git repack'. Setting `gc.autoPackLimit` to 0 disables
-automatic consolidation of packs.
+'git repack'.
+If the amount of memory is estimated to be insufficient for `git repack`
+to run smoothly and `gc.bigPackThreshold` is not set, the largest
+pack will also be excluded (this is the equivalent of running `git gc`
+with `--keep-largest-pack`).
+Setting `gc.autoPackLimit` to 0 disables automatic consolidation of
+packs.
++
+If housekeeping is required due to many loose objects or packs, all
+other housekeeping tasks (e.g. rerere, working trees, reflog...) will
+be performed as well.
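As a rough illustration of how these thresholds fit together, they can be inspected or tuned with `git config` (a sketch; the values shown are arbitrary examples, not recommendations):

------------
$ git config gc.auto 6700            # loose-object threshold; 0 disables
$ git config gc.autoPackLimit 50     # pack-count threshold; 0 disables
$ git config gc.bigPackThreshold 2g  # packs larger than this are kept as-is
------------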
+
 
 --prune=<date>::
        Prune loose objects older than date (default is 2 weeks ago,
@@ -78,7 +90,12 @@ automatic consolidation of packs.
        Force `git gc` to run even if there may be another `git gc`
        instance running on this repository.
 
-Configuration
+--keep-largest-pack::
+       All packs except the largest pack and those marked with a
+       `.keep` file are consolidated into a single pack. When this
+       option is used, `gc.bigPackThreshold` is ignored.
+
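A minimal invocation of the new option might look like this (a sketch; it assumes the repository already contains at least one sizeable pack worth keeping):

------------
$ git gc --keep-largest-pack
------------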
+CONFIGURATION
 -------------
 
 The optional configuration variable `gc.reflogExpire` can be
@@ -123,7 +140,7 @@ The optional configuration variable `gc.aggressiveWindow` controls how
 much time is spent optimizing the delta compression of the objects in
 the repository when the --aggressive option is specified.  The larger
 the value, the more time is spent optimizing the delta compression.  See
-the documentation for the --window' option in linkgit:git-repack[1] for
+the documentation for the --window option in linkgit:git-repack[1] for
 more details.  This defaults to 250.
 
 Similarly, the optional configuration variable `gc.aggressiveDepth`
@@ -133,8 +150,12 @@ The optional configuration variable `gc.pruneExpire` controls how old
 the unreferenced loose objects have to be before they are pruned.  The
 default is "2 weeks ago".
 
+Optional configuration variable `gc.worktreePruneExpire` controls how
+old a stale working tree should be before `git worktree prune` deletes
+it. Default is "3 months ago".
+
 
-Notes
+NOTES
 -----
 
 'git gc' tries very hard not to delete objects that are referenced
index 18b494731f51145e9d4a4d078264bedfd3beb00f..312409a607231d66bd296c1d955bb62c47ecffcf 100644 (file)
@@ -293,7 +293,7 @@ providing this option will cause it to die.
 For more details about the <pathspec> syntax, see the 'pathspec' entry
 in linkgit:gitglossary[7].
 
-Examples
+EXAMPLES
 --------
 
 `git grep 'time_t' -- '*.[ch]'`::
index 21a33d2c414e24eb779669f10beefde58db00f1c..666b042679f405fd1759b42a8d86aafb083e817c 100644 (file)
@@ -15,8 +15,9 @@ DESCRIPTION
 -----------
 Downloads a remote Git repository via HTTP.
 
-*NOTE*: use of this command without -a is deprecated.  The -a
-behaviour will become the default in a future release.
+This command always gets all objects. Historically, there were three options
+`-a`, `-c` and `-t` for choosing which objects to download. They are now
+silently ignored.
 
 OPTIONS
 -------
@@ -24,12 +25,8 @@ commit-id::
         Either the hash or the filename under [URL]/refs/ to
         pull.
 
--c::
-       Get the commit objects.
--t::
-       Get trees associated with the commit objects.
--a::
-       Get all the objects.
+-a, -c, -t::
+       These options are ignored for historical reasons.
 -v::
        Report what is downloaded.
 
index 2aceb6f26da2299d87afe6aeeaf54d18d1487488..ea03a4eeb0fd3124e784553f55c3dccac84a03c7 100644 (file)
@@ -55,7 +55,7 @@ OPTIONS
        The remote refs to update.
 
 
-Specifying the Refs
+SPECIFYING THE REFS
 -------------------
 
 A '<ref>' specification can be either a single pattern, or a pair
index 5d1e4c80cd5d479a43c39ffb12b66a7302e754e7..032613c420cf3cbcf75e82fc89abed6ec8d85355 100644 (file)
@@ -136,8 +136,8 @@ Using direct mode with SSL:
 .........................
 
 
-EXAMPLE
--------
+EXAMPLES
+--------
 To submit patches using GMail's IMAP interface, first, edit your ~/.gitconfig
 to specify your account settings:
 
index 138edb47b6a17ab925ef3206c1aec6336ff380fe..d5b7560bfe2d51370c313ee006a9f0ed4388eda4 100644 (file)
@@ -93,8 +93,8 @@ OPTIONS
 --max-input-size=<size>::
        Die, if the pack is larger than <size>.
 
-Note
-----
+NOTES
+-----
 
 Once the index has been created, the list of object names is sorted
 and the SHA-1 hash of that list is printed to stdout. If --stdin was
index 5437f8b0f0e6699eca662879290d47df85387f0f..90761f169444c165f0e94ebc3b7731cd8d85d3f0 100644 (file)
@@ -9,7 +9,7 @@ git-log - Show commit logs
 SYNOPSIS
 --------
 [verse]
-'git log' [<options>] [<revision range>] [[\--] <path>...]
+'git log' [<options>] [<revision range>] [[--] <path>...]
 
 DESCRIPTION
 -----------
@@ -90,13 +90,13 @@ include::line-range-format.txt[]
        ways to spell <revision range>, see the 'Specifying Ranges'
        section of linkgit:gitrevisions[7].
 
-[\--] <path>...::
+[--] <path>...::
        Show only commits that are enough to explain how the files
        that match the specified paths came to be.  See 'History
        Simplification' below for details and other simplification
        modes.
 +
-Paths may need to be prefixed with ``\-- '' to separate them from
+Paths may need to be prefixed with `--` to separate them from
 options or the revision range, when confusion arises.
 
 include::rev-list-options.txt[]
@@ -125,7 +125,7 @@ EXAMPLES
 `git log --since="2 weeks ago" -- gitk`::
 
        Show the changes during the last two weeks to the file 'gitk'.
-       The ``--'' is necessary to avoid confusion with the *branch* named
+       The `--` is necessary to avoid confusion with the *branch* named
        'gitk'
 
 `git log --name-status release..test`::
index 3ac3e3a77d171d55a2992b263e03350cf4c08b3d..5298f1bc3052f47e390eee780efe665083744309 100644 (file)
@@ -53,7 +53,8 @@ OPTIONS
        Show only ignored files in the output. When showing files in the
        index, print only those matched by an exclude pattern. When
        showing "other" files, show only those matched by an exclude
-       pattern.
+       pattern. Standard ignore rules are not automatically activated,
+       therefore at least one of the `--exclude*` options is required.
 
 -s::
 --stage::
@@ -183,7 +184,7 @@ followed by the  ("attr/<eolattr>").
        Files to show. If no files are given all files which match the other
        specified criteria are shown.
 
-Output
+OUTPUT
 ------
 'git ls-files' just outputs the filenames unless `--stage` is specified in
 which case it outputs:
@@ -208,7 +209,7 @@ quoted as explained for the configuration variable `core.quotePath`
 verbatim and the line is terminated by a NUL byte.
 
 
-Exclude Patterns
+EXCLUDE PATTERNS
 ----------------
 
 'git ls-files' can use a list of "exclude patterns" when
index 5f2628c8f86a65b0bfe8e29995fa2176927a30f0..b9fd3770a6ce19c341c421e07b68985d89d94df5 100644 (file)
@@ -10,7 +10,7 @@ SYNOPSIS
 --------
 [verse]
 'git ls-remote' [--heads] [--tags] [--refs] [--upload-pack=<exec>]
-             [-q | --quiet] [--exit-code] [--get-url]
+             [-q | --quiet] [--exit-code] [--get-url] [--sort=<key>]
              [--symref] [<repository> [<refs>...]]
 
 DESCRIPTION
@@ -60,6 +60,24 @@ OPTIONS
        upload-pack only shows the symref HEAD, so it will be the only
        one shown by ls-remote.
 
+--sort=<key>::
+       Sort based on the key given. Prefix `-` to sort in descending order
+       of the value. Supports "version:refname" or "v:refname" (tag names
+       are treated as versions). The "version:refname" sort order can also
+       be affected by the "versionsort.suffix" configuration variable.
+       See linkgit:git-for-each-ref[1] for more sort options, but be aware
+       keys like `committerdate` that require access to the objects
+       themselves will not work for refs whose objects have not yet been
+       fetched from the remote, and will give a `missing object` error.
+
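For instance, listing a remote's tags from newest to oldest version could look like this (a sketch; `origin` stands for whatever remote you query):

------------
$ git ls-remote --tags --sort='-v:refname' origin
------------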
+-o <option>::
+--server-option=<option>::
+       Transmit the given string to the server when communicating using
+       protocol version 2.  The given string must not contain a NUL or LF
+       character.
+       When multiple `--server-option=<option>` are given, they are all
+       sent to the other side in the order listed on the command line.
+
 <repository>::
        The "remote" repository to query.  This parameter can be
        either a URL or the name of a remote (see the GIT URLS and
@@ -90,6 +108,10 @@ EXAMPLES
        c5db5456ae3b0873fc659c19fafdde22313cc441        refs/tags/v0.99.2
        7ceca275d047c90c0c7d5afb13ab97efdf51bd6e        refs/tags/v0.99.3
 
+SEE ALSO
+--------
+linkgit:git-check-ref-format[1].
+
 GIT
 ---
 Part of the linkgit:git[1] suite
index c3616e7711aef80ee7e2bd3bb90ea5cee7dd1d4b..27fe2b32e10b2f0c92315483ac4a5e8a9722d3db 100644 (file)
@@ -14,7 +14,7 @@ SYNOPSIS
 DESCRIPTION
 -----------
 Reads standard input in non-recursive `ls-tree` output format, and creates
-a tree object.  The order of the tree entries is normalised by mktree so
+a tree object.  The order of the tree entries is normalized by mktree so
 pre-sorting the input is not required.  The object name of the tree object
 built is written to the standard output.
 
index e8e68f528cf2fa5705e7cedce8d3d807fa20057d..5cb0eb0855fefe582721baeb3295beb610a74891 100644 (file)
@@ -61,8 +61,8 @@ OPTIONS
 --always::
        Show uniquely abbreviated commit object as fallback.
 
-EXAMPLE
--------
+EXAMPLES
+--------
 
 Given a commit, find out where it is relative to the local refs. Say somebody
 wrote you about that fantastic commit 33db5f4d9027a10e477ccf054b2c1ab94f74c85a.
index d8c8f11c9f2dc94a2f4d46445e33c4f1494ab39e..b0abe2cb07968a119c0631e550b048891324130c 100644 (file)
@@ -29,8 +29,8 @@ Submit Git changes back to p4 using 'git p4 submit'.  The command
 the updated p4 remote branch.
 
 
-EXAMPLE
--------
+EXAMPLES
+--------
 * Clone a repository:
 +
 ------------
index 81bc490ac52eb9414015979d8c244ce063c838b5..d95b472d16828b2bea304727e7c5daaa2b75ae89 100644 (file)
@@ -12,7 +12,7 @@ SYNOPSIS
 'git pack-objects' [-q | --progress | --all-progress] [--all-progress-implied]
        [--no-reuse-delta] [--delta-base-offset] [--non-empty]
        [--local] [--incremental] [--window=<n>] [--depth=<n>]
-       [--revs [--unpacked | --all]]
+       [--revs [--unpacked | --all]] [--keep-pack=<pack-name>]
        [--stdout [--filter=<filter-spec>] | base-name]
        [--shallow] [--keep-true-parents] < object-list
 
@@ -96,7 +96,9 @@ base-name::
        it too deep affects the performance on the unpacker
        side, because delta data needs to be applied that many
        times to get to the necessary object.
-       The default value for --window is 10 and --depth is 50.
++
+The default value for --window is 10 and --depth is 50. The maximum
+depth is 4095.
 
 --window-memory=<n>::
        This option provides an additional limit on top of `--window`;
@@ -126,6 +128,13 @@ base-name::
        has a .keep file to be ignored, even if it would have
        otherwise been packed.
 
+--keep-pack=<pack-name>::
+       This flag causes an object already in the given pack to be
+       ignored, even if it would have otherwise been
+       packed. `<pack-name>` is the pack file name without
+       leading directory (e.g. `pack-123.pack`). The option can be
+       specified multiple times to keep multiple packs.
+
 --incremental::
        This flag causes an object already in a pack to be ignored
        even if it would have otherwise been packed.
@@ -267,6 +276,19 @@ Unexpected missing object will raise an error.
        locally created objects [without .promisor] and objects from the
        promisor remote [with .promisor].)  This is used with partial clone.
 
+--keep-unreachable::
+       Objects unreachable from the refs in packs named with
+       --unpacked= option are added to the resulting pack, in
+       addition to the reachable objects that are not in packs marked
+       with *.keep files. This implies `--revs`.
+
+--pack-loose-unreachable::
+       Pack unreachable loose objects (and their loose counterparts
+       removed). This implies `--revs`.
+
+--unpack-unreachable::
+       Keep unreachable objects in loose form. This implies `--revs`.
+
 SEE ALSO
 --------
 linkgit:git-rev-list[1]
index a37c0af9313e8e47806c89a584c70c09517244d1..03552dd86fc412b622aff2bcf8feda8e71711b3e 100644 (file)
@@ -56,8 +56,8 @@ OPTIONS
        reachable from any of our references, keep objects
        reachable from listed <head>s.
 
-EXAMPLE
--------
+EXAMPLES
+--------
 
 To prune objects not used by your repository or another that
 borrows from your repository via its
@@ -67,7 +67,7 @@ borrows from your repository via its
 $ git prune $(cd ../another && git rev-parse --all)
 ------------
 
-Notes
+NOTES
 -----
 
 In most cases, users will not need to call 'git prune' directly, but
index ce05b7a5b13eadb6870f16a7168559434d376c20..4e0ad6fd8e0b91d9d82ef66f8e3a10a3882589e8 100644 (file)
@@ -101,13 +101,17 @@ Options related to merging
 include::merge-options.txt[]
 
 -r::
---rebase[=false|true|preserve|interactive]::
+--rebase[=false|true|merges|preserve|interactive]::
        When true, rebase the current branch on top of the upstream
        branch after fetching. If there is a remote-tracking branch
        corresponding to the upstream branch and the upstream branch
        was rebased since last fetched, the rebase uses that information
        to avoid rebasing non-local changes.
 +
+When set to `merges`, rebase using `git rebase --rebase-merges` so that
+the local merge commits are included in the rebase (see
+linkgit:git-rebase[1] for details).
++
 When set to preserve, rebase with the `--preserve-merges` option passed
 to `git rebase` so that locally created merge commits will not be flattened.
 +
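A hedged sketch of using the new `merges` value, either one-off or as a per-repository default (the remote and branch names are placeholders):

------------
$ git pull --rebase=merges origin master
$ git config pull.rebase merges    # make it the default for this repository
------------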
index 5b08302fc2299fe1b42fc0137b50a3bab774ca10..55277a97811fa6933c76c1bd2c96114672f6d5cd 100644 (file)
@@ -11,7 +11,7 @@ SYNOPSIS
 [verse]
 'git push' [--all | --mirror | --tags] [--follow-tags] [--atomic] [-n | --dry-run] [--receive-pack=<git-receive-pack>]
           [--repo=<repository>] [-f | --force] [-d | --delete] [--prune] [-v | --verbose]
-          [-u | --set-upstream] [--push-option=<string>]
+          [-u | --set-upstream] [-o <string> | --push-option=<string>]
           [--[no-]signed|--signed=(true|false|if-asked)]
           [--force-with-lease[=<refname>[:<expect>]]]
           [--no-verify] [<repository> [<refspec>...]]
@@ -123,6 +123,7 @@ already exists on the remote side.
        will be tab-separated and sent to stdout instead of stderr.  The full
        symbolic names of the refs will be given.
 
+-d::
 --delete::
        All listed refs are deleted from the remote repository. This is
        the same as prefixing all refs with a colon.
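For example, deleting a remote branch with the new short form (the branch name is illustrative):

------------
$ git push -d origin topic/old-idea
------------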
@@ -300,7 +301,7 @@ origin +master` to force a push to the `master` branch). See the
        These options are passed to linkgit:git-send-pack[1]. A thin transfer
        significantly reduces the amount of sent data when the sender and
        receiver share many of the same objects in common. The default is
-       \--thin.
+       `--thin`.
 
 -q::
 --quiet::
@@ -423,7 +424,7 @@ reason::
        refs, no explanation is needed. For a failed ref, the reason for
        failure is described.
 
-Note about fast-forwards
+NOTE ABOUT FAST-FORWARDS
 ------------------------
 
 When an update changes a branch (or more in general, a ref) that used to
@@ -510,7 +511,7 @@ overwrite it. In other words, "git push --force" is a method reserved for
 a case where you do mean to lose history.
 
 
-Examples
+EXAMPLES
 --------
 
 `git push`::
index f2a07d54d694b42657aa38e71dc0eeb5fc94e9a0..5c70bc2878fc2f68698b931c6bf951d0097ef813 100644 (file)
@@ -132,7 +132,7 @@ OPTIONS
        The id of the tree object(s) to be read/merged.
 
 
-Merging
+MERGING
 -------
 If `-m` is specified, 'git read-tree' can perform 3 kinds of
 merge, a single tree merge if only 1 tree is given, a
@@ -382,7 +382,7 @@ middle of doing, and when your working tree is ready (i.e. you
 have finished your work-in-progress), attempt the merge again.
 
 
-Sparse checkout
+SPARSE CHECKOUT
 ---------------
 
 "Sparse checkout" allows populating the working directory sparsely.
index 3277ca143273e01f5f4973ed351c8a5cb4b8e0fa..bd5ecff980ef297df18d96453f91b30e9a34c031 100644 (file)
@@ -364,9 +364,10 @@ default is `--no-fork-point`, otherwise the default is `--fork-point`.
        Incompatible with the --interactive option.
 
 --signoff::
-       This flag is passed to 'git am' to sign off all the rebased
-       commits (see linkgit:git-am[1]). Incompatible with the
-       --interactive option.
+       Add a Signed-off-by: trailer to all the rebased commits. Note
+       that if `--interactive` is given then only commits marked to be
+       picked, edited or reworded will have the trailer added. Incompatible
+       with the `--preserve-merges` option.
 
 -i::
 --interactive::
@@ -378,6 +379,33 @@ The commit list format can be changed by setting the configuration option
 rebase.instructionFormat.  A customized instruction format will automatically
 have the long commit hash prepended to the format.
 
+-r::
+--rebase-merges[=(rebase-cousins|no-rebase-cousins)]::
+       By default, a rebase will simply drop merge commits from the todo
+       list, and put the rebased commits into a single, linear branch.
+       With `--rebase-merges`, the rebase will instead try to preserve
+       the branching structure within the commits that are to be rebased,
+       by recreating the merge commits. Any resolved merge conflicts or
+       manual amendments in these merge commits will have to be
+       resolved/re-applied manually.
++
+By default, or when `no-rebase-cousins` was specified, commits which do not
+have `<upstream>` as direct ancestor will keep their original branch point,
+i.e. commits that would be excluded by linkgit:git-log[1]'s
+`--ancestry-path` option will keep their original ancestry by default. If
+the `rebase-cousins` mode is turned on, such commits are instead rebased
+onto `<upstream>` (or `<onto>`, if specified).
++
+The `--rebase-merges` mode is similar in spirit to `--preserve-merges`, but
+in contrast to that option works well in interactive rebases: commits can be
+reordered, inserted and dropped at will.
++
+It is currently only possible to recreate the merge commits using the
+`recursive` merge strategy; different merge strategies can be used only via
+explicit `exec git merge -s <strategy> [...]` commands.
++
+See also REBASING MERGES below.
+
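A typical invocation that preserves the merge topology while rebasing onto an updated upstream might be (a sketch; the upstream name is a placeholder):

------------
$ git rebase -i --rebase-merges origin/master
------------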
 -p::
 --preserve-merges::
        Recreate merge commits instead of flattening the history by replaying
@@ -775,12 +803,146 @@ The ripple effect of a "hard case" recovery is especially bad:
 'everyone' downstream from 'topic' will now have to perform a "hard
 case" recovery too!
 
+REBASING MERGES
+-----------------
+
+The interactive rebase command was originally designed to handle
+individual patch series. As such, it makes sense to exclude merge
+commits from the todo list, as the developer may have merged the
+then-current `master` while working on the branch, only to rebase
+all the commits onto `master` eventually (skipping the merge
+commits).
+
+However, there are legitimate reasons why a developer may want to
+recreate merge commits: to keep the branch structure (or "commit
+topology") when working on multiple, inter-related branches.
+
+In the following example, the developer works on a topic branch that
+refactors the way buttons are defined, and on another topic branch
+that uses that refactoring to implement a "Report a bug" button. The
+output of `git log --graph --format=%s -5` may look like this:
+
+------------
+*   Merge branch 'report-a-bug'
+|\
+| * Add the feedback button
+* | Merge branch 'refactor-button'
+|\ \
+| |/
+| * Use the Button class for all buttons
+| * Extract a generic Button class from the DownloadButton one
+------------
+
+The developer might want to rebase those commits to a newer `master`
+while keeping the branch topology, for example when the first topic
+branch is expected to be integrated into `master` much earlier than the
+second one, say, to resolve merge conflicts with changes to the
+DownloadButton class that made it into `master`.
+
+This rebase can be performed using the `--rebase-merges` option.
+It will generate a todo list looking like this:
+
+------------
+label onto
+
+# Branch: refactor-button
+reset onto
+pick 123456 Extract a generic Button class from the DownloadButton one
+pick 654321 Use the Button class for all buttons
+label refactor-button
+
+# Branch: report-a-bug
+reset refactor-button # Use the Button class for all buttons
+pick abcdef Add the feedback button
+label report-a-bug
+
+reset onto
+merge -C a1b2c3 refactor-button # Merge 'refactor-button'
+merge -C 6f5e4d report-a-bug # Merge 'report-a-bug'
+------------
+
+In contrast to a regular interactive rebase, there are `label`, `reset`
+and `merge` commands in addition to `pick` ones.
+
+The `label` command associates a label with the current HEAD when that
+command is executed. These labels are created as worktree-local refs
+(`refs/rewritten/<label>`) that will be deleted when the rebase
+finishes. That way, rebase operations in multiple worktrees linked to
+the same repository do not interfere with one another. If the `label`
+command fails, it is rescheduled immediately, with a helpful message about how
+to proceed.
+
+The `reset` command resets the HEAD, index and worktree to the specified
+revision. It is similar to an `exec git reset --hard <label>`, but
+refuses to overwrite untracked files. If the `reset` command fails, it is
+rescheduled immediately, with a helpful message about how to edit the todo list
+(this typically happens when a `reset` command was inserted into the todo
+list manually and contains a typo).
+
+The `merge` command will merge the specified revision into whatever is
+HEAD at that time. With `-C <original-commit>`, the commit message of
+the specified merge commit will be used. When the `-C` is changed to
+a lower-case `-c`, the message will be opened in an editor after a
+successful merge so that the user can edit the message.
+
+If a `merge` command fails for any reason other than merge conflicts (i.e.
+when the merge operation did not even start), it is rescheduled immediately.
+
+At this time, the `merge` command will *always* use the `recursive`
+merge strategy, with no way to choose a different one. To work around
+this, an `exec` command can be used to call `git merge` explicitly,
+using the fact that the labels are worktree-local refs (the ref
+`refs/rewritten/onto` would correspond to the label `onto`, for example).
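For instance, a `merge -C ... refactor-button` line from the todo list above could be replaced by an explicit invocation that selects another strategy (an illustrative sketch, not something the command generates):

------------
exec git merge --no-ff -s resolve \
	-m "Merge branch 'refactor-button'" refs/rewritten/refactor-button
------------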
+
+Note: the first command (`label onto`) labels the revision onto which
+the commits are rebased; the name `onto` is just a convention, as a nod
+to the `--onto` option.
+
+It is also possible to introduce completely new merge commits from scratch
+by adding a command of the form `merge <merge-head>`. This form will
+generate a tentative commit message and always open an editor to let the
+user edit it. This can be useful e.g. when a topic branch turns out to
+address more than a single concern and wants to be split into two or
+even more topic branches. Consider this todo list:
+
+------------
+pick 192837 Switch from GNU Makefiles to CMake
+pick 5a6c7e Document the switch to CMake
+pick 918273 Fix detection of OpenSSL in CMake
+pick afbecd http: add support for TLS v1.3
+pick fdbaec Fix detection of cURL in CMake on Windows
+------------
+
+The one commit in this list that is not related to CMake may very well
+have been motivated by working on fixing all those bugs introduced by
+switching to CMake, but it addresses a different concern. To split this
+branch into two topic branches, the todo list could be edited like this:
+
+------------
+label onto
+
+pick afbecd http: add support for TLS v1.3
+label tlsv1.3
+
+reset onto
+pick 192837 Switch from GNU Makefiles to CMake
+pick 918273 Fix detection of OpenSSL in CMake
+pick fdbaec Fix detection of cURL in CMake on Windows
+pick 5a6c7e Document the switch to CMake
+label cmake
+
+reset onto
+merge tlsv1.3
+merge cmake
+------------
+
 BUGS
 ----
 The todo list presented by `--preserve-merges --interactive` does not
 represent the topology of the revision graph.  Editing commits and
 rewording their commit messages should work fine, but attempts to
-reorder commits tend to produce counterintuitive results.
+reorder commits tend to produce counterintuitive results. Use
+`--rebase-merges` in such scenarios instead.
 
 For example, an attempt to rearrange
 ------------
index 86a4b32f0f1cbb8b69ff8b935edade6f14e7d5e2..dedf97efbb2282a15abfee41926afd893e59955c 100644 (file)
@@ -41,7 +41,7 @@ OPTIONS
 <directory>::
        The repository to sync into.
 
-pre-receive Hook
+PRE-RECEIVE HOOK
 ----------------
 Before any ref is updated, if $GIT_DIR/hooks/pre-receive file exists
 and is executable, it will be invoked once with no parameters.  The
@@ -116,7 +116,7 @@ bail out if the update is not to be supported.
 
 See the notes on the quarantine environment below.
 
-update Hook
+UPDATE HOOK
 -----------
 Before each ref is updated, if $GIT_DIR/hooks/update file exists
 and is executable, it is invoked once per ref, with three parameters:
@@ -138,7 +138,7 @@ ensure the ref will actually be updated, it is only a prerequisite.
 As such it is not a good idea to send notices (e.g. email) from
 this hook.  Consider using the post-receive hook instead.
 
-post-receive Hook
+POST-RECEIVE HOOK
 -----------------
 After all refs were updated (or attempted to be updated), if any
 ref update was successful, and if $GIT_DIR/hooks/post-receive
@@ -198,7 +198,7 @@ after it was updated by 'git-receive-pack', but before the hook was able
 to evaluate it.  It is recommended that hooks rely on sha1-new
 rather than the current value of refname.
 
-post-update Hook
+POST-UPDATE HOOK
 ----------------
 After all other processing, if at least one ref was updated, and
 if $GIT_DIR/hooks/post-update file exists and is executable, then
@@ -216,7 +216,7 @@ if the repository is packed and is served via a dumb transport.
        exec git update-server-info
 
 
-Quarantine Environment
+QUARANTINE ENVIRONMENT
 ----------------------
 
 When `receive-pack` takes in objects, they are placed into a temporary
index b25d0b5996b560837648c13c742bfc5ad09f0310..3fc5d94336f7c706bab12513e14744947a0bcae2 100644 (file)
@@ -55,14 +55,14 @@ some tunnel.
        the vhost field in the git:// service request (to rest of the argument).
        Default is not to send vhost in such request (if sent).
 
-ENVIRONMENT VARIABLES:
-----------------------
+ENVIRONMENT VARIABLES
+---------------------
 
 GIT_TRANSLOOP_DEBUG::
        If set, prints debugging information about various reads/writes.
 
-ENVIRONMENT VARIABLES PASSED TO COMMAND:
-----------------------------------------
+ENVIRONMENT VARIABLES PASSED TO COMMAND
+---------------------------------------
 
 GIT_EXT_SERVICE::
        Set to long name (git-upload-pack, etc...) of service helper needs
@@ -73,8 +73,8 @@ GIT_EXT_SERVICE_NOPREFIX::
        to invoke.
 
 
-EXAMPLES:
----------
+EXAMPLES
+--------
 This remote helper is transparently used by Git when
 you use commands such as "git fetch <URL>", "git clone <URL>",
 , "git push <URL>" or "git remote add <nick> <URL>", where <URL>
index 4feddc0293bd7eb9827ce45f11519335a484f013..595948da53093459635fd3dd8c79fe756020eea8 100644 (file)
@@ -203,7 +203,7 @@ The remote configuration is achieved using the `remote.origin.url` and
 `remote.origin.fetch` configuration variables.  (See
 linkgit:git-config[1]).
 
-Examples
+EXAMPLES
 --------
 
 * Add a new remote, fetch, and check out a branch from it
index ae750e9e1149f512dd5889a8081452055ccdb6d7..d90e7907f4843a048caf11a2fae42a973d893d9a 100644 (file)
@@ -9,7 +9,7 @@ git-repack - Pack unpacked objects in a repository
 SYNOPSIS
 --------
 [verse]
-'git repack' [-a] [-A] [-d] [-f] [-F] [-l] [-n] [-q] [-b] [--window=<n>] [--depth=<n>] [--threads=<n>]
+'git repack' [-a] [-A] [-d] [-f] [-F] [-l] [-n] [-q] [-b] [--window=<n>] [--depth=<n>] [--threads=<n>] [--keep-pack=<pack-name>]
 
 DESCRIPTION
 -----------
@@ -90,7 +90,9 @@ other objects in that pack they already have locally.
        space. `--depth` limits the maximum delta depth; making it too deep
        affects the performance on the unpacker side, because delta data needs
        to be applied that many times to get to the necessary object.
-       The default value for --window is 10 and --depth is 50.
++
+The default value for --window is 10 and --depth is 50. The maximum
+depth is 4095.
 
 --threads=<n>::
        This option is passed through to `git pack-objects`.
@@ -133,6 +135,13 @@ other objects in that pack they already have locally.
        with `-b` or `repack.writeBitmaps`, as it ensures that the
        bitmapped packfile has the necessary objects.
 
+--keep-pack=<pack-name>::
+       Exclude the given pack from repacking. This is the equivalent
+       of having a `.keep` file on the pack. `<pack-name>` is the
+       pack file name without leading directory (e.g. `pack-123.pack`).
+       The option can be specified multiple times to keep multiple
+       packs.
+
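A sketch of excluding one pack from repacking (the pack name is a placeholder for a file under `.git/objects/pack/`):

------------
$ git repack -a -d --keep-pack=pack-1234abcd.pack
------------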
 --unpack-unreachable=<when>::
        When loosening unreachable objects, do not bother loosening any
        objects older than `<when>`. This can be used to optimize out
index e5c57ae6ef4afd71944e23f895e9a0f354eaf6b1..246dc9943c223d34c72d7b90f40ee8f523b7d77e 100644 (file)
@@ -11,6 +11,7 @@ SYNOPSIS
 'git replace' [-f] <object> <replacement>
 'git replace' [-f] --edit <object>
 'git replace' [-f] --graft <commit> [<parent>...]
+'git replace' [-f] --convert-graft-file
 'git replace' -d <object>...
 'git replace' [--format=<format>] [-l [<pattern>]]
 
@@ -87,9 +88,13 @@ OPTIONS
        content as <commit> except that its parents will be
        [<parent>...] instead of <commit>'s parents. A replacement ref
        is then created to replace <commit> with the newly created
-       commit. See contrib/convert-grafts-to-replace-refs.sh for an
-       example script based on this option that can convert grafts to
-       replace refs.
+       commit. Use `--convert-graft-file` to convert a
+       `$GIT_DIR/info/grafts` file and use replace refs instead.
+
+--convert-graft-file::
+       Creates graft commits for all entries in `$GIT_DIR/info/grafts`
+       and deletes that file upon success. The purpose is to help users
+       with transitioning off of the now-deprecated graft file.
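Migrating an existing graft file could then be as simple as (a sketch):

------------
$ git replace --convert-graft-file
$ git replace -l --format=long    # inspect the replacement refs it created
------------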
 
 -l <pattern>::
 --list <pattern>::
index c32cb0bea1d6c057dabdd3065417744f5209ba46..4d4392d0f841b7e447b536ef1281fcbe2e49786d 100644 (file)
@@ -46,8 +46,8 @@ ref that is different from the ref you have locally, you can use the
 its remote name.
 
 
-EXAMPLE
--------
+EXAMPLES
+--------
 
 Imagine that you built your work on your `master` branch on top of
 the `v1.0` release, and want it to be integrated to the project.
index 71ef97ba9b22aad996049bd0c1500a835a433350..464c15b94f391e9fb477b5258e7f318d5f2f14ca 100644 (file)
@@ -255,7 +255,7 @@ must be used for each option.
 
 --batch-size=<num>::
        Some email servers (e.g. smtp.163.com) limit the number emails to be
-       sent per session (connection) and this will lead to a faliure when
+       sent per session (connection) and this will lead to a failure when
        sending many messages.  With this option, send-email will disconnect after
        sending $<num> messages and wait for a few seconds (see --relogin-delay)
        and reconnect, to work around such a limit.  You may want to
@@ -458,8 +458,8 @@ sendemail.confirm::
        one of 'always', 'never', 'cc', 'compose', or 'auto'. See `--confirm`
        in the previous section for the meaning of these values.
 
-EXAMPLE
--------
+EXAMPLES
+--------
 Use gmail as the smtp server
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 To use 'git send-email' to send your patches through the GMail SMTP server,
@@ -473,16 +473,7 @@ edit ~/.gitconfig to specify your account settings:
 
 If you have multifactor authentication setup on your gmail account, you will
 need to generate an app-specific password for use with 'git send-email'. Visit
-https://security.google.com/settings/security/apppasswords to setup an
-app-specific password.  Once setup, you can store it with the credentials
-helper:
-
-       $ git credential fill
-       protocol=smtp
-       host=smtp.gmail.com
-       username=youname@gmail.com
-       password=app-password
-
+https://security.google.com/settings/security/apppasswords to create it.
 
 Once your commits are ready to be sent to the mailing list, run the
 following commands:
@@ -491,6 +482,11 @@ following commands:
        $ edit outgoing/0000-*
        $ git send-email outgoing/*
 
+The first time you run it, you will be prompted for your credentials.  Enter the
+app-specific or your regular password as appropriate.  If you have a credential
+helper configured (see linkgit:git-credential[1]), the password will be saved in
+the credential store so you won't have to type it the next time.
+
 Note: the following perl modules are required
       Net::SMTP::SSL, MIME::Base64 and Authen::SASL
 
index f51c64939b48b7b082752a294b17aee6d92c4fa0..44fd146b9120305112c59b800c9e989bbc60f450 100644 (file)
@@ -99,7 +99,7 @@ be in a separate packet, and the list must end with a flush packet.
        The remote refs to update.
 
 
-Specifying the Refs
+SPECIFYING THE REFS
 -------------------
 
 There are three ways to specify which refs to update on the
index 54cf2560bebbfc538644c636f696d4e500f1d852..11361f33e93937429a8d1791efaca5e00888f236 100644 (file)
@@ -62,8 +62,8 @@ permissions.
 If a `no-interactive-login` command exists, then it is run and the
 interactive shell is aborted.
 
-EXAMPLE
--------
+EXAMPLES
+--------
 
 To disable interactive logins, displaying a greeting instead:
 
index ee6c5476c1d2bf3b2a708e6152ebaba5882cc4f7..bc80905a8a06b5121e2c33c83844301a8212c28c 100644 (file)
@@ -8,8 +8,8 @@ git-shortlog - Summarize 'git log' output
 SYNOPSIS
 --------
 [verse]
+'git shortlog' [<options>] [<revision range>] [[--] <path>...]
 git log --pretty=short | 'git shortlog' [<options>]
-'git shortlog' [<options>] [<revision range>] [[\--] <path>...]
 
 DESCRIPTION
 -----------
@@ -69,11 +69,11 @@ them.
        ways to spell <revision range>, see the "Specifying Ranges"
        section of linkgit:gitrevisions[7].
 
-[\--] <path>...::
+[--] <path>...::
        Consider only commits that are enough to explain how the files
        that match the specified paths came to be.
 +
-Paths may need to be prefixed with "\-- " to separate them from
+Paths may need to be prefixed with `--` to separate them from
 options or the revision range, when confusion arises.
 
 MAPPING AUTHORS
index 7818e0f09853f9c2a2ad0923d1fc13b698cad065..262db049d772d4249983da5ca0d21d8f6b0f8d92 100644 (file)
@@ -173,8 +173,8 @@ The "fixes" branch adds one commit "Introduce "reset type" flag to
 The current branch is "master".
 
 
-EXAMPLE
--------
+EXAMPLES
+--------
 
 If you keep your primary branches immediately under
 `refs/heads`, and topic branches in subdirectories of
index c0aa871c9e8b06ea297e0a723167767338081453..d28e6154c6863d2d87c52251669e305caf7f995a 100644 (file)
@@ -120,8 +120,8 @@ $ git show-ref --heads --hash
 ...
 -----------------------------------------------------------------------------
 
-EXAMPLE
--------
+EXAMPLES
+--------
 
 To show all references called "master", whether tags or heads or anything
 else, and regardless of how deep in the reference naming hierarchy they are,
index e73ef540176b65f6c049a3b5fcfd7e8faa6a6b58..0e1695df350b68a2739df946bbe74df7a5f9d71b 100644 (file)
@@ -77,7 +77,7 @@ EXAMPLES
        Concatenates the contents of said Makefiles in the head
        of the branch `master`.
 
-Discussion
+DISCUSSION
 ----------
 
 include::i18n.txt[]
index 6c230c0c7200412b988d233352e3411a9fb813a8..c16e27e63d4cf6522d29c9d0ff099e06cfcd38a4 100644 (file)
@@ -113,7 +113,7 @@ The possible options are:
        - 'matching'    - Shows ignored files and directories matching an
                          ignore pattern.
 +
-When 'matching' mode is specified, paths that explicity match an
+When 'matching' mode is specified, paths that explicitly match an
 ignored pattern are shown. If a directory matches an ignore pattern,
 then it is shown, but not paths contained in the ignored directory. If
 a directory does not match an ignore pattern, but all contents are
index 71c5618e82aacc8616522a6f498b1172910890e4..630999f41a902d8d2043e17f797666489792fd57 100644 (file)
@@ -213,8 +213,8 @@ sync [--recursive] [--] [<path>...]::
        submodule URLs change upstream and you need to update your local
        repositories accordingly.
 +
-"git submodule sync" synchronizes all submodules while
-"git submodule sync \-- A" synchronizes submodule "A" only.
+`git submodule sync` synchronizes all submodules while
+`git submodule sync -- A` synchronizes submodule "A" only.
 +
 If `--recursive` is specified, this command will recurse into the
 registered submodules, and sync any nested submodules within.
index 636e09048e8846b813166737ef58eef96eb3a3c4..e9615951d22f724c30abdfc92485f5506a08249b 100644 (file)
@@ -635,7 +635,8 @@ config key: svn.findcopiesharder
 
 -A<filename>::
 --authors-file=<filename>::
-       Syntax is compatible with the file used by 'git cvsimport':
+       Syntax is compatible with the file used by 'git cvsimport' but
+       an empty email address can be supplied with '<>':
 +
 ------------------------------------------------------------------------
        loginname = Joe User <user@example.com>
@@ -654,8 +655,14 @@ config key: svn.authorsfile
        If this option is specified, for each SVN committer name that
        does not exist in the authors file, the given file is executed
        with the committer name as the first argument.  The program is
-       expected to return a single line of the form "Name <email>",
-       which will be treated as if included in the authors file.
+       expected to return a single line of the form "Name <email>" or
+       "Name <>", which will be treated as if included in the authors
+       file.
++
+Due to historical reasons a relative 'filename' is first searched
+relative to the current directory for 'init' and 'clone' and relative
+to the root of the working tree for 'fetch'. If 'filename' is
+not found, it is searched like any other command in '$PATH'.
 +
 [verse]
 config key: svn.authorsProg
@@ -700,7 +707,7 @@ creating the branch or tag.
 config key: svn.useLogAuthor
 
 --add-author-from::
-       When committing to svn from Git (as part of 'commit-diff', 'set-tree' or 'dcommit'
+       When committing to svn from Git (as part of 'set-tree' or 'dcommit'
        operations), if the existing log message doesn't already have a
        `From:` or `Signed-off-by:` line, append a `From:` line based on the
        Git commit's author string.  If you use this, then `--use-log-author`
index 3897a59ee94bc424c2c66cb5d05bc0193fb6eaf2..4e8e762e68690cab880460fbe1fef0077c8351e2 100644 (file)
@@ -228,7 +228,7 @@ will remove the intended effect of the option.
        cleaner names.
        The same applies to directories ending '/' and paths with '//'
 
-Using --refresh
+USING --REFRESH
 ---------------
 `--refresh` does not calculate a new sha1 file or bring the index
 up to date for mode/content changes. But what it *does* do is to
@@ -239,7 +239,7 @@ the stat entry is out of date.
 For example, you'd want to do this after doing a 'git read-tree', to link
 up the stat index details with the proper files.
 
-Using --cacheinfo or --info-only
+USING --CACHEINFO OR --INFO-ONLY
 --------------------------------
 `--cacheinfo` is used to register a file that is not in the
 current working directory.  This is useful for minimum-checkout
@@ -261,7 +261,7 @@ useful when the file is available, but you do not wish to update the
 object database.
 
 
-Using --index-info
+USING --INDEX-INFO
 ------------------
 
 `--index-info` is a more powerful mechanism that lets you feed
@@ -317,7 +317,7 @@ $ git ls-files -s
 ------------
 
 
-Using ``assume unchanged'' bit
+USING ``ASSUME UNCHANGED'' BIT
 ------------------------------
 
 Many operations in Git depend on your filesystem to have an
@@ -350,7 +350,7 @@ the index (use `git update-index --really-refresh` if you want
 to mark them as "assume unchanged").
 
 
-Examples
+EXAMPLES
 --------
 To update and refresh only the files already checked out:
 
@@ -387,7 +387,7 @@ M foo.c
 <9> now it checks with lstat(2) and finds it has been changed.
 
 
-Skip-worktree bit
+SKIP-WORKTREE BIT
 -----------------
 
 Skip-worktree bit can be defined in one (long) sentence: When reading
@@ -407,7 +407,7 @@ Although this bit looks similar to assume-unchanged bit, its goal is
 different from assume-unchanged bit's. Skip-worktree also takes
 precedence over assume-unchanged bit when both are set.
 
-Split index
+SPLIT INDEX
 -----------
 
 This mode is designed for repositories with very large indexes, and
@@ -432,7 +432,7 @@ To avoid deleting a shared index file that is still used, its
 modification time is updated to the current time every time a new split
 index based on the shared index file is either created or read from.
 
-Untracked cache
+UNTRACKED CACHE
 ---------------
 
 This cache is meant to speed up commands that involve determining
@@ -490,7 +490,7 @@ As with the bug described above the solution is to one-off do a "git
 status" run with `core.untrackedCache=false` to flush out the leftover
 bad data.
 
-File System Monitor
+FILE SYSTEM MONITOR
 -------------------
 
 This feature is intended to speed up git operations for repos that have
@@ -518,7 +518,7 @@ file system monitor is added to or removed from the index the next time
 a command reads the index. When `--[no-]fsmonitor` are used, the file
 system monitor is immediately added to or removed from the index.
 
-Configuration
+CONFIGURATION
 -------------
 
 The command honors `core.filemode` configuration variable.  If
index 969bfab2ab422ca8b7bdf2eb3ba45edc92fcd00b..bc8fdfd4691326b44e2cbdea3dc4925cf7440515 100644 (file)
@@ -120,7 +120,7 @@ modifications are performed.  Note that while each individual
 <ref> is updated or deleted atomically, a concurrent reader may
 still see a subset of the modifications.
 
-Logging Updates
+LOGGING UPDATES
 ---------------
 If config parameter "core.logAllRefUpdates" is true and the ref is one under
 "refs/heads/", "refs/remotes/", "refs/notes/", or the symbolic ref HEAD; or
index 44ff9541df1f5dd432d37b5828bfb800f66dfae8..6072f936ab5e3a8b3aeafcaeddd81b7b6d5e0b01 100644 (file)
@@ -23,14 +23,14 @@ OPTIONS
        as well. (However, the configuration variables listing functionality
        is deprecated in favor of `git config -l`.)
 
-EXAMPLE
+EXAMPLES
 --------
        $ git var GIT_AUTHOR_IDENT
        Eric W. Biederman <ebiederm@lnxi.com> 1121223278 -0600
 
 
 VARIABLES
-----------
+---------
 GIT_AUTHOR_IDENT::
     The author of a piece of code.
 
index 2d6b09a43cd63e3ad768f9b49ba67f1deda05111..a4ec25b450c8084f32c3a6ac920ed36c3529e54c 100644 (file)
@@ -84,7 +84,7 @@ variable exists then 'git web{litdd}browse' will treat the specified tool
 as a custom command and will use a shell eval to run the command with
 the URLs passed as arguments.
 
-Note about konqueror
+NOTE ABOUT KONQUEROR
 --------------------
 
 When 'konqueror' is specified by a command-line option or a
index e7eb24ab8528e39aa4e0a75f6feaa46d9cfb39fe..afc6576a14d56ea49e37d1251a5665bf77457f89 100644 (file)
@@ -14,7 +14,7 @@ SYNOPSIS
 'git worktree lock' [--reason <string>] <worktree>
 'git worktree move' <worktree> <new-path>
 'git worktree prune' [-n] [-v] [--expire <expire>]
-'git worktree remove' [--force] <worktree>
+'git worktree remove' [-f] <worktree>
 'git worktree unlock' <worktree>
 
 DESCRIPTION
@@ -27,11 +27,12 @@ out more than one branch at a time.  With `git worktree add` a new working
 tree is associated with the repository.  This new working tree is called a
 "linked working tree" as opposed to the "main working tree" prepared by "git
 init" or "git clone".  A repository has one main working tree (if it's not a
-bare repository) and zero or more linked working trees.
+bare repository) and zero or more linked working trees. When you are done
+with a linked working tree, remove it with `git worktree remove`.
 
-When you are done with a linked working tree you can simply delete it.
-The working tree's administrative files in the repository (see
-"DETAILS" below) will eventually be removed automatically (see
+If a working tree is deleted without using `git worktree remove`, then
+its associated administrative files, which reside in the repository
+(see "DETAILS" below), will eventually be removed automatically (see
 `gc.worktreePruneExpire` in linkgit:git-config[1]), or you can run
 `git worktree prune` in the main or any linked working tree to
 clean up any stale administrative files.
@@ -60,8 +61,13 @@ $ git worktree add --track -b <branch> <path> <remote>/<branch>
 ------------
 +
 If `<commit-ish>` is omitted and neither `-b` nor `-B` nor `--detach` used,
-then, as a convenience, a new branch based at HEAD is created automatically,
-as if `-b $(basename <path>)` was specified.
+then, as a convenience, the new worktree is associated with a branch
+(call it `<branch>`) named after `$(basename <path>)`.  If `<branch>`
+doesn't exist, a new branch based on HEAD is automatically created as
+if `-b <branch>` was given.  If `<branch>` does exist, it will be
+checked out in the new worktree if it's not checked out anywhere
+else; otherwise, the command will refuse to create the worktree (unless
+`--force` is used).
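For example (an illustrative sketch; the path and branch name are arbitrary), the following creates a new working tree at `../hotfix` on a new `hotfix` branch based on HEAD:

------------
$ git worktree add ../hotfix
------------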
 
 list::
 
@@ -106,7 +112,7 @@ OPTIONS
        By default, `add` refuses to create a new working tree when
        `<commit-ish>` is a branch name and is already checked out by
        another working tree and `remove` refuses to remove an unclean
-       working tree. This option overrides that safeguard.
+       working tree. This option overrides these safeguards.
 
 -b <new-branch>::
 -B <new-branch>::
@@ -232,7 +238,7 @@ The worktree list command has two output formats.  The default format shows the
 details on a single line with columns.  For example:
 
 ------------
-S git worktree list
+$ git worktree list
 /path/to/bare-source            (bare)
 /path/to/linked-worktree        abcd1234 [master]
 /path/to/other-linked-worktree  1234abc  (detached HEAD)
@@ -247,7 +253,7 @@ if the value is true.  An empty line indicates the end of a worktree.  For
 example:
 
 ------------
-S git worktree list --porcelain
+$ git worktree list --porcelain
 worktree /path/to/bare-source
 bare
 
@@ -278,8 +284,7 @@ $ pushd ../temp
 # ... hack hack hack ...
 $ git commit -a -m 'emergency fix for boss'
 $ popd
-$ rm -rf ../temp
-$ git worktree prune
+$ git worktree remove ../temp
 ------------
 
 BUGS
index 4767860e72f46d4e4df883f2fdbb4a46eb8e8eda..c662f41c1dce8ee69113dbb35298bd5c97e831b7 100644 (file)
@@ -11,7 +11,7 @@ SYNOPSIS
 [verse]
 'git' [--version] [--help] [-C <path>] [-c <name>=<value>]
     [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]
-    [-p|--paginate|--no-pager] [--no-replace-objects] [--bare]
+    [-p|--paginate|-P|--no-pager] [--no-replace-objects] [--bare]
     [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]
     [--super-prefix=<path>]
     <command> [<args>]
@@ -103,6 +103,7 @@ foo.bar= ...`) sets `foo.bar` to the empty string which `git config
        configuration options (see the "Configuration Mechanism" section
        below).
 
+-P::
 --no-pager::
        Do not pipe Git output into a pager.
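For example, to run a single command with the pager disabled:

------------
$ git -P log --oneline -5
------------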
 
index 1094fe2b5b0cc97030dc6694364f473999a9f4d3..b72936a885c9772d60c7105cb9e5ba2e27968fcc 100644 (file)
@@ -279,6 +279,94 @@ few exceptions.  Even though...
   catch potential problems early, safety triggers.
 
 
+`working-tree-encoding`
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Git recognizes files encoded in ASCII or one of its supersets (e.g.
+UTF-8, ISO-8859-1, ...) as text files. Files encoded in certain other
+encodings (e.g. UTF-16) are interpreted as binary and consequently
+built-in Git text processing tools (e.g. 'git diff') as well as most Git
+web front ends do not visualize the contents of these files by default.
+
+In these cases you can tell Git the encoding of a file in the working
+directory with the `working-tree-encoding` attribute. If a file with this
+attribute is added to Git, then Git reencodes the content from the
+specified encoding to UTF-8. Finally, Git stores the UTF-8 encoded
+content in its internal data structure (called "the index"). On checkout
+the content is reencoded back to the specified encoding.
+
+Please note that using the `working-tree-encoding` attribute may have a
+number of pitfalls:
+
+- Alternative Git implementations (e.g. JGit or libgit2) and older Git
+  versions (as of March 2018) do not support the `working-tree-encoding`
+  attribute. If you decide to use the `working-tree-encoding` attribute
+  in your repository, then it is strongly recommended to ensure that all
+  clients working with the repository support it.
+
+  For example, Microsoft Visual Studio resources files (`*.rc`) or
+  PowerShell script files (`*.ps1`) are sometimes encoded in UTF-16.
+  If you declare `*.ps1` files as UTF-16 and you add `foo.ps1` with
+  a `working-tree-encoding` enabled Git client, then `foo.ps1` will be
+  stored as UTF-8 internally. A client without `working-tree-encoding`
+  support will check out `foo.ps1` as a UTF-8 encoded file. This will
+  typically cause trouble for the users of this file.
+
+  If a Git client that does not support the `working-tree-encoding`
+  attribute adds a new file `bar.ps1`, then `bar.ps1` will be
+  stored "as-is" internally (in this example probably as UTF-16).
+  A client with `working-tree-encoding` support will interpret the
+  internal contents as UTF-8 and try to convert it to UTF-16 on checkout.
+  That operation will fail and cause an error.
+
+- Reencoding content to non-UTF encodings can cause errors as the
+  conversion might not be UTF-8 round trip safe. If you suspect your
+  encoding is not round trip safe, then add it to
+  `core.checkRoundtripEncoding` to make Git check the round trip
+  encoding (see linkgit:git-config[1] and the configuration sketch after
+  this list). SHIFT-JIS (Japanese character set) is known to have round
+  trip issues with UTF-8 and is checked by default.
+
+- Reencoding content requires resources that might slow down certain
+  Git operations (e.g. 'git checkout' or 'git add').
+
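As referenced in the round-trip note above, the list of checked encodings can be extended via configuration. A minimal sketch of the syntax (the added encoding is purely illustrative; SHIFT-JIS is already checked by default):

------------------------
$ git config core.checkRoundtripEncoding "SHIFT-JIS, UTF-16"
------------------------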
+Use the `working-tree-encoding` attribute only if you cannot store a file
+in UTF-8 encoding and if you want Git to be able to process the content
+as text.
+
+As an example, use the following attributes if your '*.ps1' files are
+UTF-16 encoded with byte order mark (BOM) and you want Git to perform
+automatic line ending conversion based on your platform.
+
+------------------------
+*.ps1          text working-tree-encoding=UTF-16
+------------------------
+
+Use the following attributes if your '*.ps1' files are UTF-16 little
+endian encoded without BOM and you want Git to use Windows line endings
+in the working directory. Please note that it is highly recommended to
+define the line endings explicitly with `eol` if the `working-tree-encoding`
+attribute is used, in order to avoid ambiguity.
+
+------------------------
+*.ps1          text working-tree-encoding=UTF-16LE eol=CRLF
+------------------------
+
+You can get a list of all available encodings on your platform with the
+following command:
+
+------------------------
+iconv --list
+------------------------
+
+If you do not know the encoding of a file, then you can use the `file`
+command to guess the encoding:
+
+------------------------
+file foo.ps1
+------------------------
+
+
 `ident`
 ^^^^^^^
 
@@ -1141,8 +1229,8 @@ to:
 ------------
 
 
-EXAMPLE
--------
+EXAMPLES
+--------
 
 If you have these three `gitattributes` files:
 
index f877f7b7cd19c3fb1d2ce1263d64c94798c5130d..e3c283a174c46352f26e3e6a957e72b76bf82680 100644 (file)
@@ -31,7 +31,7 @@ Hooks can get their arguments via the environment, command-line
 arguments, and stdin. See the documentation for each hook below for
 details.
 
-'git init' may copy hooks to the new repository, depending on its
+`git init` may copy hooks to the new repository, depending on its
 configuration. See the "TEMPLATE DIRECTORY" section in
 linkgit:git-init[1] for details. When the rest of this document refers
 to "default hooks" it's talking about the default template shipped
@@ -45,9 +45,9 @@ HOOKS
 applypatch-msg
 ~~~~~~~~~~~~~~
 
-This hook is invoked by 'git am'.  It takes a single
+This hook is invoked by linkgit:git-am[1].  It takes a single
 parameter, the name of the file that holds the proposed commit
-log message.  Exiting with a non-zero status causes 'git am' to abort
+log message.  Exiting with a non-zero status causes `git am` to abort
 before applying the patch.
 
 The hook is allowed to edit the message file in place, and can
@@ -61,7 +61,7 @@ The default 'applypatch-msg' hook, when enabled, runs the
 pre-applypatch
 ~~~~~~~~~~~~~~
 
-This hook is invoked by 'git am'.  It takes no parameter, and is
+This hook is invoked by linkgit:git-am[1].  It takes no parameter, and is
 invoked after the patch is applied, but before a commit is made.
 
 If it exits with non-zero status, then the working tree will not be
@@ -76,33 +76,33 @@ The default 'pre-applypatch' hook, when enabled, runs the
 post-applypatch
 ~~~~~~~~~~~~~~~
 
-This hook is invoked by 'git am'.  It takes no parameter,
+This hook is invoked by linkgit:git-am[1].  It takes no parameter,
 and is invoked after the patch is applied and a commit is made.
 
 This hook is meant primarily for notification, and cannot affect
-the outcome of 'git am'.
+the outcome of `git am`.
 
 pre-commit
 ~~~~~~~~~~
 
-This hook is invoked by 'git commit', and can be bypassed
+This hook is invoked by linkgit:git-commit[1], and can be bypassed
 with the `--no-verify` option.  It takes no parameters, and is
 invoked before obtaining the proposed commit log message and
 making a commit.  Exiting with a non-zero status from this script
-causes the 'git commit' command to abort before creating a commit.
+causes the `git commit` command to abort before creating a commit.
 
 The default 'pre-commit' hook, when enabled, catches introduction
 of lines with trailing whitespaces and aborts the commit when
 such a line is found.
 
-All the 'git commit' hooks are invoked with the environment
+All the `git commit` hooks are invoked with the environment
 variable `GIT_EDITOR=:` if the command will not bring up an editor
 to modify the commit message.
 
 prepare-commit-msg
 ~~~~~~~~~~~~~~~~~~
 
-This hook is invoked by 'git commit' right after preparing the
+This hook is invoked by linkgit:git-commit[1] right after preparing the
 default log message, and before the editor is started.
 
 It takes one to three parameters.  The first is the name of the file
@@ -114,7 +114,7 @@ commit is a merge or a `.git/MERGE_MSG` file exists); `squash`
 (if a `.git/SQUASH_MSG` file exists); or `commit`, followed by
 a commit SHA-1 (if a `-c`, `-C` or `--amend` option was given).
 
-If the exit status is non-zero, 'git commit' will abort.
+If the exit status is non-zero, `git commit` will abort.
 
 The purpose of the hook is to edit the message file in place, and
 it is not suppressed by the `--no-verify` option.  A non-zero exit
@@ -127,7 +127,7 @@ help message found in the commented portion of the commit template.
 commit-msg
 ~~~~~~~~~~
 
-This hook is invoked by 'git commit' and 'git merge', and can be
+This hook is invoked by linkgit:git-commit[1] and linkgit:git-merge[1], and can be
 bypassed with the `--no-verify` option.  It takes a single parameter,
 the name of the file that holds the proposed commit log message.
 Exiting with a non-zero status causes the command to abort.
@@ -143,16 +143,16 @@ The default 'commit-msg' hook, when enabled, detects duplicate
 post-commit
 ~~~~~~~~~~~
 
-This hook is invoked by 'git commit'. It takes no parameters, and is
+This hook is invoked by linkgit:git-commit[1]. It takes no parameters, and is
 invoked after a commit is made.
 
 This hook is meant primarily for notification, and cannot affect
-the outcome of 'git commit'.
+the outcome of `git commit`.
 
 pre-rebase
 ~~~~~~~~~~
 
-This hook is called by 'git rebase' and can be used to prevent a
+This hook is called by linkgit:git-rebase[1] and can be used to prevent a
 branch from getting rebased.  The hook may be called with one or
 two parameters.  The first parameter is the upstream from which
 the series was forked.  The second parameter is the branch being
@@ -161,17 +161,17 @@ rebased, and is not set when rebasing the current branch.
 post-checkout
 ~~~~~~~~~~~~~
 
-This hook is invoked when a 'git checkout' is run after having updated the
+This hook is invoked when a linkgit:git-checkout[1] is run after having updated the
 worktree.  The hook is given three parameters: the ref of the previous HEAD,
 the ref of the new HEAD (which may or may not have changed), and a flag
 indicating whether the checkout was a branch checkout (changing branches,
 flag=1) or a file checkout (retrieving a file from the index, flag=0).
-This hook cannot affect the outcome of 'git checkout'.
+This hook cannot affect the outcome of `git checkout`.
 
-It is also run after 'git clone', unless the --no-checkout (-n) option is
+It is also run after linkgit:git-clone[1], unless the `--no-checkout` (`-n`) option is
 used. The first parameter given to the hook is the null-ref, the second the
-ref of the new HEAD and the flag is always 1. Likewise for 'git worktree add'
-unless --no-checkout is used.
+ref of the new HEAD and the flag is always 1. Likewise for `git worktree add`
+unless `--no-checkout` is used.
 
 This hook can be used to perform repository validity checks, auto-display
 differences from the previous HEAD if different, or set working dir metadata
@@ -180,10 +180,10 @@ properties.
 post-merge
 ~~~~~~~~~~
 
-This hook is invoked by 'git merge', which happens when a 'git pull'
+This hook is invoked by linkgit:git-merge[1], which happens when a `git pull`
 is done on a local repository.  The hook takes a single parameter, a status
 flag specifying whether or not the merge being done was a squash merge.
-This hook cannot affect the outcome of 'git merge' and is not executed,
+This hook cannot affect the outcome of `git merge` and is not executed,
 if the merge failed due to conflicts.
 
 This hook can be used in conjunction with a corresponding pre-commit hook to
@@ -194,10 +194,10 @@ for an example of how to do this.
 pre-push
 ~~~~~~~~
 
-This hook is called by 'git push' and can be used to prevent a push from taking
-place.  The hook is called with two parameters which provide the name and
-location of the destination remote, if a named remote is not being used both
-values will be the same.
+This hook is called by linkgit:git-push[1] and can be used to prevent
+a push from taking place.  The hook is called with two parameters
+which provide the name and location of the destination remote, if a
+named remote is not being used both values will be the same.
 
 Information about what is to be pushed is provided on the hook's standard
 input with lines of the form:
@@ -216,7 +216,7 @@ SHA-1>` will be 40 `0`.  If the local commit was specified by something other
 than a name which could be expanded (such as `HEAD~`, or a SHA-1) it will be
 supplied as it was originally given.
 
-If this hook exits with a non-zero status, 'git push' will abort without
+If this hook exits with a non-zero status, `git push` will abort without
 pushing anything.  Information about why the push is rejected may be sent
 to the user by writing to standard error.
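For illustration, here is a small pre-push hook in the spirit of the sample shipped with Git: it reads the "<local ref> SP <local sha1> SP <remote ref> SP <remote sha1>" lines from standard input and refuses any push that would delete a ref on the remote (the policy itself is only an example):

------------
#!/bin/sh
# Called with the name and location of the destination remote.
remote="$1"
url="$2"
zero=0000000000000000000000000000000000000000
while read local_ref local_sha remote_ref remote_sha
do
        if test "$local_sha" = "$zero"
        then
                echo >&2 "pre-push: refusing to delete $remote_ref on $remote"
                exit 1
        fi
done
exit 0
------------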
 
@@ -224,8 +224,8 @@ to the user by writing to standard error.
 pre-receive
 ~~~~~~~~~~~
 
-This hook is invoked by 'git-receive-pack' when it reacts to
-'git push' and updates reference(s) in its repository.
+This hook is invoked by linkgit:git-receive-pack[1] when it reacts to
+`git push` and updates reference(s) in its repository.
 Just before starting to update refs on the remote repository, the
 pre-receive hook is invoked.  Its exit status determines the success
 or failure of the update.
@@ -246,7 +246,7 @@ updated. If the hook exits with zero, updating of individual refs can
 still be prevented by the <<update,'update'>> hook.
 
 Both standard output and standard error output are forwarded to
-'git send-pack' on the other end, so you can simply `echo` messages
+`git send-pack` on the other end, so you can simply `echo` messages
 for the user.
 
 The number of push options given on the command line of
@@ -265,8 +265,8 @@ linkgit:git-receive-pack[1] for some caveats.
 update
 ~~~~~~
 
-This hook is invoked by 'git-receive-pack' when it reacts to
-'git push' and updates reference(s) in its repository.
+This hook is invoked by linkgit:git-receive-pack[1] when it reacts to
+`git push` and updates reference(s) in its repository.
 Just before updating the ref on the remote repository, the update hook
 is invoked.  Its exit status determines the success or failure of
 the ref update.
@@ -279,7 +279,7 @@ three parameters:
  - and the new object name to be stored in the ref.
 
 A zero exit from the update hook allows the ref to be updated.
-Exiting with a non-zero status prevents 'git-receive-pack'
+Exiting with a non-zero status prevents `git receive-pack`
 from updating that ref.
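A minimal sketch of such a hook that refuses direct updates to one ref and allows everything else (the protected branch name is only an example):

------------
#!/bin/sh
# Arguments: ref name, old object name, new object name.
refname="$1"
oldrev="$2"
newrev="$3"

case "$refname" in
refs/heads/release)
        echo >&2 "update: direct pushes to $refname are not allowed"
        exit 1
        ;;
esac
exit 0
------------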
 
 This hook can be used to prevent 'forced' update on certain refs by
@@ -299,7 +299,7 @@ membership. See linkgit:git-shell[1] for how you might use the login
 shell to restrict the user's access to only git commands.
 
 Both standard output and standard error output are forwarded to
-'git send-pack' on the other end, so you can simply `echo` messages
+`git send-pack` on the other end, so you can simply `echo` messages
 for the user.
 
 The default 'update' hook, when enabled--and with
@@ -310,8 +310,8 @@ unannotated tags to be pushed.
 post-receive
 ~~~~~~~~~~~~
 
-This hook is invoked by 'git-receive-pack' when it reacts to
-'git push' and updates reference(s) in its repository.
+This hook is invoked by linkgit:git-receive-pack[1] when it reacts to
+`git push` and updates reference(s) in its repository.
 It executes on the remote repository once after all the refs have
 been updated.
 
@@ -320,7 +320,7 @@ arguments, but gets the same information as the
 <<pre-receive,'pre-receive'>>
 hook does on its standard input.
 
-This hook does not affect the outcome of 'git-receive-pack', as it
+This hook does not affect the outcome of `git receive-pack`, as it
 is called after the real work is done.
 
 This supersedes the <<post-update,'post-update'>> hook in that it gets
@@ -328,7 +328,7 @@ both old and new values of all the refs in addition to their
 names.
 
 Both standard output and standard error output are forwarded to
-'git send-pack' on the other end, so you can simply `echo` messages
+`git send-pack` on the other end, so you can simply `echo` messages
 for the user.
 
 The default 'post-receive' hook is empty, but there is
@@ -349,8 +349,8 @@ will be set to zero, `GIT_PUSH_OPTION_COUNT=0`.
 post-update
 ~~~~~~~~~~~
 
-This hook is invoked by 'git-receive-pack' when it reacts to
-'git push' and updates reference(s) in its repository.
+This hook is invoked by linkgit:git-receive-pack[1] when it reacts to
+`git push` and updates reference(s) in its repository.
 It executes on the remote repository once after all the refs have
 been updated.
 
@@ -358,7 +358,7 @@ It takes a variable number of parameters, each of which is the
 name of ref that was actually updated.
 
 This hook is meant primarily for notification, and cannot affect
-the outcome of 'git-receive-pack'.
+the outcome of `git receive-pack`.
 
 The 'post-update' hook can tell what are the heads that were pushed,
 but it does not know what their original and updated values are,
@@ -368,20 +368,20 @@ updated values of the refs. You might consider it instead if you need
 them.
 
 When enabled, the default 'post-update' hook runs
-'git update-server-info' to keep the information used by dumb
+`git update-server-info` to keep the information used by dumb
 transports (e.g., HTTP) up to date.  If you are publishing
 a Git repository that is accessible via HTTP, you should
 probably enable this hook.
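A hook that does exactly this is tiny; it mirrors the sample shipped in Git's template directory:

------------
#!/bin/sh
exec git update-server-info
------------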
 
 Both standard output and standard error output are forwarded to
-'git send-pack' on the other end, so you can simply `echo` messages
+`git send-pack` on the other end, so you can simply `echo` messages
 for the user.
 
 push-to-checkout
 ~~~~~~~~~~~~~~~~
 
-This hook is invoked by 'git-receive-pack' when it reacts to
-'git push' and updates reference(s) in its repository, and when
+This hook is invoked by linkgit:git-receive-pack[1] when it reacts to
+`git push` and updates reference(s) in its repository, and when
 the push tries to update the branch that is currently checked out
 and the `receive.denyCurrentBranch` configuration variable is set to
 `updateInstead`.  Such a push by default is refused if the working
@@ -400,8 +400,8 @@ when the tip of the current branch is updated to the new commit, and
 exit with a zero status.
 
 For example, the hook can simply run `git read-tree -u -m HEAD "$1"`
-in order to emulate 'git fetch' that is run in the reverse direction
-with `git push`, as the two-tree form of `read-tree -u -m` is
+in order to emulate `git fetch` that is run in the reverse direction
+with `git push`, as the two-tree form of `git read-tree -u -m` is
 essentially the same as `git checkout` that switches branches while
 keeping the local changes in the working tree that do not interfere
 with the difference between the branches.
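A minimal push-to-checkout hook along these lines (no extra safety checks, purely a sketch) would be:

------------
#!/bin/sh
# "$1" is the commit that the tip of the current branch will be set to.
exec git read-tree -u -m HEAD "$1"
------------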
@@ -410,15 +410,16 @@ with the difference between the branches.
 pre-auto-gc
 ~~~~~~~~~~~
 
-This hook is invoked by 'git gc --auto'. It takes no parameter, and
-exiting with non-zero status from this script causes the 'git gc --auto'
-to abort.
+This hook is invoked by `git gc --auto` (see linkgit:git-gc[1]). It
+takes no parameter, and exiting with non-zero status from this script
+causes `git gc --auto` to abort.
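For example, such a hook could skip automatic repacking while the machine runs on battery (a sketch; it assumes the `on_ac_power` utility is available):

------------
#!/bin/sh
if on_ac_power
then
        exit 0
fi
echo >&2 "pre-auto-gc: skipping auto gc while on battery"
exit 1
------------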
 
 post-rewrite
 ~~~~~~~~~~~~
 
-This hook is invoked by commands that rewrite commits (`git commit
---amend`, 'git-rebase'; currently 'git-filter-branch' does 'not' call
+This hook is invoked by commands that rewrite commits
+(linkgit:git-commit[1] when called with `--amend` and
+linkgit:git-rebase[1]; currently `git filter-branch` does 'not' call
 it!).  Its first argument denotes the command it was invoked by:
 currently one of `amend` or `rebase`.  Further command-dependent
 arguments may be passed in the future.
@@ -450,16 +451,16 @@ processed by rebase.
 sendemail-validate
 ~~~~~~~~~~~~~~~~~~
 
-This hook is invoked by 'git send-email'.  It takes a single parameter,
+This hook is invoked by linkgit:git-send-email[1].  It takes a single parameter,
 the name of the file that holds the e-mail to be sent.  Exiting with a
-non-zero status causes 'git send-email' to abort before sending any
+non-zero status causes `git send-email` to abort before sending any
 e-mails.
 
 fsmonitor-watchman
 ~~~~~~~~~~~~~~~~~~
 
-This hook is invoked when the configuration option core.fsmonitor is
-set to .git/hooks/fsmonitor-watchman.  It takes two arguments, a version
+This hook is invoked when the configuration option `core.fsmonitor` is
+set to `.git/hooks/fsmonitor-watchman`.  It takes two arguments, a version
 (currently 1) and the time in elapsed nanoseconds since midnight,
 January 1, 1970.
 
@@ -478,7 +479,7 @@ directories are checked for untracked files based on the path names
 given.
 
 An optimized way to tell git "all files have changed" is to return
-the filename '/'.
+the filename `/`.
 
 The exit status determines whether git will use the data from the
 hook to limit its search.  On error, it will fall back to verifying
index ca96c281d1f3abbf71cdac112a019d2849e7ad0b..244cd01493127035b27fb19416f1f4027a726bba 100644 (file)
@@ -8,7 +8,7 @@ gitk - The Git repository browser
 SYNOPSIS
 --------
 [verse]
-'gitk' [<options>] [<revision range>] [\--] [<path>...]
+'gitk' [<options>] [<revision range>] [--] [<path>...]
 
 DESCRIPTION
 -----------
index 4b8c93ec59de3db02b9914aed4955d486be5f875..9d1459aac6d0b12ad1a87ff25a158dca0f2bf470 100644 (file)
@@ -102,6 +102,14 @@ Capabilities for Pushing
 +
 Supported commands: 'connect'.
 
+'stateless-connect'::
+       Experimental; for internal use only.
+       Can attempt to connect to a remote server for communication
+       using git's wire-protocol version 2.  See the documentation
+       for the stateless-connect command for more information.
++
+Supported commands: 'stateless-connect'.
+
 'push'::
        Can discover remote refs and push local commits and the
        history leading up to them to new or existing remote refs.
@@ -136,6 +144,14 @@ Capabilities for Fetching
 +
 Supported commands: 'connect'.
 
+'stateless-connect'::
+       Experimental; for internal use only.
+       Can attempt to connect to a remote server for communication
+       using git's wire-protocol version 2.  See the documentation
+       for the stateless-connect command for more information.
++
+Supported commands: 'stateless-connect'.
+
 'fetch'::
        Can discover remote refs and transfer objects reachable from
        them to the local object store.
@@ -375,6 +391,22 @@ Supported if the helper has the "export" capability.
 +
 Supported if the helper has the "connect" capability.
 
+'stateless-connect' <service>::
+       Experimental; for internal use only.
+       Connects to the given remote service for communication using
+       git's wire-protocol version 2.  Valid replies to this command
+       are empty line (connection established), 'fallback' (no smart
+       transport support, fall back to dumb transports) and just
+       exiting with error message printed (can't connect, don't bother
+       trying to fall back).  After line feed terminating the positive
+       (empty) response, the output of the service starts.  Messages
+       (both request and response) must consist of zero or more
+       PKT-LINEs, terminating in a flush packet. The client must not
+       expect the server to store any state in between request-response
+       pairs.  After the connection ends, the remote helper exits.
++
+Supported if the helper has the "stateless-connect" capability.
+
 If a fatal error occurs, the program writes the error message to
 stderr and exits. The caller should expect that a suitable error
 message has been printed if the child closes the connection without
index c60bcad44aa581b2449a7a638b6487c1c73e8c23..e85148f05eb79a968ad84bac6ce7a88289270c49 100644 (file)
@@ -275,11 +275,6 @@ worktrees/<id>/locked::
        or manually by `git worktree prune`. The file may contain a string
        explaining why the repository is locked.
 
-worktrees/<id>/link::
-       If this file exists, it is a hard link to the linked .git
-       file. It is used to detect if the linked repository is
-       manually removed.
-
 SEE ALSO
 --------
 linkgit:git-init[1],
index 6b8888d123826179ace38660f5043d897eb5ce70..6c2d23dc489474958d39d8053fc799b8616aafee 100644 (file)
@@ -463,7 +463,7 @@ exclude;;
 [[def_push]]push::
        Pushing a <<def_branch,branch>> means to get the branch's
        <<def_head_ref,head ref>> from a remote <<def_repository,repository>>,
-       find out if it is a direct ancestor to the branch's local
+       find out if it is an ancestor to the branch's local
        head ref, and in that case, putting all
        objects, which are <<def_reachable,reachable>> from the local
        head ref, and which are missing from the remote
index 9c4cd0915fe3f3e14879184f3a474e559130bdef..8994e2559eac0c5746ca898b0bad1946a7b83298 100644 (file)
@@ -80,7 +80,7 @@ valid pack like:
     # now add our object data
     cat object >>tmp.pack
     # and then append the pack trailer
-    /path/to/git.git/test-sha1 -b <tmp.pack >trailer
+    /path/to/git.git/t/helper/test-tool sha1 -b <tmp.pack >trailer
     cat trailer >>tmp.pack
 ------------
 
index 8f60c9f43109a6f3833bd274c9123d6dc1c80a5b..7d1bd440944149bb8de03a9a202e46c85ec646f2 100644 (file)
@@ -7,6 +7,10 @@ syntax.  Here are various ways to spell object names.  The
 ones listed near the end of this list name trees and
 blobs contained in a commit.
 
+NOTE: This document shows the "raw" syntax as seen by git. The shell
+and other UIs might require additional quoting to protect special
+characters and to avoid word splitting.
+
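For instance, a revision expression that contains spaces (here a ':/<text>' search with an arbitrary message) has to be quoted so the shell passes it to Git as a single argument:

------------
$ git show ':/Fix typo'
------------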
 '<sha1>', e.g. 'dae86e1950b1277e545cee180551750029cfe735', 'dae86e'::
   The full SHA-1 object name (40-byte hexadecimal string), or
   a leading substring that is unique within the repository.
@@ -186,6 +190,8 @@ existing tag object.
   is matched. ':/!-foo' performs a negative match, while ':/!!foo' matches a
   literal '!' character, followed by 'foo'. Any other sequence beginning with
   ':/!' is reserved for now.
+  Depending on the given text, the shell's word splitting rules might
+  require additional quoting.
 
 '<rev>:<path>', e.g. 'HEAD:README', ':README', 'master:./README'::
   A suffix ':' followed by a path names the blob or tree
index 9a778b0cad02faab3ce6bd7f89c24a17de45b777..fa39ac9d719b57e0df33441b11144bede4822621 100644 (file)
@@ -47,21 +47,23 @@ will first feed the user-wide one to the callback, and then the
 repo-specific one; by overwriting, the higher-priority repo-specific
 value is left at the end).
 
-The `git_config_with_options` function lets the caller examine config
+The `config_with_options` function lets the caller examine config
 while adjusting some of the default behavior of `git_config`. It should
 almost never be used by "regular" Git code that is looking up
 configuration variables. It is intended for advanced callers like
 `git-config`, which are intentionally tweaking the normal config-lookup
 process. It takes two extra parameters:
 
-`filename`::
-If this parameter is non-NULL, it specifies the name of a file to
-parse for configuration, rather than looking in the usual files. Regular
-`git_config` defaults to `NULL`.
+`config_source`::
+If this parameter is non-NULL, it specifies the source to parse for
+configuration, rather than looking in the usual files. See `struct
+git_config_source` in `config.h` for details. Regular `git_config` defaults
+to `NULL`.
 
-`respect_includes`::
-Specify whether include directives should be followed in parsed files.
-Regular `git_config` defaults to `1`.
+`opts`::
+Specify options to adjust the behavior of parsing config files. See `struct
+config_options` in `config.h` for details. As an example: regular `git_config`
+sets `opts.respect_includes` to `1` by default.
 
 Reading Specific Files
 ----------------------
index 7fae00f44fe1798da82bfdbb902479a03d583843..4f44ca24f6457e6acf208d7068dfcd02039cf41b 100644 (file)
@@ -53,7 +53,7 @@ The notable options are:
        not be returned even if all of its contents are ignored. In
        this case, the contents are returned as individual entries.
 +
-If this is set, files and directories that explicity match an ignore
+If this is set, files and directories that explicitly match an ignore
 pattern are reported. Implicitly ignored directories (directories that
 do not match an ignore pattern, but whose contents are all ignored)
 are not reported, instead all of the contents are reported.
index a1162e5bcd19ba509fff39bceca49e1b33e2add2..5b29622d00ea61808176a27bdc2a86971d297749 100644 (file)
@@ -1,7 +1,7 @@
 object access API
 =================
 
-Talk about <sha1_file.c> and <object.h> family, things like
+Talk about <sha1-file.c> and <object.h> family, things like
 
 * read_sha1_file()
 * read_object_with_reference()
index ee907c4a82a9127c0abc67b8a5dd215a3b0535d4..fb060893931f2e74c5857c2d03b019e1fa138976 100644 (file)
@@ -38,7 +38,7 @@ Data Structures
 Functions
 ---------
 
-`void submodule_free()`::
+`void submodule_free(struct repository *r)`::
 
        Use these to free the internally cached values.
 
diff --git a/Documentation/technical/commit-graph-format.txt b/Documentation/technical/commit-graph-format.txt
new file mode 100644 (file)
index 0000000..ad6af81
--- /dev/null
@@ -0,0 +1,97 @@
+Git commit graph format
+=======================
+
+The Git commit graph stores a list of commit OIDs and some associated
+metadata, including:
+
+- The generation number of the commit. Commits with no parents have
+  generation number 1; commits with parents have generation number
+  one more than the maximum generation number of their parents. Zero
+  is reserved as a special value that can be used to mark a generation
+  number as invalid or "not computed".
+
+- The root tree OID.
+
+- The commit date.
+
+- The parents of the commit, stored using positional references within
+  the graph file.
+
+These positional references are stored as unsigned 32-bit integers
+corresponding to the array position within the list of commit OIDs. We
+use the most-significant bit for special purposes, so we can store at most
+(1 << 31) - 1 (around 2 billion) commits.
+
+== Commit graph files have the following format:
+
+In order to allow extensions that add extra data to the graph, we organize
+the body into "chunks" and provide a binary lookup table at the beginning
+of the body. The header includes certain values, such as number of chunks
+and hash type.
+
+All 4-byte numbers are in network order.
+
+HEADER:
+
+  4-byte signature:
+      The signature is: {'C', 'G', 'P', 'H'}
+
+  1-byte version number:
+      Currently, the only valid version is 1.
+
+  1-byte Hash Version (1 = SHA-1)
+      We infer the hash length (H) from this value.
+
+  1-byte number (C) of "chunks"
+
+  1-byte (reserved for later use)
+     Current clients should ignore this value.
+
+CHUNK LOOKUP:
+
+  (C + 1) * 12 bytes listing the table of contents for the chunks:
+      First 4 bytes describe the chunk id. Value 0 is a terminating label.
+      Other 8 bytes provide the byte-offset in current file for chunk to
+      start. (Chunks are ordered contiguously in the file, so you can infer
+      the length using the next chunk position if necessary.) Each chunk
+      ID appears at most once.
+
+  The remaining data in the body is described one chunk at a time, and
+  these chunks may be given in any order. Chunks are required unless
+  otherwise specified.
+
+CHUNK DATA:
+
+  OID Fanout (ID: {'O', 'I', 'D', 'F'}) (256 * 4 bytes)
+      The ith entry, F[i], stores the number of OIDs with first
+      byte at most i. Thus F[255] stores the total
+      number of commits (N).
+
+  OID Lookup (ID: {'O', 'I', 'D', 'L'}) (N * H bytes)
+      The OIDs for all commits in the graph, sorted in ascending order.
+
+  Commit Data (ID: {'C', 'G', 'E', 'T' }) (N * (H + 16) bytes)
+    * The first H bytes are for the OID of the root tree.
+    * The next 8 bytes are for the positions of the first two parents
+      of the ith commit. Stores value 0xffffffff if no parent in that
+      position. If there are more than two parents, the second value
+      has its most-significant bit on and the other bits store an array
+      position into the Large Edge List chunk.
+    * The next 8 bytes store the generation number of the commit and
+      the commit time in seconds since EPOCH. The generation number
+      uses the higher 30 bits of the first 4 bytes, while the commit
+      time uses the 32 bits of the second 4 bytes, along with the lowest
+      2 bits of the lowest byte, storing the 33rd and 34th bit of the
+      commit time.
+
+  Large Edge List (ID: {'E', 'D', 'G', 'E'}) [Optional]
+      This list of 4-byte values stores the second through nth parents for
+      all octopus merges. The second parent value in the commit data stores
+      an array position within this list along with the most-significant bit
+      on. Starting at that array position, iterate through this list of commit
+      positions for the parents until reaching a value with the most-significant
+      bit on. The other bits correspond to the position of the last parent.
+
+TRAILER:
+
+       H-byte HASH-checksum of all of the above.
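As a quick way to check the header layout described above (a sketch; it assumes a repository that already has a commit-graph file and that the common `xxd` utility is installed), dumping the first eight bytes shows the 'CGPH' signature followed by the version, hash version, chunk count and reserved byte:

------------
$ xxd -l 8 .git/objects/info/commit-graph
------------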
diff --git a/Documentation/technical/commit-graph.txt b/Documentation/technical/commit-graph.txt
new file mode 100644 (file)
index 0000000..0550c6d
--- /dev/null
@@ -0,0 +1,163 @@
+Git Commit Graph Design Notes
+=============================
+
+Git walks the commit graph for many reasons, including:
+
+1. Listing and filtering commit history.
+2. Computing merge bases.
+
+These operations can become slow as the commit count grows. The merge
+base calculation shows up in many user-facing commands, such as 'merge-base'
+or 'status' and can take minutes to compute depending on history shape.
+
+There are two main costs here:
+
+1. Decompressing and parsing commits.
+2. Walking the entire graph to satisfy topological order constraints.
+
+The commit graph file is a supplemental data structure that accelerates
+commit graph walks. If a user downgrades or disables the 'core.commitGraph'
+config setting, then the existing ODB is sufficient. The file is stored
+as "commit-graph" either in the .git/objects/info directory or in the info
+directory of an alternate.
+
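A minimal sketch of opting in and generating the file with the 'commit-graph' subcommand:

------------
$ git config core.commitGraph true
$ git commit-graph write
------------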
+The commit graph file stores the commit graph structure along with some
+extra metadata to speed up graph walks. By listing commit OIDs in
+lexicographic order, we can identify an integer position for each commit and
+refer to the parents of a commit using those integer positions. We use
+binary search to find initial commits and then use the integer positions
+for fast lookups during the walk.
+
+A consumer may load the following info for a commit from the graph:
+
+1. The commit OID.
+2. The list of parents, along with their integer position.
+3. The commit date.
+4. The root tree OID.
+5. The generation number (see definition below).
+
+Values 1-4 satisfy the requirements of parse_commit_gently().
+
+Define the "generation number" of a commit recursively as follows:
+
+ * A commit with no parents (a root commit) has generation number one.
+
+ * A commit with at least one parent has generation number one more than
+   the largest generation number among its parents.
+
+Equivalently, the generation number of a commit A is one more than the
+length of a longest path from A to a root commit. The recursive definition
+is easier to use for computation and for observing the following property:
+
+    If A and B are commits with generation numbers N and M, respectively,
+    and N <= M, then A cannot reach B. That is, we know without searching
+    that B is not an ancestor of A because it is further from a root commit
+    than A.
+
+    Conversely, when checking if A is an ancestor of B, then we only need
+    to walk commits until all commits on the walk boundary have generation
+    number at most N. If we walk commits using a priority queue seeded by
+    generation numbers, then we always expand the boundary commit with highest
+    generation number and can easily detect the stopping condition.
+
+This property can be used to significantly reduce the time it takes to
+walk commits and determine topological relationships. Without generation
+numbers, the general heuristic is the following:
+
+    If A and B are commits with commit time X and Y, respectively, and
+    X < Y, then A _probably_ cannot reach B.
+
+This heuristic is currently used whenever the computation is allowed to
+violate topological relationships due to clock skew (such as "git log"
+with default order), but is not used when the topological order is
+required (such as merge base calculations, "git log --graph").
+
+In practice, we expect some commits to be created recently and not stored
+in the commit graph. We can treat these commits as having "infinite"
+generation number and walk until reaching commits with known generation
+number.
+
+Design Details
+--------------
+
+- The commit graph file is stored in a file named 'commit-graph' in the
+  .git/objects/info directory. This could be stored in the info directory
+  of an alternate.
+
+- The core.commitGraph config setting must be on to consume graph files.
+
+- The file format includes parameters for the object ID hash function,
+  so a future change of hash algorithm does not require a change in format.
+
+Future Work
+-----------
+
+- The commit graph feature currently does not honor commit grafts. This can
+  be remedied by duplicating or refactoring the current graft logic.
+
+- The 'commit-graph' subcommand does not yet have a "verify" mode, which
+  is necessary for integration with fsck.
+
+- The file format includes room for precomputed generation numbers. These
+  are not currently computed, so all generation numbers will be marked as
+  0 (or "uncomputed"). A later patch will include this calculation.
+
+- After computing and storing generation numbers, we must make graph
+  walks aware of generation numbers to gain the performance benefits they
+  enable. This will mostly be accomplished by swapping a commit-date-ordered
+  priority queue with one ordered by generation number. The following
+  operations are important candidates:
+
+    - paint_down_to_common()
+    - 'log --topo-order'
+
+- Currently, parse_commit_gently() requires filling in the root tree
+  object for a commit. This passes through lookup_tree() and consequently
+  lookup_object(). Also, it calls lookup_commit() when loading the parents.
+  These method calls check the ODB for object existence, even if the
+  consumer does not need the content. For example, we do not need the
+  tree contents when computing merge bases. Now that commit parsing is
+  removed from the computation time, these lookup operations are the
+  slowest operations keeping graph walks from being fast. Consider
+  loading these objects without verifying their existence in the ODB and
+  only loading them fully when consumers need them. Consider a method
+  such as "ensure_tree_loaded(commit)" that fully loads a tree before
+  using commit->tree.
+
+- The current design uses the 'commit-graph' subcommand to generate the graph.
+  When this feature stabilizes enough to recommend to most users, we should
+  add automatic graph writes to common operations that create many commits.
+  For example, one could compute a graph on 'clone', 'fetch', or 'repack'
+  commands.
+
+- A server could provide a commit graph file as part of the network protocol
+  to avoid extra calculations by clients. This feature is only of benefit if
+  the user is willing to trust the file, because verifying the file is correct
+  is as hard as computing it from scratch.
+
+Related Links
+-------------
+[0] https://bugs.chromium.org/p/git/issues/detail?id=8
+    Chromium work item for: Serialized Commit Graph
+
+[1] https://public-inbox.org/git/20110713070517.GC18566@sigill.intra.peff.net/
+    An abandoned patch that introduced generation numbers.
+
+[2] https://public-inbox.org/git/20170908033403.q7e6dj7benasrjes@sigill.intra.peff.net/
+    Discussion about generation numbers on commits and how they interact
+    with fsck.
+
+[3] https://public-inbox.org/git/20170908034739.4op3w4f2ma5s65ku@sigill.intra.peff.net/
+    More discussion about generation numbers and not storing them inside
+    commit objects. A valuable quote:
+
+    "I think we should be moving more in the direction of keeping
+     repo-local caches for optimizations. Reachability bitmaps have been
+     a big performance win. I think we should be doing the same with our
+     properties of commits. Not just generation numbers, but making it
+     cheap to access the graph structure without zlib-inflating whole
+     commit objects (i.e., packv4 or something like the "metapacks" I
+     proposed a few years ago)."
+
+[4] https://public-inbox.org/git/20180108154822.54829-1-git@jeffhostetler.com/T/#u
+    A patch to remove the ahead-behind calculation from 'status'.
index 417ba491d0f3fa06b60a47623a5f756919adcbd6..4ab6cd1012abae711acf02e6a76ee93eae86a1ad 100644 (file)
@@ -28,11 +28,30 @@ advantages:
   address stored content.
 
 Over time some flaws in SHA-1 have been discovered by security
-researchers. https://shattered.io demonstrated a practical SHA-1 hash
-collision. As a result, SHA-1 cannot be considered cryptographically
-secure any more. This impacts the communication of hash values because
-we cannot trust that a given hash value represents the known good
-version of content that the speaker intended.
+researchers. On 23 February 2017 the SHAttered attack
+(https://shattered.io) demonstrated a practical SHA-1 hash collision.
+
+Git v2.13.0 and later subsequently moved to a hardened SHA-1
+implementation by default, which isn't vulnerable to the SHAttered
+attack.
+
+Thus Git has in effect already migrated to a new hash that isn't SHA-1
+and doesn't share its vulnerabilities; its new hash function just
+happens to produce exactly the same output for all known inputs,
+except two PDFs published by the SHAttered researchers, and the new
+implementation (written by those researchers) claims to detect future
+cryptanalytic collision attacks.
+
+Regardless, it's considered prudent to move past any variant of SHA-1
+to a new hash. There's no guarantee that further attacks on SHA-1 won't
+be published, and those attacks may not have viable
+mitigations.
+
+If SHA-1 and its variants were to be truly broken, Git's hash function
+could not be considered cryptographically secure any more. This would
+impact the communication of hash values because we could not trust
+that a given hash value represented the known good version of content
+that the speaker intended.
 
 SHA-1 still possesses the other properties such as fast object lookup
 and safe error checking, but other hash functions are equally suitable
@@ -116,10 +135,15 @@ Documentation/technical/repository-version.txt) with extensions
                objectFormat = newhash
                compatObjectFormat = sha1
 
-Specifying a repository format extension ensures that versions of Git
-not aware of NewHash do not try to operate on these repositories,
-instead producing an error message:
+The combination of setting `core.repositoryFormatVersion=1` and
+populating `extensions.*` ensures that all versions of Git later than
+`v0.99.9l` will die instead of trying to operate on the NewHash
+repository, producing an error message:
 
+       # Between v0.99.9l and v2.7.0
+       $ git status
+       fatal: Expected git repo version <= 0, found 1
+       # After v2.7.0
        $ git status
        fatal: unknown repository extensions found:
                objectformat
index 8e5bf60be3f0689d61feb8ce43cb379b2417fd8f..70a99fd1423894255f5e0e8cdbb345276620ffde 100644 (file)
@@ -36,6 +36,98 @@ Git pack format
 
   - The trailer records 20-byte SHA-1 checksum of all of the above.
 
+=== Object types
+
+Valid object types are:
+
+- OBJ_COMMIT (1)
+- OBJ_TREE (2)
+- OBJ_BLOB (3)
+- OBJ_TAG (4)
+- OBJ_OFS_DELTA (6)
+- OBJ_REF_DELTA (7)
+
+Type 5 is reserved for future expansion. Type 0 is invalid.
+
+=== Deltified representation
+
+Conceptually there are only four object types: commit, tree, tag and
+blob. However to save space, an object could be stored as a "delta" of
+another "base" object. These representations are assigned new types
+ofs-delta and ref-delta, which are only valid in a pack file.
+
+Both ofs-delta and ref-delta store the "delta" to be applied to
+another object (called 'base object') to reconstruct the object. The
+difference between them is that ref-delta directly encodes the 20-byte
+base object name, while ofs-delta encodes the offset of the base object
+in the pack instead (and therefore requires the base object to be in the
+same pack).
+
+The base object could also be deltified if it's in the same pack.
+Ref-delta can also refer to an object outside the pack (i.e. the
+so-called "thin pack"). When stored on disk, however, the pack should
+be self-contained to avoid cyclic dependencies.
+
+The delta data is a sequence of instructions to reconstruct an object
+from the base object. If the base object is deltified, it must be
+converted to canonical form first. Each instruction appends more and
+more data to the target object until it's complete. There are two
+supported instructions so far: one for copying a byte range from the
+source object and one for inserting new data embedded in the
+instruction itself.
+
+Each instruction has variable length. Instruction type is determined
+by the seventh bit of the first octet. The following diagrams follow
+the convention in RFC 1951 (Deflate compressed data format).
+
+==== Instruction to copy from base object
+
+  +----------+---------+---------+---------+---------+-------+-------+-------+
+  | 1xxxxxxx | offset1 | offset2 | offset3 | offset4 | size1 | size2 | size3 |
+  +----------+---------+---------+---------+---------+-------+-------+-------+
+
+This is the instruction format to copy a byte range from the source
+object. It encodes the offset to copy from and the number of bytes to
+copy. Offset and size are in little-endian order.
+
+All offset and size bytes are optional. This is to reduce the
+instruction size when encoding small offsets or sizes. The first seven
+bits in the first octet determine which of the next seven octets are
+present. If bit zero is set, offset1 is present. If bit one is set,
+offset2 is present, and so on.
+
+Note that a more compact instruction does not change offset and size
+encoding. For example, if only offset2 is omitted like below, offset3
+still contains bits 16-23. It does not become offset2 and contains
+bits 8-15 even if it's right next to offset1.
+
+  +----------+---------+---------+
+  | 10000101 | offset1 | offset3 |
+  +----------+---------+---------+
+
+In its most compact form, this instruction only takes up one byte
+(0x80) with both offset and size omitted, which will have default
+values zero. There is another exception: size zero is automatically
+converted to 0x10000.
+
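As a worked example (an illustration, not part of the format itself): the instruction below has bit zero (offset1) and bit four (size1) set, so it means "copy 3 bytes starting at offset 232 of the base object".

  +----------+------+------+
  | 10010001 | 0xe8 | 0x03 |
  +----------+------+------+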
+==== Instruction to add new data
+
+  +----------+============+
+  | 0xxxxxxx |    data    |
+  +----------+============+
+
+This is the instruction to construct the target object without the base
+object. The following data is appended to the target object. The first
+seven bits of the first octet determine the size of data in
+bytes. The size must be non-zero.
+
+==== Reserved instruction
+
+  +----------+============
+  | 00000000 |
+  +----------+============
+
+This is the instruction reserved for future expansion.
+
 == Original (version 1) pack-*.idx files have the following format:
 
   - The header consists of 256 4-byte network byte order
diff --git a/Documentation/technical/protocol-v2.txt b/Documentation/technical/protocol-v2.txt
new file mode 100644 (file)
index 0000000..d7b6f38
--- /dev/null
@@ -0,0 +1,405 @@
+ Git Wire Protocol, Version 2
+==============================
+
+This document presents a specification for a version 2 of Git's wire
+protocol.  Protocol v2 will improve upon v1 in the following ways:
+
+  * Instead of multiple service names, multiple commands will be
+    supported by a single service
+  * Easily extendable as capabilities are moved into their own section
+    of the protocol, no longer being hidden behind a NUL byte and
+    limited by the size of a pkt-line
+  * Separate out other information hidden behind NUL bytes (e.g. agent
+    string as a capability and symrefs can be requested using 'ls-refs')
+  * Reference advertisement will be omitted unless explicitly requested
+  * ls-refs command to explicitly request some refs
+  * Designed with http and stateless-rpc in mind.  With clear flush
+    semantics the http remote helper can simply act as a proxy
+
+In protocol v2 communication is command oriented.  When first contacting a
+server a list of capabilities will be advertised.  Some of these capabilities
+will be commands which a client can request be executed.  Once a command
+has completed, a client can reuse the connection and request that other
+commands be executed.
+
+ Packet-Line Framing
+---------------------
+
+All communication is done using packet-line framing, just as in v1.  See
+`Documentation/technical/pack-protocol.txt` and
+`Documentation/technical/protocol-common.txt` for more information.
+
+In protocol v2 these special packets will have the following semantics:
+
+  * '0000' Flush Packet (flush-pkt) - indicates the end of a message
+  * '0001' Delimiter Packet (delim-pkt) - separates sections of a message
+
+ Initial Client Request
+------------------------
+
+In general a client can request to speak protocol v2 by sending
+`version=2` through the respective side-channel for the transport being
+used which inevitably sets `GIT_PROTOCOL`.  More information can be
+found in `pack-protocol.txt` and `http-protocol.txt`.  In all cases the
+response from the server is the capability advertisement.
+
+ Git Transport
+~~~~~~~~~~~~~~~
+
+When using the git:// transport, you can request to use protocol v2 by
+sending "version=2" as an extra parameter:
+
+   003egit-upload-pack /project.git\0host=myserver.com\0\0version=2\0
+
+ SSH and File Transport
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+When using either the ssh:// or file:// transport, the GIT_PROTOCOL
+environment variable must be set explicitly to include "version=2".
+
+ HTTP Transport
+~~~~~~~~~~~~~~~~
+
+When using the http:// or https:// transport a client makes a "smart"
+info/refs request as described in `http-protocol.txt` and requests that
+v2 be used by supplying "version=2" in the `Git-Protocol` header.
+
+   C: Git-Protocol: version=2
+   C:
+   C: GET $GIT_URL/info/refs?service=git-upload-pack HTTP/1.0
+
+A v2 server would reply:
+
+   S: 200 OK
+   S: <Some headers>
+   S: ...
+   S:
+   S: 000eversion 2\n
+   S: <capability-advertisement>
+
+Subsequent requests are then made directly to the service
+`$GIT_URL/git-upload-pack`. (This works the same for git-receive-pack).
+
+ Capability Advertisement
+--------------------------
+
+A server which decides to communicate (based on a request from a client)
+using protocol version 2, notifies the client by sending a version string
+in its initial response followed by an advertisement of its capabilities.
+Each capability is a key with an optional value.  Clients must ignore all
+unknown keys.  Semantics of unknown values are left to the definition of
+each key.  Some capabilities will describe commands which can be requested
+to be executed by the client.
+
+    capability-advertisement = protocol-version
+                              capability-list
+                              flush-pkt
+
+    protocol-version = PKT-LINE("version 2" LF)
+    capability-list = *capability
+    capability = PKT-LINE(key[=value] LF)
+
+    key = 1*(ALPHA | DIGIT | "-_")
+    value = 1*(ALPHA | DIGIT | " -_.,?\/{}[]()<>!@#$%^&*+=:;")
+
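One way to observe such an advertisement in practice (a sketch; it requires a server that already speaks v2) is to enable packet tracing while forcing protocol version 2 on the client:

   GIT_TRACE_PACKET=1 git -c protocol.version=2 ls-remote <url>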
+ Command Request
+-----------------
+
+After receiving the capability advertisement, a client can then issue a
+request to select the command it wants with any particular capabilities
+or arguments.  There is then an optional section where the client can
+provide any command specific parameters or queries.  Only a single
+command can be requested at a time.
+
+    request = empty-request | command-request
+    empty-request = flush-pkt
+    command-request = command
+                     capability-list
+                     [command-args]
+                     flush-pkt
+    command = PKT-LINE("command=" key LF)
+    command-args = delim-pkt
+                  *command-specific-arg
+
+    command-specific-args are packet line framed arguments defined by
+    each individual command.
+
+The server will then check to ensure that the client's request is
+comprised of a valid command as well as valid capabilities which were
+advertised.  If the request is valid the server will then execute the
+command.  A server MUST wait till it has received the client's entire
+request before issuing a response.  The format of the response is
+determined by the command being executed, but in all cases a flush-pkt
+indicates the end of the response.
+
+When a command has finished, and the client has received the entire
+response from the server, a client can either request that another
+command be executed or can terminate the connection.  A client may
+optionally send an empty request consisting of just a flush-pkt to
+indicate that no more requests will be made.
+
+ Capabilities
+--------------
+
+There are two different types of capabilities: normal capabilities,
+which can be used to convey information or alter the behavior of a
+request, and commands, which are the core actions that a client wants to
+perform (fetch, push, etc).
+
+Protocol version 2 is stateless by default.  This means that all commands
+must only last a single round and be stateless from the perspective of the
+server side, unless the client has requested a capability indicating that
+state should be maintained by the server.  Clients MUST NOT require state
+management on the server side in order to function correctly.  This
+permits simple round-robin load-balancing on the server side, without
+needing to worry about state management.
+
+ agent
+~~~~~~~
+
+The server can advertise the `agent` capability with a value `X` (in the
+form `agent=X`) to notify the client that the server is running version
+`X`.  The client may optionally send its own agent string by including
+the `agent` capability with a value `Y` (in the form `agent=Y`) in its
+request to the server (but it MUST NOT do so if the server did not
+advertise the agent capability). The `X` and `Y` strings may contain any
+printable ASCII characters except space (i.e., the byte range 32 < x <
+127), and are typically of the form "package/version" (e.g.,
+"git/1.8.3.1"). The agent strings are purely informative for statistics
+and debugging purposes, and MUST NOT be used to programmatically assume
+the presence or absence of particular features.
+
+ ls-refs
+~~~~~~~~~
+
+`ls-refs` is the command used to request a reference advertisement in v2.
+Unlike the current reference advertisement, ls-refs takes in arguments
+which can be used to limit the refs sent from the server.
+
+Additional features not supported in the base command will be advertised
+as the value of the command in the capability advertisement in the form
+of a space separated list of features: "<command>=<feature 1> <feature 2>"
+
+ls-refs takes in the following arguments:
+
+    symrefs
+       In addition to the object it points to, show the underlying ref
+       it points to when showing a symbolic ref.
+    peel
+       Show peeled tags.
+    ref-prefix <prefix>
+       When specified, only references having a prefix matching one of
+       the provided prefixes are displayed.
+
+The output of ls-refs is as follows:
+
+    output = *ref
+            flush-pkt
+    ref = PKT-LINE(obj-id SP refname *(SP ref-attribute) LF)
+    ref-attribute = (symref | peeled)
+    symref = "symref-target:" symref-target
+    peeled = "peeled:" obj-id
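+
+For example, assuming the 'symrefs' and 'peel' arguments were given, the
+output for a repository with one branch and one annotated tag might look
+like this (object ids are shortened, hypothetical placeholders; real
+output carries full object names):
+
+    1111111 HEAD symref-target:refs/heads/master
+    1111111 refs/heads/master
+    2222222 refs/tags/v1.0 peeled:1111111
+    0000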
+
+ fetch
+~~~~~~~
+
+`fetch` is the command used to fetch a packfile in v2.  It can be looked
+at as a modified version of the v1 fetch where the ref-advertisement is
+stripped out (since the `ls-refs` command fills that role) and the
+message format is tweaked to eliminate redundancies and permit easy
+addition of future extensions.
+
+Additional features not supported in the base command will be advertised
+as the value of the command in the capability advertisement in the form
+of a space separated list of features: "<command>=<feature 1> <feature 2>"
+
+A `fetch` request can take the following arguments:
+
+    want <oid>
+       Indicates to the server an object which the client wants to
+       retrieve.  Wants can be anything and are not limited to
+       advertised objects.
+
+    have <oid>
+       Indicates to the server an object which the client has locally.
+       This allows the server to make a packfile which only contains
+       the objects that the client needs. Multiple 'have' lines can be
+       supplied.
+
+    done
+       Indicates to the server that negotiation should terminate (or
+       not even begin if performing a clone) and that the server should
+       use the information supplied in the request to construct the
+       packfile.
+
+    thin-pack
+       Request that a thin pack be sent, which is a pack with deltas
+       that reference base objects not contained within the pack (but
+       known to exist at the receiving end). This can reduce the
+       network traffic significantly, but it requires the receiving end
+       to know how to "thicken" these packs by adding the missing bases
+       to the pack.
+
+    no-progress
+       Request that progress information that would normally be sent on
+       side-band channel 2, during the packfile transfer, should not be
+       sent.  However, the side-band channel 3 is still used for error
+       responses.
+
+    include-tag
+       Request that annotated tags should be sent if the objects they
+       point to are being sent.
+
+    ofs-delta
+       Indicate that the client understands PACKv2 with delta referring
+       to its base by position in pack rather than by an oid.  That is,
+       it can read OBJ_OFS_DELTA (aka type 6) in a packfile.
+
+If the 'shallow' feature is advertised, the following arguments can be
+included in the client's request, and the 'shallow-info' section may
+additionally appear in the server's response, as explained below.
+
+    shallow <oid>
+       A client must notify the server of all commits for which it only
+       has shallow copies (meaning that it doesn't have the parents of
+       a commit) by supplying a 'shallow <oid>' line for each such
+       object so that the server is aware of the limitations of the
+       client's history, i.e. that the client may not have all objects
+       reachable from such commits.
+
+    deepen <depth>
+       Requests that the fetch/clone should be shallow having a commit
+       depth of <depth> relative to the remote side.
+
+    deepen-relative
+       Requests that the semantics of the "deepen" command be changed
+       to indicate that the depth requested is relative to the client's
+       current shallow boundary, instead of relative to the requested
+       commits.
+
+    deepen-since <timestamp>
+       Requests that the shallow clone/fetch should be cut at a
+       specific time, instead of a depth.  Internally it's equivalent to
+       doing "git rev-list --max-age=<timestamp>". Cannot be used with
+       "deepen".
+
+    deepen-not <rev>
+       Requests that the shallow clone/fetch should be cut at a
+       specific revision specified by '<rev>', instead of a depth.
+       Internally it's equivalent to doing "git rev-list --not <rev>".
+       Cannot be used with "deepen", but can be used with
+       "deepen-since".
+
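+As an illustration, a single negotiation request might look like the
+following (pkt-line length prefixes omitted; object ids shortened and
+hypothetical, where real requests use full object names):
+
+    command=fetch
+    0001
+    thin-pack
+    ofs-delta
+    want 1111111
+    have 2222222
+    done
+    0000
+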
+The response of `fetch` is broken into a number of sections separated by
+delimiter packets (0001), with each section beginning with its section
+header.
+
+    output = *section
+    section = (acknowledgments | shallow-info | packfile)
+             (flush-pkt | delim-pkt)
+
+    acknowledgments = PKT-LINE("acknowledgments" LF)
+                     (nak | *ack)
+                     (ready)
+    ready = PKT-LINE("ready" LF)
+    nak = PKT-LINE("NAK" LF)
+    ack = PKT-LINE("ACK" SP obj-id LF)
+
+    shallow-info = PKT-LINE("shallow-info" LF)
+                  *PKT-LINE((shallow | unshallow) LF)
+    shallow = "shallow" SP obj-id
+    unshallow = "unshallow" SP obj-id
+
+    packfile = PKT-LINE("packfile" LF)
+              *PKT-LINE(%x01-03 *%x00-ff)
+
+    acknowledgments section
+       * If the client determines that it is finished with negotiations
+         by sending a "done" line, the acknowledgments section MUST be
+         omitted from the server's response.
+
+       * Always begins with the section header "acknowledgments"
+
+       * The server will respond with "NAK" if none of the object ids sent
+         as have lines were common.
+
+       * The server will respond with "ACK obj-id" for all of the
+         object ids sent as have lines which are common.
+
+       * A response cannot have both "ACK" lines as well as a "NAK"
+         line.
+
+       * The server will respond with a "ready" line indicating that
+         the server has found an acceptable common base and is ready to
+         make and send a packfile (which will be found in the packfile
+         section of the same response).
+
+       * If the server has found a suitable cut point and has decided
+         to send a "ready" line, then the server can decide to (as an
+         optimization) omit any "ACK" lines it would have sent during
+         its response.  This is because the server will have already
+         determined the objects it plans to send to the client and no
+         further negotiation is needed.
+
+    shallow-info section
+       * If the client has requested a shallow fetch/clone, a shallow
+         client has requested a fetch, or the server is shallow, then
+         the server's response may include a shallow-info section.  The
+         shallow-info section will be included if (due to one of the
+         above conditions) the server needs to inform the client of any
+         shallow boundaries or adjustments to the client's already
+         existing shallow boundaries.
+
+       * Always begins with the section header "shallow-info"
+
+       * If a positive depth is requested, the server will compute the
+         set of commits which are no deeper than the desired depth.
+
+       * The server sends a "shallow obj-id" line for each commit whose
+         parents will not be sent in the following packfile.
+
+       * The server sends an "unshallow obj-id" line for each commit
+         which the client has indicated is shallow, but is no longer
+         shallow as a result of the fetch (due to its parents being
+         sent in the following packfile).
+
+       * The server MUST NOT send any "unshallow" lines for anything
+         which the client has not indicated was shallow as a part of
+         its request.
+
+       * This section is only included if a packfile section is also
+         included in the response.
+
+    packfile section
+       * This section is only included if the client has sent 'want'
+         lines in its request and either requested that no more
+         negotiation be done by sending 'done' or if the server has
+         decided it has found a sufficient cut point to produce a
+         packfile.
+
+       * Always begins with the section header "packfile"
+
+       * The transmission of the packfile begins immediately after the
+         section header.
+
+       * The data transfer of the packfile is always multiplexed, using
+         the same semantics of the 'side-band-64k' capability from
+         protocol version 1.  This means that each packet, during the
+         packfile data stream, is made up of a leading 4-byte pkt-line
+         length (typical of the pkt-line format), followed by a 1-byte
+         stream code, followed by the actual data.
+
+         The stream code can be one of:
+               1 - pack data
+               2 - progress messages
+               3 - fatal error message just before stream aborts
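+
+As an illustration, a response to a request that sent "have" lines but
+no "done" line might look like the following (pkt-line length prefixes
+omitted, object ids shortened and hypothetical, and packfile bytes
+elided):
+
+    acknowledgments
+    ACK 2222222
+    ready
+    0001
+    packfile
+    <pkt-lines carrying pack data on stream code 1>
+    0000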
+
+ server-option
+~~~~~~~~~~~~~~~
+
+If advertised, indicates that any number of server-specific options can be
+included in a request.  This is done by sending each option as a
+"server-option=<option>" capability line in the capability-list section of
+a request.
+
+The provided options must not contain a NUL or LF character.
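+
+For example, a client might include the following capability lines in
+its request (the option names and values are purely illustrative; their
+meaning is entirely defined by the server):
+
+    server-option=verbosity=quiet
+    server-option=trace=on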
index 5183b154229d7c25f0feab3d0afcd1d52ca652e0..01dedfe9ffedc28c2c6ac5ef15dbb3c5fe519cc0 100644 (file)
@@ -8,20 +8,22 @@ repo, and therefore grafts are introduced pretending that
 these commits have no parents.
 *********************************************************
 
-The basic idea is to write the SHA-1s of shallow commits into
-$GIT_DIR/shallow, and handle its contents like the contents
-of $GIT_DIR/info/grafts (with the difference that shallow
-cannot contain parent information).
-
-This information is stored in a new file instead of grafts, or
-even the config, since the user should not touch that file
-at all (even throughout development of the shallow clone, it
-was never manually edited!).
+$GIT_DIR/shallow lists commit object names and tells Git to
+pretend as if they are root commits (e.g. "git log" traversal
+stops after showing them; "git fsck" does not complain saying
+the commits listed on their "parent" lines do not exist).
 
 Each line contains exactly one SHA-1. When read, a commit_graft
 will be constructed, which has nr_parent < 0 to make it easier
 to discern from user provided grafts.
 
+Note that the shallow feature could not be changed easily to
+use replace refs: a commit containing a `mergetag` is not allowed
+to be replaced, not even by a root commit. Such a commit can be
+made shallow, though. Also, having a `shallow` file explicitly
+listing all the commits made shallow makes it a *lot* easier to
+do shallow-specific things such as to deepen the history.
+
 Since fsck-objects relies on the library to read the objects,
 it honours shallow commits automatically.
 
index 1b4624c876dae8f38f7c9e13f82d11b6ead39c9b..12ff59c2c78565a1ad9c491147d2b8e0615aa1e5 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 GVF=GIT-VERSION-FILE
-DEF_VER=v2.17.0
+DEF_VER=v2.17.GIT
 
 LF='
 '
index a1d8775adb4b38a0340cd7d04184915f0ee65d28..ad880d1fc57212fc6b47aeea792a58129b61238e 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -29,10 +29,10 @@ all::
 # Perl-compatible regular expressions instead of standard or extended
 # POSIX regular expressions.
 #
-# Currently USE_LIBPCRE is a synonym for USE_LIBPCRE1, define
-# USE_LIBPCRE2 instead if you'd like to use version 2 of the PCRE
-# library. The USE_LIBPCRE flag will likely be changed to mean v2 by
-# default in future releases.
+# USE_LIBPCRE is a synonym for USE_LIBPCRE2, define USE_LIBPCRE1
+# instead if you'd like to use the legacy version 1 of the PCRE
+# library. Support for version 1 will likely be removed in some future
+# release of Git, as upstream has all but abandoned it.
 #
 # When using USE_LIBPCRE1, define NO_LIBPCRE1_JIT if the PCRE v1
 # library is compiled without --enable-jit. We will auto-detect
@@ -335,6 +335,13 @@ all::
 # when hardlinking a file to another name and unlinking the original file right
 # away (some NTFS drivers seem to zero the contents in that scenario).
 #
+# Define INSTALL_SYMLINKS if you prefer to have everything that can be
+# symlinked between bin/ and libexec/ to use relative symlinks between
+# the two. This option overrides NO_CROSS_DIRECTORY_HARDLINKS and
+# NO_INSTALL_HARDLINKS, which will also use symlinking by indirection
+# within the same directory in some cases; INSTALL_SYMLINKS will
+# always symlink to the final target directly.
+#
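+# For example, "make install INSTALL_SYMLINKS=YesPlease" (any non-empty
+# value enables it) is one illustrative way to request this behavior.
+#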
 # Define NO_CROSS_DIRECTORY_HARDLINKS if you plan to distribute the installed
 # programs as a tar, where bin/ and libexec/ might be on different file systems.
 #
@@ -434,6 +441,49 @@ all::
 #
 # When cross-compiling, define HOST_CPU as the canonical name of the CPU on
 # which the built Git will run (for instance "x86_64").
+#
+# Define RUNTIME_PREFIX to configure Git to resolve its ancillary tooling and
+# support files relative to the location of the runtime binary, rather than
+# hard-coding them into the binary. Git installations built with RUNTIME_PREFIX
+# can be moved to arbitrary filesystem locations. RUNTIME_PREFIX also causes
+# Perl scripts to use a modified entry point header allowing them to resolve
+# support files at runtime.
+#
+# When using RUNTIME_PREFIX, define HAVE_BSD_KERN_PROC_SYSCTL if your platform
+# supports the KERN_PROC BSD sysctl function.
+#
+# When using RUNTIME_PREFIX, define PROCFS_EXECUTABLE_PATH if your platform
+# mounts a "procfs" filesystem capable of resolving the path of the current
+# executable. If defined, this must be the canonical path for the "procfs"
+# current executable path.
+#
+# When using RUNTIME_PREFIX, define HAVE_NS_GET_EXECUTABLE_PATH if your platform
+# supports calling _NSGetExecutablePath to retrieve the path of the running
+# executable.
+#
+# When using RUNTIME_PREFIX, define HAVE_WPGMPTR if your platform offers
+# the global variable _wpgmptr containing the absolute path of the current
+# executable (this is the case on Windows).
+#
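+# As an illustration, a relocatable build might be requested with
+# something like "make RUNTIME_PREFIX=YesPlease" (any non-empty value),
+# keeping gitexecdir, localedir and perllibdir as relative paths.
+#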
+# Define DEVELOPER to enable more compiler warnings. Compiler version
+# and family are auto-detected, but can be overridden by defining
+# COMPILER_FEATURES (see config.mak.dev).
+#
+# When DEVELOPER is set, DEVOPTS can be used to control compiler
+# options.  This variable contains keywords separated by
+# whitespace. The following keywords are recognized:
+#
+#    no-error:
+#
+#        suppresses the -Werror that implicitly comes with
+#        DEVELOPER=1. Useful for getting the full set of errors
+#        without immediately dying, or for logging them.
+#
+#    extra-all:
+#
+#        The DEVELOPER mode enables -Wextra with a few exceptions. By
+#        setting this flag the exceptions are removed, and all of
+#        -Wextra is used.
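+#
+# For example, an illustrative developer build could be invoked as
+# "make DEVELOPER=1 DEVOPTS='no-error extra-all'".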
 
 GIT-VERSION-FILE: FORCE
        @$(SHELL_PATH) ./GIT-VERSION-GEN
@@ -442,15 +492,6 @@ GIT-VERSION-FILE: FORCE
 # CFLAGS and LDFLAGS are for the users to override from the command line.
 
 CFLAGS = -g -O2 -Wall
-DEVELOPER_CFLAGS = -Werror \
-       -Wdeclaration-after-statement \
-       -Wno-format-zero-length \
-       -Wold-style-definition \
-       -Woverflow \
-       -Wpointer-arith \
-       -Wstrict-prototypes \
-       -Wunused \
-       -Wvla
 LDFLAGS =
 ALL_CFLAGS = $(CPPFLAGS) $(CFLAGS)
 ALL_LDFLAGS = $(LDFLAGS)
@@ -471,11 +512,12 @@ ARFLAGS = rcs
 #   mandir
 #   infodir
 #   htmldir
+#   localedir
+#   perllibdir
 # This can help installing the suite in a relocatable way.
 
 prefix = $(HOME)
-bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
+bindir = $(prefix)/bin
 mandir = $(prefix)/share/man
 infodir = $(prefix)/share/info
 gitexecdir = libexec/git-core
@@ -492,9 +534,13 @@ lib = lib
 # DESTDIR =
 pathsep = :
 
+bindir_relative = $(patsubst $(prefix)/%,%,$(bindir))
 mandir_relative = $(patsubst $(prefix)/%,%,$(mandir))
 infodir_relative = $(patsubst $(prefix)/%,%,$(infodir))
+gitexecdir_relative = $(patsubst $(prefix)/%,%,$(gitexecdir))
+localedir_relative = $(patsubst $(prefix)/%,%,$(localedir))
 htmldir_relative = $(patsubst $(prefix)/%,%,$(htmldir))
+perllibdir_relative = $(patsubst $(prefix)/%,%,$(perllibdir))
 
 export prefix bindir sharedir sysconfdir gitwebdir perllibdir localedir
 
@@ -546,6 +592,7 @@ SCRIPT_PERL =
 SCRIPT_PYTHON =
 SCRIPT_SH =
 SCRIPT_LIB =
+TEST_BUILTINS_OBJS =
 TEST_PROGRAMS_NEED_X =
 
 # Having this variable in your environment would break pipelines because
@@ -643,7 +690,6 @@ PROGRAM_OBJS += imap-send.o
 PROGRAM_OBJS += sh-i18n--envsubst.o
 PROGRAM_OBJS += shell.o
 PROGRAM_OBJS += show-index.o
-PROGRAM_OBJS += upload-pack.o
 PROGRAM_OBJS += remote-testsvn.o
 
 # Binary suffix, set to .exe for Windows builds
@@ -651,47 +697,50 @@ X =
 
 PROGRAMS += $(patsubst %.o,git-%$X,$(PROGRAM_OBJS))
 
-TEST_PROGRAMS_NEED_X += test-chmtime
-TEST_PROGRAMS_NEED_X += test-ctype
-TEST_PROGRAMS_NEED_X += test-config
-TEST_PROGRAMS_NEED_X += test-date
-TEST_PROGRAMS_NEED_X += test-delta
-TEST_PROGRAMS_NEED_X += test-drop-caches
-TEST_PROGRAMS_NEED_X += test-dump-cache-tree
+TEST_BUILTINS_OBJS += test-chmtime.o
+TEST_BUILTINS_OBJS += test-config.o
+TEST_BUILTINS_OBJS += test-ctype.o
+TEST_BUILTINS_OBJS += test-date.o
+TEST_BUILTINS_OBJS += test-delta.o
+TEST_BUILTINS_OBJS += test-drop-caches.o
+TEST_BUILTINS_OBJS += test-dump-cache-tree.o
+TEST_BUILTINS_OBJS += test-dump-split-index.o
+TEST_BUILTINS_OBJS += test-example-decorate.o
+TEST_BUILTINS_OBJS += test-genrandom.o
+TEST_BUILTINS_OBJS += test-hashmap.o
+TEST_BUILTINS_OBJS += test-index-version.o
+TEST_BUILTINS_OBJS += test-lazy-init-name-hash.o
+TEST_BUILTINS_OBJS += test-match-trees.o
+TEST_BUILTINS_OBJS += test-mergesort.o
+TEST_BUILTINS_OBJS += test-mktemp.o
+TEST_BUILTINS_OBJS += test-online-cpus.o
+TEST_BUILTINS_OBJS += test-path-utils.o
+TEST_BUILTINS_OBJS += test-prio-queue.o
+TEST_BUILTINS_OBJS += test-read-cache.o
+TEST_BUILTINS_OBJS += test-ref-store.o
+TEST_BUILTINS_OBJS += test-regex.o
+TEST_BUILTINS_OBJS += test-revision-walking.o
+TEST_BUILTINS_OBJS += test-run-command.o
+TEST_BUILTINS_OBJS += test-scrap-cache-tree.o
+TEST_BUILTINS_OBJS += test-sha1-array.o
+TEST_BUILTINS_OBJS += test-sha1.o
+TEST_BUILTINS_OBJS += test-sigchain.o
+TEST_BUILTINS_OBJS += test-strcmp-offset.o
+TEST_BUILTINS_OBJS += test-string-list.o
+TEST_BUILTINS_OBJS += test-submodule-config.o
+TEST_BUILTINS_OBJS += test-subprocess.o
+TEST_BUILTINS_OBJS += test-urlmatch-normalization.o
+TEST_BUILTINS_OBJS += test-wildmatch.o
+TEST_BUILTINS_OBJS += test-write-cache.o
+
 TEST_PROGRAMS_NEED_X += test-dump-fsmonitor
-TEST_PROGRAMS_NEED_X += test-dump-split-index
 TEST_PROGRAMS_NEED_X += test-dump-untracked-cache
-TEST_PROGRAMS_NEED_X += test-example-decorate
 TEST_PROGRAMS_NEED_X += test-fake-ssh
-TEST_PROGRAMS_NEED_X += test-genrandom
-TEST_PROGRAMS_NEED_X += test-hashmap
-TEST_PROGRAMS_NEED_X += test-index-version
-TEST_PROGRAMS_NEED_X += test-lazy-init-name-hash
 TEST_PROGRAMS_NEED_X += test-line-buffer
-TEST_PROGRAMS_NEED_X += test-match-trees
-TEST_PROGRAMS_NEED_X += test-mergesort
-TEST_PROGRAMS_NEED_X += test-mktemp
-TEST_PROGRAMS_NEED_X += test-online-cpus
 TEST_PROGRAMS_NEED_X += test-parse-options
-TEST_PROGRAMS_NEED_X += test-path-utils
-TEST_PROGRAMS_NEED_X += test-prio-queue
-TEST_PROGRAMS_NEED_X += test-read-cache
-TEST_PROGRAMS_NEED_X += test-write-cache
-TEST_PROGRAMS_NEED_X += test-ref-store
-TEST_PROGRAMS_NEED_X += test-regex
-TEST_PROGRAMS_NEED_X += test-revision-walking
-TEST_PROGRAMS_NEED_X += test-run-command
-TEST_PROGRAMS_NEED_X += test-scrap-cache-tree
-TEST_PROGRAMS_NEED_X += test-sha1
-TEST_PROGRAMS_NEED_X += test-sha1-array
-TEST_PROGRAMS_NEED_X += test-sigchain
-TEST_PROGRAMS_NEED_X += test-strcmp-offset
-TEST_PROGRAMS_NEED_X += test-string-list
-TEST_PROGRAMS_NEED_X += test-submodule-config
-TEST_PROGRAMS_NEED_X += test-subprocess
+TEST_PROGRAMS_NEED_X += test-pkt-line
 TEST_PROGRAMS_NEED_X += test-svn-fe
-TEST_PROGRAMS_NEED_X += test-urlmatch-normalization
-TEST_PROGRAMS_NEED_X += test-wildmatch
+TEST_PROGRAMS_NEED_X += test-tool
 
 TEST_PROGRAMS = $(patsubst %,t/helper/%$X,$(TEST_PROGRAMS_NEED_X))
 
@@ -772,11 +821,13 @@ LIB_OBJS += branch.o
 LIB_OBJS += bulk-checkin.o
 LIB_OBJS += bundle.o
 LIB_OBJS += cache-tree.o
+LIB_OBJS += chdir-notify.o
 LIB_OBJS += checkout.o
 LIB_OBJS += color.o
 LIB_OBJS += column.o
 LIB_OBJS += combine-diff.o
 LIB_OBJS += commit.o
+LIB_OBJS += commit-graph.o
 LIB_OBJS += compat/obstack.o
 LIB_OBJS += compat/terminal.o
 LIB_OBJS += config.o
@@ -807,7 +858,7 @@ LIB_OBJS += ewah/bitmap.o
 LIB_OBJS += ewah/ewah_bitmap.o
 LIB_OBJS += ewah/ewah_io.o
 LIB_OBJS += ewah/ewah_rlw.o
-LIB_OBJS += exec_cmd.o
+LIB_OBJS += exec-cmd.o
 LIB_OBJS += fetch-object.o
 LIB_OBJS += fetch-pack.o
 LIB_OBJS += fsck.o
@@ -830,9 +881,11 @@ LIB_OBJS += list-objects-filter-options.o
 LIB_OBJS += ll-merge.o
 LIB_OBJS += lockfile.o
 LIB_OBJS += log-tree.o
+LIB_OBJS += ls-refs.o
 LIB_OBJS += mailinfo.o
 LIB_OBJS += mailmap.o
 LIB_OBJS += match-trees.o
+LIB_OBJS += mem-pool.o
 LIB_OBJS += merge.o
 LIB_OBJS += merge-blobs.o
 LIB_OBJS += merge-recursive.o
@@ -877,7 +930,7 @@ LIB_OBJS += refs/packed-backend.o
 LIB_OBJS += refs/ref-cache.o
 LIB_OBJS += ref-filter.o
 LIB_OBJS += remote.o
-LIB_OBJS += replace_object.o
+LIB_OBJS += replace-object.o
 LIB_OBJS += repository.o
 LIB_OBJS += rerere.o
 LIB_OBJS += resolve-undo.o
@@ -885,12 +938,13 @@ LIB_OBJS += revision.o
 LIB_OBJS += run-command.o
 LIB_OBJS += send-pack.o
 LIB_OBJS += sequencer.o
+LIB_OBJS += serve.o
 LIB_OBJS += server-info.o
 LIB_OBJS += setup.o
 LIB_OBJS += sha1-array.o
 LIB_OBJS += sha1-lookup.o
-LIB_OBJS += sha1_file.o
-LIB_OBJS += sha1_name.o
+LIB_OBJS += sha1-file.o
+LIB_OBJS += sha1-name.o
 LIB_OBJS += shallow.o
 LIB_OBJS += sideband.o
 LIB_OBJS += sigchain.o
@@ -913,6 +967,7 @@ LIB_OBJS += tree-diff.o
 LIB_OBJS += tree.o
 LIB_OBJS += tree-walk.o
 LIB_OBJS += unpack-trees.o
+LIB_OBJS += upload-pack.o
 LIB_OBJS += url.o
 LIB_OBJS += urlmatch.o
 LIB_OBJS += usage.o
@@ -925,7 +980,7 @@ LIB_OBJS += walker.o
 LIB_OBJS += wildmatch.o
 LIB_OBJS += worktree.o
 LIB_OBJS += wrapper.o
-LIB_OBJS += write_or_die.o
+LIB_OBJS += write-or-die.o
 LIB_OBJS += ws.o
 LIB_OBJS += wt-status.o
 LIB_OBJS += xdiff-interface.o
@@ -952,6 +1007,7 @@ BUILTIN_OBJS += builtin/clone.o
 BUILTIN_OBJS += builtin/column.o
 BUILTIN_OBJS += builtin/commit-tree.o
 BUILTIN_OBJS += builtin/commit.o
+BUILTIN_OBJS += builtin/commit-graph.o
 BUILTIN_OBJS += builtin/config.o
 BUILTIN_OBJS += builtin/count-objects.o
 BUILTIN_OBJS += builtin/credential.o
@@ -1017,6 +1073,7 @@ BUILTIN_OBJS += builtin/rev-parse.o
 BUILTIN_OBJS += builtin/revert.o
 BUILTIN_OBJS += builtin/rm.o
 BUILTIN_OBJS += builtin/send-pack.o
+BUILTIN_OBJS += builtin/serve.o
 BUILTIN_OBJS += builtin/shortlog.o
 BUILTIN_OBJS += builtin/show-branch.o
 BUILTIN_OBJS += builtin/show-ref.o
@@ -1030,6 +1087,7 @@ BUILTIN_OBJS += builtin/update-index.o
 BUILTIN_OBJS += builtin/update-ref.o
 BUILTIN_OBJS += builtin/update-server-info.o
 BUILTIN_OBJS += builtin/upload-archive.o
+BUILTIN_OBJS += builtin/upload-pack.o
 BUILTIN_OBJS += builtin/var.o
 BUILTIN_OBJS += builtin/verify-commit.o
 BUILTIN_OBJS += builtin/verify-pack.o
@@ -1051,7 +1109,7 @@ include config.mak.uname
 -include config.mak
 
 ifdef DEVELOPER
-CFLAGS += $(DEVELOPER_CFLAGS)
+include config.mak.dev
 endif
 
 comma := ,
@@ -1170,13 +1228,18 @@ ifdef NO_LIBGEN_H
        COMPAT_OBJS += compat/basename.o
 endif
 
-USE_LIBPCRE1 ?= $(USE_LIBPCRE)
+USE_LIBPCRE2 ?= $(USE_LIBPCRE)
 
-ifneq (,$(USE_LIBPCRE1))
-       ifdef USE_LIBPCRE2
-$(error Only set USE_LIBPCRE1 (or its alias USE_LIBPCRE) or USE_LIBPCRE2, not both!)
+ifneq (,$(USE_LIBPCRE2))
+       ifdef USE_LIBPCRE1
+$(error Only set USE_LIBPCRE2 (or its alias USE_LIBPCRE) or USE_LIBPCRE1, not both!)
        endif
 
+       BASIC_CFLAGS += -DUSE_LIBPCRE2
+       EXTLIBS += -lpcre2-8
+endif
+
+ifdef USE_LIBPCRE1
        BASIC_CFLAGS += -DUSE_LIBPCRE1
        EXTLIBS += -lpcre
 
@@ -1185,11 +1248,6 @@ ifdef NO_LIBPCRE1_JIT
 endif
 endif
 
-ifdef USE_LIBPCRE2
-       BASIC_CFLAGS += -DUSE_LIBPCRE2
-       EXTLIBS += -lpcre2-8
-endif
-
 ifdef LIBPCREDIR
        BASIC_CFLAGS += -I$(LIBPCREDIR)/include
        EXTLIBS += -L$(LIBPCREDIR)/$(lib) $(CC_LD_DYNPATH)$(LIBPCREDIR)/$(lib)
@@ -1652,10 +1710,27 @@ ifdef HAVE_BSD_SYSCTL
        BASIC_CFLAGS += -DHAVE_BSD_SYSCTL
 endif
 
+ifdef HAVE_BSD_KERN_PROC_SYSCTL
+       BASIC_CFLAGS += -DHAVE_BSD_KERN_PROC_SYSCTL
+endif
+
 ifdef HAVE_GETDELIM
        BASIC_CFLAGS += -DHAVE_GETDELIM
 endif
 
+ifneq ($(PROCFS_EXECUTABLE_PATH),)
+       procfs_executable_path_SQ = $(subst ','\'',$(PROCFS_EXECUTABLE_PATH))
+       BASIC_CFLAGS += '-DPROCFS_EXECUTABLE_PATH="$(procfs_executable_path_SQ)"'
+endif
+
+ifdef HAVE_NS_GET_EXECUTABLE_PATH
+       BASIC_CFLAGS += -DHAVE_NS_GET_EXECUTABLE_PATH
+endif
+
+ifdef HAVE_WPGMPTR
+       BASIC_CFLAGS += -DHAVE_WPGMPTR
+endif
+
 ifeq ($(TCLTK_PATH),)
 NO_TCLTK = NoThanks
 endif
@@ -1740,10 +1815,13 @@ mandir_relative_SQ = $(subst ','\'',$(mandir_relative))
 infodir_relative_SQ = $(subst ','\'',$(infodir_relative))
 perllibdir_SQ = $(subst ','\'',$(perllibdir))
 localedir_SQ = $(subst ','\'',$(localedir))
+localedir_relative_SQ = $(subst ','\'',$(localedir_relative))
 gitexecdir_SQ = $(subst ','\'',$(gitexecdir))
+gitexecdir_relative_SQ = $(subst ','\'',$(gitexecdir_relative))
 template_dir_SQ = $(subst ','\'',$(template_dir))
 htmldir_relative_SQ = $(subst ','\'',$(htmldir_relative))
 prefix_SQ = $(subst ','\'',$(prefix))
+perllibdir_relative_SQ = $(subst ','\'',$(perllibdir_relative))
 gitwebdir_SQ = $(subst ','\'',$(gitwebdir))
 
 SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
@@ -1754,6 +1832,31 @@ TCLTK_PATH_SQ = $(subst ','\'',$(TCLTK_PATH))
 DIFF_SQ = $(subst ','\'',$(DIFF))
 PERLLIB_EXTRA_SQ = $(subst ','\'',$(PERLLIB_EXTRA))
 
+# RUNTIME_PREFIX's resolution logic requires resource paths to be expressed
+# relative to each other and share an installation path.
+#
+# This is a dependency in:
+# - Git's binary RUNTIME_PREFIX logic (see "exec-cmd.c").
+# - The runtime prefix Perl header (see
+#   "perl/header_templates/runtime_prefix.template.pl").
+ifdef RUNTIME_PREFIX
+
+ifneq ($(filter /%,$(firstword $(gitexecdir_relative))),)
+$(error RUNTIME_PREFIX requires a relative gitexecdir, not: $(gitexecdir))
+endif
+
+ifneq ($(filter /%,$(firstword $(localedir_relative))),)
+$(error RUNTIME_PREFIX requires a relative localedir, not: $(localedir))
+endif
+
+ifndef NO_PERL
+ifneq ($(filter /%,$(firstword $(perllibdir_relative))),)
+$(error RUNTIME_PREFIX requires a relative perllibdir, not: $(perllibdir))
+endif
+endif
+
+endif
+
 # We must filter out any object files from $(GITLIBS),
 # as it is typically used like:
 #
@@ -1974,27 +2077,44 @@ git.res: git.rc GIT-VERSION-FILE
 # This makes sure we depend on the NO_PERL setting itself.
 $(SCRIPT_PERL_GEN): GIT-BUILD-OPTIONS
 
-ifndef NO_PERL
-$(SCRIPT_PERL_GEN):
+# Used for substitution in Perl modules. Disabled when using RUNTIME_PREFIX
+# since the locale directory is injected.
+perl_localedir_SQ = $(localedir_SQ)
 
+ifndef NO_PERL
+PERL_HEADER_TEMPLATE = perl/header_templates/fixed_prefix.template.pl
 PERL_DEFINES = $(PERL_PATH_SQ):$(PERLLIB_EXTRA_SQ):$(perllibdir_SQ)
-$(SCRIPT_PERL_GEN): % : %.perl GIT-PERL-DEFINES GIT-VERSION-FILE
+
+PERL_DEFINES := $(PERL_PATH_SQ) $(PERLLIB_EXTRA_SQ) $(perllibdir_SQ)
+PERL_DEFINES += $(RUNTIME_PREFIX)
+
+# Support Perl runtime prefix. In this mode, a different header is installed
+# into Perl scripts.
+ifdef RUNTIME_PREFIX
+
+PERL_HEADER_TEMPLATE = perl/header_templates/runtime_prefix.template.pl
+
+# Don't export a fixed $(localedir) path; it will be resolved by the Perl header
+# at runtime.
+perl_localedir_SQ =
+
+endif
+
+PERL_DEFINES += $(gitexecdir) $(perllibdir) $(localedir)
+
+$(SCRIPT_PERL_GEN): % : %.perl GIT-PERL-DEFINES GIT-PERL-HEADER GIT-VERSION-FILE
        $(QUIET_GEN)$(RM) $@ $@+ && \
-       INSTLIBDIR='$(perllibdir_SQ)' && \
-       INSTLIBDIR_EXTRA='$(PERLLIB_EXTRA_SQ)' && \
-       INSTLIBDIR="$$INSTLIBDIR$${INSTLIBDIR_EXTRA:+:$$INSTLIBDIR_EXTRA}" && \
        sed -e '1{' \
            -e '        s|#!.*perl|#!$(PERL_PATH_SQ)|' \
-           -e '        h' \
-           -e '        s=.*=use lib (split(/$(pathsep)/, $$ENV{GITPERLLIB} || "'"$$INSTLIBDIR"'"));=' \
-           -e '        H' \
-           -e '        x' \
+           -e '        rGIT-PERL-HEADER' \
+           -e '        G' \
            -e '}' \
            -e 's/@@GIT_VERSION@@/$(GIT_VERSION)/g' \
            $< >$@+ && \
        chmod +x $@+ && \
        mv $@+ $@
 
+PERL_DEFINES := $(subst $(space),:,$(PERL_DEFINES))
 GIT-PERL-DEFINES: FORCE
        @FLAGS='$(PERL_DEFINES)'; \
            if test x"$$FLAGS" != x"`cat $@ 2>/dev/null`" ; then \
@@ -2002,6 +2122,22 @@ GIT-PERL-DEFINES: FORCE
                echo "$$FLAGS" >$@; \
            fi
 
+GIT-PERL-HEADER: $(PERL_HEADER_TEMPLATE) GIT-PERL-DEFINES Makefile
+       $(QUIET_GEN)$(RM) $@ && \
+       INSTLIBDIR='$(perllibdir_SQ)' && \
+       INSTLIBDIR_EXTRA='$(PERLLIB_EXTRA_SQ)' && \
+       INSTLIBDIR="$$INSTLIBDIR$${INSTLIBDIR_EXTRA:+:$$INSTLIBDIR_EXTRA}" && \
+       sed -e 's=@@PATHSEP@@=$(pathsep)=g' \
+           -e "s=@@INSTLIBDIR@@=$$INSTLIBDIR=g" \
+           -e 's=@@PERLLIBDIR_REL@@=$(perllibdir_relative_SQ)=g' \
+           -e 's=@@GITEXECDIR_REL@@=$(gitexecdir_relative_SQ)=g' \
+           -e 's=@@LOCALEDIR_REL@@=$(localedir_relative_SQ)=g' \
+           $< >$@+ && \
+       mv $@+ $@
+
+.PHONY: perllibdir
+perllibdir:
+       @echo '$(perllibdir_SQ)'
 
 .PHONY: gitweb
 gitweb:
@@ -2083,7 +2219,7 @@ VCSSVN_OBJS += vcs-svn/fast_export.o
 VCSSVN_OBJS += vcs-svn/svndiff.o
 VCSSVN_OBJS += vcs-svn/svndump.o
 
-TEST_OBJS := $(patsubst %$X,%.o,$(TEST_PROGRAMS))
+TEST_OBJS := $(patsubst %$X,%.o,$(TEST_PROGRAMS)) $(patsubst %,t/helper/%,$(TEST_BUILTINS_OBJS))
 OBJECTS := $(LIB_OBJS) $(BUILTIN_OBJS) $(PROGRAM_OBJS) $(TEST_OBJS) \
        $(XDIFF_OBJS) \
        $(VCSSVN_OBJS) \
@@ -2143,11 +2279,12 @@ else
 $(OBJECTS): $(LIB_H)
 endif
 
-exec_cmd.sp exec_cmd.s exec_cmd.o: GIT-PREFIX
-exec_cmd.sp exec_cmd.s exec_cmd.o: EXTRA_CPPFLAGS = \
+exec-cmd.sp exec-cmd.s exec-cmd.o: GIT-PREFIX
+exec-cmd.sp exec-cmd.s exec-cmd.o: EXTRA_CPPFLAGS = \
        '-DGIT_EXEC_PATH="$(gitexecdir_SQ)"' \
+       '-DGIT_LOCALE_PATH="$(localedir_relative_SQ)"' \
        '-DBINDIR="$(bindir_relative_SQ)"' \
-       '-DPREFIX="$(prefix_SQ)"'
+       '-DFALLBACK_RUNTIME_PREFIX="$(prefix_SQ)"'
 
 builtin/init-db.sp builtin/init-db.s builtin/init-db.o: GIT-PREFIX
 builtin/init-db.sp builtin/init-db.s builtin/init-db.o: EXTRA_CPPFLAGS = \
@@ -2163,7 +2300,7 @@ attr.sp attr.s attr.o: EXTRA_CPPFLAGS = \
 
 gettext.sp gettext.s gettext.o: GIT-PREFIX
 gettext.sp gettext.s gettext.o: EXTRA_CPPFLAGS = \
-       -DGIT_LOCALE_PATH='"$(localedir_SQ)"'
+       -DGIT_LOCALE_PATH='"$(localedir_relative_SQ)"'
 
 http-push.sp http.sp http-walker.sp remote-curl.sp imap-send.sp: SPARSE_FLAGS += \
        -DCURL_DISABLE_TYPECHECK
@@ -2323,7 +2460,7 @@ endif
 
 perl/build/lib/%.pm: perl/%.pm
        $(QUIET_GEN)mkdir -p $(dir $@) && \
-       sed -e 's|@@LOCALEDIR@@|$(localedir_SQ)|g' \
+       sed -e 's|@@LOCALEDIR@@|$(perl_localedir_SQ)|g' \
            -e 's|@@NO_PERL_CPAN_FALLBACKS@@|$(NO_PERL_CPAN_FALLBACKS_SQ)|g' \
        < $< > $@
 
@@ -2494,10 +2631,12 @@ t/helper/test-svn-fe$X: $(VCSSVN_LIB)
 
 .PRECIOUS: $(TEST_OBJS)
 
+t/helper/test-tool$X: $(patsubst %,t/helper/%,$(TEST_BUILTINS_OBJS))
+
 t/helper/test-%$X: t/helper/test-%.o GIT-LDFLAGS $(GITLIBS)
        $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(filter %.a,$^) $(LIBS)
 
-check-sha1:: t/helper/test-sha1$X
+check-sha1:: t/helper/test-tool$X
        t/helper/test-sha1.sh
 
 SP_OBJ = $(patsubst %.o,%.sp,$(C_OBJ))
@@ -2606,35 +2745,44 @@ endif
 
        bindir=$$(cd '$(DESTDIR_SQ)$(bindir_SQ)' && pwd) && \
        execdir=$$(cd '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' && pwd) && \
+       destdir_from_execdir_SQ=$$(echo '$(gitexecdir_relative_SQ)' | sed -e 's|[^/][^/]*|..|g') && \
        { test "$$bindir/" = "$$execdir/" || \
          for p in git$X $(filter $(install_bindir_programs),$(ALL_PROGRAMS)); do \
                $(RM) "$$execdir/$$p" && \
-               test -z "$(NO_INSTALL_HARDLINKS)$(NO_CROSS_DIRECTORY_HARDLINKS)" && \
-               ln "$$bindir/$$p" "$$execdir/$$p" 2>/dev/null || \
-               cp "$$bindir/$$p" "$$execdir/$$p" || exit; \
+               test -n "$(INSTALL_SYMLINKS)" && \
+               ln -s "$$destdir_from_execdir_SQ/$(bindir_relative_SQ)/$$p" "$$execdir/$$p" || \
+               { test -z "$(NO_INSTALL_HARDLINKS)$(NO_CROSS_DIRECTORY_HARDLINKS)" && \
+                 ln "$$bindir/$$p" "$$execdir/$$p" 2>/dev/null || \
+                 cp "$$bindir/$$p" "$$execdir/$$p" || exit; } \
          done; \
        } && \
        for p in $(filter $(install_bindir_programs),$(BUILT_INS)); do \
                $(RM) "$$bindir/$$p" && \
-               test -z "$(NO_INSTALL_HARDLINKS)" && \
-               ln "$$bindir/git$X" "$$bindir/$$p" 2>/dev/null || \
-               ln -s "git$X" "$$bindir/$$p" 2>/dev/null || \
-               cp "$$bindir/git$X" "$$bindir/$$p" || exit; \
+               test -n "$(INSTALL_SYMLINKS)" && \
+               ln -s "git$X" "$$bindir/$$p" || \
+               { test -z "$(NO_INSTALL_HARDLINKS)" && \
+                 ln "$$bindir/git$X" "$$bindir/$$p" 2>/dev/null || \
+                 ln -s "git$X" "$$bindir/$$p" 2>/dev/null || \
+                 cp "$$bindir/git$X" "$$bindir/$$p" || exit; } \
        done && \
        for p in $(BUILT_INS); do \
                $(RM) "$$execdir/$$p" && \
-               test -z "$(NO_INSTALL_HARDLINKS)" && \
-               ln "$$execdir/git$X" "$$execdir/$$p" 2>/dev/null || \
-               ln -s "git$X" "$$execdir/$$p" 2>/dev/null || \
-               cp "$$execdir/git$X" "$$execdir/$$p" || exit; \
+               test -n "$(INSTALL_SYMLINKS)" && \
+               ln -s "$$destdir_from_execdir_SQ/$(bindir_relative_SQ)/git$X" "$$execdir/$$p" || \
+               { test -z "$(NO_INSTALL_HARDLINKS)" && \
+                 ln "$$execdir/git$X" "$$execdir/$$p" 2>/dev/null || \
+                 ln -s "git$X" "$$execdir/$$p" 2>/dev/null || \
+                 cp "$$execdir/git$X" "$$execdir/$$p" || exit; } \
        done && \
        remote_curl_aliases="$(REMOTE_CURL_ALIASES)" && \
        for p in $$remote_curl_aliases; do \
                $(RM) "$$execdir/$$p" && \
-               test -z "$(NO_INSTALL_HARDLINKS)" && \
-               ln "$$execdir/git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \
-               ln -s "git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \
-               cp "$$execdir/git-remote-http$X" "$$execdir/$$p" || exit; \
+               test -n "$(INSTALL_SYMLINKS)" && \
+               ln -s "git-remote-http$X" "$$execdir/$$p" || \
+               { test -z "$(NO_INSTALL_HARDLINKS)" && \
+                 ln "$$execdir/git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \
+                 ln -s "git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \
+                 cp "$$execdir/git-remote-http$X" "$$execdir/$$p" || exit; } \
        done && \
        ./check_bindir "z$$bindir" "z$$execdir" "$$bindir/git-add$X"
 
@@ -2770,7 +2918,7 @@ ifndef NO_TCLTK
 endif
        $(RM) GIT-VERSION-FILE GIT-CFLAGS GIT-LDFLAGS GIT-BUILD-OPTIONS
        $(RM) GIT-USER-AGENT GIT-PREFIX
-       $(RM) GIT-SCRIPT-DEFINES GIT-PERL-DEFINES GIT-PYTHON-VARS
+       $(RM) GIT-SCRIPT-DEFINES GIT-PERL-DEFINES GIT-PERL-HEADER GIT-PYTHON-VARS
 
 .PHONY: all install profile-clean clean strip
 .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
index 7a6dc0603be1af20219ec41b4df926a9861d3644..f6c58b347fd8338482625ea11ec1802baa6ea068 120000 (symlink)
--- a/RelNotes
+++ b/RelNotes
@@ -1 +1 @@
-Documentation/RelNotes/2.17.0.txt
\ No newline at end of file
+Documentation/RelNotes/2.18.0.txt
\ No newline at end of file
index 406efc183ba272b94a39d4cc7e97d7483a40a9d4..370a56d0546bb3fc7aa4a62203c600e9a8af2d7a 100644 (file)
--- a/advice.c
+++ b/advice.c
@@ -1,5 +1,6 @@
 #include "cache.h"
 #include "config.h"
+#include "color.h"
 
 int advice_push_update_rejected = 1;
 int advice_push_non_ff_current = 1;
@@ -19,6 +20,34 @@ int advice_rm_hints = 1;
 int advice_add_embedded_repo = 1;
 int advice_ignored_hook = 1;
 int advice_waiting_for_editor = 1;
+int advice_graft_file_deprecated = 1;
+
+static int advice_use_color = -1;
+static char advice_colors[][COLOR_MAXLEN] = {
+       GIT_COLOR_RESET,
+       GIT_COLOR_YELLOW,       /* HINT */
+};
+
+enum color_advice {
+       ADVICE_COLOR_RESET = 0,
+       ADVICE_COLOR_HINT = 1,
+};
+
+static int parse_advise_color_slot(const char *slot)
+{
+       if (!strcasecmp(slot, "reset"))
+               return ADVICE_COLOR_RESET;
+       if (!strcasecmp(slot, "hint"))
+               return ADVICE_COLOR_HINT;
+       return -1;
+}
+
+static const char *advise_get_color(enum color_advice ix)
+{
+       if (want_color_stderr(advice_use_color))
+               return advice_colors[ix];
+       return "";
+}
 
 static struct {
        const char *name;
@@ -42,6 +71,7 @@ static struct {
        { "addembeddedrepo", &advice_add_embedded_repo },
        { "ignoredhook", &advice_ignored_hook },
        { "waitingforeditor", &advice_waiting_for_editor },
+       { "graftfiledeprecated", &advice_graft_file_deprecated },
 
        /* make this an alias for backward compatibility */
        { "pushnonfastforward", &advice_push_update_rejected }
@@ -59,7 +89,10 @@ void advise(const char *advice, ...)
 
        for (cp = buf.buf; *cp; cp = np) {
                np = strchrnul(cp, '\n');
-               fprintf(stderr, _("hint: %.*s\n"), (int)(np - cp), cp);
+               fprintf(stderr, _("%shint: %.*s%s\n"),
+                       advise_get_color(ADVICE_COLOR_HINT),
+                       (int)(np - cp), cp,
+                       advise_get_color(ADVICE_COLOR_RESET));
                if (*np)
                        np++;
        }
@@ -68,9 +101,23 @@ void advise(const char *advice, ...)
 
 int git_default_advice_config(const char *var, const char *value)
 {
-       const char *k;
+       const char *k, *slot_name;
        int i;
 
+       if (!strcmp(var, "color.advice")) {
+               advice_use_color = git_config_colorbool(var, value);
+               return 0;
+       }
+
+       if (skip_prefix(var, "color.advice.", &slot_name)) {
+               int slot = parse_advise_color_slot(slot_name);
+               if (slot < 0)
+                       return 0;
+               if (!value)
+                       return config_error_nonbool(var);
+               return color_parse(value, advice_colors[slot]);
+       }
+
        if (!skip_prefix(var, "advice.", &k))
                return 0;
 
index 70568fa7922d8aea403e415f9b9b554a88ace293..9f5064e82a862e6daaabe3f23ea9ea896aad87e6 100644 (file)
--- a/advice.h
+++ b/advice.h
@@ -21,6 +21,7 @@ extern int advice_rm_hints;
 extern int advice_add_embedded_repo;
 extern int advice_ignored_hook;
 extern int advice_waiting_for_editor;
+extern int advice_graft_file_deprecated;
 
 int git_default_advice_config(const char *var, const char *value);
 __attribute__((format (printf, 1, 2)))
diff --git a/alloc.c b/alloc.c
index 12afadfacdd6094912a6e18a217a9aa6318b47b2..cf4f8b61e126c0e9992dc34c6cb52b6f896a565d 100644 (file)
--- a/alloc.c
+++ b/alloc.c
@@ -93,6 +93,7 @@ void *alloc_commit_node(void)
        struct commit *c = alloc_node(&commit_state, sizeof(struct commit));
        c->object.type = OBJ_COMMIT;
        c->index = alloc_commit_index();
+       c->graph_pos = COMMIT_NOT_FROM_GRAPH;
        return c;
 }
 
diff --git a/apply.c b/apply.c
index 134dc7ba78cddd99406b78a97898bd8a32393b4c..7e5792c996f430952b1b768f8267de851156ce83 100644 (file)
--- a/apply.c
+++ b/apply.c
@@ -3180,7 +3180,7 @@ static int apply_binary(struct apply_state *state,
                unsigned long size;
                char *result;
 
-               result = read_sha1_file(oid.hash, &type, &size);
+               result = read_object_file(&oid, &type, &size);
                if (!result)
                        return error(_("the necessary postimage %s for "
                                       "'%s' cannot be read"),
@@ -3242,7 +3242,7 @@ static int read_blob_object(struct strbuf *buf, const struct object_id *oid, uns
                unsigned long sz;
                char *result;
 
-               result = read_sha1_file(oid->hash, &type, &sz);
+               result = read_object_file(oid, &type, &sz);
                if (!result)
                        return -1;
                /* XXX read_sha1_file NUL-terminates */
index c6ed96ee74ec10f5c9ffb6f520193326d4704b6b..f93409324f9d4f18a4b44b35eb4b2cf3514234fb 100644 (file)
@@ -111,7 +111,7 @@ static void write_trailer(void)
  * queues up writes, so that all our write(2) calls write exactly one
  * full block; pads writes to RECORDSIZE
  */
-static int stream_blocked(const unsigned char *sha1)
+static int stream_blocked(const struct object_id *oid)
 {
        struct git_istream *st;
        enum object_type type;
@@ -119,9 +119,9 @@ static int stream_blocked(const unsigned char *sha1)
        char buf[BLOCKSIZE];
        ssize_t readlen;
 
-       st = open_istream(sha1, &type, &sz, NULL);
+       st = open_istream(oid, &type, &sz, NULL);
        if (!st)
-               return error("cannot stream blob %s", sha1_to_hex(sha1));
+               return error("cannot stream blob %s", oid_to_hex(oid));
        for (;;) {
                readlen = read_istream(st, buf, sizeof(buf));
                if (readlen <= 0)
@@ -218,7 +218,7 @@ static void prepare_header(struct archiver_args *args,
 }
 
 static void write_extended_header(struct archiver_args *args,
-                                 const unsigned char *sha1,
+                                 const struct object_id *oid,
                                  const void *buffer, unsigned long size)
 {
        struct ustar_header header;
@@ -226,14 +226,14 @@ static void write_extended_header(struct archiver_args *args,
        memset(&header, 0, sizeof(header));
        *header.typeflag = TYPEFLAG_EXT_HEADER;
        mode = 0100666;
-       xsnprintf(header.name, sizeof(header.name), "%s.paxheader", sha1_to_hex(sha1));
+       xsnprintf(header.name, sizeof(header.name), "%s.paxheader", oid_to_hex(oid));
        prepare_header(args, &header, mode, size);
        write_blocked(&header, sizeof(header));
        write_blocked(buffer, size);
 }
 
 static int write_tar_entry(struct archiver_args *args,
-                          const unsigned char *sha1,
+                          const struct object_id *oid,
                           const char *path, size_t pathlen,
                           unsigned int mode)
 {
@@ -257,7 +257,7 @@ static int write_tar_entry(struct archiver_args *args,
                mode = (mode | ((mode & 0100) ? 0777 : 0666)) & ~tar_umask;
        } else {
                return error("unsupported file mode: 0%o (SHA1: %s)",
-                            mode, sha1_to_hex(sha1));
+                            mode, oid_to_hex(oid));
        }
        if (pathlen > sizeof(header.name)) {
                size_t plen = get_path_prefix(path, pathlen,
@@ -268,7 +268,7 @@ static int write_tar_entry(struct archiver_args *args,
                        memcpy(header.name, path + plen + 1, rest);
                } else {
                        xsnprintf(header.name, sizeof(header.name), "%s.data",
-                                 sha1_to_hex(sha1));
+                                 oid_to_hex(oid));
                        strbuf_append_ext_header(&ext_header, "path",
                                                 path, pathlen);
                }
@@ -276,14 +276,14 @@ static int write_tar_entry(struct archiver_args *args,
                memcpy(header.name, path, pathlen);
 
        if (S_ISREG(mode) && !args->convert &&
-           sha1_object_info(sha1, &size) == OBJ_BLOB &&
+           oid_object_info(the_repository, oid, &size) == OBJ_BLOB &&
            size > big_file_threshold)
                buffer = NULL;
        else if (S_ISLNK(mode) || S_ISREG(mode)) {
                enum object_type type;
-               buffer = sha1_file_to_archive(args, path, sha1, old_mode, &type, &size);
+               buffer = object_file_to_archive(args, path, oid, old_mode, &type, &size);
                if (!buffer)
-                       return error("cannot read %s", sha1_to_hex(sha1));
+                       return error("cannot read %s", oid_to_hex(oid));
        } else {
                buffer = NULL;
                size = 0;
@@ -292,7 +292,7 @@ static int write_tar_entry(struct archiver_args *args,
        if (S_ISLNK(mode)) {
                if (size > sizeof(header.linkname)) {
                        xsnprintf(header.linkname, sizeof(header.linkname),
-                                 "see %s.paxheader", sha1_to_hex(sha1));
+                                 "see %s.paxheader", oid_to_hex(oid));
                        strbuf_append_ext_header(&ext_header, "linkpath",
                                                 buffer, size);
                } else
@@ -308,7 +308,7 @@ static int write_tar_entry(struct archiver_args *args,
        prepare_header(args, &header, mode, size_in_header);
 
        if (ext_header.len > 0) {
-               write_extended_header(args, sha1, ext_header.buf,
+               write_extended_header(args, oid, ext_header.buf,
                                      ext_header.len);
        }
        strbuf_release(&ext_header);
@@ -317,7 +317,7 @@ static int write_tar_entry(struct archiver_args *args,
                if (buffer)
                        write_blocked(buffer, size);
                else
-                       err = stream_blocked(sha1);
+                       err = stream_blocked(oid);
        }
        free(buffer);
        return err;
index e8913e5a26c6e97216c4b79ad96b5e3ddf906c45..74f3fe9103420571c9f22273ae44ebb9d5715c78 100644 (file)
@@ -276,7 +276,7 @@ static int entry_is_binary(const char *path, const void *buffer, size_t size)
 #define STREAM_BUFFER_SIZE (1024 * 16)
 
 static int write_zip_entry(struct archiver_args *args,
-                          const unsigned char *sha1,
+                          const struct object_id *oid,
                           const char *path, size_t pathlen,
                           unsigned int mode)
 {
@@ -314,7 +314,7 @@ static int write_zip_entry(struct archiver_args *args,
 
        if (pathlen > 0xffff) {
                return error("path too long (%d chars, SHA1: %s): %s",
-                               (int)pathlen, sha1_to_hex(sha1), path);
+                               (int)pathlen, oid_to_hex(oid), path);
        }
 
        if (S_ISDIR(mode) || S_ISGITLINK(mode)) {
@@ -325,7 +325,8 @@ static int write_zip_entry(struct archiver_args *args,
                compressed_size = 0;
                buffer = NULL;
        } else if (S_ISREG(mode) || S_ISLNK(mode)) {
-               enum object_type type = sha1_object_info(sha1, &size);
+               enum object_type type = oid_object_info(the_repository, oid,
+                                                       &size);
 
                method = 0;
                attr2 = S_ISLNK(mode) ? ((mode | 0777) << 16) :
@@ -337,18 +338,18 @@ static int write_zip_entry(struct archiver_args *args,
 
                if (S_ISREG(mode) && type == OBJ_BLOB && !args->convert &&
                    size > big_file_threshold) {
-                       stream = open_istream(sha1, &type, &size, NULL);
+                       stream = open_istream(oid, &type, &size, NULL);
                        if (!stream)
                                return error("cannot stream blob %s",
-                                            sha1_to_hex(sha1));
+                                            oid_to_hex(oid));
                        flags |= ZIP_STREAM;
                        out = buffer = NULL;
                } else {
-                       buffer = sha1_file_to_archive(args, path, sha1, mode,
-                                                     &type, &size);
+                       buffer = object_file_to_archive(args, path, oid, mode,
+                                                       &type, &size);
                        if (!buffer)
                                return error("cannot read %s",
-                                            sha1_to_hex(sha1));
+                                            oid_to_hex(oid));
                        crc = crc32(crc, buffer, size);
                        is_binary = entry_is_binary(path_without_prefix,
                                                    buffer, size);
@@ -357,7 +358,7 @@ static int write_zip_entry(struct archiver_args *args,
                compressed_size = (method == 0) ? size : 0;
        } else {
                return error("unsupported file mode: 0%o (SHA1: %s)", mode,
-                               sha1_to_hex(sha1));
+                               oid_to_hex(oid));
        }
 
        if (creator_version > max_creator_version)
index 0b7b62af0c3ecee10a26e9bd2d274690604ffcad..93ab175b0b4055bcfbd9334c7ccb36475c33e549 100644 (file)
--- a/archive.c
+++ b/archive.c
@@ -63,16 +63,16 @@ static void format_subst(const struct commit *commit,
        free(to_free);
 }
 
-void *sha1_file_to_archive(const struct archiver_args *args,
-                          const char *path, const unsigned char *sha1,
-                          unsigned int mode, enum object_type *type,
-                          unsigned long *sizep)
+void *object_file_to_archive(const struct archiver_args *args,
+                            const char *path, const struct object_id *oid,
+                            unsigned int mode, enum object_type *type,
+                            unsigned long *sizep)
 {
        void *buffer;
        const struct commit *commit = args->convert ? args->commit : NULL;
 
        path += args->baselen;
-       buffer = read_sha1_file(sha1, type, sizep);
+       buffer = read_object_file(oid, type, sizep);
        if (buffer && S_ISREG(mode)) {
                struct strbuf buf = STRBUF_INIT;
                size_t size = 0;
@@ -121,7 +121,7 @@ static int check_attr_export_subst(const struct attr_check *check)
        return check && ATTR_TRUE(check->items[1].value);
 }
 
-static int write_archive_entry(const unsigned char *sha1, const char *base,
+static int write_archive_entry(const struct object_id *oid, const char *base,
                int baselen, const char *filename, unsigned mode, int stage,
                void *context)
 {
@@ -153,7 +153,7 @@ static int write_archive_entry(const unsigned char *sha1, const char *base,
        if (S_ISDIR(mode) || S_ISGITLINK(mode)) {
                if (args->verbose)
                        fprintf(stderr, "%.*s\n", (int)path.len, path.buf);
-               err = write_entry(args, sha1, path.buf, path.len, mode);
+               err = write_entry(args, oid, path.buf, path.len, mode);
                if (err)
                        return err;
                return (S_ISDIR(mode) ? READ_TREE_RECURSIVE : 0);
@@ -161,7 +161,7 @@ static int write_archive_entry(const unsigned char *sha1, const char *base,
 
        if (args->verbose)
                fprintf(stderr, "%.*s\n", (int)path.len, path.buf);
-       return write_entry(args, sha1, path.buf, path.len, mode);
+       return write_entry(args, oid, path.buf, path.len, mode);
 }
 
 static void queue_directory(const unsigned char *sha1,
@@ -191,14 +191,14 @@ static int write_directory(struct archiver_context *c)
        d->path[d->len - 1] = '\0'; /* no trailing slash */
        ret =
                write_directory(c) ||
-               write_archive_entry(d->oid.hash, d->path, d->baselen,
+               write_archive_entry(&d->oid, d->path, d->baselen,
                                    d->path + d->baselen, d->mode,
                                    d->stage, c) != READ_TREE_RECURSIVE;
        free(d);
        return ret ? -1 : 0;
 }
 
-static int queue_or_write_archive_entry(const unsigned char *sha1,
+static int queue_or_write_archive_entry(const struct object_id *oid,
                struct strbuf *base, const char *filename,
                unsigned mode, int stage, void *context)
 {
@@ -224,14 +224,14 @@ static int queue_or_write_archive_entry(const unsigned char *sha1,
 
                if (check_attr_export_ignore(check))
                        return 0;
-               queue_directory(sha1, base, filename,
+               queue_directory(oid->hash, base, filename,
                                mode, stage, c);
                return READ_TREE_RECURSIVE;
        }
 
        if (write_directory(c))
                return -1;
-       return write_archive_entry(sha1, base->buf, base->len, filename, mode,
+       return write_archive_entry(oid, base->buf, base->len, filename, mode,
                                   stage, context);
 }
 
@@ -250,7 +250,7 @@ int write_archive_entries(struct archiver_args *args,
                        len--;
                if (args->verbose)
                        fprintf(stderr, "%.*s\n", (int)len, args->base);
-               err = write_entry(args, args->tree->object.oid.hash, args->base,
+               err = write_entry(args, &args->tree->object.oid, args->base,
                                  len, 040777);
                if (err)
                        return err;
@@ -303,7 +303,7 @@ static const struct archiver *lookup_archiver(const char *name)
        return NULL;
 }
 
-static int reject_entry(const unsigned char *sha1, struct strbuf *base,
+static int reject_entry(const struct object_id *oid, struct strbuf *base,
                        const char *filename, unsigned mode,
                        int stage, void *context)
 {
@@ -397,8 +397,8 @@ static void parse_treeish_arg(const char **argv,
                unsigned int mode;
                int err;
 
-               err = get_tree_entry(tree->object.oid.hash, prefix,
-                                    tree_oid.hash, &mode);
+               err = get_tree_entry(&tree->object.oid, prefix, &tree_oid,
+                                    &mode);
                if (err || !S_ISDIR(mode))
                        die("current working directory is untracked");
 
index 62d1d82c1af0fa3bf77b32d63e9b4866f3428898..1f9954f7cdc5a1ee8036321e439a65bdfb90e59f 100644 (file)
--- a/archive.h
+++ b/archive.h
@@ -31,7 +31,7 @@ extern void init_tar_archiver(void);
 extern void init_zip_archiver(void);
 
 typedef int (*write_archive_entry_fn_t)(struct archiver_args *args,
-                                       const unsigned char *sha1,
+                                       const struct object_id *oid,
                                        const char *path, size_t pathlen,
                                        unsigned int mode);
 
@@ -39,9 +39,9 @@ extern int write_archive_entries(struct archiver_args *args, write_archive_entry
 extern int write_archive(int argc, const char **argv, const char *prefix, const char *name_hint, int remote);
 
 const char *archive_format_from_filename(const char *filename);
-extern void *sha1_file_to_archive(const struct archiver_args *args,
-                                 const char *path, const unsigned char *sha1,
-                                 unsigned int mode, enum object_type *type,
-                                 unsigned long *sizep);
+extern void *object_file_to_archive(const struct archiver_args *args,
+                                   const char *path, const struct object_id *oid,
+                                   unsigned int mode, enum object_type *type,
+                                   unsigned long *sizep);
 
 #endif /* ARCHIVE_H */
index 5d370fa3366163f8c0c81ca0b6b1a64a7030c696..cb5bcd2c064961919d965cb2cae79de653162fde 100644 (file)
@@ -64,6 +64,26 @@ void argv_array_pop(struct argv_array *array)
        array->argc--;
 }
 
+void argv_array_split(struct argv_array *array, const char *to_split)
+{
+       while (isspace(*to_split))
+               to_split++;
+       for (;;) {
+               const char *p = to_split;
+
+               if (!*p)
+                       break;
+
+               while (*p && !isspace(*p))
+                       p++;
+               argv_array_push_nodup(array, xstrndup(to_split, p - to_split));
+
+               while (isspace(*p))
+                       p++;
+               to_split = p;
+       }
+}
+
 void argv_array_clear(struct argv_array *array)
 {
        if (array->argv != empty_argv) {
index 29056e49a1208b5506d0809c7311e4112dc1f7f3..750c30d2f2cc7c7532ae67778da6396b4744708f 100644 (file)
--- a/argv-array.h
+++ b/argv-array.h
@@ -19,6 +19,8 @@ LAST_ARG_MUST_BE_NULL
 void argv_array_pushl(struct argv_array *, ...);
 void argv_array_pushv(struct argv_array *, const char **);
 void argv_array_pop(struct argv_array *);
+/* Splits by whitespace; does not handle quoted arguments! */
+void argv_array_split(struct argv_array *, const char *);
 void argv_array_clear(struct argv_array *);
 const char **argv_array_detach(struct argv_array *);
 
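The new argv_array_split() tokenizes a string on runs of whitespace and appends each token to the array; as the header comment warns, quoting is not handled. A minimal call-site sketch, assuming the usual argv-array.h declarations (the input string is illustrative):

    struct argv_array args = ARGV_ARRAY_INIT;

    argv_array_split(&args, "  --foo   bar\tbaz ");
    /* args.argv now holds { "--foo", "bar", "baz", NULL }, args.argc == 3 */
    argv_array_clear(&args);        /* frees the duplicated tokens */

Leading and trailing whitespace is skipped, so an all-blank input leaves the array empty instead of adding empty strings.
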
diff --git a/attr.c b/attr.c
index dfc3a558d83737921e1f97c96886539ef5fc3acb..03a678fa9be09ac71da4b654c99c1ab26bde3430 100644 (file)
--- a/attr.c
+++ b/attr.c
@@ -10,7 +10,7 @@
 #define NO_THE_INDEX_COMPATIBILITY_MACROS
 #include "cache.h"
 #include "config.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "attr.h"
 #include "dir.h"
 #include "utf8.h"
index f6d05bd66f42bd9874a08f5585ae99337d22dad5..a579b50884f8e6f8ce8390308d39b2664d050583 100644 (file)
--- a/bisect.c
+++ b/bisect.c
+++ b/bisect.c
@@ -132,7 +132,8 @@ static void show_list(const char *debug, int counted, int nr,
                unsigned flags = commit->object.flags;
                enum object_type type;
                unsigned long size;
-               char *buf = read_sha1_file(commit->object.oid.hash, &type, &size);
+               char *buf = read_object_file(&commit->object.oid, &type,
+                                            &size);
                const char *subject_start;
                int subject_len;
 
@@ -144,10 +145,10 @@ static void show_list(const char *debug, int counted, int nr,
                        fprintf(stderr, "%3d", weight(p));
                else
                        fprintf(stderr, "---");
-               fprintf(stderr, " %.*s", 8, sha1_to_hex(commit->object.oid.hash));
+               fprintf(stderr, " %.*s", 8, oid_to_hex(&commit->object.oid));
                for (pp = commit->parents; pp; pp = pp->next)
                        fprintf(stderr, " %.*s", 8,
-                               sha1_to_hex(pp->item->object.oid.hash));
+                               oid_to_hex(&pp->item->object.oid));
 
                subject_len = find_commit_subject(buf, &subject_start);
                if (subject_len)
diff --git a/blame.c b/blame.c
index 200e0ad9a299adb13982cdc27cb6e9a768560f58..0edea04a052d33f771f44ff5c70d45fedacbe54b 100644 (file)
--- a/blame.c
+++ b/blame.c
@@ -80,8 +80,8 @@ static void verify_working_tree_path(struct commit *work_tree, const char *path)
                struct object_id blob_oid;
                unsigned mode;
 
-               if (!get_tree_entry(commit_oid->hash, path, blob_oid.hash, &mode) &&
-                   sha1_object_info(blob_oid.hash, NULL) == OBJ_BLOB)
+               if (!get_tree_entry(commit_oid, path, &blob_oid, &mode) &&
+                   oid_object_info(the_repository, &blob_oid, NULL) == OBJ_BLOB)
                        return;
        }
 
@@ -297,8 +297,8 @@ static void fill_origin_blob(struct diff_options *opt,
                    textconv_object(o->path, o->mode, &o->blob_oid, 1, &file->ptr, &file_size))
                        ;
                else
-                       file->ptr = read_sha1_file(o->blob_oid.hash, &type,
-                                                  &file_size);
+                       file->ptr = read_object_file(&o->blob_oid, &type,
+                                                    &file_size);
                file->size = file_size;
 
                if (!file->ptr)
@@ -502,11 +502,9 @@ static int fill_blob_sha1_and_mode(struct blame_origin *origin)
 {
        if (!is_null_oid(&origin->blob_oid))
                return 0;
-       if (get_tree_entry(origin->commit->object.oid.hash,
-                          origin->path,
-                          origin->blob_oid.hash, &origin->mode))
+       if (get_tree_entry(&origin->commit->object.oid, origin->path, &origin->blob_oid, &origin->mode))
                goto error_out;
-       if (sha1_object_info(origin->blob_oid.hash, NULL) != OBJ_BLOB)
+       if (oid_object_info(the_repository, &origin->blob_oid, NULL) != OBJ_BLOB)
                goto error_out;
        return 0;
  error_out:
@@ -553,10 +551,10 @@ static struct blame_origin *find_origin(struct commit *parent,
        diff_setup_done(&diff_opts);
 
        if (is_null_oid(&origin->commit->object.oid))
-               do_diff_cache(&parent->tree->object.oid, &diff_opts);
+               do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
        else
-               diff_tree_oid(&parent->tree->object.oid,
-                             &origin->commit->tree->object.oid,
+               diff_tree_oid(get_commit_tree_oid(parent),
+                             get_commit_tree_oid(origin->commit),
                              "", &diff_opts);
        diffcore_std(&diff_opts);
 
@@ -622,10 +620,10 @@ static struct blame_origin *find_rename(struct commit *parent,
        diff_setup_done(&diff_opts);
 
        if (is_null_oid(&origin->commit->object.oid))
-               do_diff_cache(&parent->tree->object.oid, &diff_opts);
+               do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
        else
-               diff_tree_oid(&parent->tree->object.oid,
-                             &origin->commit->tree->object.oid,
+               diff_tree_oid(get_commit_tree_oid(parent),
+                             get_commit_tree_oid(origin->commit),
                              "", &diff_opts);
        diffcore_std(&diff_opts);
 
@@ -1257,10 +1255,10 @@ static void find_copy_in_parent(struct blame_scoreboard *sb,
                diff_opts.flags.find_copies_harder = 1;
 
        if (is_null_oid(&target->commit->object.oid))
-               do_diff_cache(&parent->tree->object.oid, &diff_opts);
+               do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
        else
-               diff_tree_oid(&parent->tree->object.oid,
-                             &target->commit->tree->object.oid,
+               diff_tree_oid(get_commit_tree_oid(parent),
+                             get_commit_tree_oid(target->commit),
                              "", &diff_opts);
 
        if (!diff_opts.flags.find_copies_harder)
@@ -1831,8 +1829,8 @@ void setup_scoreboard(struct blame_scoreboard *sb, const char *path, struct blam
                                    &sb->final_buf_size))
                        ;
                else
-                       sb->final_buf = read_sha1_file(o->blob_oid.hash, &type,
-                                                      &sb->final_buf_size);
+                       sb->final_buf = read_object_file(&o->blob_oid, &type,
+                                                        &sb->final_buf_size);
 
                if (!sb->final_buf)
                        die(_("cannot read blob %s for path %s"),
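
The bisect.c and blame.c hunks show the conversion pattern that repeats throughout this merge: bare "unsigned char *sha1" buffers become "struct object_id", read_sha1_file()/sha1_object_info() become read_object_file()/oid_object_info(), and the latter takes the repository explicitly. A sketch of the new-style calls, assuming cache.h and object-store.h are in scope (show_blob_size() is a made-up helper name):

    static void show_blob_size(const struct object_id *oid)
    {
            enum object_type type;
            unsigned long size;
            void *buf = read_object_file(oid, &type, &size); /* was read_sha1_file(oid->hash, ...) */

            if (!buf)
                    die("cannot read object %s", oid_to_hex(oid));
            /* oid_object_info() answers type-only queries without keeping a buffer */
            if (oid_object_info(the_repository, oid, NULL) == OBJ_BLOB)
                    printf("%s blob %lu\n", oid_to_hex(oid), size);
            free(buf);
    }
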
index 42378f3aa471eb79594d96736ad2410b54d6c4dd..4e0f64723ed8dde9c97827cc688535b2dda73025 100644 (file)
--- a/builtin.h
+++ b/builtin.h
@@ -149,6 +149,7 @@ extern int cmd_clone(int argc, const char **argv, const char *prefix);
 extern int cmd_clean(int argc, const char **argv, const char *prefix);
 extern int cmd_column(int argc, const char **argv, const char *prefix);
 extern int cmd_commit(int argc, const char **argv, const char *prefix);
+extern int cmd_commit_graph(int argc, const char **argv, const char *prefix);
 extern int cmd_commit_tree(int argc, const char **argv, const char *prefix);
 extern int cmd_config(int argc, const char **argv, const char *prefix);
 extern int cmd_count_objects(int argc, const char **argv, const char *prefix);
@@ -215,6 +216,7 @@ extern int cmd_rev_parse(int argc, const char **argv, const char *prefix);
 extern int cmd_revert(int argc, const char **argv, const char *prefix);
 extern int cmd_rm(int argc, const char **argv, const char *prefix);
 extern int cmd_send_pack(int argc, const char **argv, const char *prefix);
+extern int cmd_serve(int argc, const char **argv, const char *prefix);
 extern int cmd_shortlog(int argc, const char **argv, const char *prefix);
 extern int cmd_show(int argc, const char **argv, const char *prefix);
 extern int cmd_show_branch(int argc, const char **argv, const char *prefix);
@@ -231,6 +233,7 @@ extern int cmd_update_ref(int argc, const char **argv, const char *prefix);
 extern int cmd_update_server_info(int argc, const char **argv, const char *prefix);
 extern int cmd_upload_archive(int argc, const char **argv, const char *prefix);
 extern int cmd_upload_archive_writer(int argc, const char **argv, const char *prefix);
+extern int cmd_upload_pack(int argc, const char **argv, const char *prefix);
 extern int cmd_var(int argc, const char **argv, const char *prefix);
 extern int cmd_verify_commit(int argc, const char **argv, const char *prefix);
 extern int cmd_verify_tag(int argc, const char **argv, const char *prefix);
index 9ef7fb02d56aac94d104b50aed7d7dfda09cfc98..c9e2619a9ad8febc10e4132c88e7a74c6c9bbeaf 100644 (file)
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -9,7 +9,7 @@
 #include "lockfile.h"
 #include "dir.h"
 #include "pathspec.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "cache-tree.h"
 #include "run-command.h"
 #include "parse-options.h"
index 1151b5c73aec81dbfca36a799cd4049429c75973..d834f9e62b6a0f5550909b5a0bb43cce0e48e8ec 100644 (file)
--- a/builtin/am.c
+++ b/builtin/am.c
@@ -6,7 +6,7 @@
 #include "cache.h"
 #include "config.h"
 #include "builtin.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "parse-options.h"
 #include "dir.h"
 #include "run-command.h"
@@ -1550,7 +1550,7 @@ static int fall_back_threeway(const struct am_state *state, const char *index_pa
        discard_cache();
        read_cache_from(index_path);
 
-       if (write_index_as_tree(orig_tree.hash, &the_index, index_path, 0, NULL))
+       if (write_index_as_tree(&orig_tree, &the_index, index_path, 0, NULL))
                return error(_("Repository lacks necessary blobs to fall back on 3-way merge."));
 
        say(state, stdout, _("Using index info to reconstruct a base tree..."));
@@ -1575,7 +1575,7 @@ static int fall_back_threeway(const struct am_state *state, const char *index_pa
                return error(_("Did you hand edit your patch?\n"
                                "It does not apply to blobs recorded in its index."));
 
-       if (write_index_as_tree(their_tree.hash, &the_index, index_path, 0, NULL))
+       if (write_index_as_tree(&their_tree, &the_index, index_path, 0, NULL))
                return error("could not write tree");
 
        say(state, stdout, _("Falling back to patching base and 3-way merge..."));
@@ -1626,7 +1626,7 @@ static void do_commit(const struct am_state *state)
        if (run_hook_le(NULL, "pre-applypatch", NULL))
                exit(1);
 
-       if (write_cache_as_tree(tree.hash, 0, NULL))
+       if (write_cache_as_tree(&tree, 0, NULL))
                die(_("git write-tree failed to write a tree"));
 
        if (!get_oid_commit("HEAD", &parent)) {
@@ -1862,7 +1862,7 @@ static void am_run(struct am_state *state, int resume)
         */
        if (!state->rebasing) {
                am_destroy(state);
-               close_all_packs();
+               close_all_packs(the_repository->objects);
                run_command_v_opt(argv_gc_auto, RUN_GIT_CMD);
        }
 }
@@ -2004,7 +2004,7 @@ static int clean_index(const struct object_id *head, const struct object_id *rem
        if (fast_forward_to(head_tree, head_tree, 1))
                return -1;
 
-       if (write_cache_as_tree(index.hash, 0, NULL))
+       if (write_cache_as_tree(&index, 0, NULL))
                return -1;
 
        index_tree = parse_tree_indirect(&index);
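
The object_id conversion also reaches the tree-writing helpers used here: write_index_as_tree() and write_cache_as_tree() now fill in a struct object_id instead of a bare sha1 buffer. A minimal sketch of the updated calling convention, following the pattern in the hunks above:

    struct object_id tree;

    if (write_cache_as_tree(&tree, 0, NULL))
            die(_("git write-tree failed to write a tree"));
    printf("tree %s\n", oid_to_hex(&tree));
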
index 9dcb367b90d99fc9ebbbf878f53cd5b4651d7864..bfdf7cc1325826d6b7a41520b52a07f2fa1cb042 100644 (file)
--- a/builtin/blame.c
+++ b/builtin/blame.c
@@ -499,7 +499,7 @@ static int read_ancestry(const char *graft_file)
 
 static int update_auto_abbrev(int auto_abbrev, struct blame_origin *suspect)
 {
-       const char *uniq = find_unique_abbrev(suspect->commit->object.oid.hash,
+       const char *uniq = find_unique_abbrev(&suspect->commit->object.oid,
                                              auto_abbrev);
        int len = strlen(uniq);
        if (auto_abbrev < len)
@@ -655,7 +655,7 @@ static int is_a_rev(const char *name)
 
        if (get_oid(name, &oid))
                return 0;
-       return OBJ_NONE < sha1_object_info(oid.hash, NULL);
+       return OBJ_NONE < oid_object_info(the_repository, &oid, NULL);
 }
 
 int cmd_blame(int argc, const char **argv, const char *prefix)
@@ -729,6 +729,7 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
        for (;;) {
                switch (parse_options_step(&ctx, options, blame_opt_usage)) {
                case PARSE_OPT_HELP:
+               case PARSE_OPT_ERROR:
                        exit(129);
                case PARSE_OPT_DONE:
                        if (ctx.argv[0])
index 6d0cea9d4bcc4eb866280d6424a6dec32b5f9c87..efc9ac1922c8c45e13cddb82a342885153ae0fba 100644 (file)
--- a/builtin/branch.c
+++ b/builtin/branch.c
@@ -273,7 +273,7 @@ static int delete_branches(int argc, const char **argv, int force, int kinds,
                               bname.buf,
                               (flags & REF_ISBROKEN) ? "broken"
                               : (flags & REF_ISSYMREF) ? target
-                              : find_unique_abbrev(oid.hash, DEFAULT_ABBREV));
+                              : find_unique_abbrev(&oid, DEFAULT_ABBREV));
                }
                delete_branch_config(bname.buf);
 
@@ -391,7 +391,6 @@ static void print_ref_list(struct ref_filter *filter, struct ref_sorting *sortin
        struct ref_array array;
        int maxwidth = 0;
        const char *remote_prefix = "";
-       struct strbuf out = STRBUF_INIT;
        char *to_free = NULL;
 
        /*
@@ -419,7 +418,10 @@ static void print_ref_list(struct ref_filter *filter, struct ref_sorting *sortin
        ref_array_sort(sorting, &array);
 
        for (i = 0; i < array.nr; i++) {
-               format_ref_array_item(array.items[i], format, &out);
+               struct strbuf out = STRBUF_INIT;
+               struct strbuf err = STRBUF_INIT;
+               if (format_ref_array_item(array.items[i], format, &out, &err))
+                       die("%s", err.buf);
                if (column_active(colopts)) {
                        assert(!filter->verbose && "--column and --verbose are incompatible");
                         /* format to a string_list to let print_columns() do its job */
@@ -428,6 +430,7 @@ static void print_ref_list(struct ref_filter *filter, struct ref_sorting *sortin
                        fwrite(out.buf, 1, out.len, stdout);
                        putchar('\n');
                }
+               strbuf_release(&err);
                strbuf_release(&out);
        }
 
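In print_ref_list() each iteration now works with its own out/err pair: format_ref_array_item() returns non-zero and reports the problem through the caller-supplied err strbuf instead of dying inside the formatting code. A usage sketch, assuming a struct ref_array_item *item and a struct ref_format *format are already at hand:

    struct strbuf out = STRBUF_INIT;
    struct strbuf err = STRBUF_INIT;

    if (format_ref_array_item(item, format, &out, &err))
            die("%s", err.buf);     /* formatting errors surface here */
    fwrite(out.buf, 1, out.len, stdout);
    putchar('\n');
    strbuf_release(&err);
    strbuf_release(&out);
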
index d90170f070f4f6ab7750c31fdcfac462c934b26d..b8ecbea98e966f00004d85cdb1b9f43f2fb444a2 100644 (file)
--- a/builtin/cat-file.c
+++ b/builtin/cat-file.c
@@ -32,7 +32,7 @@ static int filter_object(const char *path, unsigned mode,
 {
        enum object_type type;
 
-       *buf = read_sha1_file(oid->hash, &type, size);
+       *buf = read_object_file(oid, &type, size);
        if (!*buf)
                return error(_("cannot read object %s '%s'"),
                             oid_to_hex(oid), path);
@@ -77,7 +77,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
        switch (opt) {
        case 't':
                oi.type_name = &sb;
-               if (sha1_object_info_extended(oid.hash, &oi, flags) < 0)
+               if (oid_object_info_extended(the_repository, &oid, &oi, flags) < 0)
                        die("git cat-file: could not get object info");
                if (sb.len) {
                        printf("%s\n", sb.buf);
@@ -88,7 +88,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
 
        case 's':
                oi.sizep = &size;
-               if (sha1_object_info_extended(oid.hash, &oi, flags) < 0)
+               if (oid_object_info_extended(the_repository, &oid, &oi, flags) < 0)
                        die("git cat-file: could not get object info");
                printf("%lu\n", size);
                return 0;
@@ -116,7 +116,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                /* else fallthrough */
 
        case 'p':
-               type = sha1_object_info(oid.hash, NULL);
+               type = oid_object_info(the_repository, &oid, NULL);
                if (type < 0)
                        die("Not a valid object name %s", obj_name);
 
@@ -130,7 +130,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
 
                if (type == OBJ_BLOB)
                        return stream_blob_to_fd(1, &oid, NULL, 0);
-               buf = read_sha1_file(oid.hash, &type, &size);
+               buf = read_object_file(&oid, &type, &size);
                if (!buf)
                        die("Cannot read object %s", obj_name);
 
@@ -140,8 +140,9 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
        case 0:
                if (type_from_string(exp_type) == OBJ_BLOB) {
                        struct object_id blob_oid;
-                       if (sha1_object_info(oid.hash, NULL) == OBJ_TAG) {
-                               char *buffer = read_sha1_file(oid.hash, &type, &size);
+                       if (oid_object_info(the_repository, &oid, NULL) == OBJ_TAG) {
+                               char *buffer = read_object_file(&oid, &type,
+                                                               &size);
                                const char *target;
                                if (!skip_prefix(buffer, "object ", &target) ||
                                    get_oid_hex(target, &blob_oid))
@@ -150,7 +151,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                        } else
                                oidcpy(&blob_oid, &oid);
 
-                       if (sha1_object_info(blob_oid.hash, NULL) == OBJ_BLOB)
+                       if (oid_object_info(the_repository, &blob_oid, NULL) == OBJ_BLOB)
                                return stream_blob_to_fd(1, &blob_oid, NULL, 0);
                        /*
                         * we attempted to dereference a tag to a blob
@@ -159,7 +160,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                         * fall-back to the usual case.
                         */
                }
-               buf = read_object_with_reference(oid.hash, exp_type, &size, NULL);
+               buf = read_object_with_reference(&oid, exp_type, &size, NULL);
                break;
 
        default:
@@ -304,8 +305,9 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
                                enum object_type type;
                                if (!textconv_object(data->rest, 0100644, oid,
                                                     1, &contents, &size))
-                                       contents = read_sha1_file(oid->hash, &type,
-                                                                 &size);
+                                       contents = read_object_file(oid,
+                                                                   &type,
+                                                                   &size);
                                if (!contents)
                                        die("could not convert '%s' %s",
                                            oid_to_hex(oid), data->rest);
@@ -321,7 +323,7 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
                unsigned long size;
                void *contents;
 
-               contents = read_sha1_file(oid->hash, &type, &size);
+               contents = read_object_file(oid, &type, &size);
                if (!contents)
                        die("object %s disappeared", oid_to_hex(oid));
                if (type != data->type)
@@ -340,8 +342,8 @@ static void batch_object_write(const char *obj_name, struct batch_options *opt,
        struct strbuf buf = STRBUF_INIT;
 
        if (!data->skip_object_info &&
-           sha1_object_info_extended(data->oid.hash, &data->info,
-                                     OBJECT_INFO_LOOKUP_REPLACE) < 0) {
+           oid_object_info_extended(the_repository, &data->oid, &data->info,
+                                    OBJECT_INFO_LOOKUP_REPLACE) < 0) {
                printf("%s missing\n",
                       obj_name ? obj_name : oid_to_hex(&data->oid));
                fflush(stdout);
index d76e13c8524003fcc5c55d706c1177f66520b9d4..2b3b768effd75e5d12d2c1828eb080f15c91895f 100644 (file)
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -66,7 +66,7 @@ static int post_checkout_hook(struct commit *old_commit, struct commit *new_comm
 
 }
 
-static int update_some(const unsigned char *sha1, struct strbuf *base,
+static int update_some(const struct object_id *oid, struct strbuf *base,
                const char *pathname, unsigned mode, int stage, void *context)
 {
        int len;
@@ -78,7 +78,7 @@ static int update_some(const unsigned char *sha1, struct strbuf *base,
 
        len = base->len + strlen(pathname);
        ce = xcalloc(1, cache_entry_size(len));
-       hashcpy(ce->oid.hash, sha1);
+       oidcpy(&ce->oid, oid);
        memcpy(ce->name, base->buf, base->len);
        memcpy(ce->name + base->len, pathname, len - base->len);
        ce->ce_flags = create_ce_flags(0) | CE_UPDATE;
@@ -405,10 +405,10 @@ static void describe_detached_head(const char *msg, struct commit *commit)
                pp_commit_easy(CMIT_FMT_ONELINE, commit, &sb);
        if (print_sha1_ellipsis()) {
                fprintf(stderr, "%s %s... %s\n", msg,
-                       find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV), sb.buf);
+                       find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV), sb.buf);
        } else {
                fprintf(stderr, "%s %s %s\n", msg,
-                       find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV), sb.buf);
+                       find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV), sb.buf);
        }
        strbuf_release(&sb);
 }
@@ -484,7 +484,8 @@ static int merge_working_tree(const struct checkout_opts *opts,
 
        resolve_undo_clear();
        if (opts->force) {
-               ret = reset_tree(new_branch_info->commit->tree, opts, 1, writeout_error);
+               ret = reset_tree(get_commit_tree(new_branch_info->commit),
+                                opts, 1, writeout_error);
                if (ret)
                        return ret;
        } else {
@@ -570,18 +571,23 @@ static int merge_working_tree(const struct checkout_opts *opts,
                        o.verbosity = 0;
                        work = write_tree_from_memory(&o);
 
-                       ret = reset_tree(new_branch_info->commit->tree, opts, 1,
+                       ret = reset_tree(get_commit_tree(new_branch_info->commit),
+                                        opts, 1,
                                         writeout_error);
                        if (ret)
                                return ret;
                        o.ancestor = old_branch_info->name;
                        o.branch1 = new_branch_info->name;
                        o.branch2 = "local";
-                       ret = merge_trees(&o, new_branch_info->commit->tree, work,
-                               old_branch_info->commit->tree, &result);
+                       ret = merge_trees(&o,
+                                         get_commit_tree(new_branch_info->commit),
+                                         work,
+                                         get_commit_tree(old_branch_info->commit),
+                                         &result);
                        if (ret < 0)
                                exit(128);
-                       ret = reset_tree(new_branch_info->commit->tree, opts, 0,
+                       ret = reset_tree(get_commit_tree(new_branch_info->commit),
+                                        opts, 0,
                                         writeout_error);
                        strbuf_release(&o.obuf);
                        if (ret)
@@ -720,7 +726,7 @@ static int add_pending_uninteresting_ref(const char *refname,
 static void describe_one_orphan(struct strbuf *sb, struct commit *commit)
 {
        strbuf_addstr(sb, "  ");
-       strbuf_add_unique_abbrev(sb, commit->object.oid.hash, DEFAULT_ABBREV);
+       strbuf_add_unique_abbrev(sb, &commit->object.oid, DEFAULT_ABBREV);
        strbuf_addch(sb, ' ');
        if (!parse_commit(commit))
                pp_commit_easy(CMIT_FMT_ONELINE, commit, sb);
@@ -778,7 +784,7 @@ static void suggest_reattach(struct commit *commit, struct rev_info *revs)
                        " git branch <new-branch-name> %s\n\n",
                        /* Give ngettext() the count */
                        lost),
-                       find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV));
+                       find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV));
 }
 
 /*
@@ -1002,7 +1008,7 @@ static int parse_branchname_arg(int argc, const char **argv,
                *source_tree = parse_tree_indirect(rev);
        } else {
                parse_commit_or_die(new_branch_info->commit);
-               *source_tree = new_branch_info->commit->tree;
+               *source_tree = get_commit_tree(new_branch_info->commit);
        }
 
        if (!*source_tree)                   /* case (1): want a tree */
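
checkout.c, like blame.c and fast-export.c elsewhere in this merge, stops dereferencing commit->tree directly and goes through get_commit_tree()/get_commit_tree_oid(), which leaves room for filling in tree information lazily (for example from the commit-graph file). A sketch of the accessor-based pattern using only calls that appear in these hunks (diff_against_parent() is a made-up name):

    static void diff_against_parent(struct commit *parent, struct commit *commit,
                                    struct diff_options *opts)
    {
            /* callers parse the commits first, e.g. via parse_commit_or_die() */
            diff_tree_oid(get_commit_tree_oid(parent),
                          get_commit_tree_oid(commit), "", opts);
            diffcore_std(opts);
    }
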
index 101c27a593f4c64a735410f18bfcb46489728696..84f1473d19dc5a521e58c0bc1a7363808888dff1 100644 (file)
--- a/builtin/clone.c
+++ b/builtin/clone.c
@@ -27,6 +27,7 @@
 #include "connected.h"
 #include "packfile.h"
 #include "list-objects-filter-options.h"
+#include "object-store.h"
 
 /*
  * Overall FIXMEs:
@@ -1134,7 +1135,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
        if (transport->smart_options && !deepen && !filter_options.choice)
                transport->smart_options->check_self_contained_and_connected = 1;
 
-       refs = transport_get_remote_refs(transport);
+       refs = transport_get_remote_refs(transport, NULL);
 
        if (refs) {
                mapped_refs = wanted_peer_refs(refs, refspec);
@@ -1217,7 +1218,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
        transport_disconnect(transport);
 
        if (option_dissociate) {
-               close_all_packs();
+               close_all_packs(the_repository->objects);
                dissociate_from_references();
        }
 
index 0c3223d64b159580935bf24f8583a35a1ae903ff..5228ccf37a5c8f568091ebef6df86fda40aa93dc 100644 (file)
--- a/builtin/column.c
+++ b/builtin/column.c
@@ -42,7 +42,6 @@ int cmd_column(int argc, const char **argv, const char *prefix)
                git_config(column_config, NULL);
 
        memset(&copts, 0, sizeof(copts));
-       copts.width = term_columns();
        copts.padding = 1;
        argc = parse_options(argc, argv, "", options, builtin_column_usage, 0);
        if (argc)
diff --git a/builtin/commit-graph.c b/builtin/commit-graph.c
new file mode 100644 (file)
index 0000000..37420ae
--- /dev/null
+++ b/builtin/commit-graph.c
@@ -0,0 +1,171 @@
+#include "builtin.h"
+#include "config.h"
+#include "dir.h"
+#include "lockfile.h"
+#include "parse-options.h"
+#include "commit-graph.h"
+
+static char const * const builtin_commit_graph_usage[] = {
+       N_("git commit-graph [--object-dir <objdir>]"),
+       N_("git commit-graph read [--object-dir <objdir>]"),
+       N_("git commit-graph write [--object-dir <objdir>] [--append] [--stdin-packs|--stdin-commits]"),
+       NULL
+};
+
+static const char * const builtin_commit_graph_read_usage[] = {
+       N_("git commit-graph read [--object-dir <objdir>]"),
+       NULL
+};
+
+static const char * const builtin_commit_graph_write_usage[] = {
+       N_("git commit-graph write [--object-dir <objdir>] [--append] [--stdin-packs|--stdin-commits]"),
+       NULL
+};
+
+static struct opts_commit_graph {
+       const char *obj_dir;
+       int stdin_packs;
+       int stdin_commits;
+       int append;
+} opts;
+
+static int graph_read(int argc, const char **argv)
+{
+       struct commit_graph *graph = NULL;
+       char *graph_name;
+
+       static struct option builtin_commit_graph_read_options[] = {
+               OPT_STRING(0, "object-dir", &opts.obj_dir,
+                       N_("dir"),
+                       N_("The object directory to store the graph")),
+               OPT_END(),
+       };
+
+       argc = parse_options(argc, argv, NULL,
+                            builtin_commit_graph_read_options,
+                            builtin_commit_graph_read_usage, 0);
+
+       if (!opts.obj_dir)
+               opts.obj_dir = get_object_directory();
+
+       graph_name = get_commit_graph_filename(opts.obj_dir);
+       graph = load_commit_graph_one(graph_name);
+
+       if (!graph)
+               die("graph file %s does not exist", graph_name);
+       FREE_AND_NULL(graph_name);
+
+       printf("header: %08x %d %d %d %d\n",
+               ntohl(*(uint32_t*)graph->data),
+               *(unsigned char*)(graph->data + 4),
+               *(unsigned char*)(graph->data + 5),
+               *(unsigned char*)(graph->data + 6),
+               *(unsigned char*)(graph->data + 7));
+       printf("num_commits: %u\n", graph->num_commits);
+       printf("chunks:");
+
+       if (graph->chunk_oid_fanout)
+               printf(" oid_fanout");
+       if (graph->chunk_oid_lookup)
+               printf(" oid_lookup");
+       if (graph->chunk_commit_data)
+               printf(" commit_metadata");
+       if (graph->chunk_large_edges)
+               printf(" large_edges");
+       printf("\n");
+
+       return 0;
+}
+
+static int graph_write(int argc, const char **argv)
+{
+       const char **pack_indexes = NULL;
+       int packs_nr = 0;
+       const char **commit_hex = NULL;
+       int commits_nr = 0;
+       const char **lines = NULL;
+       int lines_nr = 0;
+       int lines_alloc = 0;
+
+       static struct option builtin_commit_graph_write_options[] = {
+               OPT_STRING(0, "object-dir", &opts.obj_dir,
+                       N_("dir"),
+                       N_("The object directory to store the graph")),
+               OPT_BOOL(0, "stdin-packs", &opts.stdin_packs,
+                       N_("scan pack-indexes listed by stdin for commits")),
+               OPT_BOOL(0, "stdin-commits", &opts.stdin_commits,
+                       N_("start walk at commits listed by stdin")),
+               OPT_BOOL(0, "append", &opts.append,
+                       N_("include all commits already in the commit-graph file")),
+               OPT_END(),
+       };
+
+       argc = parse_options(argc, argv, NULL,
+                            builtin_commit_graph_write_options,
+                            builtin_commit_graph_write_usage, 0);
+
+       if (opts.stdin_packs && opts.stdin_commits)
+               die(_("cannot use both --stdin-commits and --stdin-packs"));
+       if (!opts.obj_dir)
+               opts.obj_dir = get_object_directory();
+
+       if (opts.stdin_packs || opts.stdin_commits) {
+               struct strbuf buf = STRBUF_INIT;
+               lines_nr = 0;
+               lines_alloc = 128;
+               ALLOC_ARRAY(lines, lines_alloc);
+
+               while (strbuf_getline(&buf, stdin) != EOF) {
+                       ALLOC_GROW(lines, lines_nr + 1, lines_alloc);
+                       lines[lines_nr++] = strbuf_detach(&buf, NULL);
+               }
+
+               if (opts.stdin_packs) {
+                       pack_indexes = lines;
+                       packs_nr = lines_nr;
+               }
+               if (opts.stdin_commits) {
+                       commit_hex = lines;
+                       commits_nr = lines_nr;
+               }
+       }
+
+       write_commit_graph(opts.obj_dir,
+                          pack_indexes,
+                          packs_nr,
+                          commit_hex,
+                          commits_nr,
+                          opts.append);
+
+       return 0;
+}
+
+int cmd_commit_graph(int argc, const char **argv, const char *prefix)
+{
+       static struct option builtin_commit_graph_options[] = {
+               OPT_STRING(0, "object-dir", &opts.obj_dir,
+                       N_("dir"),
+                       N_("The object directory to store the graph")),
+               OPT_END(),
+       };
+
+       if (argc == 2 && !strcmp(argv[1], "-h"))
+               usage_with_options(builtin_commit_graph_usage,
+                                  builtin_commit_graph_options);
+
+       git_config(git_default_config, NULL);
+       argc = parse_options(argc, argv, prefix,
+                            builtin_commit_graph_options,
+                            builtin_commit_graph_usage,
+                            PARSE_OPT_STOP_AT_NON_OPTION);
+
+       if (argc > 0) {
+               if (!strcmp(argv[0], "read"))
+                       return graph_read(argc, argv);
+               if (!strcmp(argv[0], "write"))
+                       return graph_write(argc, argv);
+       }
+
+       usage_with_options(builtin_commit_graph_usage,
+                          builtin_commit_graph_options);
+}
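
The new builtin is a thin driver over the commit-graph API: write_commit_graph() builds the file, get_commit_graph_filename() and load_commit_graph_one() open it again. A rough caller sketch with all defaults (no pack list, no commit list, no append), using only names declared above or in commit-graph.h:

    const char *obj_dir = get_object_directory();
    char *graph_name;
    struct commit_graph *graph;

    /* no --stdin-packs/--stdin-commits input: let the writer find commits itself */
    write_commit_graph(obj_dir, NULL, 0, NULL, 0, 0);

    graph_name = get_commit_graph_filename(obj_dir);
    graph = load_commit_graph_one(graph_name);
    if (!graph)
            die("graph file %s does not exist", graph_name);
    printf("num_commits: %u\n", graph->num_commits);
    free(graph_name);
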
index e5bdf57b1e14cf54916c7eef077e63edddb28901..ecf42191da10cd2e87360f001d5493e792b9682e 100644 (file)
--- a/builtin/commit-tree.c
+++ b/builtin/commit-tree.c
@@ -58,7 +58,7 @@ int cmd_commit_tree(int argc, const char **argv, const char *prefix)
                                usage(commit_tree_usage);
                        if (get_oid_commit(argv[i], &oid))
                                die("Not a valid object name %s", argv[i]);
-                       assert_sha1_type(oid.hash, OBJ_COMMIT);
+                       assert_oid_type(&oid, OBJ_COMMIT);
                        new_parent(lookup_commit(&oid), &parents);
                        continue;
                }
index 37fcb55ab0a03a5fdabaca1913bc700201fd8e10..5240f112257566a13ef0697783c713c822c7b256 100644 (file)
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -161,9 +161,9 @@ static void determine_whence(struct wt_status *s)
 static void status_init_config(struct wt_status *s, config_fn_t fn)
 {
        wt_status_prepare(s);
+       init_diff_ui_defaults();
        git_config(fn, s);
        determine_whence(s);
-       init_diff_ui_defaults();
        s->hints = advice_status_hints; /* must come after git_config() */
 }
 
@@ -218,8 +218,7 @@ static int list_paths(struct string_list *list, const char *with_tree,
 
        if (with_tree) {
                char *max_prefix = common_prefix(pattern);
-               overlay_tree_on_index(&the_index, with_tree,
-                                     max_prefix ? max_prefix : prefix);
+               overlay_tree_on_index(&the_index, with_tree, max_prefix);
                free(max_prefix);
        }
 
index 01169dd628b24a7b5502550a6342ab73cb8154c5..69e7270356c5a4da8372201ac80ec0d33e8909c2 100644 (file)
--- a/builtin/config.c
+++ b/builtin/config.c
@@ -25,7 +25,8 @@ static char term = '\n';
 
 static int use_global_config, use_system_config, use_local_config;
 static struct git_config_source given_config_source;
-static int actions, types;
+static int actions, type;
+static char *default_value;
 static int end_null;
 static int respect_includes_opt = -1;
 static struct config_options config_options;
@@ -55,11 +56,68 @@ static int show_origin;
 #define PAGING_ACTIONS (ACTION_LIST | ACTION_GET_ALL | \
                        ACTION_GET_REGEXP | ACTION_GET_URLMATCH)
 
-#define TYPE_BOOL (1<<0)
-#define TYPE_INT (1<<1)
-#define TYPE_BOOL_OR_INT (1<<2)
-#define TYPE_PATH (1<<3)
-#define TYPE_EXPIRY_DATE (1<<4)
+#define TYPE_BOOL              1
+#define TYPE_INT               2
+#define TYPE_BOOL_OR_INT       3
+#define TYPE_PATH              4
+#define TYPE_EXPIRY_DATE       5
+#define TYPE_COLOR             6
+
+#define OPT_CALLBACK_VALUE(s, l, v, h, i) \
+       { OPTION_CALLBACK, (s), (l), (v), NULL, (h), PARSE_OPT_NOARG | \
+       PARSE_OPT_NONEG, option_parse_type, (i) }
+
+static struct option builtin_config_options[];
+
+static int option_parse_type(const struct option *opt, const char *arg,
+                            int unset)
+{
+       int new_type, *to_type;
+
+       if (unset) {
+               *((int *) opt->value) = 0;
+               return 0;
+       }
+
+       /*
+        * To support '--<type>' style flags, begin with new_type equal to
+        * opt->defval.
+        */
+       new_type = opt->defval;
+       if (!new_type) {
+               if (!strcmp(arg, "bool"))
+                       new_type = TYPE_BOOL;
+               else if (!strcmp(arg, "int"))
+                       new_type = TYPE_INT;
+               else if (!strcmp(arg, "bool-or-int"))
+                       new_type = TYPE_BOOL_OR_INT;
+               else if (!strcmp(arg, "path"))
+                       new_type = TYPE_PATH;
+               else if (!strcmp(arg, "expiry-date"))
+                       new_type = TYPE_EXPIRY_DATE;
+               else if (!strcmp(arg, "color"))
+                       new_type = TYPE_COLOR;
+               else
+                       die(_("unrecognized --type argument, %s"), arg);
+       }
+
+       to_type = opt->value;
+       if (*to_type && *to_type != new_type) {
+               /*
+                * Complain when there is a new type not equal to the old type.
+                * This allows for combinations like '--int --type=int' and
+                * '--type=int --type=int', but disallows ones like '--type=bool
+                * --int' and '--type=bool
+                * --type=int'.
+                */
+               error("only one type at a time.");
+               usage_with_options(builtin_config_usage,
+                       builtin_config_options);
+       }
+       *to_type = new_type;
+
+       return 0;
+}
 
 static struct option builtin_config_options[] = {
        OPT_GROUP(N_("Config file location")),
@@ -84,16 +142,18 @@ static struct option builtin_config_options[] = {
        OPT_BIT(0, "get-color", &actions, N_("find the color configured: slot [default]"), ACTION_GET_COLOR),
        OPT_BIT(0, "get-colorbool", &actions, N_("find the color setting: slot [stdout-is-tty]"), ACTION_GET_COLORBOOL),
        OPT_GROUP(N_("Type")),
-       OPT_BIT(0, "bool", &types, N_("value is \"true\" or \"false\""), TYPE_BOOL),
-       OPT_BIT(0, "int", &types, N_("value is decimal number"), TYPE_INT),
-       OPT_BIT(0, "bool-or-int", &types, N_("value is --bool or --int"), TYPE_BOOL_OR_INT),
-       OPT_BIT(0, "path", &types, N_("value is a path (file or directory name)"), TYPE_PATH),
-       OPT_BIT(0, "expiry-date", &types, N_("value is an expiry date"), TYPE_EXPIRY_DATE),
+       OPT_CALLBACK('t', "type", &type, "", N_("value is given this type"), option_parse_type),
+       OPT_CALLBACK_VALUE(0, "bool", &type, N_("value is \"true\" or \"false\""), TYPE_BOOL),
+       OPT_CALLBACK_VALUE(0, "int", &type, N_("value is decimal number"), TYPE_INT),
+       OPT_CALLBACK_VALUE(0, "bool-or-int", &type, N_("value is --bool or --int"), TYPE_BOOL_OR_INT),
+       OPT_CALLBACK_VALUE(0, "path", &type, N_("value is a path (file or directory name)"), TYPE_PATH),
+       OPT_CALLBACK_VALUE(0, "expiry-date", &type, N_("value is an expiry date"), TYPE_EXPIRY_DATE),
        OPT_GROUP(N_("Other")),
        OPT_BOOL('z', "null", &end_null, N_("terminate values with NUL byte")),
        OPT_BOOL(0, "name-only", &omit_values, N_("show variable names only")),
        OPT_BOOL(0, "includes", &respect_includes_opt, N_("respect include directives on lookup")),
        OPT_BOOL(0, "show-origin", &show_origin, N_("show origin of config (file, standard input, blob, command line)")),
+       OPT_STRING(0, "default", &default_value, N_("value"), N_("with --get, use default value when missing entry")),
        OPT_END(),
 };
 
@@ -149,30 +209,35 @@ static int format_config(struct strbuf *buf, const char *key_, const char *value
                if (show_keys)
                        strbuf_addch(buf, key_delim);
 
-               if (types == TYPE_INT)
+               if (type == TYPE_INT)
                        strbuf_addf(buf, "%"PRId64,
                                    git_config_int64(key_, value_ ? value_ : ""));
-               else if (types == TYPE_BOOL)
+               else if (type == TYPE_BOOL)
                        strbuf_addstr(buf, git_config_bool(key_, value_) ?
                                      "true" : "false");
-               else if (types == TYPE_BOOL_OR_INT) {
+               else if (type == TYPE_BOOL_OR_INT) {
                        int is_bool, v;
                        v = git_config_bool_or_int(key_, value_, &is_bool);
                        if (is_bool)
                                strbuf_addstr(buf, v ? "true" : "false");
                        else
                                strbuf_addf(buf, "%d", v);
-               } else if (types == TYPE_PATH) {
+               } else if (type == TYPE_PATH) {
                        const char *v;
                        if (git_config_pathname(&v, key_, value_) < 0)
                                return -1;
                        strbuf_addstr(buf, v);
                        free((char *)v);
-               } else if (types == TYPE_EXPIRY_DATE) {
+               } else if (type == TYPE_EXPIRY_DATE) {
                        timestamp_t t;
                        if (git_config_expiry_date(&t, key_, value_) < 0)
                                return -1;
                        strbuf_addf(buf, "%"PRItime, t);
+               } else if (type == TYPE_COLOR) {
+                       char v[COLOR_MAXLEN];
+                       if (git_config_color(v, key_, value_) < 0)
+                               return -1;
+                       strbuf_addstr(buf, v);
                } else if (value_) {
                        strbuf_addstr(buf, value_);
                } else {
@@ -258,6 +323,16 @@ static int get_value(const char *key_, const char *regex_)
        config_with_options(collect_config, &values,
                            &given_config_source, &config_options);
 
+       if (!values.nr && default_value) {
+               struct strbuf *item;
+               ALLOC_GROW(values.items, values.nr + 1, values.alloc);
+               item = &values.items[values.nr++];
+               strbuf_init(item, 0);
+               if (format_config(item, key_, default_value) < 0)
+                       die(_("failed to format default config value: %s"),
+                               default_value);
+       }
+
        ret = !values.nr;
 
        for (i = 0; i < values.nr; i++) {
@@ -287,7 +362,7 @@ static char *normalize_value(const char *key, const char *value)
        if (!value)
                return NULL;
 
-       if (types == 0 || types == TYPE_PATH || types == TYPE_EXPIRY_DATE)
+       if (type == 0 || type == TYPE_PATH || type == TYPE_EXPIRY_DATE)
                /*
                 * We don't do normalization for TYPE_PATH here: If
                 * the path is like ~/foobar/, we prefer to store
@@ -296,11 +371,11 @@ static char *normalize_value(const char *key, const char *value)
                 * Also don't do normalization for expiry dates.
                 */
                return xstrdup(value);
-       if (types == TYPE_INT)
+       if (type == TYPE_INT)
                return xstrfmt("%"PRId64, git_config_int64(key, value));
-       if (types == TYPE_BOOL)
+       if (type == TYPE_BOOL)
                return xstrdup(git_config_bool(key, value) ?  "true" : "false");
-       if (types == TYPE_BOOL_OR_INT) {
+       if (type == TYPE_BOOL_OR_INT) {
                int is_bool, v;
                v = git_config_bool_or_int(key, value, &is_bool);
                if (!is_bool)
@@ -308,8 +383,22 @@ static char *normalize_value(const char *key, const char *value)
                else
                        return xstrdup(v ? "true" : "false");
        }
+       if (type == TYPE_COLOR) {
+               char v[COLOR_MAXLEN];
+               if (git_config_color(v, key, value))
+                       die("cannot parse color '%s'", value);
+
+               /*
+                * The contents of `v` now contain an ANSI escape
+                * sequence, not suitable for including within a
+                * configuration file. Treat the above as a
+                * "sanity-check", and return the given value, which we
+                * know is representable as valid color code.
+                */
+               return xstrdup(value);
+       }
 
-       die("BUG: cannot normalize type %d", types);
+       die("BUG: cannot normalize type %d", type);
 }
 
 static int get_color_found;
@@ -566,12 +655,7 @@ int cmd_config(int argc, const char **argv, const char *prefix)
                key_delim = '\n';
        }
 
-       if (HAS_MULTI_BITS(types)) {
-               error("only one type at a time.");
-               usage_with_options(builtin_config_usage, builtin_config_options);
-       }
-
-       if ((actions & (ACTION_GET_COLOR|ACTION_GET_COLORBOOL)) && types) {
+       if ((actions & (ACTION_GET_COLOR|ACTION_GET_COLORBOOL)) && type) {
                error("--get-color and variable type are incoherent");
                usage_with_options(builtin_config_usage, builtin_config_options);
        }
@@ -601,6 +685,12 @@ int cmd_config(int argc, const char **argv, const char *prefix)
                usage_with_options(builtin_config_usage, builtin_config_options);
        }
 
+       if (default_value && !(actions & ACTION_GET)) {
+               error("--default is only applicable to --get");
+               usage_with_options(builtin_config_usage,
+                       builtin_config_options);
+       }
+
        if (actions & PAGING_ACTIONS)
                setup_auto_pager("config", 1);
 
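The TYPE_COLOR handling leans on git_config_color(): with --get it prints the resolved ANSI sequence, while normalize_value() only uses it as a parse check and stores the human-readable spelling. A small sketch of that check (the key and value are illustrative):

    char parsed[COLOR_MAXLEN];

    if (git_config_color(parsed, "color.diff.new", "bold green") < 0)
            die("cannot parse color '%s'", "bold green");
    /* 'parsed' now holds the escape sequence; the config file keeps "bold green" */
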
index 33343818c830bf64452640b14b2ce7b876221022..b054713e1a1e7d83563df0ec82d46dbca6bdb666 100644 (file)
--- a/builtin/count-objects.c
+++ b/builtin/count-objects.c
@@ -7,10 +7,12 @@
 #include "cache.h"
 #include "config.h"
 #include "dir.h"
+#include "repository.h"
 #include "builtin.h"
 #include "parse-options.h"
 #include "quote.h"
 #include "packfile.h"
+#include "object-store.h"
 
 static unsigned long garbage;
 static off_t size_garbage;
@@ -120,9 +122,8 @@ int cmd_count_objects(int argc, const char **argv, const char *prefix)
                struct strbuf loose_buf = STRBUF_INIT;
                struct strbuf pack_buf = STRBUF_INIT;
                struct strbuf garbage_buf = STRBUF_INIT;
-               if (!packed_git)
-                       prepare_packed_git();
-               for (p = packed_git; p; p = p->next) {
+
+               for (p = get_packed_git(the_repository); p; p = p->next) {
                        if (!p->pack_local)
                                continue;
                        if (open_pack_index(p))
index e4869df7b434845544dfcc0c37cae6a77cd42dad..a4160e7f5d1321e08be78f86d63b780a1711616f 100644 (file)
--- a/builtin/describe.c
+++ b/builtin/describe.c
@@ -6,7 +6,7 @@
 #include "blob.h"
 #include "refs.h"
 #include "builtin.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "parse-options.h"
 #include "revision.h"
 #include "diff.h"
@@ -285,7 +285,7 @@ static void append_name(struct commit_name *n, struct strbuf *dst)
 
 static void append_suffix(int depth, const struct object_id *oid, struct strbuf *dst)
 {
-       strbuf_addf(dst, "-%d-g%s", depth, find_unique_abbrev(oid->hash, abbrev));
+       strbuf_addf(dst, "-%d-g%s", depth, find_unique_abbrev(oid, abbrev));
 }
 
 static void describe_commit(struct object_id *oid, struct strbuf *dst)
@@ -383,7 +383,7 @@ static void describe_commit(struct object_id *oid, struct strbuf *dst)
        if (!match_cnt) {
                struct object_id *cmit_oid = &cmit->object.oid;
                if (always) {
-                       strbuf_add_unique_abbrev(dst, cmit_oid->hash, abbrev);
+                       strbuf_add_unique_abbrev(dst, cmit_oid, abbrev);
                        if (suffix)
                                strbuf_addstr(dst, suffix);
                        return;
@@ -502,7 +502,7 @@ static void describe(const char *arg, int last_one)
 
        if (cmit)
                describe_commit(&oid, &sb);
-       else if (sha1_object_info(oid.hash, NULL) == OBJ_BLOB)
+       else if (oid_object_info(the_repository, &oid, NULL) == OBJ_BLOB)
                describe_blob(oid, &sb);
        else
                die(_("%s is neither a commit nor blob"), arg);
index 16bfb22f7381ee8e6967ab836686c5def7cff892..bfefff3a84896a79fbed42eec1121286edcc86dd 100644 (file)
--- a/builtin/diff.c
+++ b/builtin/diff.c
@@ -398,7 +398,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix)
                if (!obj)
                        die(_("invalid object '%s' given."), name);
                if (obj->type == OBJ_COMMIT)
-                       obj = &((struct commit *)obj)->tree->object;
+                       obj = &get_commit_tree(((struct commit *)obj))->object;
 
                if (obj->type == OBJ_TREE) {
                        obj->flags |= flags;
index bcc79d1888f2217bcb380ffb1e7178c100a41e8e..aad0e073ee61648a73843ceddffbab6047c9721b 100644 (file)
--- a/builtin/difftool.c
+++ b/builtin/difftool.c
@@ -15,7 +15,7 @@
 #include "config.h"
 #include "builtin.h"
 #include "run-command.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "parse-options.h"
 #include "argv-array.h"
 #include "strbuf.h"
@@ -306,7 +306,7 @@ static char *get_symlink(const struct object_id *oid, const char *path)
        } else {
                enum object_type type;
                unsigned long size;
-               data = read_sha1_file(oid->hash, &type, &size);
+               data = read_object_file(oid, &type, &size);
                if (!data)
                        die(_("could not read object %s for symlink %s"),
                                oid_to_hex(oid), path);
index 27b2cc138e67c013adbee3cbe152ca48c3d82ff5..68a762fbeaa1363d6b0d709f1500b1925796a711 100644 (file)
--- a/builtin/fast-export.c
+++ b/builtin/fast-export.c
@@ -237,10 +237,10 @@ static void export_blob(const struct object_id *oid)
                object = (struct object *)lookup_blob(oid);
                eaten = 0;
        } else {
-               buf = read_sha1_file(oid->hash, &type, &size);
+               buf = read_object_file(oid, &type, &size);
                if (!buf)
                        die ("Could not read blob %s", oid_to_hex(oid));
-               if (check_sha1_signature(oid->hash, buf, size, type_name(type)) < 0)
+               if (check_object_signature(oid, buf, size, type_name(type)) < 0)
                        die("sha1 mismatch in blob %s", oid_to_hex(oid));
                object = parse_object_buffer(oid, type, size, buf, &eaten);
        }
@@ -578,11 +578,11 @@ static void handle_commit(struct commit *commit, struct rev_info *rev,
            get_object_mark(&commit->parents->item->object) != 0 &&
            !full_tree) {
                parse_commit_or_die(commit->parents->item);
-               diff_tree_oid(&commit->parents->item->tree->object.oid,
-                             &commit->tree->object.oid, "", &rev->diffopt);
+               diff_tree_oid(get_commit_tree_oid(commit->parents->item),
+                             get_commit_tree_oid(commit), "", &rev->diffopt);
        }
        else
-               diff_root_tree_oid(&commit->tree->object.oid,
+               diff_root_tree_oid(get_commit_tree_oid(commit),
                                   "", &rev->diffopt);
 
        /* Export the referenced blobs, and remember the marks. */
@@ -651,8 +651,11 @@ static void handle_tail(struct object_array *commits, struct rev_info *revs,
        struct commit *commit;
        while (commits->nr) {
                commit = (struct commit *)object_array_pop(commits);
-               if (has_unshown_parent(commit))
+               if (has_unshown_parent(commit)) {
+                       /* Queue again, to be handled later */
+                       add_object_array(&commit->object, NULL, commits);
                        return;
+               }
                handle_commit(commit, revs, paths_of_changed_objects);
        }
 }
@@ -682,7 +685,7 @@ static void handle_tag(const char *name, struct tag *tag)
                return;
        }
 
-       buf = read_sha1_file(tag->object.oid.hash, &type, &size);
+       buf = read_object_file(&tag->object.oid, &type, &size);
        if (!buf)
                die ("Could not read tag %s", oid_to_hex(&tag->object.oid));
        message = memmem(buf, size, "\n\n", 2);
@@ -947,7 +950,7 @@ static void import_marks(char *input_file)
                if (last_idnum < mark)
                        last_idnum = mark;
 
-               type = sha1_object_info(oid.hash, NULL);
+               type = oid_object_info(the_repository, &oid, NULL);
                if (type < 0)
                        die("object not found: %s", oid_to_hex(&oid));
 
index a7bc1366ab375765c41014640743ef9d77c84c42..1a1bc63566b44bc83c8429463104615d1b2117ff 100644 (file)
--- a/builtin/fetch-pack.c
+++ b/builtin/fetch-pack.c
@@ -4,6 +4,7 @@
 #include "remote.h"
 #include "connect.h"
 #include "sha1-array.h"
+#include "protocol.h"
 
 static const char fetch_pack_usage[] =
 "git fetch-pack [--all] [--stdin] [--quiet | -q] [--keep | -k] [--thin] "
@@ -52,6 +53,7 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
        struct fetch_pack_args args;
        struct oid_array shallow = OID_ARRAY_INIT;
        struct string_list deepen_not = STRING_LIST_INIT_DUP;
+       struct packet_reader reader;
 
        fetch_if_missing = 0;
 
@@ -211,10 +213,24 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
                if (!conn)
                        return args.diag_url ? 0 : 1;
        }
-       get_remote_heads(fd[0], NULL, 0, &ref, 0, NULL, &shallow);
+
+       packet_reader_init(&reader, fd[0], NULL, 0,
+                          PACKET_READ_CHOMP_NEWLINE |
+                          PACKET_READ_GENTLE_ON_EOF);
+
+       switch (discover_version(&reader)) {
+       case protocol_v2:
+               die("support for protocol v2 not implemented yet");
+       case protocol_v1:
+       case protocol_v0:
+               get_remote_heads(&reader, &ref, 0, NULL, &shallow);
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
+       }
 
        ref = fetch_pack(&args, fd, conn, ref, dest, sought, nr_sought,
-                        &shallow, pack_lockfile_ptr);
+                        &shallow, pack_lockfile_ptr, protocol_v0);
        if (pack_lockfile) {
                printf("lock %s\n", pack_lockfile);
                fflush(stdout);
index 6d73656a486fed1afd031e6cf5e26e1c2039e0cb..1f037e8e4b8ba41b66314c5dd9633e0164f86c0e 100644 (file)
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -62,6 +62,7 @@ static int shown_url = 0;
 static int refmap_alloc, refmap_nr;
 static const char **refmap_array;
 static struct list_objects_filter_options filter_options;
+static struct string_list server_options = STRING_LIST_INIT_DUP;
 
 static int git_fetch_config(const char *k, const char *v, void *cb)
 {
@@ -170,6 +171,7 @@ static struct option builtin_fetch_options[] = {
                 N_("accept refs that update .git/shallow")),
        { OPTION_CALLBACK, 0, "refmap", NULL, N_("refmap"),
          N_("specify fetch refmap"), PARSE_OPT_NONEG, parse_refmap_arg },
+       OPT_STRING_LIST('o', "server-option", &server_options, N_("server-specific"), N_("option to transmit")),
        OPT_SET_INT('4', "ipv4", &family, N_("use IPv4 addresses only"),
                        TRANSPORT_FAMILY_IPV4),
        OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
@@ -264,7 +266,7 @@ static void find_non_local_tags(struct transport *transport,
        struct string_list_item *item = NULL;
 
        for_each_ref(add_existing, &existing_refs);
-       for (ref = transport_get_remote_refs(transport); ref; ref = ref->next) {
+       for (ref = transport_get_remote_refs(transport, NULL); ref; ref = ref->next) {
                if (!starts_with(ref->name, "refs/tags/"))
                        continue;
 
@@ -346,11 +348,28 @@ static struct ref *get_ref_map(struct transport *transport,
        struct ref *rm;
        struct ref *ref_map = NULL;
        struct ref **tail = &ref_map;
+       struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
 
        /* opportunistically-updated references: */
        struct ref *orefs = NULL, **oref_tail = &orefs;
 
-       const struct ref *remote_refs = transport_get_remote_refs(transport);
+       const struct ref *remote_refs;
+
+       for (i = 0; i < refspec_count; i++) {
+               if (!refspecs[i].exact_sha1) {
+                       const char *glob = strchr(refspecs[i].src, '*');
+                       if (glob)
+                               argv_array_pushf(&ref_prefixes, "%.*s",
+                                                (int)(glob - refspecs[i].src),
+                                                refspecs[i].src);
+                       else
+                               expand_ref_prefix(&ref_prefixes, refspecs[i].src);
+               }
+       }
+
+       remote_refs = transport_get_remote_refs(transport, &ref_prefixes);
+
+       argv_array_clear(&ref_prefixes);
 
        if (refspec_count) {
                struct refspec *fetch_refspec;
@@ -637,7 +656,7 @@ static int update_local_ref(struct ref *ref,
        struct branch *current_branch = branch_get(NULL);
        const char *pretty_ref = prettify_refname(ref->name);
 
-       type = sha1_object_info(ref->new_oid.hash, NULL);
+       type = oid_object_info(the_repository, &ref->new_oid, NULL);
        if (type < 0)
                die(_("object %s not found"), oid_to_hex(&ref->new_oid));
 
@@ -708,9 +727,9 @@ static int update_local_ref(struct ref *ref,
        if (in_merge_bases(current, updated)) {
                struct strbuf quickref = STRBUF_INIT;
                int r;
-               strbuf_add_unique_abbrev(&quickref, current->object.oid.hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&quickref, &current->object.oid, DEFAULT_ABBREV);
                strbuf_addstr(&quickref, "..");
-               strbuf_add_unique_abbrev(&quickref, ref->new_oid.hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&quickref, &ref->new_oid, DEFAULT_ABBREV);
                if ((recurse_submodules != RECURSE_SUBMODULES_OFF) &&
                    (recurse_submodules != RECURSE_SUBMODULES_ON))
                        check_for_new_submodule_commits(&ref->new_oid);
@@ -723,9 +742,9 @@ static int update_local_ref(struct ref *ref,
        } else if (force || ref->force) {
                struct strbuf quickref = STRBUF_INIT;
                int r;
-               strbuf_add_unique_abbrev(&quickref, current->object.oid.hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&quickref, &current->object.oid, DEFAULT_ABBREV);
                strbuf_addstr(&quickref, "...");
-               strbuf_add_unique_abbrev(&quickref, ref->new_oid.hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&quickref, &ref->new_oid, DEFAULT_ABBREV);
                if ((recurse_submodules != RECURSE_SUBMODULES_OFF) &&
                    (recurse_submodules != RECURSE_SUBMODULES_ON))
                        check_for_new_submodule_commits(&ref->new_oid);
@@ -1400,6 +1419,9 @@ static int fetch_one(struct remote *remote, int argc, const char **argv, int pru
                }
        }
 
+       if (server_options.nr)
+               gtransport->server_options = &server_options;
+
        sigchain_push_common(unlock_pack_on_signal);
        atexit(unlock_pack);
        refspec = parse_fetch_refspec(ref_nr, refs);
@@ -1516,7 +1538,7 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
 
        string_list_clear(&list, 0);
 
-       close_all_packs();
+       close_all_packs(the_repository->objects);
 
        argv_array_pushl(&argv_gc_auto, "gc", "--auto", NULL);
        if (verbosity < 0)
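
The fetch.c changes above do two things: forward any -o/--server-option values to the transport, and have get_ref_map() compute ref prefixes from the configured refspecs so the server only advertises matching refs. A minimal standalone sketch of just the prefix step (plain C string handling; show_prefix() is an invented helper, the real code pushes onto an argv_array):

#include <stdio.h>
#include <string.h>

/* everything before the first '*' in a glob refspec source becomes the prefix */
static void show_prefix(const char *src)
{
	const char *glob = strchr(src, '*');

	if (glob)
		printf("src %-24s -> prefix \"%.*s\"\n",
		       src, (int)(glob - src), src);
	else
		printf("src %-24s -> exact name, handled by expand_ref_prefix()\n",
		       src);
}

int main(void)
{
	show_prefix("refs/heads/*");	/* typical fetch refspec source */
	show_prefix("refs/tags/v2.*");	/* prefix stops right before '*' */
	show_prefix("master");		/* no glob */
	return 0;
}
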
index 8e8a15ea4ad6de2bb73f63d39a0895a263918774..bd680be6874da29cf776c84468a60888beea53fb 100644 (file)
@@ -485,10 +485,10 @@ static void fmt_merge_msg_sigs(struct strbuf *out)
        struct strbuf tagbuf = STRBUF_INIT;
 
        for (i = 0; i < origins.nr; i++) {
-               unsigned char *sha1 = origins.items[i].util;
+               struct object_id *oid = origins.items[i].util;
                enum object_type type;
                unsigned long size, len;
-               char *buf = read_sha1_file(sha1, &type, &size);
+               char *buf = read_object_file(oid, &type, &size);
                struct strbuf sig = STRBUF_INIT;
 
                if (!buf || type != OBJ_TAG)
index ef78c6c00cbf4401ed672d6ad954bbbc68c9c115..9d59d7d5a215379b221b02ac34277b542e757df1 100644 (file)
@@ -1,5 +1,6 @@
 #include "builtin.h"
 #include "cache.h"
+#include "repository.h"
 #include "config.h"
 #include "commit.h"
 #include "tree.h"
@@ -16,6 +17,7 @@
 #include "streaming.h"
 #include "decorate.h"
 #include "packfile.h"
+#include "object-store.h"
 
 #define REACHABLE 0x0001
 #define SEEN      0x0002
@@ -65,7 +67,8 @@ static const char *printable_type(struct object *obj)
        const char *ret;
 
        if (obj->type == OBJ_NONE) {
-               enum object_type type = sha1_object_info(obj->oid.hash, NULL);
+               enum object_type type = oid_object_info(the_repository,
+                                                       &obj->oid, NULL);
                if (type > 0)
                        object_as_type(obj, type, 0);
        }
@@ -513,7 +516,7 @@ static struct object *parse_loose_object(const struct object_id *oid,
        unsigned long size;
        int eaten;
 
-       if (read_loose_object(path, oid->hash, &type, &size, &contents) < 0)
+       if (read_loose_object(path, oid, &type, &size, &contents) < 0)
                return NULL;
 
        if (!contents && type != OBJ_BLOB)
@@ -719,9 +722,12 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
                for_each_loose_object(mark_loose_for_connectivity, NULL, 0);
                for_each_packed_object(mark_packed_for_connectivity, NULL, 0);
        } else {
+               struct alternate_object_database *alt_odb_list;
+
                fsck_object_dir(get_object_directory());
 
-               prepare_alt_odb();
+               prepare_alt_odb(the_repository);
+               alt_odb_list = the_repository->objects->alt_odb_list;
                for (alt = alt_odb_list; alt; alt = alt->next)
                        fsck_object_dir(alt->path);
 
@@ -730,10 +736,9 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
                        uint32_t total = 0, count = 0;
                        struct progress *progress = NULL;
 
-                       prepare_packed_git();
-
                        if (show_progress) {
-                               for (p = packed_git; p; p = p->next) {
+                               for (p = get_packed_git(the_repository); p;
+                                    p = p->next) {
                                        if (open_pack_index(p))
                                                continue;
                                        total += p->num_objects;
@@ -741,7 +746,8 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
 
                                progress = start_progress(_("Checking objects"), total);
                        }
-                       for (p = packed_git; p; p = p->next) {
+                       for (p = get_packed_git(the_repository); p;
+                            p = p->next) {
                                /* verify gives error messages itself */
                                if (verify_pack(p, fsck_obj_buffer,
                                                progress, count))
index f51e5a6500fc294cb719716671259de42f31bfe7..c4777b2449e331336bac0baea317adb7c387eefb 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 #include "builtin.h"
+#include "repository.h"
 #include "config.h"
 #include "tempfile.h"
 #include "lockfile.h"
 #include "argv-array.h"
 #include "commit.h"
 #include "packfile.h"
+#include "object-store.h"
+#include "pack.h"
+#include "pack-objects.h"
+#include "blob.h"
+#include "tree.h"
 
 #define FAILED_RUN "failed to run %s"
 
@@ -39,6 +45,8 @@ static timestamp_t gc_log_expire_time;
 static const char *gc_log_expire = "1.day.ago";
 static const char *prune_expire = "2.weeks.ago";
 static const char *prune_worktrees_expire = "3.months.ago";
+static unsigned long big_pack_threshold;
+static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
 
 static struct argv_array pack_refs_cmd = ARGV_ARRAY_INIT;
 static struct argv_array reflog = ARGV_ARRAY_INIT;
@@ -126,6 +134,9 @@ static void gc_config(void)
        git_config_get_expiry("gc.worktreepruneexpire", &prune_worktrees_expire);
        git_config_get_expiry("gc.logexpiry", &gc_log_expire);
 
+       git_config_get_ulong("gc.bigpackthreshold", &big_pack_threshold);
+       git_config_get_ulong("pack.deltacachesize", &max_delta_cache_size);
+
        git_config(git_default_config, NULL);
 }
 
@@ -164,6 +175,28 @@ static int too_many_loose_objects(void)
        return needed;
 }
 
+static struct packed_git *find_base_packs(struct string_list *packs,
+                                         unsigned long limit)
+{
+       struct packed_git *p, *base = NULL;
+
+       for (p = get_packed_git(the_repository); p; p = p->next) {
+               if (!p->pack_local)
+                       continue;
+               if (limit) {
+                       if (p->pack_size >= limit)
+                               string_list_append(packs, p->pack_name);
+               } else if (!base || base->pack_size < p->pack_size) {
+                       base = p;
+               }
+       }
+
+       if (base)
+               string_list_append(packs, base->pack_name);
+
+       return base;
+}
+
 static int too_many_packs(void)
 {
        struct packed_git *p;
@@ -172,8 +205,7 @@ static int too_many_packs(void)
        if (gc_auto_pack_limit <= 0)
                return 0;
 
-       prepare_packed_git();
-       for (cnt = 0, p = packed_git; p; p = p->next) {
+       for (cnt = 0, p = get_packed_git(the_repository); p; p = p->next) {
                if (!p->pack_local)
                        continue;
                if (p->pack_keep)
@@ -187,7 +219,86 @@ static int too_many_packs(void)
        return gc_auto_pack_limit < cnt;
 }
 
-static void add_repack_all_option(void)
+static uint64_t total_ram(void)
+{
+#if defined(HAVE_SYSINFO)
+       struct sysinfo si;
+
+       if (!sysinfo(&si))
+               return si.totalram;
+#elif defined(HAVE_BSD_SYSCTL) && (defined(HW_MEMSIZE) || defined(HW_PHYSMEM))
+       int64_t physical_memory;
+       int mib[2];
+       size_t length;
+
+       mib[0] = CTL_HW;
+# if defined(HW_MEMSIZE)
+       mib[1] = HW_MEMSIZE;
+# else
+       mib[1] = HW_PHYSMEM;
+# endif
+       length = sizeof(int64_t);
+       if (!sysctl(mib, 2, &physical_memory, &length, NULL, 0))
+               return physical_memory;
+#elif defined(GIT_WINDOWS_NATIVE)
+       MEMORYSTATUSEX memInfo;
+
+       memInfo.dwLength = sizeof(MEMORYSTATUSEX);
+       if (GlobalMemoryStatusEx(&memInfo))
+               return memInfo.ullTotalPhys;
+#endif
+       return 0;
+}
+
+static uint64_t estimate_repack_memory(struct packed_git *pack)
+{
+       unsigned long nr_objects = approximate_object_count();
+       size_t os_cache, heap;
+
+       if (!pack || !nr_objects)
+               return 0;
+
+       /*
+        * First we have to scan through at least one pack.
+        * Assume enough room in OS file cache to keep the entire pack
+        * or we may accidentally evict data of other processes from
+        * the cache.
+        */
+       os_cache = pack->pack_size + pack->index_size;
+       /* then pack-objects needs lots more for book keeping */
+       heap = sizeof(struct object_entry) * nr_objects;
+       /*
+        * internal rev-list --all --objects takes up some memory too,
+        * let's say half of it is for blobs
+        */
+       heap += sizeof(struct blob) * nr_objects / 2;
+       /*
+        * and the other half is for trees (commits and tags are
+        * usually insignificant)
+        */
+       heap += sizeof(struct tree) * nr_objects / 2;
+       /* and then obj_hash[], underestimated in fact */
+       heap += sizeof(struct object *) * nr_objects;
+       /* revindex is used also */
+       heap += sizeof(struct revindex_entry) * nr_objects;
+       /*
+        * read_sha1_file() (either at delta calculation phase, or
+        * writing phase) also fills up the delta base cache
+        */
+       heap += delta_base_cache_limit;
+       /* and of course pack-objects has its own delta cache */
+       heap += max_delta_cache_size;
+
+       return os_cache + heap;
+}
+
+static int keep_one_pack(struct string_list_item *item, void *data)
+{
+       argv_array_pushf(&repack, "--keep-pack=%s", basename(item->string));
+       return 0;
+}
+
+static void add_repack_all_option(struct string_list *keep_pack)
 {
        if (prune_expire && !strcmp(prune_expire, "now"))
                argv_array_push(&repack, "-a");
@@ -196,6 +307,9 @@ static void add_repack_all_option(void)
                if (prune_expire)
                        argv_array_pushf(&repack, "--unpack-unreachable=%s", prune_expire);
        }
+
+       if (keep_pack)
+               for_each_string_list(keep_pack, keep_one_pack, NULL);
 }
 
 static void add_repack_incremental_option(void)
@@ -218,9 +332,35 @@ static int need_to_gc(void)
         * we run "repack -A -d -l".  Otherwise we tell the caller
         * there is no need.
         */
-       if (too_many_packs())
-               add_repack_all_option();
-       else if (too_many_loose_objects())
+       if (too_many_packs()) {
+               struct string_list keep_pack = STRING_LIST_INIT_NODUP;
+
+               if (big_pack_threshold) {
+                       find_base_packs(&keep_pack, big_pack_threshold);
+                       if (keep_pack.nr >= gc_auto_pack_limit) {
+                               big_pack_threshold = 0;
+                               string_list_clear(&keep_pack, 0);
+                               find_base_packs(&keep_pack, 0);
+                       }
+               } else {
+                       struct packed_git *p = find_base_packs(&keep_pack, 0);
+                       uint64_t mem_have, mem_want;
+
+                       mem_have = total_ram();
+                       mem_want = estimate_repack_memory(p);
+
+                       /*
+                        * Only allow 1/2 of memory for pack-objects, leave
+                        * the rest for the OS and other processes in the
+                        * system.
+                        */
+                       if (!mem_have || mem_want < mem_have / 2)
+                               string_list_clear(&keep_pack, 0);
+               }
+
+               add_repack_all_option(&keep_pack);
+               string_list_clear(&keep_pack, 0);
+       } else if (too_many_loose_objects())
                add_repack_incremental_option();
        else
                return 0;
@@ -353,6 +493,8 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
        const char *name;
        pid_t pid;
        int daemonized = 0;
+       int keep_base_pack = -1;
+       timestamp_t dummy;
 
        struct option builtin_gc_options[] = {
                OPT__QUIET(&quiet, N_("suppress progress reporting")),
@@ -365,6 +507,8 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
                OPT_BOOL_F(0, "force", &force,
                           N_("force running gc even if there may be another gc running"),
                           PARSE_OPT_NOCOMPLETE),
+               OPT_BOOL(0, "keep-largest-pack", &keep_base_pack,
+                        N_("repack all other packs except the largest pack")),
                OPT_END()
        };
 
@@ -381,7 +525,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
        /* default expiry time, overwritten in gc_config */
        gc_config();
        if (parse_expiry_date(gc_log_expire, &gc_log_expire_time))
-               die(_("Failed to parse gc.logexpiry value %s"), gc_log_expire);
+               die(_("failed to parse gc.logexpiry value %s"), gc_log_expire);
 
        if (pack_refs < 0)
                pack_refs = !is_bare_repository();
@@ -391,6 +535,9 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
        if (argc > 0)
                usage_with_options(builtin_gc_usage, builtin_gc_options);
 
+       if (prune_expire && parse_expiry_date(prune_expire, &dummy))
+               die(_("failed to parse prune expiry value %s"), prune_expire);
+
        if (aggressive) {
                argv_array_push(&repack, "-f");
                if (aggressive_depth > 0)
@@ -430,8 +577,19 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
                         */
                        daemonized = !daemonize();
                }
-       } else
-               add_repack_all_option();
+       } else {
+               struct string_list keep_pack = STRING_LIST_INIT_NODUP;
+
+               if (keep_base_pack != -1) {
+                       if (keep_base_pack)
+                               find_base_packs(&keep_pack, 0);
+               } else if (big_pack_threshold) {
+                       find_base_packs(&keep_pack, big_pack_threshold);
+               }
+
+               add_repack_all_option(&keep_pack);
+               string_list_clear(&keep_pack, 0);
+       }
 
        name = lock_repo_for_gc(force, &pid);
        if (name) {
@@ -479,7 +637,7 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
                return error(FAILED_RUN, rerere.argv[0]);
 
        report_garbage = report_pack_garbage;
-       reprepare_packed_git();
+       reprepare_packed_git(the_repository);
        if (pack_garbage.nr > 0)
                clean_pack_garbage();
 
index 789a89133aca7b8eeb93a936fd2301bd3f37d0c7..6e7bc76785ace33f80251edfcc4feac8ad19d7c5 100644 (file)
@@ -22,6 +22,7 @@
 #include "pathspec.h"
 #include "submodule.h"
 #include "submodule-config.h"
+#include "object-store.h"
 
 static char const * const grep_usage[] = {
        N_("git grep [<options>] [-e] <pattern> [<rev>...] [[--] <path>...]"),
@@ -306,7 +307,7 @@ static void *lock_and_read_oid_file(const struct object_id *oid, enum object_typ
        void *data;
 
        grep_read_lock();
-       data = read_sha1_file(oid->hash, type, size);
+       data = read_object_file(oid, type, size);
        grep_read_unlock();
        return data;
 }
@@ -439,7 +440,7 @@ static int grep_submodule(struct grep_opt *opt, struct repository *superproject,
         * object.
         */
        grep_read_lock();
-       add_to_alternates_memory(submodule.objectdir);
+       add_to_alternates_memory(submodule.objects->objectdir);
        grep_read_unlock();
 
        if (oid) {
@@ -452,7 +453,7 @@ static int grep_submodule(struct grep_opt *opt, struct repository *superproject,
                object = parse_object_or_die(oid, oid_to_hex(oid));
 
                grep_read_lock();
-               data = read_object_with_reference(object->oid.hash, tree_type,
+               data = read_object_with_reference(&object->oid, tree_type,
                                                  &size, NULL);
                grep_read_unlock();
 
@@ -601,8 +602,7 @@ static int grep_tree(struct grep_opt *opt, const struct pathspec *pathspec,
 }
 
 static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec,
-                      struct object *obj, const char *name, const char *path,
-                      struct repository *repo)
+                      struct object *obj, const char *name, const char *path)
 {
        if (obj->type == OBJ_BLOB)
                return grep_oid(opt, &obj->oid, name, 0, path);
@@ -614,7 +614,7 @@ static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec,
                int hit, len;
 
                grep_read_lock();
-               data = read_object_with_reference(obj->oid.hash, tree_type,
+               data = read_object_with_reference(&obj->oid, tree_type,
                                                  &size, NULL);
                grep_read_unlock();
 
@@ -629,7 +629,7 @@ static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec,
                }
                init_tree_desc(&tree, data, size);
                hit = grep_tree(opt, pathspec, &tree, &base, base.len,
-                               obj->type == OBJ_COMMIT, repo);
+                               obj->type == OBJ_COMMIT, the_repository);
                strbuf_release(&base);
                free(data);
                return hit;
@@ -638,7 +638,6 @@ static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec,
 }
 
 static int grep_objects(struct grep_opt *opt, const struct pathspec *pathspec,
-                       struct repository *repo,
                        const struct object_array *list)
 {
        unsigned int i;
@@ -651,11 +650,11 @@ static int grep_objects(struct grep_opt *opt, const struct pathspec *pathspec,
 
                /* load the gitmodules file for this rev */
                if (recurse_submodules) {
-                       submodule_free();
+                       submodule_free(the_repository);
                        gitmodules_config_oid(&real_obj->oid);
                }
-               if (grep_object(opt, pathspec, real_obj, list->objects[i].name, list->objects[i].path,
-                               repo)) {
+               if (grep_object(opt, pathspec, real_obj, list->objects[i].name,
+                               list->objects[i].path)) {
                        hit = 1;
                        if (opt->status_only)
                                break;
@@ -1107,7 +1106,7 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
                if (cached)
                        die(_("both --cached and trees are given."));
 
-               hit = grep_objects(&opt, &pathspec, the_repository, &list);
+               hit = grep_objects(&opt, &pathspec, &list);
        }
 
        if (num_threads)
index 526da5c1856ed1c387975a767f4d01382d1ea1a9..a9a3a198c3b47197cb847ba32a33177f1a45f428 100644 (file)
@@ -9,7 +9,7 @@
 #include "blob.h"
 #include "quote.h"
 #include "parse-options.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 
 /*
  * This is to create corrupt objects for debugging and as such it
index 598867cfea40c6e0df25111c5484dde5d2cd8b47..2d5107142926d46230d17715211338c1aa171e69 100644 (file)
@@ -4,7 +4,7 @@
 #include "cache.h"
 #include "config.h"
 #include "builtin.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "parse-options.h"
 #include "run-command.h"
 #include "column.h"
index bda84a92effe41adb1e50a06ff6accac0563d04a..e2f670bef9ec6af7dfeaf8f507c9a9f80968c4df 100644 (file)
@@ -9,10 +9,11 @@
 #include "tree.h"
 #include "progress.h"
 #include "fsck.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "streaming.h"
 #include "thread-utils.h"
 #include "packfile.h"
+#include "object-store.h"
 
 static const char index_pack_usage[] =
 "git index-pack [-v] [-o <index-file>] [--keep | --keep=<msg>] [--verify] [--strict] (<pack-file> | --stdin [--fix-thin] [<pack-file>])";
@@ -59,7 +60,7 @@ struct ofs_delta_entry {
 };
 
 struct ref_delta_entry {
-       unsigned char sha1[20];
+       struct object_id oid;
        int obj_no;
 };
 
@@ -222,7 +223,7 @@ static unsigned check_object(struct object *obj)
 
        if (!(obj->flags & FLAG_CHECKED)) {
                unsigned long size;
-               int type = sha1_object_info(obj->oid.hash, &size);
+               int type = oid_object_info(the_repository, &obj->oid, &size);
                if (type <= 0)
                        die(_("did not receive expected object %s"),
                              oid_to_hex(&obj->oid));
@@ -672,18 +673,18 @@ static void find_ofs_delta_children(off_t offset,
        *last_index = last;
 }
 
-static int compare_ref_delta_bases(const unsigned char *sha1,
-                                  const unsigned char *sha2,
+static int compare_ref_delta_bases(const struct object_id *oid1,
+                                  const struct object_id *oid2,
                                   enum object_type type1,
                                   enum object_type type2)
 {
        int cmp = type1 - type2;
        if (cmp)
                return cmp;
-       return hashcmp(sha1, sha2);
+       return oidcmp(oid1, oid2);
 }
 
-static int find_ref_delta(const unsigned char *sha1, enum object_type type)
+static int find_ref_delta(const struct object_id *oid, enum object_type type)
 {
        int first = 0, last = nr_ref_deltas;
 
@@ -692,7 +693,7 @@ static int find_ref_delta(const unsigned char *sha1, enum object_type type)
                struct ref_delta_entry *delta = &ref_deltas[next];
                int cmp;
 
-               cmp = compare_ref_delta_bases(sha1, delta->sha1,
+               cmp = compare_ref_delta_bases(oid, &delta->oid,
                                              type, objects[delta->obj_no].type);
                if (!cmp)
                        return next;
@@ -705,11 +706,11 @@ static int find_ref_delta(const unsigned char *sha1, enum object_type type)
        return -first-1;
 }
 
-static void find_ref_delta_children(const unsigned char *sha1,
+static void find_ref_delta_children(const struct object_id *oid,
                                    int *first_index, int *last_index,
                                    enum object_type type)
 {
-       int first = find_ref_delta(sha1, type);
+       int first = find_ref_delta(oid, type);
        int last = first;
        int end = nr_ref_deltas - 1;
 
@@ -718,9 +719,9 @@ static void find_ref_delta_children(const unsigned char *sha1,
                *last_index = -1;
                return;
        }
-       while (first > 0 && !hashcmp(ref_deltas[first - 1].sha1, sha1))
+       while (first > 0 && !oidcmp(&ref_deltas[first - 1].oid, oid))
                --first;
-       while (last < end && !hashcmp(ref_deltas[last + 1].sha1, sha1))
+       while (last < end && !oidcmp(&ref_deltas[last + 1].oid, oid))
                ++last;
        *first_index = first;
        *last_index = last;
@@ -772,7 +773,7 @@ static int check_collison(struct object_entry *entry)
 
        memset(&data, 0, sizeof(data));
        data.entry = entry;
-       data.st = open_istream(entry->idx.oid.hash, &type, &size, NULL);
+       data.st = open_istream(&entry->idx.oid, &type, &size, NULL);
        if (!data.st)
                return -1;
        if (size != entry->size || type != entry->type)
@@ -811,12 +812,12 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
                enum object_type has_type;
                unsigned long has_size;
                read_lock();
-               has_type = sha1_object_info(oid->hash, &has_size);
+               has_type = oid_object_info(the_repository, oid, &has_size);
                if (has_type < 0)
                        die(_("cannot read existing object info %s"), oid_to_hex(oid));
                if (has_type != type || has_size != size)
                        die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid));
-               has_data = read_sha1_file(oid->hash, &has_type, &has_size);
+               has_data = read_object_file(oid, &has_type, &has_size);
                read_unlock();
                if (!data)
                        data = new_data = get_data_from_pack(obj_entry);
@@ -992,7 +993,7 @@ static struct base_data *find_unresolved_deltas_1(struct base_data *base,
                                                  struct base_data *prev_base)
 {
        if (base->ref_last == -1 && base->ofs_last == -1) {
-               find_ref_delta_children(base->obj->idx.oid.hash,
+               find_ref_delta_children(&base->obj->idx.oid,
                                        &base->ref_first, &base->ref_last,
                                        OBJ_REF_DELTA);
 
@@ -1076,7 +1077,7 @@ static int compare_ref_delta_entry(const void *a, const void *b)
        const struct ref_delta_entry *delta_a = a;
        const struct ref_delta_entry *delta_b = b;
 
-       return hashcmp(delta_a->sha1, delta_b->sha1);
+       return oidcmp(&delta_a->oid, &delta_b->oid);
 }
 
 static void resolve_base(struct object_entry *obj)
@@ -1142,7 +1143,7 @@ static void parse_pack_objects(unsigned char *hash)
                        ofs_delta++;
                } else if (obj->type == OBJ_REF_DELTA) {
                        ALLOC_GROW(ref_deltas, nr_ref_deltas + 1, ref_deltas_alloc);
-                       hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_oid.hash);
+                       oidcpy(&ref_deltas[nr_ref_deltas].oid, &ref_delta_oid);
                        ref_deltas[nr_ref_deltas].obj_no = i;
                        nr_ref_deltas++;
                } else if (!data) {
@@ -1270,7 +1271,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
                            nr_objects - nr_objects_initial);
                stop_progress_msg(&progress, msg.buf);
                strbuf_release(&msg);
-               hashclose(f, tail_hash, 0);
+               finalize_hashfile(f, tail_hash, 0);
                hashcpy(read_hash, pack_hash);
                fixup_pack_header_footer(output_fd, pack_hash,
                                         curr_pack, nr_objects,
@@ -1374,14 +1375,15 @@ static void fix_unresolved_deltas(struct hashfile *f)
 
                if (objects[d->obj_no].real_type != OBJ_REF_DELTA)
                        continue;
-               base_obj->data = read_sha1_file(d->sha1, &type, &base_obj->size);
+               base_obj->data = read_object_file(&d->oid, &type,
+                                                 &base_obj->size);
                if (!base_obj->data)
                        continue;
 
-               if (check_sha1_signature(d->sha1, base_obj->data,
+               if (check_object_signature(&d->oid, base_obj->data,
                                base_obj->size, type_name(type)))
-                       die(_("local object %s is corrupt"), sha1_to_hex(d->sha1));
-               base_obj->obj = append_obj_to_pack(f, d->sha1,
+                       die(_("local object %s is corrupt"), oid_to_hex(&d->oid));
+               base_obj->obj = append_obj_to_pack(f, d->oid.hash,
                                        base_obj->data, base_obj->size, type);
                find_unresolved_deltas(base_obj);
                display_progress(progress, nr_resolved_deltas);
@@ -1591,7 +1593,7 @@ static void read_idx_option(struct pack_idx_option *opts, const char *pack_name)
        /*
         * Get rid of the idx file as we do not need it anymore.
         * NEEDSWORK: extract this bit from free_pack_by_name() in
-        * sha1_file.c, perhaps?  It shouldn't matter very much as we
+        * sha1-file.c, perhaps?  It shouldn't matter very much as we
         * know we haven't installed this pack (hence we never have
         * read anything from it).
         */
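
Much of the index-pack.c hunk above is the mechanical unsigned char[20] to struct object_id conversion (hashcmp/hashcpy become oidcmp/oidcpy), applied to the sorted ref-delta table and its binary search. A simplified standalone sketch of that search follows; the object_id and oidcmp definitions are local stand-ins so it compiles without git headers, and the type comparison done by compare_ref_delta_bases() is dropped for brevity.

#include <stdio.h>
#include <string.h>

#define GIT_SHA1_RAWSZ 20

struct object_id {
	unsigned char hash[GIT_SHA1_RAWSZ];
};

static int oidcmp(const struct object_id *a, const struct object_id *b)
{
	return memcmp(a->hash, b->hash, GIT_SHA1_RAWSZ);
}

struct ref_delta_entry {
	struct object_id oid;
	int obj_no;
};

/* entries[] must already be sorted by oid, as after the QSORT in index-pack */
static int find_ref_delta(const struct ref_delta_entry *entries, int nr,
			  const struct object_id *oid)
{
	int first = 0, last = nr;

	while (first < last) {
		int next = first + (last - first) / 2;
		int cmp = oidcmp(oid, &entries[next].oid);

		if (!cmp)
			return next;
		if (cmp < 0) {
			last = next;
			continue;
		}
		first = next + 1;
	}
	return -first - 1;	/* not found: insertion point, encoded like git does */
}

int main(void)
{
	struct ref_delta_entry entries[3] = {
		{ { {0x01} }, 0 },
		{ { {0x7f} }, 1 },
		{ { {0xfe} }, 2 },
	};
	struct object_id needle = { {0x7f} };

	printf("found at index %d\n", find_ref_delta(entries, 3, &needle));
	return 0;
}
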
index 68ff4ad75ace6566a233c1343fed93365c5abbe4..2542c5244e91b68692c7ed371fa9976997ee8ff5 100644 (file)
@@ -7,7 +7,7 @@
 #include "config.h"
 #include "refs.h"
 #include "builtin.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "parse-options.h"
 
 #ifndef DEFAULT_GIT_TEMPLATE_DIR
index 94ee177d56d6ff9c82b02ebfb2d27cf789527974..a15599f4f031d6d6b23f68f83dbc935f36d41ae3 100644 (file)
@@ -518,7 +518,7 @@ static int show_tag_object(const struct object_id *oid, struct rev_info *rev)
 {
        unsigned long size;
        enum object_type type;
-       char *buf = read_sha1_file(oid->hash, &type, &size);
+       char *buf = read_object_file(oid, &type, &size);
        int offset = 0;
 
        if (!buf)
@@ -541,7 +541,7 @@ static int show_tag_object(const struct object_id *oid, struct rev_info *rev)
        return 0;
 }
 
-static int show_tree_object(const unsigned char *sha1,
+static int show_tree_object(const struct object_id *oid,
                struct strbuf *base,
                const char *pathname, unsigned mode, int stage, void *context)
 {
@@ -1019,7 +1019,7 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout,
            open_next_file(NULL, rev->numbered_files ? NULL : "cover-letter", rev, quiet))
                return;
 
-       log_write_email_headers(rev, head, &pp.after_subject, &need_8bit_cte);
+       log_write_email_headers(rev, head, &pp.after_subject, &need_8bit_cte, 0);
 
        for (i = 0; !need_8bit_cte && i < nr; i++) {
                const char *buf = get_commit_buffer(list[i], NULL);
@@ -1067,8 +1067,8 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout,
 
        diff_setup_done(&opts);
 
-       diff_tree_oid(&origin->tree->object.oid,
-                     &head->tree->object.oid,
+       diff_tree_oid(get_commit_tree_oid(origin),
+                     get_commit_tree_oid(head),
                      "", &opts);
        diffcore_std(&opts);
        diff_flush(&opts);
@@ -1873,12 +1873,12 @@ static void print_commit(char sign, struct commit *commit, int verbose,
 {
        if (!verbose) {
                fprintf(file, "%c %s\n", sign,
-                      find_unique_abbrev(commit->object.oid.hash, abbrev));
+                      find_unique_abbrev(&commit->object.oid, abbrev));
        } else {
                struct strbuf buf = STRBUF_INIT;
                pp_commit_easy(CMIT_FMT_ONELINE, commit, &buf);
                fprintf(file, "%c %s %s\n", sign,
-                      find_unique_abbrev(commit->object.oid.hash, abbrev),
+                      find_unique_abbrev(&commit->object.oid, abbrev),
                       buf.buf);
                strbuf_release(&buf);
        }
index 2fc836e33086d5b70b86a3fc746f9e75706ef7ea..a71f6bd088a2666f0637463e1c168171dd319a96 100644 (file)
@@ -240,7 +240,7 @@ static void show_ce(struct repository *repo, struct dir_struct *dir,
                        printf("%s%06o %s %d\t",
                               tag,
                               ce->ce_mode,
-                              find_unique_abbrev(ce->oid.hash, abbrev),
+                              find_unique_abbrev(&ce->oid, abbrev),
                               ce_stage(ce));
                }
                write_eolinfo(repo->index, ce, fullname);
@@ -271,7 +271,7 @@ static void show_ru_info(const struct index_state *istate)
                        if (!ui->mode[i])
                                continue;
                        printf("%s%06o %s %d\t", tag_resolve_undo, ui->mode[i],
-                              find_unique_abbrev(ui->sha1[i], abbrev),
+                              find_unique_abbrev(&ui->oid[i], abbrev),
                               i + 1);
                        write_name(path);
                }
index 540d56429f5cec4ace8655dd9a870089fc872d2c..1a25df7ee15b45df142679286afdb0e8c55647dc 100644 (file)
@@ -1,7 +1,9 @@
 #include "builtin.h"
 #include "cache.h"
 #include "transport.h"
+#include "ref-filter.h"
 #include "remote.h"
+#include "refs.h"
 
 static const char * const ls_remote_usage[] = {
        N_("git ls-remote [--heads] [--tags] [--refs] [--upload-pack=<exec>]\n"
@@ -43,10 +45,15 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
        int show_symref_target = 0;
        const char *uploadpack = NULL;
        const char **pattern = NULL;
+       struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
+       int i;
+       struct string_list server_options = STRING_LIST_INIT_DUP;
 
        struct remote *remote;
        struct transport *transport;
        const struct ref *ref;
+       struct ref_array ref_array;
+       static struct ref_sorting *sorting = NULL, **sorting_tail = &sorting;
 
        struct option options[] = {
                OPT__QUIET(&quiet, N_("do not print remote URL")),
@@ -60,14 +67,19 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
                OPT_BIT(0, "refs", &flags, N_("do not show peeled tags"), REF_NORMAL),
                OPT_BOOL(0, "get-url", &get_url,
                         N_("take url.<base>.insteadOf into account")),
+               OPT_CALLBACK(0 , "sort", sorting_tail, N_("key"),
+                            N_("field name to sort on"), &parse_opt_ref_sorting),
                OPT_SET_INT_F(0, "exit-code", &status,
                              N_("exit with exit code 2 if no matching refs are found"),
                              2, PARSE_OPT_NOCOMPLETE),
                OPT_BOOL(0, "symref", &show_symref_target,
                         N_("show underlying ref in addition to the object pointed by it")),
+               OPT_STRING_LIST('o', "server-option", &server_options, N_("server-specific"), N_("option to transmit")),
                OPT_END()
        };
 
+       memset(&ref_array, 0, sizeof(ref_array));
+
        argc = parse_options(argc, argv, prefix, options, ls_remote_usage,
                             PARSE_OPT_STOP_AT_NON_OPTION);
        dest = argv[0];
@@ -75,8 +87,17 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
        if (argc > 1) {
                int i;
                pattern = xcalloc(argc, sizeof(const char *));
-               for (i = 1; i < argc; i++)
+               for (i = 1; i < argc; i++) {
+                       const char *glob;
                        pattern[i - 1] = xstrfmt("*/%s", argv[i]);
+
+                       glob = strchr(argv[i], '*');
+                       if (glob)
+                               argv_array_pushf(&ref_prefixes, "%.*s",
+                                                (int)(glob - argv[i]), argv[i]);
+                       else
+                               expand_ref_prefix(&ref_prefixes, argv[i]);
+               }
        }
 
        remote = remote_get(dest);
@@ -90,28 +111,46 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
 
        if (get_url) {
                printf("%s\n", *remote->url);
+               UNLEAK(sorting);
                return 0;
        }
 
        transport = transport_get(remote, NULL);
        if (uploadpack != NULL)
                transport_set_option(transport, TRANS_OPT_UPLOADPACK, uploadpack);
+       if (server_options.nr)
+               transport->server_options = &server_options;
 
-       ref = transport_get_remote_refs(transport);
-       if (transport_disconnect(transport))
+       ref = transport_get_remote_refs(transport, &ref_prefixes);
+       if (transport_disconnect(transport)) {
+               UNLEAK(sorting);
                return 1;
+       }
 
        if (!dest && !quiet)
                fprintf(stderr, "From %s\n", *remote->url);
        for ( ; ref; ref = ref->next) {
+               struct ref_array_item *item;
                if (!check_ref_type(ref, flags))
                        continue;
                if (!tail_match(pattern, ref->name))
                        continue;
+               item = ref_array_push(&ref_array, ref->name, &ref->old_oid);
+               item->symref = xstrdup_or_null(ref->symref);
+       }
+
+       if (sorting)
+               ref_array_sort(sorting, &ref_array);
+
+       for (i = 0; i < ref_array.nr; i++) {
+               const struct ref_array_item *ref = ref_array.items[i];
                if (show_symref_target && ref->symref)
-                       printf("ref: %s\t%s\n", ref->symref, ref->name);
-               printf("%s\t%s\n", oid_to_hex(&ref->old_oid), ref->name);
+                       printf("ref: %s\t%s\n", ref->symref, ref->refname);
+               printf("%s\t%s\n", oid_to_hex(&ref->objectname), ref->refname);
                status = 0; /* we found something */
        }
+
+       UNLEAK(sorting);
+       UNLEAK(ref_array);
        return status;
 }
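
The ls-remote.c rewrite above changes the output loop from print-as-you-go to collect-into-a-ref_array, optionally sort it (--sort, via ref-filter), then print. A standalone sketch of that shape, with qsort() on the refname standing in for ref_array_sort() and obviously fake abbreviated object ids:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct item {
	const char *oid_hex;	/* fake, abbreviated id for illustration */
	const char *refname;
};

static int cmp_refname(const void *a, const void *b)
{
	const struct item *x = a, *y = b;
	return strcmp(x->refname, y->refname);
}

int main(void)
{
	struct item items[] = {
		{ "ccccccc", "refs/tags/v2.17.0" },
		{ "aaaaaaa", "refs/heads/master" },
		{ "bbbbbbb", "refs/heads/maint" },
	};
	size_t i, nr = sizeof(items) / sizeof(items[0]);

	/* collect first, sort once, then print, as the new cmd_ls_remote() does */
	qsort(items, nr, sizeof(items[0]), cmp_refname);
	for (i = 0; i < nr; i++)
		printf("%s\t%s\n", items[i].oid_hex, items[i].refname);
	return 0;
}
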
index ef965408e8fc5d80fa9e9daf0264a91abccd978c..409da4e8351f6d8719671d3176f0ea34fe9b756c 100644 (file)
@@ -60,7 +60,7 @@ static int show_recursive(const char *base, int baselen, const char *pathname)
        return 0;
 }
 
-static int show_tree(const unsigned char *sha1, struct strbuf *base,
+static int show_tree(const struct object_id *oid, struct strbuf *base,
                const char *pathname, unsigned mode, int stage, void *context)
 {
        int retval = 0;
@@ -94,7 +94,7 @@ static int show_tree(const unsigned char *sha1, struct strbuf *base,
                        char size_text[24];
                        if (!strcmp(type, blob_type)) {
                                unsigned long size;
-                               if (sha1_object_info(sha1, &size) == OBJ_BAD)
+                               if (oid_object_info(the_repository, oid, &size) == OBJ_BAD)
                                        xsnprintf(size_text, sizeof(size_text),
                                                  "BAD");
                                else
@@ -103,11 +103,11 @@ static int show_tree(const unsigned char *sha1, struct strbuf *base,
                        } else
                                xsnprintf(size_text, sizeof(size_text), "-");
                        printf("%06o %s %s %7s\t", mode, type,
-                              find_unique_abbrev(sha1, abbrev),
+                              find_unique_abbrev(oid, abbrev),
                               size_text);
                } else
                        printf("%06o %s %s\t", mode, type,
-                              find_unique_abbrev(sha1, abbrev));
+                              find_unique_abbrev(oid, abbrev));
        }
        baselen = base->len;
        strbuf_addstr(base, pathname);
index d01ddecf6602eabdca97a175e5c2a57bf1257865..bf01e05808313da20ed73c37d0d150c4ddb45587 100644 (file)
@@ -2,7 +2,7 @@
 #include "tree-walk.h"
 #include "xdiff-interface.h"
 #include "blob.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "merge-blobs.h"
 
 static const char merge_tree_usage[] = "git merge-tree <base-tree> <branch1> <branch2>";
@@ -60,7 +60,7 @@ static void *result(struct merge_list *entry, unsigned long *size)
        const char *path = entry->path;
 
        if (!entry->stage)
-               return read_sha1_file(entry->blob->object.oid.hash, &type, size);
+               return read_object_file(&entry->blob->object.oid, &type, size);
        base = NULL;
        if (entry->stage == 1) {
                base = entry->blob;
@@ -82,7 +82,8 @@ static void *origin(struct merge_list *entry, unsigned long *size)
        enum object_type type;
        while (entry) {
                if (entry->stage == 2)
-                       return read_sha1_file(entry->blob->object.oid.hash, &type, size);
+                       return read_object_file(&entry->blob->object.oid,
+                                               &type, size);
                entry = entry->link;
        }
        return NULL;
index ee050a47f34d7394d048f955baabb37a9e716ef8..9db5a2cf16e189bb3bd0ceec7d34c6651d630225 100644 (file)
@@ -412,7 +412,7 @@ static void finish(struct commit *head_commit,
                         * We ignore errors in 'gc --auto', since the
                         * user should see them.
                         */
-                       close_all_packs();
+                       close_all_packs(the_repository->objects);
                        run_command_v_opt(argv_gc_auto, RUN_GIT_CMD);
                }
        }
@@ -639,7 +639,7 @@ static int read_tree_trivial(struct object_id *common, struct object_id *head,
 
 static void write_tree_trivial(struct object_id *oid)
 {
-       if (write_cache_as_tree(oid->hash, 0, NULL))
+       if (write_cache_as_tree(oid, 0, NULL))
                die(_("git write-tree failed to write a tree"));
 }
 
@@ -1324,7 +1324,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
 
                        check_commit_signature(commit, &signature_check);
 
-                       find_unique_abbrev_r(hex, commit->object.oid.hash, DEFAULT_ABBREV);
+                       find_unique_abbrev_r(hex, &commit->object.oid, DEFAULT_ABBREV);
                        switch (signature_check.result) {
                        case 'G':
                                break;
@@ -1417,9 +1417,9 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
 
                if (verbosity >= 0) {
                        printf(_("Updating %s..%s\n"),
-                              find_unique_abbrev(head_commit->object.oid.hash,
+                              find_unique_abbrev(&head_commit->object.oid,
                                                  DEFAULT_ABBREV),
-                              find_unique_abbrev(remoteheads->item->object.oid.hash,
+                              find_unique_abbrev(&remoteheads->item->object.oid,
                                                  DEFAULT_ABBREV));
                }
                strbuf_addstr(&msg, "Fast-forward");
index beb552847ba1ef36c78d9d10d3ff9e98e6a288bc..82a6e860775f872a9145e32ee9d8f315b70ad2ea 100644 (file)
@@ -1,5 +1,6 @@
 #include "builtin.h"
 #include "tag.h"
+#include "replace-object.h"
 
 /*
  * A signature file has a very simple fixed format: four lines
 /*
  * We refuse to tag something we can't verify. Just because.
  */
-static int verify_object(const unsigned char *sha1, const char *expected_type)
+static int verify_object(const struct object_id *oid, const char *expected_type)
 {
        int ret = -1;
        enum object_type type;
        unsigned long size;
-       void *buffer = read_sha1_file(sha1, &type, &size);
-       const unsigned char *repl = lookup_replace_object(sha1);
+       void *buffer = read_object_file(oid, &type, &size);
+       const struct object_id *repl = lookup_replace_object(the_repository, oid);
 
        if (buffer) {
                if (type == type_from_string(expected_type))
-                       ret = check_sha1_signature(repl, buffer, size, expected_type);
+                       ret = check_object_signature(repl, buffer, size, expected_type);
                free(buffer);
        }
        return ret;
@@ -38,8 +39,8 @@ static int verify_tag(char *buffer, unsigned long size)
 {
        int typelen;
        char type[20];
-       unsigned char sha1[20];
-       const char *object, *type_line, *tag_line, *tagger_line, *lb, *rb;
+       struct object_id oid;
+       const char *object, *type_line, *tag_line, *tagger_line, *lb, *rb, *p;
        size_t len;
 
        if (size < 84)
@@ -52,11 +53,11 @@ static int verify_tag(char *buffer, unsigned long size)
        if (memcmp(object, "object ", 7))
                return error("char%d: does not start with \"object \"", 0);
 
-       if (get_sha1_hex(object + 7, sha1))
+       if (parse_oid_hex(object + 7, &oid, &p))
                return error("char%d: could not get SHA1 hash", 7);
 
        /* Verify type line */
-       type_line = object + 48;
+       type_line = p + 1;
        if (memcmp(type_line - 1, "\ntype ", 6))
                return error("char%d: could not find \"\\ntype \"", 47);
 
@@ -80,8 +81,8 @@ static int verify_tag(char *buffer, unsigned long size)
        type[typelen] = 0;
 
        /* Verify that the object matches */
-       if (verify_object(sha1, type))
-               return error("char%d: could not verify object %s", 7, sha1_to_hex(sha1));
+       if (verify_object(&oid, type))
+               return error("char%d: could not verify object %s", 7, oid_to_hex(&oid));
 
        /* Verify the tag-name: we don't allow control characters or spaces in it */
        tag_line += 4;
index f5f3c0eea1cb6f2f61073e4d81fc0dd9c47dee60..bb76b469fd1f57319f3d202c1e2658d77962d698 100644 (file)
 
 static struct treeent {
        unsigned mode;
-       unsigned char sha1[20];
+       struct object_id oid;
        int len;
        char name[FLEX_ARRAY];
 } **entries;
 static int alloc, used;
 
-static void append_to_tree(unsigned mode, unsigned char *sha1, char *path)
+static void append_to_tree(unsigned mode, struct object_id *oid, char *path)
 {
        struct treeent *ent;
        size_t len = strlen(path);
@@ -26,7 +26,7 @@ static void append_to_tree(unsigned mode, unsigned char *sha1, char *path)
        FLEX_ALLOC_MEM(ent, name, path, len);
        ent->mode = mode;
        ent->len = len;
-       hashcpy(ent->sha1, sha1);
+       oidcpy(&ent->oid, oid);
 
        ALLOC_GROW(entries, used + 1, alloc);
        entries[used++] = ent;
@@ -54,7 +54,7 @@ static void write_tree(struct object_id *oid)
        for (i = 0; i < used; i++) {
                struct treeent *ent = entries[i];
                strbuf_addf(&buf, "%o %s%c", ent->mode, ent->name, '\0');
-               strbuf_add(&buf, ent->sha1, 20);
+               strbuf_add(&buf, ent->oid.hash, the_hash_algo->rawsz);
        }
 
        write_object_file(buf.buf, buf.len, tree_type, oid);
@@ -69,11 +69,12 @@ static const char *mktree_usage[] = {
 static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_missing)
 {
        char *ptr, *ntr;
+       const char *p;
        unsigned mode;
        enum object_type mode_type; /* object type derived from mode */
        enum object_type obj_type; /* object type derived from sha */
        char *path, *to_free = NULL;
-       unsigned char sha1[20];
+       struct object_id oid;
 
        ptr = buf;
        /*
@@ -85,9 +86,8 @@ static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_miss
                die("input format error: %s", buf);
        ptr = ntr + 1; /* type */
        ntr = strchr(ptr, ' ');
-       if (!ntr || buf + len <= ntr + 40 ||
-           ntr[41] != '\t' ||
-           get_sha1_hex(ntr + 1, sha1))
+       if (!ntr || parse_oid_hex(ntr + 1, &oid, &p) ||
+           *p != '\t')
                die("input format error: %s", buf);
 
        /* It is perfectly normal if we do not have a commit from a submodule */
@@ -116,12 +116,12 @@ static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_miss
        }
 
        /* Check the type of object identified by sha1 */
-       obj_type = sha1_object_info(sha1, NULL);
+       obj_type = oid_object_info(the_repository, &oid, NULL);
        if (obj_type < 0) {
                if (allow_missing) {
                        ; /* no problem - missing objects are presumed to be of the right type */
                } else {
-                       die("entry '%s' object %s is unavailable", path, sha1_to_hex(sha1));
+                       die("entry '%s' object %s is unavailable", path, oid_to_hex(&oid));
                }
        } else {
                if (obj_type != mode_type) {
@@ -131,11 +131,11 @@ static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_miss
                         * because the new tree entry will never be correct.
                         */
                        die("entry '%s' object %s is a %s but specified type was (%s)",
-                               path, sha1_to_hex(sha1), type_name(obj_type), type_name(mode_type));
+                               path, oid_to_hex(&oid), type_name(obj_type), type_name(mode_type));
                }
        }
 
-       append_to_tree(mode, sha1, path);
+       append_to_tree(mode, &oid, path);
        free(to_free);
 }
 
index 6d141f7a532c08e52f1f5f82330d046c60073f93..7a63667d64810c1164cf3acad3cfcc6cedf4010d 100644 (file)
@@ -276,10 +276,12 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
                        die_errno(_("renaming '%s' failed"), src);
                }
                if (submodule_gitfile[i]) {
-                       if (submodule_gitfile[i] != SUBMODULE_WITH_GITDIR)
-                               connect_work_tree_and_git_dir(dst, submodule_gitfile[i]);
                        if (!update_path_in_gitmodules(src, dst))
                                gitmodules_modified = 1;
+                       if (submodule_gitfile[i] != SUBMODULE_WITH_GITDIR)
+                               connect_work_tree_and_git_dir(dst,
+                                                             submodule_gitfile[i],
+                                                             1);
                }
 
                if (mode == WORKING_DIRECTORY)
index 9e088ebd11dced248640df9e17adbbd8b9a73ffb..387ddf85d21a443f060dbb212fe95a983d9e4f58 100644 (file)
@@ -328,7 +328,7 @@ static void show_name(const struct object *obj,
        else if (allow_undefined)
                printf("undefined\n");
        else if (always)
-               printf("%s\n", find_unique_abbrev(oid->hash, DEFAULT_ABBREV));
+               printf("%s\n", find_unique_abbrev(oid, DEFAULT_ABBREV));
        else
                die("cannot describe '%s'", oid_to_hex(oid));
        strbuf_release(&buf);
index 6d2fda4a7d7ba89633f5cfee73f39cacb9f27898..e5bf80eef1d4113ac9c5361f200bb7b162de18a6 100644 (file)
@@ -14,7 +14,7 @@
 #include "blob.h"
 #include "pretty.h"
 #include "refs.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "run-command.h"
 #include "parse-options.h"
 #include "string-list.h"
@@ -118,11 +118,11 @@ static int list_each_note(const struct object_id *object_oid,
        return 0;
 }
 
-static void copy_obj_to_fd(int fd, const unsigned char *sha1)
+static void copy_obj_to_fd(int fd, const struct object_id *oid)
 {
        unsigned long size;
        enum object_type type;
-       char *buf = read_sha1_file(sha1, &type, &size);
+       char *buf = read_object_file(oid, &type, &size);
        if (buf) {
                if (size)
                        write_or_die(fd, buf, size);
@@ -162,7 +162,7 @@ static void write_commented_object(int fd, const struct object_id *object)
 }
 
 static void prepare_note_data(const struct object_id *object, struct note_data *d,
-               const unsigned char *old_note)
+               const struct object_id *old_note)
 {
        if (d->use_editor || !d->given) {
                int fd;
@@ -253,7 +253,7 @@ static int parse_reuse_arg(const struct option *opt, const char *arg, int unset)
 
        if (get_oid(arg, &object))
                die(_("failed to resolve '%s' as a valid ref."), arg);
-       if (!(buf = read_sha1_file(object.hash, &type, &len))) {
+       if (!(buf = read_object_file(&object, &type, &len))) {
                free(buf);
                die(_("failed to read object '%s'."), arg);
        }
@@ -457,7 +457,7 @@ static int add(int argc, const char **argv, const char *prefix)
                        oid_to_hex(&object));
        }
 
-       prepare_note_data(&object, &d, note ? note->hash : NULL);
+       prepare_note_data(&object, &d, note);
        if (d.buf.len || allow_empty) {
                write_note_data(&d, &new_note);
                if (add_note(t, &object, &new_note, combine_notes_overwrite))
@@ -602,13 +602,13 @@ static int append_edit(int argc, const char **argv, const char *prefix)
        t = init_notes_check(argv[0], NOTES_INIT_WRITABLE);
        note = get_note(t, &object);
 
-       prepare_note_data(&object, &d, edit && note ? note->hash : NULL);
+       prepare_note_data(&object, &d, edit && note ? note : NULL);
 
        if (note && !edit) {
                /* Append buf to previous note contents */
                unsigned long size;
                enum object_type type;
-               char *prev_buf = read_sha1_file(note->hash, &type, &size);
+               char *prev_buf = read_object_file(note, &type, &size);
 
                strbuf_grow(&d.buf, size + 1);
                if (d.buf.len && prev_buf && size)
index e9d3cfb9e33a6b874751ac6acc5aac2361d4a58d..3df0bf0f6f7a7eec077485f7027e0c551a6d5343 100644 (file)
@@ -1,5 +1,6 @@
 #include "builtin.h"
 #include "cache.h"
+#include "repository.h"
 #include "config.h"
 #include "attr.h"
 #include "object.h"
 #include "argv-array.h"
 #include "list.h"
 #include "packfile.h"
+#include "object-store.h"
+#include "dir.h"
+
+#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
+#define SIZE(obj) oe_size(&to_pack, obj)
+#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size)
+#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj)
+#define DELTA(obj) oe_delta(&to_pack, obj)
+#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
+#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
+#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
+#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
+#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
+#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)
 
 static const char *pack_usage[] = {
        N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
@@ -43,7 +58,7 @@ static const char *pack_usage[] = {
 static struct packing_data to_pack;
 
 static struct pack_idx_entry **written_list;
-static uint32_t nr_result, nr_written;
+static uint32_t nr_result, nr_written, nr_seen;
 
 static int non_empty;
 static int reuse_delta = 1, reuse_object = 1;
@@ -53,7 +68,8 @@ static int pack_loose_unreachable;
 static int local;
 static int have_non_local_packs;
 static int incremental;
-static int ignore_packed_keep;
+static int ignore_packed_keep_on_disk;
+static int ignore_packed_keep_in_core;
 static int allow_ofs_delta;
 static struct pack_idx_option pack_idx_opts;
 static const char *base_name;
@@ -78,7 +94,7 @@ static uint16_t write_bitmap_options;
 static int exclude_promisor_objects;
 
 static unsigned long delta_cache_size = 0;
-static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
+static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
 static unsigned long cache_max_small_delta_size = 1000;
 
 static unsigned long window_memory_limit = 0;
@@ -122,17 +138,17 @@ static void *get_delta(struct object_entry *entry)
        void *buf, *base_buf, *delta_buf;
        enum object_type type;
 
-       buf = read_sha1_file(entry->idx.oid.hash, &type, &size);
+       buf = read_object_file(&entry->idx.oid, &type, &size);
        if (!buf)
                die("unable to read %s", oid_to_hex(&entry->idx.oid));
-       base_buf = read_sha1_file(entry->delta->idx.oid.hash, &type,
-                                 &base_size);
+       base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
+                                   &base_size);
        if (!base_buf)
                die("unable to read %s",
-                   oid_to_hex(&entry->delta->idx.oid));
+                   oid_to_hex(&DELTA(entry)->idx.oid));
        delta_buf = diff_delta(base_buf, base_size,
                               buf, size, &delta_size, 0);
-       if (!delta_buf || delta_size != entry->delta_size)
+       if (!delta_buf || delta_size != DELTA_SIZE(entry))
                die("delta size changed");
        free(buf);
        free(base_buf);
@@ -265,13 +281,12 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
        struct git_istream *st = NULL;
 
        if (!usable_delta) {
-               if (entry->type == OBJ_BLOB &&
-                   entry->size > big_file_threshold &&
-                   (st = open_istream(entry->idx.oid.hash, &type, &size, NULL)) != NULL)
+               if (oe_type(entry) == OBJ_BLOB &&
+                   oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
+                   (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
                        buf = NULL;
                else {
-                       buf = read_sha1_file(entry->idx.oid.hash, &type,
-                                            &size);
+                       buf = read_object_file(&entry->idx.oid, &type, &size);
                        if (!buf)
                                die(_("unable to read %s"),
                                    oid_to_hex(&entry->idx.oid));
@@ -283,15 +298,15 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
                FREE_AND_NULL(entry->delta_data);
                entry->z_delta_size = 0;
        } else if (entry->delta_data) {
-               size = entry->delta_size;
+               size = DELTA_SIZE(entry);
                buf = entry->delta_data;
                entry->delta_data = NULL;
-               type = (allow_ofs_delta && entry->delta->idx.offset) ?
+               type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
        } else {
                buf = get_delta(entry);
-               size = entry->delta_size;
-               type = (allow_ofs_delta && entry->delta->idx.offset) ?
+               size = DELTA_SIZE(entry);
+               type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
        }
 
@@ -315,7 +330,7 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
                 * encoding of the relative offset for the delta
                 * base from this object's position in the pack.
                 */
-               off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+               off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
                unsigned pos = sizeof(dheader) - 1;
                dheader[pos] = ofs & 127;
                while (ofs >>= 7)
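
The `dheader` loop kept as context above is the pack format's variable-length encoding of the delta-base offset for OBJ_OFS_DELTA: seven payload bits per byte, most significant group first, the high bit meaning "more bytes follow", and a -1 bias on all but the last group so shorter encodings never collide with longer ones. A self-contained sketch of the encode plus the matching decode (the decode is not in this hunk; it is shown only to make the bias visible):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	/* Encode an offset the way the dheader loop above does. */
	static size_t encode_ofs(uint64_t ofs, unsigned char *out /* >= 10 bytes */)
	{
		unsigned char buf[10];
		size_t pos = sizeof(buf) - 1;

		buf[pos] = ofs & 127;
		while (ofs >>= 7)
			buf[--pos] = 128 | (--ofs & 127);
		memcpy(out, buf + pos, sizeof(buf) - pos);
		return sizeof(buf) - pos;
	}

	/* Reverse the encoding, undoing the -1 bias per continuation byte. */
	static uint64_t decode_ofs(const unsigned char *in)
	{
		uint64_t ofs = in[0] & 127;

		while (*in++ & 128)
			ofs = ((ofs + 1) << 7) + (*in & 127);
		return ofs;
	}

	int main(void)
	{
		unsigned char buf[10];
		size_t n = encode_ofs(130, buf);	/* encodes as 0x80 0x02 */

		printf("%zu bytes, decodes to %llu\n",
		       n, (unsigned long long)decode_ofs(buf));
		return 0;
	}
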
@@ -341,7 +356,7 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
                        return 0;
                }
                hashwrite(f, header, hdrlen);
-               hashwrite(f, entry->delta->idx.oid.hash, 20);
+               hashwrite(f, DELTA(entry)->idx.oid.hash, 20);
                hdrlen += 20;
        } else {
                if (limit && hdrlen + datalen + 20 >= limit) {
@@ -367,21 +382,22 @@ static unsigned long write_no_reuse_object(struct hashfile *f, struct object_ent
 static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
                                unsigned long limit, int usable_delta)
 {
-       struct packed_git *p = entry->in_pack;
+       struct packed_git *p = IN_PACK(entry);
        struct pack_window *w_curs = NULL;
        struct revindex_entry *revidx;
        off_t offset;
-       enum object_type type = entry->type;
+       enum object_type type = oe_type(entry);
        off_t datalen;
        unsigned char header[MAX_PACK_OBJECT_HEADER],
                      dheader[MAX_PACK_OBJECT_HEADER];
        unsigned hdrlen;
+       unsigned long entry_size = SIZE(entry);
 
-       if (entry->delta)
-               type = (allow_ofs_delta && entry->delta->idx.offset) ?
+       if (DELTA(entry))
+               type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
        hdrlen = encode_in_pack_object_header(header, sizeof(header),
-                                             type, entry->size);
+                                             type, entry_size);
 
        offset = entry->in_pack_offset;
        revidx = find_pack_revindex(p, offset);
@@ -398,7 +414,7 @@ static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
        datalen -= entry->in_pack_header_size;
 
        if (!pack_to_stdout && p->index_version == 1 &&
-           check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
+           check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
                error("corrupt packed object for %s",
                      oid_to_hex(&entry->idx.oid));
                unuse_pack(&w_curs);
@@ -406,7 +422,7 @@ static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
        }
 
        if (type == OBJ_OFS_DELTA) {
-               off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+               off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
                unsigned pos = sizeof(dheader) - 1;
                dheader[pos] = ofs & 127;
                while (ofs >>= 7)
@@ -425,7 +441,7 @@ static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
                        return 0;
                }
                hashwrite(f, header, hdrlen);
-               hashwrite(f, entry->delta->idx.oid.hash, 20);
+               hashwrite(f, DELTA(entry)->idx.oid.hash, 20);
                hdrlen += 20;
                reused_delta++;
        } else {
@@ -465,28 +481,29 @@ static off_t write_object(struct hashfile *f,
        else
                limit = pack_size_limit - write_offset;
 
-       if (!entry->delta)
+       if (!DELTA(entry))
                usable_delta = 0;       /* no delta */
        else if (!pack_size_limit)
               usable_delta = 1;        /* unlimited packfile */
-       else if (entry->delta->idx.offset == (off_t)-1)
+       else if (DELTA(entry)->idx.offset == (off_t)-1)
                usable_delta = 0;       /* base was written to another pack */
-       else if (entry->delta->idx.offset)
+       else if (DELTA(entry)->idx.offset)
                usable_delta = 1;       /* base already exists in this pack */
        else
                usable_delta = 0;       /* base could end up in another pack */
 
        if (!reuse_object)
                to_reuse = 0;   /* explicit */
-       else if (!entry->in_pack)
+       else if (!IN_PACK(entry))
                to_reuse = 0;   /* can't reuse what we don't have */
-       else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
+       else if (oe_type(entry) == OBJ_REF_DELTA ||
+                oe_type(entry) == OBJ_OFS_DELTA)
                                /* check_object() decided it for us ... */
                to_reuse = usable_delta;
                                /* ... but pack split may override that */
-       else if (entry->type != entry->in_pack_type)
+       else if (oe_type(entry) != entry->in_pack_type)
                to_reuse = 0;   /* pack has delta which is unusable */
-       else if (entry->delta)
+       else if (DELTA(entry))
                to_reuse = 0;   /* we want to pack afresh */
        else
                to_reuse = 1;   /* we have it in-pack undeltified,
@@ -538,12 +555,12 @@ static enum write_one_status write_one(struct hashfile *f,
        }
 
        /* if we are deltified, write out base object first. */
-       if (e->delta) {
+       if (DELTA(e)) {
                e->idx.offset = 1; /* now recurse */
-               switch (write_one(f, e->delta, offset)) {
+               switch (write_one(f, DELTA(e), offset)) {
                case WRITE_ONE_RECURSIVE:
                        /* we cannot depend on this one */
-                       e->delta = NULL;
+                       SET_DELTA(e, NULL);
                        break;
                default:
                        break;
@@ -605,34 +622,34 @@ static void add_descendants_to_write_order(struct object_entry **wo,
                        /* add this node... */
                        add_to_write_order(wo, endp, e);
                        /* all its siblings... */
-                       for (s = e->delta_sibling; s; s = s->delta_sibling) {
+                       for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) {
                                add_to_write_order(wo, endp, s);
                        }
                }
                /* drop down a level to add left subtree nodes if possible */
-               if (e->delta_child) {
+               if (DELTA_CHILD(e)) {
                        add_to_order = 1;
-                       e = e->delta_child;
+                       e = DELTA_CHILD(e);
                } else {
                        add_to_order = 0;
                        /* our sibling might have some children, it is next */
-                       if (e->delta_sibling) {
-                               e = e->delta_sibling;
+                       if (DELTA_SIBLING(e)) {
+                               e = DELTA_SIBLING(e);
                                continue;
                        }
                        /* go back to our parent node */
-                       e = e->delta;
-                       while (e && !e->delta_sibling) {
+                       e = DELTA(e);
+                       while (e && !DELTA_SIBLING(e)) {
                                /* we're on the right side of a subtree, keep
                                 * going up until we can go right again */
-                               e = e->delta;
+                               e = DELTA(e);
                        }
                        if (!e) {
                                /* done- we hit our original root node */
                                return;
                        }
                        /* pass it off to sibling at this level */
-                       e = e->delta_sibling;
+                       e = DELTA_SIBLING(e);
                }
        };
 }
@@ -643,7 +660,7 @@ static void add_family_to_write_order(struct object_entry **wo,
 {
        struct object_entry *root;
 
-       for (root = e; root->delta; root = root->delta)
+       for (root = e; DELTA(root); root = DELTA(root))
                ; /* nothing */
        add_descendants_to_write_order(wo, endp, root);
 }
@@ -658,8 +675,8 @@ static struct object_entry **compute_write_order(void)
        for (i = 0; i < to_pack.nr_objects; i++) {
                objects[i].tagged = 0;
                objects[i].filled = 0;
-               objects[i].delta_child = NULL;
-               objects[i].delta_sibling = NULL;
+               SET_DELTA_CHILD(&objects[i], NULL);
+               SET_DELTA_SIBLING(&objects[i], NULL);
        }
 
        /*
@@ -669,11 +686,11 @@ static struct object_entry **compute_write_order(void)
         */
        for (i = to_pack.nr_objects; i > 0;) {
                struct object_entry *e = &objects[--i];
-               if (!e->delta)
+               if (!DELTA(e))
                        continue;
                /* Mark me as the first child */
-               e->delta_sibling = e->delta->delta_child;
-               e->delta->delta_child = e;
+               e->delta_sibling_idx = DELTA(e)->delta_child_idx;
+               SET_DELTA_CHILD(DELTA(e), e);
        }
 
        /*
@@ -705,8 +722,8 @@ static struct object_entry **compute_write_order(void)
         * And then all remaining commits and tags.
         */
        for (i = last_untagged; i < to_pack.nr_objects; i++) {
-               if (objects[i].type != OBJ_COMMIT &&
-                   objects[i].type != OBJ_TAG)
+               if (oe_type(&objects[i]) != OBJ_COMMIT &&
+                   oe_type(&objects[i]) != OBJ_TAG)
                        continue;
                add_to_write_order(wo, &wo_end, &objects[i]);
        }
@@ -715,7 +732,7 @@ static struct object_entry **compute_write_order(void)
         * And then all the trees.
         */
        for (i = last_untagged; i < to_pack.nr_objects; i++) {
-               if (objects[i].type != OBJ_TREE)
+               if (oe_type(&objects[i]) != OBJ_TREE)
                        continue;
                add_to_write_order(wo, &wo_end, &objects[i]);
        }
@@ -837,11 +854,11 @@ static void write_pack_file(void)
                 * If so, rewrite it like in fast-import
                 */
                if (pack_to_stdout) {
-                       hashclose(f, oid.hash, CSUM_CLOSE);
+                       finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
                } else if (nr_written == nr_remaining) {
-                       hashclose(f, oid.hash, CSUM_FSYNC);
+                       finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
                } else {
-                       int fd = hashclose(f, oid.hash, 0);
+                       int fd = finalize_hashfile(f, oid.hash, 0);
                        fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
                                                 nr_written, oid.hash, offset);
                        close(fd);
@@ -878,7 +895,8 @@ static void write_pack_file(void)
 
                        if (write_bitmap_index) {
                                bitmap_writer_set_checksum(oid.hash);
-                               bitmap_writer_build_type_index(written_list, nr_written);
+                               bitmap_writer_build_type_index(
+                                       &to_pack, written_list, nr_written);
                        }
 
                        finish_tmp_packfile(&tmpname, pack_tmp_name,
@@ -982,13 +1000,16 @@ static int want_found_object(int exclude, struct packed_git *p)
         * Otherwise, we signal "-1" at the end to tell the caller that we do
         * not know either way, and it needs to check more packs.
         */
-       if (!ignore_packed_keep &&
+       if (!ignore_packed_keep_on_disk &&
+           !ignore_packed_keep_in_core &&
            (!local || !have_non_local_packs))
                return 1;
 
        if (local && !p->pack_local)
                return 0;
-       if (ignore_packed_keep && p->pack_local && p->pack_keep)
+       if (p->pack_local &&
+           ((ignore_packed_keep_on_disk && p->pack_keep) ||
+            (ignore_packed_keep_in_core && p->pack_keep_in_core)))
                return 0;
 
        /* we don't know yet; keep looking for more packs */
@@ -1025,8 +1046,7 @@ static int want_object_in_pack(const struct object_id *oid,
                if (want != -1)
                        return want;
        }
-
-       list_for_each(pos, &packed_git_mru) {
+       list_for_each(pos, get_packed_git_mru(the_repository)) {
                struct packed_git *p = list_entry(pos, struct packed_git, mru);
                off_t offset;
 
@@ -1044,7 +1064,8 @@ static int want_object_in_pack(const struct object_id *oid,
                        }
                        want = want_found_object(exclude, p);
                        if (!exclude && want > 0)
-                               list_move(&p->mru, &packed_git_mru);
+                               list_move(&p->mru,
+                                         get_packed_git_mru(the_repository));
                        if (want != -1)
                                return want;
                }
@@ -1066,14 +1087,13 @@ static void create_object_entry(const struct object_id *oid,
 
        entry = packlist_alloc(&to_pack, oid->hash, index_pos);
        entry->hash = hash;
-       if (type)
-               entry->type = type;
+       oe_set_type(entry, type);
        if (exclude)
                entry->preferred_base = 1;
        else
                nr_result++;
        if (found_pack) {
-               entry->in_pack = found_pack;
+               oe_set_in_pack(&to_pack, entry, found_pack);
                entry->in_pack_offset = found_offset;
        }
 
@@ -1091,6 +1111,8 @@ static int add_object_entry(const struct object_id *oid, enum object_type type,
        off_t found_offset = 0;
        uint32_t index_pos;
 
+       display_progress(progress_state, ++nr_seen);
+
        if (have_duplicate_entry(oid, exclude, &index_pos))
                return 0;
 
@@ -1106,8 +1128,6 @@ static int add_object_entry(const struct object_id *oid, enum object_type type,
        create_object_entry(oid, type, pack_name_hash(name),
                            exclude, name && no_try_delta(name),
                            index_pos, found_pack, found_offset);
-
-       display_progress(progress_state, nr_result);
        return 1;
 }
 
@@ -1118,6 +1138,8 @@ static int add_object_entry_from_bitmap(const struct object_id *oid,
 {
        uint32_t index_pos;
 
+       display_progress(progress_state, ++nr_seen);
+
        if (have_duplicate_entry(oid, 0, &index_pos))
                return 0;
 
@@ -1125,8 +1147,6 @@ static int add_object_entry_from_bitmap(const struct object_id *oid,
                return 0;
 
        create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);
-
-       display_progress(progress_state, nr_result);
        return 1;
 }
 
@@ -1190,7 +1210,7 @@ static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
        /* Did not find one.  Either we got a bogus request or
         * we need to read and perhaps cache.
         */
-       data = read_sha1_file(oid->hash, &type, &size);
+       data = read_object_file(oid, &type, &size);
        if (!data)
                return NULL;
        if (type != OBJ_TREE) {
@@ -1351,7 +1371,7 @@ static void add_preferred_base(struct object_id *oid)
        if (window <= num_preferred_base++)
                return;
 
-       data = read_object_with_reference(oid->hash, tree_type, &size, tree_oid.hash);
+       data = read_object_with_reference(oid, tree_type, &size, &tree_oid);
        if (!data)
                return;
 
@@ -1398,8 +1418,10 @@ static void cleanup_preferred_base(void)
 
 static void check_object(struct object_entry *entry)
 {
-       if (entry->in_pack) {
-               struct packed_git *p = entry->in_pack;
+       unsigned long canonical_size;
+
+       if (IN_PACK(entry)) {
+               struct packed_git *p = IN_PACK(entry);
                struct pack_window *w_curs = NULL;
                const unsigned char *base_ref = NULL;
                struct object_entry *base_entry;
@@ -1407,6 +1429,8 @@ static void check_object(struct object_entry *entry)
                unsigned long avail;
                off_t ofs;
                unsigned char *buf, c;
+               enum object_type type;
+               unsigned long in_pack_size;
 
                buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);
 
@@ -1415,11 +1439,15 @@ static void check_object(struct object_entry *entry)
                 * since non-delta representations could still be reused.
                 */
                used = unpack_object_header_buffer(buf, avail,
-                                                  &entry->in_pack_type,
-                                                  &entry->size);
+                                                  &type,
+                                                  &in_pack_size);
                if (used == 0)
                        goto give_up;
 
+               if (type < 0)
+                       BUG("invalid type %d", type);
+               entry->in_pack_type = type;
+
                /*
                 * Determine if this is a delta and if so whether we can
                 * reuse it or not.  Otherwise let's find out as cheaply as
@@ -1428,9 +1456,10 @@ static void check_object(struct object_entry *entry)
                switch (entry->in_pack_type) {
                default:
                        /* Not a delta hence we've already got all we need. */
-                       entry->type = entry->in_pack_type;
+                       oe_set_type(entry, entry->in_pack_type);
+                       SET_SIZE(entry, in_pack_size);
                        entry->in_pack_header_size = used;
-                       if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
+                       if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)
                                goto give_up;
                        unuse_pack(&w_curs);
                        return;
@@ -1484,25 +1513,29 @@ static void check_object(struct object_entry *entry)
                         * deltify other objects against, in order to avoid
                         * circular deltas.
                         */
-                       entry->type = entry->in_pack_type;
-                       entry->delta = base_entry;
-                       entry->delta_size = entry->size;
-                       entry->delta_sibling = base_entry->delta_child;
-                       base_entry->delta_child = entry;
+                       oe_set_type(entry, entry->in_pack_type);
+                       SET_SIZE(entry, in_pack_size); /* delta size */
+                       SET_DELTA(entry, base_entry);
+                       SET_DELTA_SIZE(entry, in_pack_size);
+                       entry->delta_sibling_idx = base_entry->delta_child_idx;
+                       SET_DELTA_CHILD(base_entry, entry);
                        unuse_pack(&w_curs);
                        return;
                }
 
-               if (entry->type) {
+               if (oe_type(entry)) {
+                       off_t delta_pos;
+
                        /*
                         * This must be a delta and we already know what the
                         * final object type is.  Let's extract the actual
                         * object size from the delta header.
                         */
-                       entry->size = get_size_from_delta(p, &w_curs,
-                                       entry->in_pack_offset + entry->in_pack_header_size);
-                       if (entry->size == 0)
+                       delta_pos = entry->in_pack_offset + entry->in_pack_header_size;
+                       canonical_size = get_size_from_delta(p, &w_curs, delta_pos);
+                       if (canonical_size == 0)
                                goto give_up;
+                       SET_SIZE(entry, canonical_size);
                        unuse_pack(&w_curs);
                        return;
                }
@@ -1516,27 +1549,34 @@ static void check_object(struct object_entry *entry)
                unuse_pack(&w_curs);
        }
 
-       entry->type = sha1_object_info(entry->idx.oid.hash, &entry->size);
-       /*
-        * The error condition is checked in prepare_pack().  This is
-        * to permit a missing preferred base object to be ignored
-        * as a preferred base.  Doing so can result in a larger
-        * pack file, but the transfer will still take place.
-        */
+       oe_set_type(entry,
+                   oid_object_info(the_repository, &entry->idx.oid, &canonical_size));
+       if (entry->type_valid) {
+               SET_SIZE(entry, canonical_size);
+       } else {
+               /*
+                * Bad object type is checked in prepare_pack().  This is
+                * to permit a missing preferred base object to be ignored
+                * as a preferred base.  Doing so can result in a larger
+                * pack file, but the transfer will still take place.
+                */
+       }
 }
 
 static int pack_offset_sort(const void *_a, const void *_b)
 {
        const struct object_entry *a = *(struct object_entry **)_a;
        const struct object_entry *b = *(struct object_entry **)_b;
+       const struct packed_git *a_in_pack = IN_PACK(a);
+       const struct packed_git *b_in_pack = IN_PACK(b);
 
        /* avoid filesystem trashing with loose objects */
-       if (!a->in_pack && !b->in_pack)
+       if (!a_in_pack && !b_in_pack)
                return oidcmp(&a->idx.oid, &b->idx.oid);
 
-       if (a->in_pack < b->in_pack)
+       if (a_in_pack < b_in_pack)
                return -1;
-       if (a->in_pack > b->in_pack)
+       if (a_in_pack > b_in_pack)
                return 1;
        return a->in_pack_offset < b->in_pack_offset ? -1 :
                        (a->in_pack_offset > b->in_pack_offset);
@@ -1557,30 +1597,37 @@ static int pack_offset_sort(const void *_a, const void *_b)
  */
 static void drop_reused_delta(struct object_entry *entry)
 {
-       struct object_entry **p = &entry->delta->delta_child;
+       unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;
        struct object_info oi = OBJECT_INFO_INIT;
+       enum object_type type;
+       unsigned long size;
 
-       while (*p) {
-               if (*p == entry)
-                       *p = (*p)->delta_sibling;
+       while (*idx) {
+               struct object_entry *oe = &to_pack.objects[*idx - 1];
+
+               if (oe == entry)
+                       *idx = oe->delta_sibling_idx;
                else
-                       p = &(*p)->delta_sibling;
+                       idx = &oe->delta_sibling_idx;
        }
-       entry->delta = NULL;
+       SET_DELTA(entry, NULL);
        entry->depth = 0;
 
-       oi.sizep = &entry->size;
-       oi.typep = &entry->type;
-       if (packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) < 0) {
+       oi.sizep = &size;
+       oi.typep = &type;
+       if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
                /*
                 * We failed to get the info from this pack for some reason;
                 * fall back to sha1_object_info, which may find another copy.
-                * And if that fails, the error will be recorded in entry->type
+                * And if that fails, the error will be recorded in oe_type(entry)
                 * and dealt with in prepare_pack().
                 */
-               entry->type = sha1_object_info(entry->idx.oid.hash,
-                                              &entry->size);
+               oe_set_type(entry,
+                           oid_object_info(the_repository, &entry->idx.oid, &size));
+       } else {
+               oe_set_type(entry, type);
        }
+       SET_SIZE(entry, size);
 }
 
 /*
@@ -1604,7 +1651,7 @@ static void break_delta_chains(struct object_entry *entry)
 
        for (cur = entry, total_depth = 0;
             cur;
-            cur = cur->delta, total_depth++) {
+            cur = DELTA(cur), total_depth++) {
                if (cur->dfs_state == DFS_DONE) {
                        /*
                         * We've already seen this object and know it isn't
@@ -1629,7 +1676,7 @@ static void break_delta_chains(struct object_entry *entry)
                 * it's not a delta, we're done traversing, but we'll mark it
                 * done to save time on future traversals.
                 */
-               if (!cur->delta) {
+               if (!DELTA(cur)) {
                        cur->dfs_state = DFS_DONE;
                        break;
                }
@@ -1652,7 +1699,7 @@ static void break_delta_chains(struct object_entry *entry)
                 * We keep all commits in the chain that we examined.
                 */
                cur->dfs_state = DFS_ACTIVE;
-               if (cur->delta->dfs_state == DFS_ACTIVE) {
+               if (DELTA(cur)->dfs_state == DFS_ACTIVE) {
                        drop_reused_delta(cur);
                        cur->dfs_state = DFS_DONE;
                        break;
@@ -1667,7 +1714,7 @@ static void break_delta_chains(struct object_entry *entry)
         * an extra "next" pointer to keep going after we reset cur->delta.
         */
        for (cur = entry; cur; cur = next) {
-               next = cur->delta;
+               next = DELTA(cur);
 
                /*
                 * We should have a chain of zero or more ACTIVE states down to
@@ -1712,6 +1759,10 @@ static void get_object_details(void)
        uint32_t i;
        struct object_entry **sorted_by_offset;
 
+       if (progress)
+               progress_state = start_progress(_("Counting objects"),
+                                               to_pack.nr_objects);
+
        sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
        for (i = 0; i < to_pack.nr_objects; i++)
                sorted_by_offset[i] = to_pack.objects + i;
@@ -1720,9 +1771,12 @@ static void get_object_details(void)
        for (i = 0; i < to_pack.nr_objects; i++) {
                struct object_entry *entry = sorted_by_offset[i];
                check_object(entry);
-               if (big_file_threshold < entry->size)
+               if (entry->type_valid &&
+                   oe_size_greater_than(&to_pack, entry, big_file_threshold))
                        entry->no_try_delta = 1;
+               display_progress(progress_state, i + 1);
        }
+       stop_progress(&progress_state);
 
        /*
         * This must happen in a second pass, since we rely on the delta
@@ -1747,10 +1801,14 @@ static int type_size_sort(const void *_a, const void *_b)
 {
        const struct object_entry *a = *(struct object_entry **)_a;
        const struct object_entry *b = *(struct object_entry **)_b;
+       enum object_type a_type = oe_type(a);
+       enum object_type b_type = oe_type(b);
+       unsigned long a_size = SIZE(a);
+       unsigned long b_size = SIZE(b);
 
-       if (a->type > b->type)
+       if (a_type > b_type)
                return -1;
-       if (a->type < b->type)
+       if (a_type < b_type)
                return 1;
        if (a->hash > b->hash)
                return -1;
@@ -1760,9 +1818,9 @@ static int type_size_sort(const void *_a, const void *_b)
                return -1;
        if (a->preferred_base < b->preferred_base)
                return 1;
-       if (a->size > b->size)
+       if (a_size > b_size)
                return -1;
-       if (a->size < b->size)
+       if (a_size < b_size)
                return 1;
        return a < b ? -1 : (a > b);  /* newest first */
 }
@@ -1815,6 +1873,46 @@ static pthread_mutex_t progress_mutex;
 
 #endif
 
+/*
+ * Return the size of the object without doing any delta
+ * reconstruction (so non-deltas are true object sizes, but deltas
+ * return the size of the delta data).
+ */
+unsigned long oe_get_size_slow(struct packing_data *pack,
+                              const struct object_entry *e)
+{
+       struct packed_git *p;
+       struct pack_window *w_curs;
+       unsigned char *buf;
+       enum object_type type;
+       unsigned long used, avail, size;
+
+       if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {
+               read_lock();
+               if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)
+                       die(_("unable to get size of %s"),
+                           oid_to_hex(&e->idx.oid));
+               read_unlock();
+               return size;
+       }
+
+       p = oe_in_pack(pack, e);
+       if (!p)
+               BUG("when e->type is a delta, it must belong to a pack");
+
+       read_lock();
+       w_curs = NULL;
+       buf = use_pack(p, &w_curs, e->in_pack_offset, &avail);
+       used = unpack_object_header_buffer(buf, avail, &type, &size);
+       if (used == 0)
+               die(_("unable to parse object header of %s"),
+                   oid_to_hex(&e->idx.oid));
+
+       unuse_pack(&w_curs);
+       read_unlock();
+       return size;
+}
+
 static int try_delta(struct unpacked *trg, struct unpacked *src,
                     unsigned max_depth, unsigned long *mem_usage)
 {
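
oe_get_size_slow() above, together with SIZE()/SET_SIZE() and the OE_DELTA_SIZE_BITS / OE_Z_DELTA_BITS / OE_DEPTH_BITS limits checked further down, all follow one idea: keep the common case in a narrow field inside struct object_entry and fall back to recomputing (or simply not caching) the rare oversized value. A rough sketch of that accessor shape, with hypothetical field names since the real pack-objects.h layout is not shown in this diff:

	#include <stdio.h>

	#define SIZE_BITS  31
	#define SIZE_LIMIT ((1UL << SIZE_BITS) - 1)

	struct small_entry {
		unsigned size_      : SIZE_BITS; /* fits almost every object */
		unsigned size_valid : 1;         /* 0 = too big, use slow path */
	};

	/* Stand-in for the slow path; the real code re-reads the object header. */
	static unsigned long slow_size(const struct small_entry *e)
	{
		(void)e;
		return 0;
	}

	static void entry_set_size(struct small_entry *e, unsigned long size)
	{
		if (size <= SIZE_LIMIT) {
			e->size_ = size;
			e->size_valid = 1;
		} else {
			e->size_valid = 0; /* drop it, recompute on demand */
		}
	}

	static unsigned long entry_size(const struct small_entry *e)
	{
		return e->size_valid ? e->size_ : slow_size(e);
	}

	int main(void)
	{
		struct small_entry e;

		entry_set_size(&e, 12345);
		printf("%lu\n", entry_size(&e));
		return 0;
	}
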
@@ -1826,7 +1924,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
        void *delta_buf;
 
        /* Don't bother doing diffs between different types */
-       if (trg_entry->type != src_entry->type)
+       if (oe_type(trg_entry) != oe_type(src_entry))
                return -1;
 
        /*
@@ -1837,8 +1935,8 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
         * it, we will still save the transfer cost, as we already know
         * the other side has it and we won't send src_entry at all.
         */
-       if (reuse_delta && trg_entry->in_pack &&
-           trg_entry->in_pack == src_entry->in_pack &&
+       if (reuse_delta && IN_PACK(trg_entry) &&
+           IN_PACK(trg_entry) == IN_PACK(src_entry) &&
            !src_entry->preferred_base &&
            trg_entry->in_pack_type != OBJ_REF_DELTA &&
            trg_entry->in_pack_type != OBJ_OFS_DELTA)
@@ -1849,19 +1947,19 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
                return 0;
 
        /* Now some size filtering heuristics. */
-       trg_size = trg_entry->size;
-       if (!trg_entry->delta) {
+       trg_size = SIZE(trg_entry);
+       if (!DELTA(trg_entry)) {
                max_size = trg_size/2 - 20;
                ref_depth = 1;
        } else {
-               max_size = trg_entry->delta_size;
+               max_size = DELTA_SIZE(trg_entry);
                ref_depth = trg->depth;
        }
        max_size = (uint64_t)max_size * (max_depth - src->depth) /
                                                (max_depth - ref_depth + 1);
        if (max_size == 0)
                return 0;
-       src_size = src_entry->size;
+       src_size = SIZE(src_entry);
        sizediff = src_size < trg_size ? trg_size - src_size : 0;
        if (sizediff >= max_size)
                return 0;
@@ -1871,8 +1969,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
        /* Load data if not already done */
        if (!trg->data) {
                read_lock();
-               trg->data = read_sha1_file(trg_entry->idx.oid.hash, &type,
-                                          &sz);
+               trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
                read_unlock();
                if (!trg->data)
                        die("object %s cannot be read",
@@ -1885,8 +1982,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
        }
        if (!src->data) {
                read_lock();
-               src->data = read_sha1_file(src_entry->idx.oid.hash, &type,
-                                          &sz);
+               src->data = read_object_file(&src_entry->idx.oid, &type, &sz);
                read_unlock();
                if (!src->data) {
                        if (src_entry->preferred_base) {
@@ -1925,10 +2021,14 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
        delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
        if (!delta_buf)
                return 0;
+       if (delta_size >= (1U << OE_DELTA_SIZE_BITS)) {
+               free(delta_buf);
+               return 0;
+       }
 
-       if (trg_entry->delta) {
+       if (DELTA(trg_entry)) {
                /* Prefer only shallower same-sized deltas. */
-               if (delta_size == trg_entry->delta_size &&
+               if (delta_size == DELTA_SIZE(trg_entry) &&
                    src->depth + 1 >= trg->depth) {
                        free(delta_buf);
                        return 0;
@@ -1943,7 +2043,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
        free(trg_entry->delta_data);
        cache_lock();
        if (trg_entry->delta_data) {
-               delta_cache_size -= trg_entry->delta_size;
+               delta_cache_size -= DELTA_SIZE(trg_entry);
                trg_entry->delta_data = NULL;
        }
        if (delta_cacheable(src_size, trg_size, delta_size)) {
@@ -1955,8 +2055,8 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
                free(delta_buf);
        }
 
-       trg_entry->delta = src_entry;
-       trg_entry->delta_size = delta_size;
+       SET_DELTA(trg_entry, src_entry);
+       SET_DELTA_SIZE(trg_entry, delta_size);
        trg->depth = src->depth + 1;
 
        return 1;
@@ -1964,13 +2064,13 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
 
 static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
 {
-       struct object_entry *child = me->delta_child;
+       struct object_entry *child = DELTA_CHILD(me);
        unsigned int m = n;
        while (child) {
                unsigned int c = check_delta_limit(child, n + 1);
                if (m < c)
                        m = c;
-               child = child->delta_sibling;
+               child = DELTA_SIBLING(child);
        }
        return m;
 }
@@ -1981,7 +2081,7 @@ static unsigned long free_unpacked(struct unpacked *n)
        free_delta_index(n->index);
        n->index = NULL;
        if (n->data) {
-               freed_mem += n->entry->size;
+               freed_mem += SIZE(n->entry);
                FREE_AND_NULL(n->data);
        }
        n->entry = NULL;
@@ -2039,7 +2139,7 @@ static void find_deltas(struct object_entry **list, unsigned *list_size,
                 * otherwise they would become too deep.
                 */
                max_depth = depth;
-               if (entry->delta_child) {
+               if (DELTA_CHILD(entry)) {
                        max_depth -= check_delta_limit(entry, 0);
                        if (max_depth <= 0)
                                goto next;
@@ -2077,19 +2177,26 @@ static void find_deltas(struct object_entry **list, unsigned *list_size,
                 * between writes at that moment.
                 */
                if (entry->delta_data && !pack_to_stdout) {
-                       entry->z_delta_size = do_compress(&entry->delta_data,
-                                                         entry->delta_size);
-                       cache_lock();
-                       delta_cache_size -= entry->delta_size;
-                       delta_cache_size += entry->z_delta_size;
-                       cache_unlock();
+                       unsigned long size;
+
+                       size = do_compress(&entry->delta_data, DELTA_SIZE(entry));
+                       if (size < (1U << OE_Z_DELTA_BITS)) {
+                               entry->z_delta_size = size;
+                               cache_lock();
+                               delta_cache_size -= DELTA_SIZE(entry);
+                               delta_cache_size += entry->z_delta_size;
+                               cache_unlock();
+                       } else {
+                               FREE_AND_NULL(entry->delta_data);
+                               entry->z_delta_size = 0;
+                       }
                }
 
                /* if we made n a delta, and if n is already at max
                 * depth, leaving it in the window is pointless.  we
                 * should evict it first.
                 */
-               if (entry->delta && max_depth <= n->depth)
+               if (DELTA(entry) && max_depth <= n->depth)
                        continue;
 
                /*
@@ -2097,7 +2204,7 @@ static void find_deltas(struct object_entry **list, unsigned *list_size,
                 * currently deltified object, to keep it longer.  It will
                 * be the first base object to be attempted next.
                 */
-               if (entry->delta) {
+               if (DELTA(entry)) {
                        struct unpacked swap = array[best_base];
                        int dist = (window + idx - best_base) % window;
                        int dst = best_base;
@@ -2418,13 +2525,14 @@ static void prepare_pack(int window, int depth)
        for (i = 0; i < to_pack.nr_objects; i++) {
                struct object_entry *entry = to_pack.objects + i;
 
-               if (entry->delta)
+               if (DELTA(entry))
                        /* This happens if we decided to reuse existing
                         * delta from a pack.  "reuse_delta &&" is implied.
                         */
                        continue;
 
-               if (entry->size < 50)
+               if (!entry->type_valid ||
+                   oe_size_less_than(&to_pack, entry, 50))
                        continue;
 
                if (entry->no_try_delta)
@@ -2432,11 +2540,11 @@ static void prepare_pack(int window, int depth)
 
                if (!entry->preferred_base) {
                        nr_deltas++;
-                       if (entry->type < 0)
+                       if (oe_type(entry) < 0)
                                die("unable to get type of object %s",
                                    oid_to_hex(&entry->idx.oid));
                } else {
-                       if (entry->type < 0) {
+                       if (oe_type(entry) < 0) {
                                /*
                                 * This object is not found, but we
                                 * don't have to include it anyway.
@@ -2545,7 +2653,7 @@ static void read_object_list_from_stdin(void)
                        die("expected object ID, got garbage:\n %s", line);
 
                add_preferred_base_object(p + 1);
-               add_object_entry(&oid, 0, p + 1, 0);
+               add_object_entry(&oid, OBJ_NONE, p + 1, 0);
        }
 }
 
@@ -2674,11 +2782,11 @@ static void add_objects_in_unpacked_packs(struct rev_info *revs)
 
        memset(&in_pack, 0, sizeof(in_pack));
 
-       for (p = packed_git; p; p = p->next) {
+       for (p = get_packed_git(the_repository); p; p = p->next) {
                struct object_id oid;
                struct object *o;
 
-               if (!p->pack_local || p->pack_keep)
+               if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
                        continue;
                if (open_pack_index(p))
                        die("cannot open pack index");
@@ -2709,7 +2817,7 @@ static void add_objects_in_unpacked_packs(struct rev_info *revs)
 static int add_loose_object(const struct object_id *oid, const char *path,
                            void *data)
 {
-       enum object_type type = sha1_object_info(oid->hash, NULL);
+       enum object_type type = oid_object_info(the_repository, oid, NULL);
 
        if (type < 0) {
                warning("loose object at %s could not be examined", path);
@@ -2737,16 +2845,18 @@ static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
        static struct packed_git *last_found = (void *)1;
        struct packed_git *p;
 
-       p = (last_found != (void *)1) ? last_found : packed_git;
+       p = (last_found != (void *)1) ? last_found :
+                                       get_packed_git(the_repository);
 
        while (p) {
-               if ((!p->pack_local || p->pack_keep) &&
+               if ((!p->pack_local || p->pack_keep ||
+                               p->pack_keep_in_core) &&
                        find_pack_entry_one(oid->hash, p)) {
                        last_found = p;
                        return 1;
                }
                if (p == last_found)
-                       p = packed_git;
+                       p = get_packed_git(the_repository);
                else
                        p = p->next;
                if (p == last_found)
@@ -2782,8 +2892,8 @@ static void loosen_unused_packed_objects(struct rev_info *revs)
        uint32_t i;
        struct object_id oid;
 
-       for (p = packed_git; p; p = p->next) {
-               if (!p->pack_local || p->pack_keep)
+       for (p = get_packed_git(the_repository); p; p = p->next) {
+               if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
                        continue;
 
                if (open_pack_index(p))
@@ -2809,7 +2919,8 @@ static int pack_options_allow_reuse(void)
 {
        return pack_to_stdout &&
               allow_ofs_delta &&
-              !ignore_packed_keep &&
+              !ignore_packed_keep_on_disk &&
+              !ignore_packed_keep_in_core &&
               (!local || !have_non_local_packs) &&
               !incremental;
 }
@@ -2918,6 +3029,32 @@ static void get_object_list(int ac, const char **av)
        oid_array_clear(&recent_objects);
 }
 
+static void add_extra_kept_packs(const struct string_list *names)
+{
+       struct packed_git *p;
+
+       if (!names->nr)
+               return;
+
+       for (p = get_packed_git(the_repository); p; p = p->next) {
+               const char *name = basename(p->pack_name);
+               int i;
+
+               if (!p->pack_local)
+                       continue;
+
+               for (i = 0; i < names->nr; i++)
+                       if (!fspathcmp(name, names->items[i].string))
+                               break;
+
+               if (i < names->nr) {
+                       p->pack_keep_in_core = 1;
+                       ignore_packed_keep_in_core = 1;
+                       continue;
+               }
+       }
+}
+
 static int option_parse_index_version(const struct option *opt,
                                      const char *arg, int unset)
 {
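
add_extra_kept_packs() above marks any local pack whose basename matches one of the `--keep-pack` names as kept "in core", which want_found_object() and the reuse checks then treat exactly like an on-disk .keep file. A hedged, self-contained sketch of the matching idea (the real code uses git's fspathcmp() so the comparison follows filesystem case rules; the pack name below is a made-up example):

	#include <stdio.h>
	#include <string.h>
	#include <libgen.h>

	/* Compare only the basename of a pack path against user-supplied names. */
	static int is_kept(const char *pack_path, const char *const *keep, int nr)
	{
		char buf[4096];
		const char *base;
		int i;

		snprintf(buf, sizeof(buf), "%s", pack_path);
		base = basename(buf);	/* POSIX basename() may modify its copy */
		for (i = 0; i < nr; i++)
			if (!strcmp(base, keep[i]))
				return 1;
		return 0;
	}

	int main(void)
	{
		const char *keep[] = { "pack-0123abcd.pack" };

		printf("%d\n", is_kept(".git/objects/pack/pack-0123abcd.pack",
				       keep, 1));
		return 0;
	}
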
@@ -2957,6 +3094,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
        struct argv_array rp = ARGV_ARRAY_INIT;
        int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
        int rev_list_index = 0;
+       struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
        struct option pack_objects_options[] = {
                OPT_SET_INT('q', "quiet", &progress,
                            N_("do not show progress meter"), 0),
@@ -3021,8 +3159,10 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
                         N_("create thin packs")),
                OPT_BOOL(0, "shallow", &shallow,
                         N_("create packs suitable for shallow fetches")),
-               OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
+               OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk,
                         N_("ignore packs that have companion .keep file")),
+               OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
+                               N_("ignore this pack")),
                OPT_INTEGER(0, "compression", &pack_compression_level,
                            N_("pack compression level")),
                OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
@@ -3040,6 +3180,9 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
                OPT_END(),
        };
 
+       if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
+               BUG("too many dfs states, increase OE_DFS_STATE_BITS");
+
        check_replace_refs = 0;
 
        reset_pack_idx_option(&pack_idx_opts);
@@ -3056,6 +3199,17 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
        if (pack_to_stdout != !base_name || argc)
                usage_with_options(pack_usage, pack_objects_options);
 
+       if (depth >= (1 << OE_DEPTH_BITS)) {
+               warning(_("delta chain depth %d is too deep, forcing %d"),
+                       depth, (1 << OE_DEPTH_BITS) - 1);
+               depth = (1 << OE_DEPTH_BITS) - 1;
+       }
+       if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {
+               warning(_("pack.deltaCacheLimit is too high, forcing %d"),
+                       (1U << OE_Z_DELTA_BITS) - 1);
+               cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;
+       }
+
        argv_array_push(&rp, "pack-objects");
        if (thin) {
                use_internal_rev_list = 1;
@@ -3087,6 +3241,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
                fetch_if_missing = 0;
                argv_array_push(&rp, "--exclude-promisor-objects");
        }
+       if (unpack_unreachable || keep_unreachable || pack_loose_unreachable)
+               use_internal_rev_list = 1;
 
        if (!reuse_object)
                reuse_delta = 0;
@@ -3150,23 +3306,23 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
        if (progress && all_progress_implied)
                progress = 2;
 
-       prepare_packed_git();
-       if (ignore_packed_keep) {
+       add_extra_kept_packs(&keep_pack_list);
+       if (ignore_packed_keep_on_disk) {
                struct packed_git *p;
-               for (p = packed_git; p; p = p->next)
+               for (p = get_packed_git(the_repository); p; p = p->next)
                        if (p->pack_local && p->pack_keep)
                                break;
                if (!p) /* no keep-able packs found */
-                       ignore_packed_keep = 0;
+                       ignore_packed_keep_on_disk = 0;
        }
        if (local) {
                /*
-                * unlike ignore_packed_keep above, we do not want to
-                * unset "local" based on looking at packs, as it
-                * also covers non-local objects
+                * unlike ignore_packed_keep_on_disk above, we do not
+                * want to unset "local" based on looking at packs, as
+                * it also covers non-local objects
                 */
                struct packed_git *p;
-               for (p = packed_git; p; p = p->next) {
+               for (p = get_packed_git(the_repository); p; p = p->next) {
                        if (!p->pack_local) {
                                have_non_local_packs = 1;
                                break;
@@ -3174,8 +3330,10 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
                }
        }
 
+       prepare_packing_data(&to_pack);
+
        if (progress)
-               progress_state = start_progress(_("Counting objects"), 0);
+               progress_state = start_progress(_("Enumerating objects"), 0);
        if (!use_internal_rev_list)
                read_object_list_from_stdin();
        else {
index 991e1bb76fd66bb189a3523ea5cddf0cd823e343..354478a12762d0b500cf7f56baf90ffc8d2b7c93 100644 (file)
@@ -7,7 +7,9 @@
 */
 
 #include "builtin.h"
+#include "repository.h"
 #include "packfile.h"
+#include "object-store.h"
 
 #define BLKSIZE 512
 
@@ -571,7 +573,7 @@ static struct pack_list * add_pack(struct packed_git *p)
 
 static struct pack_list * add_pack_file(const char *filename)
 {
-       struct packed_git *p = packed_git;
+       struct packed_git *p = get_packed_git(the_repository);
 
        if (strlen(filename) < 40)
                die("Bad pack filename: %s", filename);
@@ -586,7 +588,7 @@ static struct pack_list * add_pack_file(const char *filename)
 
 static void load_all(void)
 {
-       struct packed_git *p = packed_git;
+       struct packed_git *p = get_packed_git(the_repository);
 
        while (p) {
                add_pack(p);
@@ -629,8 +631,6 @@ int cmd_pack_redundant(int argc, const char **argv, const char *prefix)
                        break;
        }
 
-       prepare_packed_git();
-
        if (load_all_packs)
                load_all();
        else
index b106a392a481570d4fda5a1116c523cde34557ba..f3353564f99205b278362484abcdf1537058ef29 100644 (file)
@@ -1,6 +1,7 @@
 #include "builtin.h"
 #include "parse-options.h"
 #include "refs.h"
+#include "repository.h"
 
 static char const * const pack_refs_usage[] = {
        N_("git pack-refs [<options>]"),
@@ -17,5 +18,5 @@ int cmd_pack_refs(int argc, const char **argv, const char *prefix)
        };
        if (parse_options(argc, argv, prefix, opts, pack_refs_usage, 0))
                usage_with_options(pack_refs_usage, opts);
-       return refs_pack_refs(get_main_ref_store(), flags);
+       return refs_pack_refs(get_main_ref_store(the_repository), flags);
 }
index 4394d01c9350ae3e1fa26de034edf1509128469e..518ffbea1397faa35102dc088a3830e40c9b13a6 100644 (file)
@@ -50,7 +50,8 @@ static int prune_object(const struct object_id *oid, const char *fullpath,
        if (st.st_mtime > expire)
                return 0;
        if (show_only || verbose) {
-               enum object_type type = sha1_object_info(oid->hash, NULL);
+               enum object_type type = oid_object_info(the_repository, oid,
+                                                       NULL);
                printf("%s %s\n", oid_to_hex(oid),
                       (type > 0) ? type_name(type) : "unknown");
        }
index e32d6cd5b4c999bc45b961c1387af066c72a823a..c719a4f9d738b4c30f82812397c11595b1a4e936 100644 (file)
@@ -9,7 +9,7 @@
 #include "config.h"
 #include "builtin.h"
 #include "parse-options.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "run-command.h"
 #include "sha1-array.h"
 #include "remote.h"
@@ -27,14 +27,16 @@ enum rebase_type {
        REBASE_FALSE = 0,
        REBASE_TRUE,
        REBASE_PRESERVE,
+       REBASE_MERGES,
        REBASE_INTERACTIVE
 };
 
 /**
  * Parses the value of --rebase. If value is a false value, returns
  * REBASE_FALSE. If value is a true value, returns REBASE_TRUE. If value is
- * "preserve", returns REBASE_PRESERVE. If value is a invalid value, dies with
- * a fatal error if fatal is true, otherwise returns REBASE_INVALID.
+ * "merges", returns REBASE_MERGES. If value is "preserve", returns
+ * REBASE_PRESERVE. If value is an invalid value, dies with a fatal error if
+ * fatal is true, otherwise returns REBASE_INVALID.
  */
 static enum rebase_type parse_config_rebase(const char *key, const char *value,
                int fatal)
@@ -47,6 +49,8 @@ static enum rebase_type parse_config_rebase(const char *key, const char *value,
                return REBASE_TRUE;
        else if (!strcmp(value, "preserve"))
                return REBASE_PRESERVE;
+       else if (!strcmp(value, "merges"))
+               return REBASE_MERGES;
        else if (!strcmp(value, "interactive"))
                return REBASE_INTERACTIVE;
 
@@ -130,7 +134,7 @@ static struct option pull_options[] = {
        /* Options passed to git-merge or git-rebase */
        OPT_GROUP(N_("Options related to merging")),
        { OPTION_CALLBACK, 'r', "rebase", &opt_rebase,
-         "false|true|preserve|interactive",
+         "false|true|merges|preserve|interactive",
          N_("incorporate changes by rebasing rather than merging"),
          PARSE_OPT_OPTARG, parse_opt_rebase },
        OPT_PASSTHRU('n', NULL, &opt_diffstat, NULL,
@@ -800,7 +804,9 @@ static int run_rebase(const struct object_id *curr_head,
        argv_push_verbosity(&args);
 
        /* Options passed to git-rebase */
-       if (opt_rebase == REBASE_PRESERVE)
+       if (opt_rebase == REBASE_MERGES)
+               argv_array_push(&args, "--rebase-merges");
+       else if (opt_rebase == REBASE_PRESERVE)
                argv_array_push(&args, "--preserve-merges");
        else if (opt_rebase == REBASE_INTERACTIVE)
                argv_array_push(&args, "--interactive");
index 013c20d6164f61dc404b89271c0281d28b5069a7..ac3705370e12fda53da086f33c6b47925a48873c 100644 (file)
 #include "submodule.h"
 #include "submodule-config.h"
 #include "send-pack.h"
+#include "color.h"
 
 static const char * const push_usage[] = {
        N_("git push [<options>] [<repository> [<refspec>...]]"),
        NULL,
 };
 
+static int push_use_color = -1;
+static char push_colors[][COLOR_MAXLEN] = {
+       GIT_COLOR_RESET,
+       GIT_COLOR_RED,  /* ERROR */
+};
+
+enum color_push {
+       PUSH_COLOR_RESET = 0,
+       PUSH_COLOR_ERROR = 1
+};
+
+static int parse_push_color_slot(const char *slot)
+{
+       if (!strcasecmp(slot, "reset"))
+               return PUSH_COLOR_RESET;
+       if (!strcasecmp(slot, "error"))
+               return PUSH_COLOR_ERROR;
+       return -1;
+}
+
+static const char *push_get_color(enum color_push ix)
+{
+       if (want_color_stderr(push_use_color))
+               return push_colors[ix];
+       return "";
+}
+
 static int thin = 1;
 static int deleterefs;
 static const char *receivepack;
@@ -337,8 +365,11 @@ static int push_with_options(struct transport *transport, int flags)
                fprintf(stderr, _("Pushing to %s\n"), transport->url);
        err = transport_push(transport, refspec_nr, refspec, flags,
                             &reject_reasons);
-       if (err != 0)
+       if (err != 0) {
+               fprintf(stderr, "%s", push_get_color(PUSH_COLOR_ERROR));
                error(_("failed to push some refs to '%s'"), transport->url);
+               fprintf(stderr, "%s", push_get_color(PUSH_COLOR_RESET));
+       }
 
        err |= transport_disconnect(transport);
        if (!err)
@@ -467,6 +498,7 @@ static void set_push_cert_flags(int *flags, int v)
 
 static int git_push_config(const char *k, const char *v, void *cb)
 {
+       const char *slot_name;
        int *flags = cb;
        int status;
 
@@ -514,6 +546,16 @@ static int git_push_config(const char *k, const char *v, void *cb)
                        else
                                string_list_append(&push_options_config, v);
                return 0;
+       } else if (!strcmp(k, "color.push")) {
+               push_use_color = git_config_colorbool(k, v);
+               return 0;
+       } else if (skip_prefix(k, "color.push.", &slot_name)) {
+               int slot = parse_push_color_slot(slot_name);
+               if (slot < 0)
+                       return 0;
+               if (!v)
+                       return config_error_nonbool(k);
+               return color_parse(v, push_colors[slot]);
        }
 
        return git_default_config(k, v, NULL);
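
The git_push_config() hunk distinguishes the on/off switch color.push from the per-slot keys color.push.<slot> by stripping a fixed prefix and looking up the remainder, with "error" currently the only interesting slot. A small self-contained sketch of that key dispatch; a simplified prefix check stands in for git's skip_prefix(), and printf replaces the real colorbool/color_parse helpers, so none of the names below are git API:

#include <stdio.h>
#include <string.h>

/* simplified prefix check: on match, point *rest past the prefix */
static int has_prefix(const char *str, const char *prefix, const char **rest)
{
        size_t len = strlen(prefix);

        if (strncmp(str, prefix, len))
                return 0;
        *rest = str + len;
        return 1;
}

static void handle_key(const char *key, const char *value)
{
        const char *slot;

        if (!strcmp(key, "color.push"))
                printf("toggle push colors: %s\n", value);
        else if (has_prefix(key, "color.push.", &slot))
                printf("set color for slot '%s' to '%s'\n", slot, value);
        else
                printf("not a push color key: %s\n", key);
}

int main(void)
{
        handle_key("color.push", "auto");
        handle_key("color.push.error", "red bold");
        handle_key("push.default", "simple");
        return 0;
}

With the real builtin, the effect is that setting color.push.error (for example to "red bold") colors the "failed to push some refs" message on stderr, as the push_with_options() hunk above shows.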
index ad074705bb51d1de4221b3c5dfaa7229903c0ef0..f7c2a5fdc815a892b99d6561d73d953740bf789a 100644 (file)
@@ -12,8 +12,8 @@ static const char * const builtin_rebase_helper_usage[] = {
 int cmd_rebase__helper(int argc, const char **argv, const char *prefix)
 {
        struct replay_opts opts = REPLAY_OPTS_INIT;
-       unsigned flags = 0, keep_empty = 0;
-       int abbreviate_commands = 0;
+       unsigned flags = 0, keep_empty = 0, rebase_merges = 0;
+       int abbreviate_commands = 0, rebase_cousins = -1;
        enum {
                CONTINUE = 1, ABORT, MAKE_SCRIPT, SHORTEN_OIDS, EXPAND_OIDS,
                CHECK_TODO_LIST, SKIP_UNNECESSARY_PICKS, REARRANGE_SQUASH,
@@ -24,6 +24,9 @@ int cmd_rebase__helper(int argc, const char **argv, const char *prefix)
                OPT_BOOL(0, "keep-empty", &keep_empty, N_("keep empty commits")),
                OPT_BOOL(0, "allow-empty-message", &opts.allow_empty_message,
                        N_("allow commits with empty messages")),
+               OPT_BOOL(0, "rebase-merges", &rebase_merges, N_("rebase merge commits")),
+               OPT_BOOL(0, "rebase-cousins", &rebase_cousins,
+                        N_("keep original branch points of cousins")),
                OPT_CMDMODE(0, "continue", &command, N_("continue rebase"),
                                CONTINUE),
                OPT_CMDMODE(0, "abort", &command, N_("abort rebase"),
@@ -57,8 +60,14 @@ int cmd_rebase__helper(int argc, const char **argv, const char *prefix)
 
        flags |= keep_empty ? TODO_LIST_KEEP_EMPTY : 0;
        flags |= abbreviate_commands ? TODO_LIST_ABBREVIATE_CMDS : 0;
+       flags |= rebase_merges ? TODO_LIST_REBASE_MERGES : 0;
+       flags |= rebase_cousins > 0 ? TODO_LIST_REBASE_COUSINS : 0;
        flags |= command == SHORTEN_OIDS ? TODO_LIST_SHORTEN_IDS : 0;
 
+       if (rebase_cousins >= 0 && !rebase_merges)
+               warning(_("--[no-]rebase-cousins has no effect without "
+                         "--rebase-merges"));
+
        if (command == CONTINUE && argc == 1)
                return !!sequencer_continue(&opts);
        if (command == ABORT && argc == 1)
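
In the rebase--helper hunk, rebase_cousins starts at -1 while rebase_merges starts at 0, so the helper can tell "option not given" apart from an explicit --no-rebase-cousins and warn only when either explicit form is used without --rebase-merges. A tiny sketch of that tri-state check and the flag composition, using stand-in bit values rather than the real TODO_LIST_* constants:

#include <stdio.h>

#define FLAG_REBASE_MERGES   (1u << 0)  /* stand-in for TODO_LIST_REBASE_MERGES */
#define FLAG_REBASE_COUSINS  (1u << 1)  /* stand-in for TODO_LIST_REBASE_COUSINS */

/* rebase_cousins: -1 = not given, 0 = --no-rebase-cousins, 1 = --rebase-cousins */
static unsigned compose_flags(int rebase_merges, int rebase_cousins)
{
        unsigned flags = 0;

        flags |= rebase_merges ? FLAG_REBASE_MERGES : 0;
        flags |= rebase_cousins > 0 ? FLAG_REBASE_COUSINS : 0;

        if (rebase_cousins >= 0 && !rebase_merges)
                fprintf(stderr, "warning: --[no-]rebase-cousins has no effect "
                                "without --rebase-merges\n");
        return flags;
}

int main(void)
{
        printf("%u\n", compose_flags(0, -1));   /* neither option: 0, no warning */
        printf("%u\n", compose_flags(0, 0));    /* --no-rebase-cousins alone: warns */
        printf("%u\n", compose_flags(1, 1));    /* both options: 3 */
        return 0;
}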
index 75e7f18aceffc42b5fdc58296559c67d47203098..0dd163280d43c9d23e87fecc049ed03332aada0b 100644 (file)
@@ -1,4 +1,5 @@
 #include "builtin.h"
+#include "repository.h"
 #include "config.h"
 #include "lockfile.h"
 #include "pack.h"
@@ -6,7 +7,7 @@
 #include "pkt-line.h"
 #include "sideband.h"
 #include "run-command.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "commit.h"
 #include "object.h"
 #include "remote.h"
@@ -1242,11 +1243,11 @@ static void check_aliased_update(struct command *cmd, struct string_list *list)
        rp_error("refusing inconsistent update between symref '%s' (%s..%s) and"
                 " its target '%s' (%s..%s)",
                 cmd->ref_name,
-                find_unique_abbrev(cmd->old_oid.hash, DEFAULT_ABBREV),
-                find_unique_abbrev(cmd->new_oid.hash, DEFAULT_ABBREV),
+                find_unique_abbrev(&cmd->old_oid, DEFAULT_ABBREV),
+                find_unique_abbrev(&cmd->new_oid, DEFAULT_ABBREV),
                 dst_cmd->ref_name,
-                find_unique_abbrev(dst_cmd->old_oid.hash, DEFAULT_ABBREV),
-                find_unique_abbrev(dst_cmd->new_oid.hash, DEFAULT_ABBREV));
+                find_unique_abbrev(&dst_cmd->old_oid, DEFAULT_ABBREV),
+                find_unique_abbrev(&dst_cmd->new_oid, DEFAULT_ABBREV));
 
        cmd->error_string = dst_cmd->error_string =
                "inconsistent aliased update";
@@ -1778,7 +1779,7 @@ static const char *unpack(int err_fd, struct shallow_info *si)
                status = finish_command(&child);
                if (status)
                        return "index-pack abnormal exit";
-               reprepare_packed_git();
+               reprepare_packed_git(the_repository);
        }
        return NULL;
 }
@@ -1964,6 +1965,12 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix)
                unpack_limit = receive_unpack_limit;
 
        switch (determine_protocol_version_server()) {
+       case protocol_v2:
+               /*
+                * push support for protocol v2 has not been implemented yet,
+                * so ignore the request to use v2 and fall back to using v0.
+                */
+               break;
        case protocol_v1:
                /*
                 * v1 is just the original protocol with a version string,
@@ -2027,7 +2034,7 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix)
                        proc.git_cmd = 1;
                        proc.argv = argv_gc_auto;
 
-                       close_all_packs();
+                       close_all_packs(the_repository->objects);
                        if (!start_command(&proc)) {
                                if (use_sideband)
                                        copy_to_sideband(proc.err, -1, NULL);
index 4719a5354cf182eb91b257a99da97b9b81da5257..a48984d37e4f5f9e1f07a840582f82e202bd09f0 100644 (file)
@@ -75,7 +75,7 @@ static int tree_is_complete(const struct object_id *oid)
        if (!tree->buffer) {
                enum object_type type;
                unsigned long size;
-               void *data = read_sha1_file(oid->hash, &type, &size);
+               void *data = read_object_file(oid, &type, &size);
                if (!data) {
                        tree->object.flags |= INCOMPLETE;
                        return 0;
@@ -154,7 +154,7 @@ static int commit_is_complete(struct commit *commit)
                for (i = 0; i < found.nr; i++) {
                        struct commit *c =
                                (struct commit *)found.objects[i].item;
-                       if (!tree_is_complete(&c->tree->object.oid)) {
+                       if (!tree_is_complete(get_commit_tree_oid(c))) {
                                is_incomplete = 1;
                                c->object.flags |= INCOMPLETE;
                        }
index 805ffc05cdb80e4a69de4134e757f9c71e8033dc..0bbf9f4c9e81f92b7a74b112f7c4f328bb1c92a2 100644 (file)
@@ -245,7 +245,9 @@ static int add(int argc, const char **argv)
 struct branch_info {
        char *remote_name;
        struct string_list merge;
-       enum { NO_REBASE, NORMAL_REBASE, INTERACTIVE_REBASE } rebase;
+       enum {
+               NO_REBASE, NORMAL_REBASE, INTERACTIVE_REBASE, REBASE_MERGES
+       } rebase;
 };
 
 static struct string_list branch_list = STRING_LIST_INIT_NODUP;
@@ -306,6 +308,8 @@ static int config_read_branches(const char *key, const char *value, void *cb)
                                info->rebase = v;
                        else if (!strcmp(value, "preserve"))
                                info->rebase = NORMAL_REBASE;
+                       else if (!strcmp(value, "merges"))
+                               info->rebase = REBASE_MERGES;
                        else if (!strcmp(value, "interactive"))
                                info->rebase = INTERACTIVE_REBASE;
                }
@@ -862,7 +866,7 @@ static int get_remote_ref_states(const char *name,
        if (query) {
                transport = transport_get(states->remote, states->remote->url_nr > 0 ?
                        states->remote->url[0] : NULL);
-               remote_refs = transport_get_remote_refs(transport);
+               remote_refs = transport_get_remote_refs(transport, NULL);
                transport_disconnect(transport);
 
                states->queried = 1;
@@ -963,9 +967,15 @@ static int show_local_info_item(struct string_list_item *item, void *cb_data)
 
        printf("    %-*s ", show_info->width, item->string);
        if (branch_info->rebase) {
-               printf_ln(branch_info->rebase == INTERACTIVE_REBASE
-                         ? _("rebases interactively onto remote %s")
-                         : _("rebases onto remote %s"), merge->items[0].string);
+               const char *msg;
+               if (branch_info->rebase == INTERACTIVE_REBASE)
+                       msg = _("rebases interactively onto remote %s");
+               else if (branch_info->rebase == REBASE_MERGES)
+                       msg = _("rebases interactively (with merges) onto "
+                               "remote %s");
+               else
+                       msg = _("rebases onto remote %s");
+               printf_ln(msg, merge->items[0].string);
                return 0;
        } else if (show_info->any_rebase) {
                printf_ln(_(" merges with remote %s"), merge->items[0].string);
index 7bdb40142f9261dac6514d98ee01dc44e595ec71..6c636e159eaf2d67d617c459aceddd7423e326ab 100644 (file)
@@ -86,7 +86,8 @@ static void remove_pack_on_signal(int signo)
  * have a corresponding .keep or .promisor file. These packs are not to
  * be kept if we are going to pack everything into one file.
  */
-static void get_non_kept_pack_filenames(struct string_list *fname_list)
+static void get_non_kept_pack_filenames(struct string_list *fname_list,
+                                       const struct string_list *extra_keep)
 {
        DIR *dir;
        struct dirent *e;
@@ -97,6 +98,14 @@ static void get_non_kept_pack_filenames(struct string_list *fname_list)
 
        while ((e = readdir(dir)) != NULL) {
                size_t len;
+               int i;
+
+               for (i = 0; i < extra_keep->nr; i++)
+                       if (!fspathcmp(e->d_name, extra_keep->items[i].string))
+                               break;
+               if (extra_keep->nr > 0 && i < extra_keep->nr)
+                       continue;
+
                if (!strip_suffix(e->d_name, ".pack", &len))
                        continue;
 
@@ -148,7 +157,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
        struct string_list rollback = STRING_LIST_INIT_NODUP;
        struct string_list existing_packs = STRING_LIST_INIT_DUP;
        struct strbuf line = STRBUF_INIT;
-       int ext, ret, failed;
+       int i, ext, ret, failed;
        FILE *out;
 
        /* variables to be filled by option parsing */
@@ -160,6 +169,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
        const char *depth = NULL;
        const char *threads = NULL;
        const char *max_pack_size = NULL;
+       struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
        int no_reuse_delta = 0, no_reuse_object = 0;
        int no_update_server_info = 0;
        int quiet = 0;
@@ -200,6 +210,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
                                N_("maximum size of each packfile")),
                OPT_BOOL(0, "pack-kept-objects", &pack_kept_objects,
                                N_("repack objects in packs marked with .keep")),
+               OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
+                               N_("do not repack this pack")),
                OPT_END()
        };
 
@@ -230,6 +242,9 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
        argv_array_push(&cmd.args, "--keep-true-parents");
        if (!pack_kept_objects)
                argv_array_push(&cmd.args, "--honor-pack-keep");
+       for (i = 0; i < keep_pack_list.nr; i++)
+               argv_array_pushf(&cmd.args, "--keep-pack=%s",
+                                keep_pack_list.items[i].string);
        argv_array_push(&cmd.args, "--non-empty");
        argv_array_push(&cmd.args, "--all");
        argv_array_push(&cmd.args, "--reflog");
@@ -254,7 +269,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
                argv_array_push(&cmd.args, "--write-bitmap-index");
 
        if (pack_everything & ALL_INTO_ONE) {
-               get_non_kept_pack_filenames(&existing_packs);
+               get_non_kept_pack_filenames(&existing_packs, &keep_pack_list);
 
                if (existing_packs.nr && delete_redundant) {
                        if (unpack_unreachable) {
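
The --keep-pack support above is a plain linear scan: while enumerating *.pack files, any name that matches an entry in the extra_keep list is skipped, so those packs are neither counted as existing nor deleted after an all-into-one repack, and the same names are forwarded to pack-objects as --keep-pack=<name>. A minimal sketch of the filter over ordinary C strings (strcmp and a plain array replace fspathcmp and git's string_list here):

#include <stdio.h>
#include <string.h>

/* return 1 if name is on the keep list, 0 otherwise */
static int in_keep_list(const char *name, const char **keep, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                if (!strcmp(name, keep[i]))
                        return 1;
        return 0;
}

int main(void)
{
        const char *packs[] = { "pack-1111.pack", "pack-2222.pack", "pack-3333.pack" };
        const char *keep[] = { "pack-2222.pack" };
        int i;

        for (i = 0; i < 3; i++) {
                if (in_keep_list(packs[i], keep, 1))
                        continue;       /* kept pack: leave it alone */
                printf("would repack %s\n", packs[i]);
        }
        return 0;
}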
index 482f12018fa912eeea860721af2acbf5f8a2e87d..6da2411e14b9f94f309671e14124ef32d89ef528 100644 (file)
 #include "refs.h"
 #include "parse-options.h"
 #include "run-command.h"
+#include "object-store.h"
+#include "repository.h"
 #include "tag.h"
 
 static const char * const git_replace_usage[] = {
        N_("git replace [-f] <object> <replacement>"),
        N_("git replace [-f] --edit <object>"),
        N_("git replace [-f] --graft <commit> [<parent>...]"),
+       N_("git replace [-f] --convert-graft-file"),
        N_("git replace -d <object>..."),
        N_("git replace [--format=<format>] [-l [<pattern>]]"),
        NULL
@@ -53,8 +56,9 @@ static int show_reference(const char *refname, const struct object_id *oid,
                        if (get_oid(refname, &object))
                                return error("Failed to resolve '%s' as a valid ref.", refname);
 
-                       obj_type = sha1_object_info(object.hash, NULL);
-                       repl_type = sha1_object_info(oid->hash, NULL);
+                       obj_type = oid_object_info(the_repository, &object,
+                                                  NULL);
+                       repl_type = oid_object_info(the_repository, oid, NULL);
 
                        printf("%s (%s) -> %s (%s)\n", refname, type_name(obj_type),
                               oid_to_hex(oid), type_name(repl_type));
@@ -79,11 +83,11 @@ static int list_replace_refs(const char *pattern, const char *format)
        else if (!strcmp(format, "long"))
                data.format = REPLACE_FORMAT_LONG;
        else
-               die("invalid replace format '%s'\n"
-                   "valid formats are 'short', 'medium' and 'long'\n",
-                   format);
+               return error("invalid replace format '%s'\n"
+                            "valid formats are 'short', 'medium' and 'long'\n",
+                            format);
 
-       for_each_replace_ref(show_reference, (void *)&data);
+       for_each_replace_ref(the_repository, show_reference, (void *)&data);
 
        return 0;
 }
@@ -134,7 +138,7 @@ static int delete_replace_ref(const char *name, const char *ref,
        return 0;
 }
 
-static void check_ref_valid(struct object_id *object,
+static int check_ref_valid(struct object_id *object,
                            struct object_id *prev,
                            struct strbuf *ref,
                            int force)
@@ -142,12 +146,13 @@ static void check_ref_valid(struct object_id *object,
        strbuf_reset(ref);
        strbuf_addf(ref, "%s%s", git_replace_ref_base, oid_to_hex(object));
        if (check_refname_format(ref->buf, 0))
-               die("'%s' is not a valid ref name.", ref->buf);
+               return error("'%s' is not a valid ref name.", ref->buf);
 
        if (read_ref(ref->buf, prev))
                oidclr(prev);
        else if (!force)
-               die("replace ref '%s' already exists", ref->buf);
+               return error("replace ref '%s' already exists", ref->buf);
+       return 0;
 }
 
 static int replace_object_oid(const char *object_ref,
@@ -161,28 +166,33 @@ static int replace_object_oid(const char *object_ref,
        struct strbuf ref = STRBUF_INIT;
        struct ref_transaction *transaction;
        struct strbuf err = STRBUF_INIT;
+       int res = 0;
 
-       obj_type = sha1_object_info(object->hash, NULL);
-       repl_type = sha1_object_info(repl->hash, NULL);
+       obj_type = oid_object_info(the_repository, object, NULL);
+       repl_type = oid_object_info(the_repository, repl, NULL);
        if (!force && obj_type != repl_type)
-               die("Objects must be of the same type.\n"
-                   "'%s' points to a replaced object of type '%s'\n"
-                   "while '%s' points to a replacement object of type '%s'.",
-                   object_ref, type_name(obj_type),
-                   replace_ref, type_name(repl_type));
-
-       check_ref_valid(object, &prev, &ref, force);
+               return error("Objects must be of the same type.\n"
+                            "'%s' points to a replaced object of type '%s'\n"
+                            "while '%s' points to a replacement object of "
+                            "type '%s'.",
+                            object_ref, type_name(obj_type),
+                            replace_ref, type_name(repl_type));
+
+       if (check_ref_valid(object, &prev, &ref, force)) {
+               strbuf_release(&ref);
+               return -1;
+       }
 
        transaction = ref_transaction_begin(&err);
        if (!transaction ||
            ref_transaction_update(transaction, ref.buf, repl, &prev,
                                   0, NULL, &err) ||
            ref_transaction_commit(transaction, &err))
-               die("%s", err.buf);
+               res = error("%s", err.buf);
 
        ref_transaction_free(transaction);
        strbuf_release(&ref);
-       return 0;
+       return res;
 }
 
 static int replace_object(const char *object_ref, const char *replace_ref, int force)
@@ -190,9 +200,11 @@ static int replace_object(const char *object_ref, const char *replace_ref, int f
        struct object_id object, repl;
 
        if (get_oid(object_ref, &object))
-               die("Failed to resolve '%s' as a valid ref.", object_ref);
+               return error("Failed to resolve '%s' as a valid ref.",
+                            object_ref);
        if (get_oid(replace_ref, &repl))
-               die("Failed to resolve '%s' as a valid ref.", replace_ref);
+               return error("Failed to resolve '%s' as a valid ref.",
+                            replace_ref);
 
        return replace_object_oid(object_ref, &object, replace_ref, &repl, force);
 }
@@ -202,7 +214,7 @@ static int replace_object(const char *object_ref, const char *replace_ref, int f
  * If "raw" is true, then the object's raw contents are printed according to
  * "type". Otherwise, we pretty-print the contents for human editing.
  */
-static void export_object(const struct object_id *oid, enum object_type type,
+static int export_object(const struct object_id *oid, enum object_type type,
                          int raw, const char *filename)
 {
        struct child_process cmd = CHILD_PROCESS_INIT;
@@ -210,7 +222,7 @@ static void export_object(const struct object_id *oid, enum object_type type,
 
        fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
        if (fd < 0)
-               die_errno("unable to open %s for writing", filename);
+               return error_errno("unable to open %s for writing", filename);
 
        argv_array_push(&cmd.args, "--no-replace-objects");
        argv_array_push(&cmd.args, "cat-file");
@@ -223,7 +235,8 @@ static void export_object(const struct object_id *oid, enum object_type type,
        cmd.out = fd;
 
        if (run_command(&cmd))
-               die("cat-file reported failure");
+               return error("cat-file reported failure");
+       return 0;
 }
 
 /*
@@ -231,14 +244,14 @@ static void export_object(const struct object_id *oid, enum object_type type,
  * interpreting it as "type", and writing the result to the object database.
  * The sha1 of the written object is returned via sha1.
  */
-static void import_object(struct object_id *oid, enum object_type type,
+static int import_object(struct object_id *oid, enum object_type type,
                          int raw, const char *filename)
 {
        int fd;
 
        fd = open(filename, O_RDONLY);
        if (fd < 0)
-               die_errno("unable to open %s for reading", filename);
+               return error_errno("unable to open %s for reading", filename);
 
        if (!raw && type == OBJ_TREE) {
                const char *argv[] = { "mktree", NULL };
@@ -250,27 +263,40 @@ static void import_object(struct object_id *oid, enum object_type type,
                cmd.in = fd;
                cmd.out = -1;
 
-               if (start_command(&cmd))
-                       die("unable to spawn mktree");
+               if (start_command(&cmd)) {
+                       close(fd);
+                       return error("unable to spawn mktree");
+               }
 
-               if (strbuf_read(&result, cmd.out, 41) < 0)
-                       die_errno("unable to read from mktree");
+               if (strbuf_read(&result, cmd.out, 41) < 0) {
+                       error_errno("unable to read from mktree");
+                       close(fd);
+                       close(cmd.out);
+                       return -1;
+               }
                close(cmd.out);
 
-               if (finish_command(&cmd))
-                       die("mktree reported failure");
-               if (get_oid_hex(result.buf, oid) < 0)
-                       die("mktree did not return an object name");
+               if (finish_command(&cmd)) {
+                       strbuf_release(&result);
+                       return error("mktree reported failure");
+               }
+               if (get_oid_hex(result.buf, oid) < 0) {
+                       strbuf_release(&result);
+                       return error("mktree did not return an object name");
+               }
 
                strbuf_release(&result);
        } else {
                struct stat st;
                int flags = HASH_FORMAT_CHECK | HASH_WRITE_OBJECT;
 
-               if (fstat(fd, &st) < 0)
-                       die_errno("unable to fstat %s", filename);
+               if (fstat(fd, &st) < 0) {
+                       error_errno("unable to fstat %s", filename);
+                       close(fd);
+                       return -1;
+               }
                if (index_fd(oid, fd, &st, type, NULL, flags) < 0)
-                       die("unable to write object to database");
+                       return error("unable to write object to database");
                /* index_fd close()s fd for us */
        }
 
@@ -278,30 +304,43 @@ static void import_object(struct object_id *oid, enum object_type type,
         * No need to close(fd) here; both run-command and index-fd
         * will have done it for us.
         */
+       return 0;
 }
 
 static int edit_and_replace(const char *object_ref, int force, int raw)
 {
-       char *tmpfile = git_pathdup("REPLACE_EDITOBJ");
+       char *tmpfile;
        enum object_type type;
        struct object_id old_oid, new_oid, prev;
        struct strbuf ref = STRBUF_INIT;
 
        if (get_oid(object_ref, &old_oid) < 0)
-               die("Not a valid object name: '%s'", object_ref);
+               return error("Not a valid object name: '%s'", object_ref);
 
-       type = sha1_object_info(old_oid.hash, NULL);
+       type = oid_object_info(the_repository, &old_oid, NULL);
        if (type < 0)
-               die("unable to get object type for %s", oid_to_hex(&old_oid));
+               return error("unable to get object type for %s",
+                            oid_to_hex(&old_oid));
 
-       check_ref_valid(&old_oid, &prev, &ref, force);
+       if (check_ref_valid(&old_oid, &prev, &ref, force)) {
+               strbuf_release(&ref);
+               return -1;
+       }
        strbuf_release(&ref);
 
-       export_object(&old_oid, type, raw, tmpfile);
-       if (launch_editor(tmpfile, NULL, NULL) < 0)
-               die("editing object file failed");
-       import_object(&new_oid, type, raw, tmpfile);
-
+       tmpfile = git_pathdup("REPLACE_EDITOBJ");
+       if (export_object(&old_oid, type, raw, tmpfile)) {
+               free(tmpfile);
+               return -1;
+       }
+       if (launch_editor(tmpfile, NULL, NULL) < 0) {
+               free(tmpfile);
+               return error("editing object file failed");
+       }
+       if (import_object(&new_oid, type, raw, tmpfile)) {
+               free(tmpfile);
+               return -1;
+       }
        free(tmpfile);
 
        if (!oidcmp(&old_oid, &new_oid))
@@ -310,7 +349,7 @@ static int edit_and_replace(const char *object_ref, int force, int raw)
        return replace_object_oid(object_ref, &old_oid, "replacement", &new_oid, force);
 }
 
-static void replace_parents(struct strbuf *buf, int argc, const char **argv)
+static int replace_parents(struct strbuf *buf, int argc, const char **argv)
 {
        struct strbuf new_parents = STRBUF_INIT;
        const char *parent_start, *parent_end;
@@ -327,9 +366,15 @@ static void replace_parents(struct strbuf *buf, int argc, const char **argv)
        /* prepare new parents */
        for (i = 0; i < argc; i++) {
                struct object_id oid;
-               if (get_oid(argv[i], &oid) < 0)
-                       die(_("Not a valid object name: '%s'"), argv[i]);
-               lookup_commit_or_die(&oid, argv[i]);
+               if (get_oid(argv[i], &oid) < 0) {
+                       strbuf_release(&new_parents);
+                       return error(_("Not a valid object name: '%s'"),
+                                    argv[i]);
+               }
+               if (!lookup_commit_reference(&oid)) {
+                       strbuf_release(&new_parents);
+                       return error(_("could not parse %s"), argv[i]);
+               }
                strbuf_addf(&new_parents, "parent %s\n", oid_to_hex(&oid));
        }
 
@@ -338,6 +383,7 @@ static void replace_parents(struct strbuf *buf, int argc, const char **argv)
                      new_parents.buf, new_parents.len);
 
        strbuf_release(&new_parents);
+       return 0;
 }
 
 struct check_mergetag_data {
@@ -345,7 +391,7 @@ struct check_mergetag_data {
        const char **argv;
 };
 
-static void check_one_mergetag(struct commit *commit,
+static int check_one_mergetag(struct commit *commit,
                               struct commit_extra_header *extra,
                               void *data)
 {
@@ -358,33 +404,35 @@ static void check_one_mergetag(struct commit *commit,
        hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &tag_oid);
        tag = lookup_tag(&tag_oid);
        if (!tag)
-               die(_("bad mergetag in commit '%s'"), ref);
+               return error(_("bad mergetag in commit '%s'"), ref);
        if (parse_tag_buffer(tag, extra->value, extra->len))
-               die(_("malformed mergetag in commit '%s'"), ref);
+               return error(_("malformed mergetag in commit '%s'"), ref);
 
        /* iterate over new parents */
        for (i = 1; i < mergetag_data->argc; i++) {
                struct object_id oid;
                if (get_oid(mergetag_data->argv[i], &oid) < 0)
-                       die(_("Not a valid object name: '%s'"), mergetag_data->argv[i]);
+                       return error(_("Not a valid object name: '%s'"),
+                                    mergetag_data->argv[i]);
                if (!oidcmp(&tag->tagged->oid, &oid))
-                       return; /* found */
+                       return 0; /* found */
        }
 
-       die(_("original commit '%s' contains mergetag '%s' that is discarded; "
-             "use --edit instead of --graft"), ref, oid_to_hex(&tag_oid));
+       return error(_("original commit '%s' contains mergetag '%s' that is "
+                      "discarded; use --edit instead of --graft"), ref,
+                    oid_to_hex(&tag_oid));
 }
 
-static void check_mergetags(struct commit *commit, int argc, const char **argv)
+static int check_mergetags(struct commit *commit, int argc, const char **argv)
 {
        struct check_mergetag_data mergetag_data;
 
        mergetag_data.argc = argc;
        mergetag_data.argv = argv;
-       for_each_mergetag(check_one_mergetag, commit, &mergetag_data);
+       return for_each_mergetag(check_one_mergetag, commit, &mergetag_data);
 }
 
-static int create_graft(int argc, const char **argv, int force)
+static int create_graft(int argc, const char **argv, int force, int gentle)
 {
        struct object_id old_oid, new_oid;
        const char *old_ref = argv[0];
@@ -394,33 +442,81 @@ static int create_graft(int argc, const char **argv, int force)
        unsigned long size;
 
        if (get_oid(old_ref, &old_oid) < 0)
-               die(_("Not a valid object name: '%s'"), old_ref);
-       commit = lookup_commit_or_die(&old_oid, old_ref);
+               return error(_("Not a valid object name: '%s'"), old_ref);
+       commit = lookup_commit_reference(&old_oid);
+       if (!commit)
+               return error(_("could not parse %s"), old_ref);
 
        buffer = get_commit_buffer(commit, &size);
        strbuf_add(&buf, buffer, size);
        unuse_commit_buffer(commit, buffer);
 
-       replace_parents(&buf, argc - 1, &argv[1]);
+       if (replace_parents(&buf, argc - 1, &argv[1]) < 0) {
+               strbuf_release(&buf);
+               return -1;
+       }
 
        if (remove_signature(&buf)) {
                warning(_("the original commit '%s' has a gpg signature."), old_ref);
                warning(_("the signature will be removed in the replacement commit!"));
        }
 
-       check_mergetags(commit, argc, argv);
+       if (check_mergetags(commit, argc, argv)) {
+               strbuf_release(&buf);
+               return -1;
+       }
 
-       if (write_object_file(buf.buf, buf.len, commit_type, &new_oid))
-               die(_("could not write replacement commit for: '%s'"), old_ref);
+       if (write_object_file(buf.buf, buf.len, commit_type, &new_oid)) {
+               strbuf_release(&buf);
+               return error(_("could not write replacement commit for: '%s'"),
+                            old_ref);
+       }
 
        strbuf_release(&buf);
 
-       if (!oidcmp(&old_oid, &new_oid))
+       if (!oidcmp(&old_oid, &new_oid)) {
+               if (gentle) {
+                       warning("graft for '%s' unnecessary", oid_to_hex(&old_oid));
+                       return 0;
+               }
                return error("new commit is the same as the old one: '%s'", oid_to_hex(&old_oid));
+       }
 
        return replace_object_oid(old_ref, &old_oid, "replacement", &new_oid, force);
 }
 
+static int convert_graft_file(int force)
+{
+       const char *graft_file = get_graft_file();
+       FILE *fp = fopen_or_warn(graft_file, "r");
+       struct strbuf buf = STRBUF_INIT, err = STRBUF_INIT;
+       struct argv_array args = ARGV_ARRAY_INIT;
+
+       if (!fp)
+               return -1;
+
+       while (strbuf_getline(&buf, fp) != EOF) {
+               if (*buf.buf == '#')
+                       continue;
+
+               argv_array_split(&args, buf.buf);
+               if (args.argc && create_graft(args.argc, args.argv, force, 1))
+                       strbuf_addf(&err, "\n\t%s", buf.buf);
+               argv_array_clear(&args);
+       }
+       fclose(fp);
+
+       strbuf_release(&buf);
+
+       if (!err.len)
+               return unlink_or_warn(graft_file);
+
+       warning(_("could not convert the following graft(s):\n%s"), err.buf);
+       strbuf_release(&err);
+
+       return -1;
+}
+
 int cmd_replace(int argc, const char **argv, const char *prefix)
 {
        int force = 0;
@@ -432,6 +528,7 @@ int cmd_replace(int argc, const char **argv, const char *prefix)
                MODE_DELETE,
                MODE_EDIT,
                MODE_GRAFT,
+               MODE_CONVERT_GRAFT_FILE,
                MODE_REPLACE
        } cmdmode = MODE_UNSPECIFIED;
        struct option options[] = {
@@ -439,6 +536,7 @@ int cmd_replace(int argc, const char **argv, const char *prefix)
                OPT_CMDMODE('d', "delete", &cmdmode, N_("delete replace refs"), MODE_DELETE),
                OPT_CMDMODE('e', "edit", &cmdmode, N_("edit existing object"), MODE_EDIT),
                OPT_CMDMODE('g', "graft", &cmdmode, N_("change a commit's parents"), MODE_GRAFT),
+               OPT_CMDMODE(0, "convert-graft-file", &cmdmode, N_("convert existing graft file"), MODE_CONVERT_GRAFT_FILE),
                OPT_BOOL_F('f', "force", &force, N_("replace the ref if it exists"),
                           PARSE_OPT_NOCOMPLETE),
                OPT_BOOL(0, "raw", &raw, N_("do not pretty-print contents for --edit")),
@@ -461,7 +559,8 @@ int cmd_replace(int argc, const char **argv, const char *prefix)
        if (force &&
            cmdmode != MODE_REPLACE &&
            cmdmode != MODE_EDIT &&
-           cmdmode != MODE_GRAFT)
+           cmdmode != MODE_GRAFT &&
+           cmdmode != MODE_CONVERT_GRAFT_FILE)
                usage_msg_opt("-f only makes sense when writing a replacement",
                              git_replace_usage, options);
 
@@ -492,7 +591,13 @@ int cmd_replace(int argc, const char **argv, const char *prefix)
                if (argc < 1)
                        usage_msg_opt("-g needs at least one argument",
                                      git_replace_usage, options);
-               return create_graft(argc, argv, force);
+               return create_graft(argc, argv, force, 0);
+
+       case MODE_CONVERT_GRAFT_FILE:
+               if (argc != 0)
+                       usage_msg_opt("--convert-graft-file takes no argument",
+                                     git_replace_usage, options);
+               return !!convert_graft_file(force);
 
        case MODE_LIST:
                if (argc > 1)
@@ -501,6 +606,6 @@ int cmd_replace(int argc, const char **argv, const char *prefix)
                return list_replace_refs(argv[0], format);
 
        default:
-               die("BUG: invalid cmdmode %d", (int)cmdmode);
+               BUG("invalid cmdmode %d", (int)cmdmode);
        }
 }
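
Most of the replace.c changes convert die() calls into error() returns, with each early exit now releasing whatever strbufs or descriptors it had acquired; that is what lets the new convert_graft_file() loop try every line of the old graft file, remember the ones that failed, and emit a single warning at the end instead of dying on the first bad entry. A small sketch of that "collect failures, warn once" pattern over an in-memory list of lines; convert_line() is a hypothetical stand-in for create_graft(), and plain char buffers replace strbuf/argv_array:

#include <stdio.h>
#include <string.h>

/* hypothetical per-line conversion: 0 on success, -1 on failure */
static int convert_line(const char *line)
{
        return strncmp(line, "bad", 3) ? 0 : -1;
}

int main(void)
{
        const char *lines[] = { "cafe0123 deadbeef", "bad entry", "# comment" };
        char failed[256] = "";
        int i;

        for (i = 0; i < 3; i++) {
                if (lines[i][0] == '#')
                        continue;               /* comment lines are skipped */
                if (convert_line(lines[i])) {
                        strcat(failed, "\n\t");
                        strcat(failed, lines[i]);
                }
        }

        if (!*failed)
                printf("all grafts converted; the graft file can go away\n");
        else
                printf("warning: could not convert the following graft(s):%s\n",
                       failed);
        return 0;
}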
index 5da0f75de95cc9dc19db7d53a5e4df5140845082..7f1c3f02a302128d6c00c35b8783c1a62353b37a 100644 (file)
@@ -109,7 +109,7 @@ static void print_new_head_line(struct commit *commit)
        struct strbuf buf = STRBUF_INIT;
 
        printf(_("HEAD is now at %s"),
-               find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV));
+               find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV));
 
        pp_commit_easy(CMIT_FMT_ONELINE, commit, &buf);
        if (buf.len > 0)
index 6f5b9b0847321ca214b4d32719eaeacefffd5ce4..fadd3ec14cbf0469c332a85278e5d1b4932ef788 100644 (file)
@@ -108,7 +108,7 @@ static void show_commit(struct commit *commit, void *data)
        if (!revs->graph)
                fputs(get_revision_mark(revs, commit), stdout);
        if (revs->abbrev_commit && revs->abbrev)
-               fputs(find_unique_abbrev(commit->object.oid.hash, revs->abbrev),
+               fputs(find_unique_abbrev(&commit->object.oid, revs->abbrev),
                      stdout);
        else
                fputs(oid_to_hex(&commit->object.oid), stdout);
index a1e680b5e912beeed183dc271b0ae970a45a4814..36b208778280e6019d9bc4fb4063dff3d44f08e6 100644 (file)
@@ -159,7 +159,7 @@ static void show_rev(int type, const struct object_id *oid, const char *name)
                }
        }
        else if (abbrev)
-               show_with_type(type, find_unique_abbrev(oid->hash, abbrev));
+               show_with_type(type, find_unique_abbrev(oid, abbrev));
        else
                show_with_type(type, oid_to_hex(oid));
 }
index 4447bb4d0faf8c34f3fc96361a651eaad396d6d4..5b6fc7ee818be4a4f060dc06f12fb45a25a2ea9b 100644 (file)
@@ -178,7 +178,7 @@ static int check_local_mod(struct object_id *head, int index_only)
                 * way as changed from the HEAD.
                 */
                if (no_head
-                    || get_tree_entry(head->hash, name, oid.hash, &mode)
+                    || get_tree_entry(head, name, &oid, &mode)
                     || ce->ce_mode != create_ce_mode(mode)
                     || oidcmp(&ce->oid, &oid))
                        staged_changes = 1;
index fc4f0bb5fbc033604a13a147094c0d1bc661db17..b5427f75e34901ad8fa876cc6c53066211e4a2a0 100644 (file)
@@ -14,6 +14,7 @@
 #include "sha1-array.h"
 #include "gpg-interface.h"
 #include "gettext.h"
+#include "protocol.h"
 
 static const char * const send_pack_usage[] = {
        N_("git send-pack [--all | --mirror] [--dry-run] [--force] "
@@ -154,6 +155,7 @@ int cmd_send_pack(int argc, const char **argv, const char *prefix)
        int progress = -1;
        int from_stdin = 0;
        struct push_cas_option cas = {0};
+       struct packet_reader reader;
 
        struct option options[] = {
                OPT__VERBOSITY(&verbose),
@@ -256,8 +258,22 @@ int cmd_send_pack(int argc, const char **argv, const char *prefix)
                        args.verbose ? CONNECT_VERBOSE : 0);
        }
 
-       get_remote_heads(fd[0], NULL, 0, &remote_refs, REF_NORMAL,
-                        &extra_have, &shallow);
+       packet_reader_init(&reader, fd[0], NULL, 0,
+                          PACKET_READ_CHOMP_NEWLINE |
+                          PACKET_READ_GENTLE_ON_EOF);
+
+       switch (discover_version(&reader)) {
+       case protocol_v2:
+               die("support for protocol v2 not implemented yet");
+               break;
+       case protocol_v1:
+       case protocol_v0:
+               get_remote_heads(&reader, &remote_refs, REF_NORMAL,
+                                &extra_have, &shallow);
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
+       }
 
        transport_verify_remote_names(nr_refspecs, refspecs);
 
diff --git a/builtin/serve.c b/builtin/serve.c
new file mode 100644 (file)
index 0000000..d3fd240
--- /dev/null
@@ -0,0 +1,30 @@
+#include "cache.h"
+#include "builtin.h"
+#include "parse-options.h"
+#include "serve.h"
+
+static char const * const serve_usage[] = {
+       N_("git serve [<options>]"),
+       NULL
+};
+
+int cmd_serve(int argc, const char **argv, const char *prefix)
+{
+       struct serve_options opts = SERVE_OPTIONS_INIT;
+
+       struct option options[] = {
+               OPT_BOOL(0, "stateless-rpc", &opts.stateless_rpc,
+                        N_("quit after a single request/response exchange")),
+               OPT_BOOL(0, "advertise-capabilities", &opts.advertise_capabilities,
+                        N_("exit immediately after advertising capabilities")),
+               OPT_END()
+       };
+
+       /* ignore all unknown cmdline switches for now */
+       argc = parse_options(argc, argv, prefix, options, serve_usage,
+                            PARSE_OPT_KEEP_DASHDASH |
+                            PARSE_OPT_KEEP_UNKNOWN);
+       serve(&opts);
+
+       return 0;
+}
index e29875b84389b25237e39e0112eafa8ac34599ee..608d6ba77bdfb4673513444651053d0e8e789020 100644 (file)
@@ -11,7 +11,8 @@
 #include "parse-options.h"
 
 static char const * const shortlog_usage[] = {
-       N_("git shortlog [<options>] [<revision-range>] [[--] [<path>...]]"),
+       N_("git shortlog [<options>] [<revision-range>] [[--] <path>...]"),
+       N_("git log --pretty=short | git shortlog [<options>]"),
        NULL
 };
 
@@ -283,6 +284,7 @@ int cmd_shortlog(int argc, const char **argv, const char *prefix)
        for (;;) {
                switch (parse_options_step(&ctx, options, shortlog_usage)) {
                case PARSE_OPT_HELP:
+               case PARSE_OPT_ERROR:
                        exit(129);
                case PARSE_OPT_DONE:
                        goto parse_done;
@@ -292,6 +294,11 @@ int cmd_shortlog(int argc, const char **argv, const char *prefix)
 parse_done:
        argc = parse_options_end(&ctx);
 
+       if (nongit && argc > 1) {
+               error(_("too many arguments given outside repository"));
+               usage_with_options(shortlog_usage, options);
+       }
+
        if (setup_revisions(argc, argv, &rev, NULL) != 1) {
                error(_("unrecognized argument: %s"), argv[1]);
                usage_with_options(shortlog_usage, options);
index e8a4aa40cb4b6cf8787af3dd35d833a92a85bba3..6c2148b71db593af1a4ef8d2d1bdbdfe16661851 100644 (file)
@@ -292,7 +292,7 @@ static void show_one_commit(struct commit *commit, int no_name)
                }
                else
                        printf("[%s] ",
-                              find_unique_abbrev(commit->object.oid.hash,
+                              find_unique_abbrev(&commit->object.oid,
                                                  DEFAULT_ABBREV));
        }
        puts(pretty_str);
index 41e5e71cad660d26ddc90ffeaa383fd7bf10f79f..f2eb1a7724058bb1db237a6199d16e5ff1ef495a 100644 (file)
@@ -29,7 +29,7 @@ static void show_one(const char *refname, const struct object_id *oid)
        if (quiet)
                return;
 
-       hex = find_unique_abbrev(oid->hash, abbrev);
+       hex = find_unique_abbrev(oid, abbrev);
        if (hash_only)
                printf("%s\n", hex);
        else
@@ -39,7 +39,7 @@ static void show_one(const char *refname, const struct object_id *oid)
                return;
 
        if (!peel_ref(refname, &peeled)) {
-               hex = find_unique_abbrev(peeled.hash, abbrev);
+               hex = find_unique_abbrev(&peeled, abbrev);
                printf("%s %s^{}\n", hex, refname);
        }
 }
index 6ba8587b6d3b7b8b1bc7a96451916c60210b093b..c2403a915ffe29e152832ae16e5bc902703a9903 100644 (file)
@@ -16,6 +16,7 @@
 #include "revision.h"
 #include "diffcore.h"
 #include "diff.h"
+#include "object-store.h"
 
 #define OPT_QUIET (1 << 0)
 #define OPT_CACHED (1 << 1)
@@ -454,7 +455,7 @@ static void init_submodule(const char *path, const char *prefix,
 
        displaypath = get_submodule_displaypath(path, prefix);
 
-       sub = submodule_from_path(&null_oid, path);
+       sub = submodule_from_path(the_repository, &null_oid, path);
 
        if (!sub)
                die(_("No url found for submodule path '%s' in .gitmodules"),
@@ -595,8 +596,12 @@ static void print_status(unsigned int flags, char state, const char *path,
 
        printf("%c%s %s", state, oid_to_hex(oid), displaypath);
 
-       if (state == ' ' || state == '+')
-               printf(" (%s)", compute_rev_name(path, oid_to_hex(oid)));
+       if (state == ' ' || state == '+') {
+               const char *name = compute_rev_name(path, oid_to_hex(oid));
+
+               if (name)
+                       printf(" (%s)", name);
+       }
 
        printf("\n");
 }
@@ -621,7 +626,7 @@ static void status_submodule(const char *path, const struct object_id *ce_oid,
        struct rev_info rev;
        int diff_files_result;
 
-       if (!submodule_from_path(&null_oid, path))
+       if (!submodule_from_path(the_repository, &null_oid, path))
                die(_("no submodule mapping found in .gitmodules for path '%s'"),
                      path);
 
@@ -654,9 +659,13 @@ static void status_submodule(const char *path, const struct object_id *ce_oid,
                             displaypath);
        } else if (!(flags & OPT_CACHED)) {
                struct object_id oid;
+               struct ref_store *refs = get_submodule_ref_store(path);
 
-               if (refs_head_ref(get_submodule_ref_store(path),
-                                 handle_submodule_head_ref, &oid))
+               if (!refs) {
+                       print_status(flags, '-', path, ce_oid, displaypath);
+                       goto cleanup;
+               }
+               if (refs_head_ref(refs, handle_submodule_head_ref, &oid))
                        die(_("could not resolve HEAD ref inside the "
                              "submodule '%s'"), path);
 
@@ -741,7 +750,7 @@ static int module_name(int argc, const char **argv, const char *prefix)
        if (argc != 2)
                usage(_("git submodule--helper name <path>"));
 
-       sub = submodule_from_path(&null_oid, argv[1]);
+       sub = submodule_from_path(the_repository, &null_oid, argv[1]);
 
        if (!sub)
                die(_("no submodule mapping found in .gitmodules for path '%s'"),
@@ -772,7 +781,7 @@ static void sync_submodule(const char *path, const char *prefix,
        if (!is_submodule_active(the_repository, path))
                return;
 
-       sub = submodule_from_path(&null_oid, path);
+       sub = submodule_from_path(the_repository, &null_oid, path);
 
        if (sub && sub->url) {
                if (starts_with_dot_dot_slash(sub->url) ||
@@ -925,7 +934,7 @@ static void deinit_submodule(const char *path, const char *prefix,
        struct strbuf sb_config = STRBUF_INIT;
        char *sub_git_dir = xstrfmt("%s/.git", path);
 
-       sub = submodule_from_path(&null_oid, path);
+       sub = submodule_from_path(the_repository, &null_oid, path);
 
        if (!sub || !sub->name)
                goto cleanup;
@@ -1259,8 +1268,7 @@ static int module_clone(int argc, const char **argv, const char *prefix)
                strbuf_reset(&sb);
        }
 
-       /* Connect module worktree and git dir */
-       connect_work_tree_and_git_dir(path, sm_gitdir);
+       connect_work_tree_and_git_dir(path, sm_gitdir, 0);
 
        p = git_pathdup_submodule(path, "config");
        if (!p)
@@ -1367,7 +1375,7 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
                goto cleanup;
        }
 
-       sub = submodule_from_path(&null_oid, ce->name);
+       sub = submodule_from_path(the_repository, &null_oid, ce->name);
 
        if (suc->recursive_prefix)
                displaypath = relative_path(suc->recursive_prefix,
@@ -1650,7 +1658,7 @@ static const char *remote_submodule_branch(const char *path)
        const char *branch = NULL;
        char *key;
 
-       sub = submodule_from_path(&null_oid, path);
+       sub = submodule_from_path(the_repository, &null_oid, path);
        if (!sub)
                return NULL;
 
index da186691ed8853bc5848c4106c752b015756eda3..5d0dd112408419a0090cbe179d69c0ea709f22a7 100644 (file)
@@ -99,7 +99,8 @@ static int delete_tag(const char *name, const char *ref,
 {
        if (delete_ref(NULL, ref, oid, 0))
                return 1;
-       printf(_("Deleted tag '%s' (was %s)\n"), name, find_unique_abbrev(oid->hash, DEFAULT_ABBREV));
+       printf(_("Deleted tag '%s' (was %s)\n"), name,
+              find_unique_abbrev(oid, DEFAULT_ABBREV));
        return 0;
 }
 
@@ -117,7 +118,7 @@ static int verify_tag(const char *name, const char *ref,
                return -1;
 
        if (format->format)
-               pretty_print_ref(name, oid->hash, format);
+               pretty_print_ref(name, oid, format);
 
        return 0;
 }
@@ -167,7 +168,7 @@ static void write_tag_body(int fd, const struct object_id *oid)
        enum object_type type;
        char *buf, *sp;
 
-       buf = read_sha1_file(oid->hash, &type, &size);
+       buf = read_object_file(oid, &type, &size);
        if (!buf)
                return;
        /* skip header */
@@ -211,7 +212,7 @@ static void create_tag(const struct object_id *object, const char *tag,
        struct strbuf header = STRBUF_INIT;
        char *path = NULL;
 
-       type = sha1_object_info(object->hash, NULL);
+       type = oid_object_info(the_repository, object, NULL);
        if (type <= OBJ_NONE)
            die(_("bad object type."));
 
@@ -293,17 +294,17 @@ static void create_reflog_msg(const struct object_id *oid, struct strbuf *sb)
                strbuf_addstr(sb, rla);
        } else {
                strbuf_addstr(sb, "tag: tagging ");
-               strbuf_add_unique_abbrev(sb, oid->hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(sb, oid, DEFAULT_ABBREV);
        }
 
        strbuf_addstr(sb, " (");
-       type = sha1_object_info(oid->hash, NULL);
+       type = oid_object_info(the_repository, oid, NULL);
        switch (type) {
        default:
                strbuf_addstr(sb, "object of unknown type");
                break;
        case OBJ_COMMIT:
-               if ((buf = read_sha1_file(oid->hash, &type, &size)) != NULL) {
+               if ((buf = read_object_file(oid, &type, &size)) != NULL) {
                        subject_len = find_commit_subject(buf, &subject_start);
                        strbuf_insert(sb, sb->len, subject_start, subject_len);
                } else {
@@ -558,7 +559,8 @@ int cmd_tag(int argc, const char **argv, const char *prefix)
                die("%s", err.buf);
        ref_transaction_free(transaction);
        if (force && !is_null_oid(&prev) && oidcmp(&prev, &object))
-               printf(_("Updated tag '%s' (was %s)\n"), tag, find_unique_abbrev(prev.hash, DEFAULT_ABBREV));
+               printf(_("Updated tag '%s' (was %s)\n"), tag,
+                      find_unique_abbrev(&prev, DEFAULT_ABBREV));
 
        UNLEAK(buf);
        UNLEAK(ref);
index 32e01555774c838e489fd33c675488e754c3e8e2..300eb59657e29cace38798029a9170834cac7c9e 100644 (file)
@@ -9,7 +9,7 @@ static char *create_temp_file(struct object_id *oid)
        unsigned long size;
        int fd;
 
-       buf = read_sha1_file(oid->hash, &type, &size);
+       buf = read_object_file(oid, &type, &size);
        if (!buf || type != OBJ_BLOB)
                die("unable to read blob object %s", oid_to_hex(oid));
 
index 6620feec68b15573340f4c28fe6be952e3c00e3a..cfe9019f800ad0bd331e31a20a8b7a240dc3fde1 100644 (file)
@@ -199,7 +199,7 @@ static int check_object(struct object *obj, int type, void *data, struct fsck_op
 
        if (!(obj->flags & FLAG_OPEN)) {
                unsigned long size;
-               int type = sha1_object_info(obj->oid.hash, &size);
+               int type = oid_object_info(the_repository, &obj->oid, &size);
                if (type != obj->type || type <= 0)
                        die("object of unexpected type");
                obj->flags |= FLAG_WRITTEN;
@@ -423,7 +423,7 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
        if (resolve_against_held(nr, &base_oid, delta_data, delta_size))
                return;
 
-       base = read_sha1_file(base_oid.hash, &type, &base_size);
+       base = read_object_file(&base_oid, &type, &base_size);
        if (!base) {
                error("failed to read delta-pack base object %s",
                      oid_to_hex(&base_oid));
index 58d1c2d2827d61899d73f1ea7632c5ee219f3ace..10d070a76fb1b0b94c058f60934bb05db37a4164 100644 (file)
@@ -592,7 +592,7 @@ static struct cache_entry *read_one_ent(const char *which,
        int size;
        struct cache_entry *ce;
 
-       if (get_tree_entry(ent->hash, path, oid.hash, &mode)) {
+       if (get_tree_entry(ent, path, &oid, &mode)) {
                if (which)
                        error("%s: not in %s branch.", path, which);
                return NULL;
@@ -1059,6 +1059,7 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
                        break;
                switch (parseopt_state) {
                case PARSE_OPT_HELP:
+               case PARSE_OPT_ERROR:
                        exit(129);
                case PARSE_OPT_NON_OPTION:
                case PARSE_OPT_DONE:
diff --git a/builtin/upload-pack.c b/builtin/upload-pack.c
new file mode 100644 (file)
index 0000000..decde5a
--- /dev/null
@@ -0,0 +1,74 @@
+#include "cache.h"
+#include "builtin.h"
+#include "exec-cmd.h"
+#include "pkt-line.h"
+#include "parse-options.h"
+#include "protocol.h"
+#include "upload-pack.h"
+#include "serve.h"
+
+static const char * const upload_pack_usage[] = {
+       N_("git upload-pack [<options>] <dir>"),
+       NULL
+};
+
+int cmd_upload_pack(int argc, const char **argv, const char *prefix)
+{
+       const char *dir;
+       int strict = 0;
+       struct upload_pack_options opts = { 0 };
+       struct serve_options serve_opts = SERVE_OPTIONS_INIT;
+       struct option options[] = {
+               OPT_BOOL(0, "stateless-rpc", &opts.stateless_rpc,
+                        N_("quit after a single request/response exchange")),
+               OPT_BOOL(0, "advertise-refs", &opts.advertise_refs,
+                        N_("exit immediately after initial ref advertisement")),
+               OPT_BOOL(0, "strict", &strict,
+                        N_("do not try <directory>/.git/ if <directory> is no Git directory")),
+               OPT_INTEGER(0, "timeout", &opts.timeout,
+                           N_("interrupt transfer after <n> seconds of inactivity")),
+               OPT_END()
+       };
+
+       packet_trace_identity("upload-pack");
+       check_replace_refs = 0;
+
+       argc = parse_options(argc, argv, NULL, options, upload_pack_usage, 0);
+
+       if (argc != 1)
+               usage_with_options(upload_pack_usage, options);
+
+       if (opts.timeout)
+               opts.daemon_mode = 1;
+
+       setup_path();
+
+       dir = argv[0];
+
+       if (!enter_repo(dir, strict))
+               die("'%s' does not appear to be a git repository", dir);
+
+       switch (determine_protocol_version_server()) {
+       case protocol_v2:
+               serve_opts.advertise_capabilities = opts.advertise_refs;
+               serve_opts.stateless_rpc = opts.stateless_rpc;
+               serve(&serve_opts);
+               break;
+       case protocol_v1:
+               /*
+                * v1 is just the original protocol with a version string,
+                * so just fall through after writing the version string.
+                */
+               if (opts.advertise_refs || !opts.stateless_rpc)
+                       packet_write_fmt(1, "version 1\n");
+
+               /* fallthrough */
+       case protocol_v0:
+               upload_pack(&opts);
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
+       }
+
+       return 0;
+}
index 05315ea7c966d3ed5b91e4cbd77eaa9943af253d..dcdaada111071c84b56022d9050ae06ffafbc25f 100644 (file)
@@ -44,7 +44,7 @@ static int verify_commit(const char *name, unsigned flags)
        if (get_oid(name, &oid))
                return error("commit '%s' not found.", name);
 
-       buf = read_sha1_file(oid.hash, &type, &size);
+       buf = read_object_file(&oid, &type, &size);
        if (!buf)
                return error("%s: unable to read file.", name);
        if (type != OBJ_COMMIT)
index ad7b79fa5cd718daf3be5f1a46a1fbb2bed41ad5..6fa04b751ac1d6ae18b8a42e700e2cefa6427a41 100644 (file)
@@ -72,7 +72,7 @@ int cmd_verify_tag(int argc, const char **argv, const char *prefix)
                }
 
                if (format.format)
-                       pretty_print_ref(name, oid.hash, &format);
+                       pretty_print_ref(name, &oid, &format);
        }
        return had_error;
 }
index 670555deddaca8ff8050c03ea68c83c7b4e459e6..5c7d2bb1807f942139b3ec41b426320e4b0cfc2a 100644 (file)
@@ -29,8 +29,6 @@ struct add_opts {
        int detach;
        int checkout;
        int keep_locked;
-       const char *new_branch;
-       int force_new_branch;
 };
 
 static int show_only;
@@ -101,16 +99,9 @@ static int prune_worktree(const char *id, struct strbuf *reason)
        }
        path[len] = '\0';
        if (!file_exists(path)) {
-               struct stat st_link;
                free(path);
-               /*
-                * the repo is moved manually and has not been
-                * accessed since?
-                */
-               if (!stat(git_path("worktrees/%s/link", id), &st_link) &&
-                   st_link.st_nlink > 1)
-                       return 0;
-               if (st.st_mtime <= expire) {
+               if (stat(git_path("worktrees/%s/index", id), &st) ||
+                   st.st_mtime <= expire) {
                        strbuf_addf(reason, _("Removing worktrees/%s: gitdir file points to non-existent location"), id);
                        return 1;
                } else {
@@ -305,8 +296,6 @@ static int add_worktree(const char *path, const char *refname,
        strbuf_addf(&sb, "%s/commondir", sb_repo.buf);
        write_file(sb.buf, "../..");
 
-       fprintf_ln(stderr, _("Preparing %s (identifier %s)"), path, name);
-
        argv_array_pushf(&child_env, "%s=%s", GIT_DIR_ENVIRONMENT, sb_git.buf);
        argv_array_pushf(&child_env, "%s=%s", GIT_WORK_TREE_ENVIRONMENT, path);
        cp.git_cmd = 1;
@@ -373,18 +362,75 @@ static int add_worktree(const char *path, const char *refname,
        return ret;
 }
 
+static void print_preparing_worktree_line(int detach,
+                                         const char *branch,
+                                         const char *new_branch,
+                                         int force_new_branch)
+{
+       if (force_new_branch) {
+               struct commit *commit = lookup_commit_reference_by_name(new_branch);
+               if (!commit)
+                       printf_ln(_("Preparing worktree (new branch '%s')"), new_branch);
+               else
+                       printf_ln(_("Preparing worktree (resetting branch '%s'; was at %s)"),
+                                 new_branch,
+                                 find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV));
+       } else if (new_branch) {
+               printf_ln(_("Preparing worktree (new branch '%s')"), new_branch);
+       } else {
+               struct strbuf s = STRBUF_INIT;
+               if (!detach && !strbuf_check_branch_ref(&s, branch) &&
+                   ref_exists(s.buf))
+                       printf_ln(_("Preparing worktree (checking out '%s')"),
+                                 branch);
+               else {
+                       struct commit *commit = lookup_commit_reference_by_name(branch);
+                       if (!commit)
+                               die(_("invalid reference: %s"), branch);
+                       printf_ln(_("Preparing worktree (detached HEAD %s)"),
+                                 find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV));
+               }
+               strbuf_release(&s);
+       }
+}
+
+static const char *dwim_branch(const char *path, const char **new_branch)
+{
+       int n;
+       const char *s = worktree_basename(path, &n);
+       const char *branchname = xstrndup(s, n);
+       struct strbuf ref = STRBUF_INIT;
+
+       UNLEAK(branchname);
+       if (!strbuf_check_branch_ref(&ref, branchname) &&
+           ref_exists(ref.buf)) {
+               strbuf_release(&ref);
+               return branchname;
+       }
+
+       *new_branch = branchname;
+       if (guess_remote) {
+               struct object_id oid;
+               const char *remote =
+                       unique_tracking_name(*new_branch, &oid);
+               return remote;
+       }
+       return NULL;
+}
+
 static int add(int ac, const char **av, const char *prefix)
 {
        struct add_opts opts;
        const char *new_branch_force = NULL;
        char *path;
        const char *branch;
+       const char *new_branch = NULL;
        const char *opt_track = NULL;
        struct option options[] = {
                OPT__FORCE(&opts.force,
                           N_("checkout <branch> even if already checked out in other worktree"),
                           PARSE_OPT_NOCOMPLETE),
-               OPT_STRING('b', NULL, &opts.new_branch, N_("branch"),
+               OPT_STRING('b', NULL, &new_branch, N_("branch"),
                           N_("create a new branch")),
                OPT_STRING('B', NULL, &new_branch_force, N_("branch"),
                           N_("create or reset a branch")),
@@ -402,7 +448,7 @@ static int add(int ac, const char **av, const char *prefix)
        memset(&opts, 0, sizeof(opts));
        opts.checkout = 1;
        ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
-       if (!!opts.detach + !!opts.new_branch + !!new_branch_force > 1)
+       if (!!opts.detach + !!new_branch + !!new_branch_force > 1)
                die(_("-b, -B, and --detach are mutually exclusive"));
        if (ac < 1 || ac > 2)
                usage_with_options(worktree_usage, options);
@@ -413,33 +459,25 @@ static int add(int ac, const char **av, const char *prefix)
        if (!strcmp(branch, "-"))
                branch = "@{-1}";
 
-       opts.force_new_branch = !!new_branch_force;
-       if (opts.force_new_branch) {
+       if (new_branch_force) {
                struct strbuf symref = STRBUF_INIT;
 
-               opts.new_branch = new_branch_force;
+               new_branch = new_branch_force;
 
                if (!opts.force &&
-                   !strbuf_check_branch_ref(&symref, opts.new_branch) &&
+                   !strbuf_check_branch_ref(&symref, new_branch) &&
                    ref_exists(symref.buf))
                        die_if_checked_out(symref.buf, 0);
                strbuf_release(&symref);
        }
 
-       if (ac < 2 && !opts.new_branch && !opts.detach) {
-               int n;
-               const char *s = worktree_basename(path, &n);
-               opts.new_branch = xstrndup(s, n);
-               if (guess_remote) {
-                       struct object_id oid;
-                       const char *remote =
-                               unique_tracking_name(opts.new_branch, &oid);
-                       if (remote)
-                               branch = remote;
-               }
+       if (ac < 2 && !new_branch && !opts.detach) {
+               const char *s = dwim_branch(path, &new_branch);
+               if (s)
+                       branch = s;
        }
 
-       if (ac == 2 && !opts.new_branch && !opts.detach) {
+       if (ac == 2 && !new_branch && !opts.detach) {
                struct object_id oid;
                struct commit *commit;
                const char *remote;
@@ -448,25 +486,27 @@ static int add(int ac, const char **av, const char *prefix)
                if (!commit) {
                        remote = unique_tracking_name(branch, &oid);
                        if (remote) {
-                               opts.new_branch = branch;
+                               new_branch = branch;
                                branch = remote;
                        }
                }
        }
 
-       if (opts.new_branch) {
+       print_preparing_worktree_line(opts.detach, branch, new_branch, !!new_branch_force);
+
+       if (new_branch) {
                struct child_process cp = CHILD_PROCESS_INIT;
                cp.git_cmd = 1;
                argv_array_push(&cp.args, "branch");
-               if (opts.force_new_branch)
+               if (new_branch_force)
                        argv_array_push(&cp.args, "--force");
-               argv_array_push(&cp.args, opts.new_branch);
+               argv_array_push(&cp.args, new_branch);
                argv_array_push(&cp.args, branch);
                if (opt_track)
                        argv_array_push(&cp.args, opt_track);
                if (run_command(&cp))
                        return -1;
-               branch = opts.new_branch;
+               branch = new_branch;
        } else if (opt_track) {
                die(_("--[no-]track can only be used if a new branch is created"));
        }
@@ -502,7 +542,7 @@ static void show_worktree(struct worktree *wt, int path_maxlen, int abbrev_len)
                strbuf_addstr(&sb, "(bare)");
        else {
                strbuf_addf(&sb, "%-*s ", abbrev_len,
-                               find_unique_abbrev(wt->head_oid.hash, DEFAULT_ABBREV));
+                               find_unique_abbrev(&wt->head_oid, DEFAULT_ABBREV));
                if (wt->is_detached)
                        strbuf_addstr(&sb, "(detached HEAD)");
                else if (wt->head_ref) {
@@ -527,7 +567,7 @@ static void measure_widths(struct worktree **wt, int *abbrev, int *maxlen)
 
                if (path_len > *maxlen)
                        *maxlen = path_len;
-               sha1_len = strlen(find_unique_abbrev(wt[i]->head_oid.hash, *abbrev));
+               sha1_len = strlen(find_unique_abbrev(&wt[i]->head_oid, *abbrev));
                if (sha1_len > *abbrev)
                        *abbrev = sha1_len;
        }
@@ -790,8 +830,9 @@ static int remove_worktree(int ac, const char **av, const char *prefix)
 {
        int force = 0;
        struct option options[] = {
-               OPT_BOOL(0, "force", &force,
-                        N_("force removing even if the worktree is dirty")),
+               OPT__FORCE(&force,
+                        N_("force removing even if the worktree is dirty"),
+                        PARSE_OPT_NOCOMPLETE),
                OPT_END()
        };
        struct worktree **worktrees, *wt;
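
The new dwim_branch() helper above derives a candidate branch name from the last component of the worktree path, so "git worktree add ../hotfix" proposes a branch named "hotfix" unless a ref of that name already exists. A standalone sketch of just that basename step (a hypothetical helper, not the in-tree worktree_basename()):

    #include <stdio.h>
    #include <string.h>

    /* Return a pointer to the last non-empty path component of "path",
     * writing its length to *len; trailing slashes are ignored. */
    static const char *guess_branch_name(const char *path, size_t *len)
    {
            const char *end = path + strlen(path);
            const char *start;

            while (end > path && end[-1] == '/')
                    end--;                  /* drop trailing slashes */
            start = end;
            while (start > path && start[-1] != '/')
                    start--;                /* back up to the previous slash */
            *len = end - start;
            return start;
    }

    int main(void)
    {
            size_t n;
            const char *s = guess_branch_name("../wt/hotfix/", &n);
            printf("%.*s\n", (int)n, s);    /* prints "hotfix" */
            return 0;
    }
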
index bd0a78aa3c56b7e817c7ddf6fcaba703d6d5fecc..c9d3c544e79f46bab9e5fd50079d1bb574b722f2 100644 (file)
@@ -19,7 +19,7 @@ int cmd_write_tree(int argc, const char **argv, const char *unused_prefix)
 {
        int flags = 0, ret;
        const char *prefix = NULL;
-       unsigned char sha1[20];
+       struct object_id oid;
        const char *me = "git-write-tree";
        struct option write_tree_options[] = {
                OPT_BIT(0, "missing-ok", &flags, N_("allow missing objects"),
@@ -38,10 +38,10 @@ int cmd_write_tree(int argc, const char **argv, const char *unused_prefix)
        argc = parse_options(argc, argv, unused_prefix, write_tree_options,
                             write_tree_usage, 0);
 
-       ret = write_cache_as_tree(sha1, flags, prefix);
+       ret = write_cache_as_tree(&oid, flags, prefix);
        switch (ret) {
        case 0:
-               printf("%s\n", sha1_to_hex(sha1));
+               printf("%s\n", oid_to_hex(&oid));
                break;
        case WRITE_TREE_UNREADABLE_INDEX:
                die("%s: error reading the index", me);
index 9d87eac07ba1d9bf01bfe4a24c8ba784e71dbac4..c0bc8de107a425c10ba750812247c481de463b02 100644 (file)
@@ -3,6 +3,7 @@
  */
 #include "cache.h"
 #include "bulk-checkin.h"
+#include "repository.h"
 #include "csum-file.h"
 #include "pack.h"
 #include "strbuf.h"
@@ -35,9 +36,9 @@ static void finish_bulk_checkin(struct bulk_checkin_state *state)
                unlink(state->pack_tmp_name);
                goto clear_exit;
        } else if (state->nr_written == 1) {
-               hashclose(state->f, oid.hash, CSUM_FSYNC);
+               finalize_hashfile(state->f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
        } else {
-               int fd = hashclose(state->f, oid.hash, 0);
+               int fd = finalize_hashfile(state->f, oid.hash, 0);
                fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name,
                                         state->nr_written, oid.hash,
                                         state->offset);
@@ -57,20 +58,20 @@ static void finish_bulk_checkin(struct bulk_checkin_state *state)
 
        strbuf_release(&packname);
        /* Make objects we just wrote available to ourselves */
-       reprepare_packed_git();
+       reprepare_packed_git(the_repository);
 }
 
-static int already_written(struct bulk_checkin_state *state, unsigned char sha1[])
+static int already_written(struct bulk_checkin_state *state, struct object_id *oid)
 {
        int i;
 
        /* The object may already exist in the repository */
-       if (has_sha1_file(sha1))
+       if (has_sha1_file(oid->hash))
                return 1;
 
        /* Might want to keep the list sorted */
        for (i = 0; i < state->nr_written; i++)
-               if (!hashcmp(state->written[i]->oid.hash, sha1))
+               if (!oidcmp(&state->written[i]->oid, oid))
                        return 1;
 
        /* This is a new object we need to keep */
@@ -186,7 +187,7 @@ static void prepare_to_stream(struct bulk_checkin_state *state,
 }
 
 static int deflate_to_pack(struct bulk_checkin_state *state,
-                          unsigned char result_sha1[],
+                          struct object_id *result_oid,
                           int fd, size_t size,
                           enum object_type type, const char *path,
                           unsigned flags)
@@ -236,17 +237,17 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
                if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
                        return error("cannot seek back");
        }
-       the_hash_algo->final_fn(result_sha1, &ctx);
+       the_hash_algo->final_fn(result_oid->hash, &ctx);
        if (!idx)
                return 0;
 
        idx->crc32 = crc32_end(state->f);
-       if (already_written(state, result_sha1)) {
+       if (already_written(state, result_oid)) {
                hashfile_truncate(state->f, &checkpoint);
                state->offset = checkpoint.offset;
                free(idx);
        } else {
-               hashcpy(idx->oid.hash, result_sha1);
+               oidcpy(&idx->oid, result_oid);
                ALLOC_GROW(state->written,
                           state->nr_written + 1,
                           state->alloc_written);
@@ -255,11 +256,11 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
        return 0;
 }
 
-int index_bulk_checkin(unsigned char *sha1,
+int index_bulk_checkin(struct object_id *oid,
                       int fd, size_t size, enum object_type type,
                       const char *path, unsigned flags)
 {
-       int status = deflate_to_pack(&state, sha1, fd, size, type,
+       int status = deflate_to_pack(&state, oid, fd, size, type,
                                     path, flags);
        if (!state.plugged)
                finish_bulk_checkin(&state);
index fbd40fc98c955c192a6de698a75b8d56af766f09..a85527318b15b36bb60b0b6b166569b4fcaa9dcf 100644 (file)
@@ -4,7 +4,7 @@
 #ifndef BULK_CHECKIN_H
 #define BULK_CHECKIN_H
 
-extern int index_bulk_checkin(unsigned char sha1[],
+extern int index_bulk_checkin(struct object_id *oid,
                              int fd, size_t size, enum object_type type,
                              const char *path, unsigned flags);
 
index efe547e25fe2a53bd0ef7954cf3bec6d55218365..902c9b54485be2000696a697472fa10d97b36153 100644 (file)
--- a/bundle.c
+++ b/bundle.c
@@ -222,7 +222,7 @@ static int is_tag_in_date_range(struct object *tag, struct rev_info *revs)
        if (revs->max_age == -1 && revs->min_age == -1)
                goto out;
 
-       buf = read_sha1_file(tag->oid.hash, &type, &size);
+       buf = read_object_file(&tag->oid, &type, &size);
        if (!buf)
                goto out;
        line = memmem(buf, size, "\ntagger ", 8);
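
is_tag_in_date_range() locates the "\ntagger " header inside the tag object it just read and then parses the timestamp that follows the closing '>' of the tagger's e-mail address. A standalone sketch of pulling the timestamp out of one such line (a hypothetical, simplified parser, not the in-tree code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Given one "tagger ..." header line, return the timestamp in seconds
     * since the epoch, or 0 if the line has no "<email> <timestamp> <tz>" tail. */
    static unsigned long tagger_timestamp(const char *line)
    {
            const char *gt = strrchr(line, '>');
            if (!gt)
                    return 0;
            return strtoul(gt + 1, NULL, 10);
    }

    int main(void)
    {
            const char *line =
                    "tagger A U Thor <author@example.com> 1525000000 +0200";
            printf("%lu\n", tagger_timestamp(line));   /* prints 1525000000 */
            return 0;
    }
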
index c52e4303dfcbc6a17796f536e8602017b1d951f0..6a555f4d431f9f6dbf8dad06d75a4ec81a4254fd 100644 (file)
@@ -320,7 +320,7 @@ static int update_one(struct cache_tree *it,
                struct cache_tree_sub *sub = NULL;
                const char *path, *slash;
                int pathlen, entlen;
-               const unsigned char *sha1;
+               const struct object_id *oid;
                unsigned mode;
                int expected_missing = 0;
                int contains_ita = 0;
@@ -338,7 +338,7 @@ static int update_one(struct cache_tree *it,
                                die("cache-tree.c: '%.*s' in '%s' not found",
                                    entlen, path + baselen, path);
                        i += sub->count;
-                       sha1 = sub->cache_tree->oid.hash;
+                       oid = &sub->cache_tree->oid;
                        mode = S_IFDIR;
                        contains_ita = sub->cache_tree->entry_count < 0;
                        if (contains_ita) {
@@ -347,19 +347,19 @@ static int update_one(struct cache_tree *it,
                        }
                }
                else {
-                       sha1 = ce->oid.hash;
+                       oid = &ce->oid;
                        mode = ce->ce_mode;
                        entlen = pathlen - baselen;
                        i++;
                }
 
-               if (is_null_sha1(sha1) ||
-                   (mode != S_IFGITLINK && !missing_ok && !has_sha1_file(sha1))) {
+               if (is_null_oid(oid) ||
+                   (mode != S_IFGITLINK && !missing_ok && !has_object_file(oid))) {
                        strbuf_release(&buffer);
                        if (expected_missing)
                                return -1;
                        return error("invalid object %06o %s for '%.*s'",
-                               mode, sha1_to_hex(sha1), entlen+baselen, path);
+                               mode, oid_to_hex(oid), entlen+baselen, path);
                }
 
                /*
@@ -385,12 +385,12 @@ static int update_one(struct cache_tree *it,
                /*
                 * "sub" can be an empty tree if all subentries are i-t-a.
                 */
-               if (contains_ita && !hashcmp(sha1, EMPTY_TREE_SHA1_BIN))
+               if (contains_ita && !oidcmp(oid, &empty_tree_oid))
                        continue;
 
                strbuf_grow(&buffer, entlen + 100);
                strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
-               strbuf_add(&buffer, sha1, 20);
+               strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz);
 
 #if DEBUG
                fprintf(stderr, "cache-tree update-one %o %.*s\n",
@@ -401,7 +401,7 @@ static int update_one(struct cache_tree *it,
        if (repair) {
                struct object_id oid;
                hash_object_file(buffer.buf, buffer.len, tree_type, &oid);
-               if (has_sha1_file(oid.hash))
+               if (has_object_file(&oid))
                        oidcpy(&it->oid, &oid);
                else
                        to_invalidate = 1;
@@ -465,7 +465,7 @@ static void write_one(struct strbuf *buffer, struct cache_tree *it,
 #endif
 
        if (0 <= it->entry_count) {
-               strbuf_add(buffer, it->oid.hash, 20);
+               strbuf_add(buffer, it->oid.hash, the_hash_algo->rawsz);
        }
        for (i = 0; i < it->subtree_nr; i++) {
                struct cache_tree_sub *down = it->down[i];
@@ -492,6 +492,7 @@ static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
        char *ep;
        struct cache_tree *it;
        int i, subtree_nr;
+       const unsigned rawsz = the_hash_algo->rawsz;
 
        it = NULL;
        /* skip name, but make sure name exists */
@@ -520,11 +521,11 @@ static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
                goto free_return;
        buf++; size--;
        if (0 <= it->entry_count) {
-               if (size < 20)
+               if (size < rawsz)
                        goto free_return;
-               hashcpy(it->oid.hash, (const unsigned char*)buf);
-               buf += 20;
-               size -= 20;
+               memcpy(it->oid.hash, (const unsigned char*)buf, rawsz);
+               buf += rawsz;
+               size -= rawsz;
        }
 
 #if DEBUG
@@ -599,7 +600,7 @@ static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *pat
        return it;
 }
 
-int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
+int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
 {
        int entries, was_valid;
        struct lock_file lock_file = LOCK_INIT;
@@ -640,19 +641,19 @@ int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, co
                        ret = WRITE_TREE_PREFIX_ERROR;
                        goto out;
                }
-               hashcpy(sha1, subtree->oid.hash);
+               oidcpy(oid, &subtree->oid);
        }
        else
-               hashcpy(sha1, index_state->cache_tree->oid.hash);
+               oidcpy(oid, &index_state->cache_tree->oid);
 
 out:
        rollback_lock_file(&lock_file);
        return ret;
 }
 
-int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
+int write_cache_as_tree(struct object_id *oid, int flags, const char *prefix)
 {
-       return write_index_as_tree(sha1, &the_index, get_index_file(), flags, prefix);
+       return write_index_as_tree(oid, &the_index, get_index_file(), flags, prefix);
 }
 
 static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
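
update_one() above serializes each entry in the canonical tree-object layout: the octal mode and the name, a NUL byte, then the raw binary object name, which is now the_hash_algo->rawsz bytes instead of a hard-coded 20. A standalone sketch of that record layout for a single SHA-1 entry (a hypothetical buffer helper, not the strbuf code above):

    #include <stdio.h>
    #include <string.h>

    #define RAWSZ 20   /* SHA-1; the patch replaces literal 20s with the_hash_algo->rawsz */

    /* Append one tree entry ("<octal mode> <name>\0<raw hash>") to buf,
     * returning the new length in bytes. */
    static size_t add_tree_entry(char *buf, size_t len, unsigned mode,
                                 const char *name, const unsigned char *raw_hash)
    {
            len += sprintf(buf + len, "%o %s", mode, name);
            buf[len++] = '\0';
            memcpy(buf + len, raw_hash, RAWSZ);
            return len + RAWSZ;
    }

    int main(void)
    {
            char buf[256];
            unsigned char hash[RAWSZ] = { 0 };
            size_t len = add_tree_entry(buf, 0, 0100644, "README", hash);
            printf("entry is %zu bytes\n", len);    /* "100644 README" + NUL + 20 = 34 */
            return 0;
    }
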
index f7b9cab7ee87dd04cecbd25f34101a58fc002014..cfd5328cc93694e23037e15148241e17bd4f3a04 100644 (file)
@@ -47,8 +47,8 @@ int update_main_cache_tree(int);
 #define WRITE_TREE_UNMERGED_INDEX (-2)
 #define WRITE_TREE_PREFIX_ERROR (-3)
 
-int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, const char *index_path, int flags, const char *prefix);
-int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix);
+int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix);
+int write_cache_as_tree(struct object_id *oid, int flags, const char *prefix);
 void prime_cache_tree(struct index_state *, struct tree *);
 
 extern int cache_tree_matches_traversal(struct cache_tree *, struct name_entry *ent, struct traverse_info *info);
diff --git a/cache.h b/cache.h
index a61b2d3f0d79b0f56992e0343803811f5265d716..6dedf3c4f969a64b3cc7fea115f507adaccc44fe 100644 (file)
--- a/cache.h
+++ b/cache.h
@@ -373,6 +373,13 @@ extern void free_name_hash(struct index_state *istate);
 #define read_blob_data_from_cache(path, sz) read_blob_data_from_index(&the_index, (path), (sz))
 #endif
 
+#define TYPE_BITS 3
+
+/*
+ * Values in this enum (except those outside the 3 bit range) are part
+ * of pack file format. See Documentation/technical/pack-format.txt
+ * for more information.
+ */
 enum object_type {
        OBJ_BAD = -1,
        OBJ_NONE = 0,
@@ -428,6 +435,7 @@ static inline enum object_type object_type(unsigned int mode)
 #define GIT_ICASE_PATHSPECS_ENVIRONMENT "GIT_ICASE_PATHSPECS"
 #define GIT_QUARANTINE_ENVIRONMENT "GIT_QUARANTINE_PATH"
 #define GIT_OPTIONAL_LOCKS_ENVIRONMENT "GIT_OPTIONAL_LOCKS"
+#define GIT_TEXT_DOMAIN_DIR_ENVIRONMENT "GIT_TEXTDOMAINDIR"
 
 /*
  * Environment variable used in handshaking the wire protocol.
@@ -459,7 +467,7 @@ static inline enum object_type object_type(unsigned int mode)
  */
 extern const char * const local_repo_env[];
 
-extern void setup_git_env(void);
+extern void setup_git_env(const char *git_dir);
 
 /*
  * Returns true iff we have a configured git repository (either via
@@ -477,7 +485,7 @@ extern const char *get_git_common_dir(void);
 extern char *get_object_directory(void);
 extern char *get_index_file(void);
 extern char *get_graft_file(void);
-extern int set_git_dir(const char *path);
+extern void set_git_dir(const char *path);
 extern int get_common_dir_noenv(struct strbuf *sb, const char *gitdir);
 extern int get_common_dir(struct strbuf *sb, const char *gitdir);
 extern const char *get_git_namespace(void);
@@ -805,6 +813,7 @@ extern char *git_replace_ref_base;
 
 extern int fsync_object_files;
 extern int core_preload_index;
+extern int core_commit_graph;
 extern int core_apply_sparse_checkout;
 extern int precomposed_unicode;
 extern int protect_hfs;
@@ -940,12 +949,6 @@ extern void check_repository_format(void);
 #define DATA_CHANGED    0x0020
 #define TYPE_CHANGED    0x0040
 
-/*
- * Put in `buf` the name of the file in the local object database that
- * would be used to store a loose object with the specified sha1.
- */
-extern void sha1_file_name(struct strbuf *buf, const unsigned char *sha1);
-
 /*
  * Return an abbreviated sha1 unique within this repository's object database.
  * The result will be at least `len` characters long, and will be NUL
@@ -955,14 +958,14 @@ extern void sha1_file_name(struct strbuf *buf, const unsigned char *sha1);
  * more calls to find_unique_abbrev are made.
  *
  * The `_r` variant writes to a buffer supplied by the caller, which must be at
- * least `GIT_SHA1_HEXSZ + 1` bytes. The return value is the number of bytes
+ * least `GIT_MAX_HEXSZ + 1` bytes. The return value is the number of bytes
  * written (excluding the NUL terminator).
  *
  * Note that while this version avoids the static buffer, it is not fully
  * reentrant, as it calls into other non-reentrant git code.
  */
-extern const char *find_unique_abbrev(const unsigned char *sha1, int len);
-extern int find_unique_abbrev_r(char *hex, const unsigned char *sha1, int len);
+extern const char *find_unique_abbrev(const struct object_id *oid, int len);
+extern int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len);
 
 extern const unsigned char null_sha1[GIT_MAX_RAWSZ];
 extern const struct object_id null_oid;
@@ -1189,35 +1192,16 @@ extern char *xdg_config_home(const char *filename);
  */
 extern char *xdg_cache_home(const char *filename);
 
-extern void *read_sha1_file_extended(const unsigned char *sha1,
-                                    enum object_type *type,
-                                    unsigned long *size, int lookup_replace);
-static inline void *read_sha1_file(const unsigned char *sha1, enum object_type *type, unsigned long *size)
+extern void *read_object_file_extended(const struct object_id *oid,
+                                      enum object_type *type,
+                                      unsigned long *size, int lookup_replace);
+static inline void *read_object_file(const struct object_id *oid, enum object_type *type, unsigned long *size)
 {
-       return read_sha1_file_extended(sha1, type, size, 1);
+       return read_object_file_extended(oid, type, size, 1);
 }
 
-/*
- * This internal function is only declared here for the benefit of
- * lookup_replace_object().  Please do not call it directly.
- */
-extern const unsigned char *do_lookup_replace_object(const unsigned char *sha1);
-
-/*
- * If object sha1 should be replaced, return the replacement object's
- * name (replaced recursively, if necessary).  The return value is
- * either sha1 or a pointer to a permanently-allocated value.  When
- * object replacement is suppressed, always return sha1.
- */
-static inline const unsigned char *lookup_replace_object(const unsigned char *sha1)
-{
-       if (!check_replace_refs)
-               return sha1;
-       return do_lookup_replace_object(sha1);
-}
-
-/* Read and unpack a sha1 file into memory, write memory to a sha1 file */
-extern int sha1_object_info(const unsigned char *, unsigned long *);
+/* Read and unpack an object file into memory, write memory to an object file */
+int oid_object_info(struct repository *r, const struct object_id *, unsigned long *);
 
 extern int hash_object_file(const void *buf, unsigned long len,
                            const char *type, struct object_id *oid);
@@ -1236,23 +1220,22 @@ extern int force_object_loose(const struct object_id *oid, time_t mtime);
 
 extern int git_open_cloexec(const char *name, int flags);
 #define git_open(name) git_open_cloexec(name, O_RDONLY)
-extern void *map_sha1_file(const unsigned char *sha1, unsigned long *size);
 extern int unpack_sha1_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz);
 extern int parse_sha1_header(const char *hdr, unsigned long *sizep);
 
-extern int check_sha1_signature(const unsigned char *sha1, void *buf, unsigned long size, const char *type);
+extern int check_object_signature(const struct object_id *oid, void *buf, unsigned long size, const char *type);
 
 extern int finalize_object_file(const char *tmpfile, const char *filename);
 
 /*
- * Open the loose object at path, check its sha1, and return the contents,
+ * Open the loose object at path, check its hash, and return the contents,
  * type, and size. If the object is a blob, then "contents" may return NULL,
  * to allow streaming of large blobs.
  *
  * Returns 0 on success, negative on error (details may be written to stderr).
  */
 int read_loose_object(const char *path,
-                     const unsigned char *expected_sha1,
+                     const struct object_id *expected_oid,
                      enum object_type *type,
                      unsigned long *size,
                      void **contents);
@@ -1279,7 +1262,7 @@ extern int has_object_file_with_flags(const struct object_id *oid, int flags);
  */
 extern int has_loose_object_nonlocal(const unsigned char *sha1);
 
-extern void assert_sha1_type(const unsigned char *sha1, enum object_type expect);
+extern void assert_oid_type(const struct object_id *oid, enum object_type expect);
 
 /* Helper to check and "touch" a file */
 extern int check_and_freshen_file(const char *fn, int freshen);
@@ -1435,10 +1418,10 @@ extern int df_name_compare(const char *name1, int len1, int mode1, const char *n
 extern int name_compare(const char *name1, size_t len1, const char *name2, size_t len2);
 extern int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2);
 
-extern void *read_object_with_reference(const unsigned char *sha1,
+extern void *read_object_with_reference(const struct object_id *oid,
                                        const char *required_type,
                                        unsigned long *size,
-                                       unsigned char *sha1_ret);
+                                       struct object_id *oid_ret);
 
 extern struct object *peel_to_type(const char *name, int namelen,
                                   struct object *o, enum object_type);
@@ -1564,57 +1547,6 @@ extern int has_dirs_only_path(const char *name, int len, int prefix_len);
 extern void schedule_dir_for_removal(const char *name, int len);
 extern void remove_scheduled_dirs(void);
 
-extern struct alternate_object_database {
-       struct alternate_object_database *next;
-
-       /* see alt_scratch_buf() */
-       struct strbuf scratch;
-       size_t base_len;
-
-       /*
-        * Used to store the results of readdir(3) calls when searching
-        * for unique abbreviated hashes.  This cache is never
-        * invalidated, thus it's racy and not necessarily accurate.
-        * That's fine for its purpose; don't use it for tasks requiring
-        * greater accuracy!
-        */
-       char loose_objects_subdir_seen[256];
-       struct oid_array loose_objects_cache;
-
-       char path[FLEX_ARRAY];
-} *alt_odb_list;
-extern void prepare_alt_odb(void);
-extern char *compute_alternate_path(const char *path, struct strbuf *err);
-typedef int alt_odb_fn(struct alternate_object_database *, void *);
-extern int foreach_alt_odb(alt_odb_fn, void*);
-
-/*
- * Allocate a "struct alternate_object_database" but do _not_ actually
- * add it to the list of alternates.
- */
-struct alternate_object_database *alloc_alt_odb(const char *dir);
-
-/*
- * Add the directory to the on-disk alternates file; the new entry will also
- * take effect in the current process.
- */
-extern void add_to_alternates_file(const char *dir);
-
-/*
- * Add the directory to the in-memory list of alternates (along with any
- * recursive alternates it points to), but do not modify the on-disk alternates
- * file.
- */
-extern void add_to_alternates_memory(const char *dir);
-
-/*
- * Returns a scratch strbuf pre-filled with the alternate object directory,
- * including a trailing slash, which can be used to access paths in the
- * alternate. Always use this over direct access to alt->scratch, as it
- * cleans up any previous use of the scratch buffer.
- */
-extern struct strbuf *alt_scratch_buf(struct alternate_object_database *alt);
-
 struct pack_window {
        struct pack_window *next;
        unsigned char *base;
@@ -1624,35 +1556,6 @@ struct pack_window {
        unsigned int inuse_cnt;
 };
 
-extern struct packed_git {
-       struct packed_git *next;
-       struct list_head mru;
-       struct pack_window *windows;
-       off_t pack_size;
-       const void *index_data;
-       size_t index_size;
-       uint32_t num_objects;
-       uint32_t num_bad_objects;
-       unsigned char *bad_object_sha1;
-       int index_version;
-       time_t mtime;
-       int pack_fd;
-       unsigned pack_local:1,
-                pack_keep:1,
-                freshened:1,
-                do_not_close:1,
-                pack_promisor:1;
-       unsigned char sha1[20];
-       struct revindex_entry *revindex;
-       /* something like ".git/objects/pack/xxxxx.pack" */
-       char pack_name[FLEX_ARRAY]; /* more */
-} *packed_git;
-
-/*
- * A most-recently-used ordered version of the packed_git list.
- */
-extern struct list_head packed_git_mru;
-
 struct pack_entry {
        off_t offset;
        unsigned char sha1[20];
@@ -1777,7 +1680,12 @@ struct object_info {
 #define OBJECT_INFO_SKIP_CACHED 4
 /* Do not retry packed storage after checking packed and loose storage */
 #define OBJECT_INFO_QUICK 8
-extern int sha1_object_info_extended(const unsigned char *, struct object_info *, unsigned flags);
+/* Do not check loose object */
+#define OBJECT_INFO_IGNORE_LOOSE 16
+
+int oid_object_info_extended(struct repository *r,
+                            const struct object_id *,
+                            struct object_info *, unsigned flags);
 
 /*
  * Set this to 0 to prevent sha1_object_info_extended() from fetching missing
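
With the cache.h changes above, the abbreviation helpers take a struct object_id, and the reentrant variant still needs a caller-supplied buffer of at least GIT_MAX_HEXSZ + 1 bytes. A minimal in-tree usage sketch under those assumptions (illustrative only, not code from this patch):

    #include "cache.h"

    static void show_abbrev(const struct object_id *oid)
    {
            char hex[GIT_MAX_HEXSZ + 1];        /* room for the longest hash plus NUL */
            int len = find_unique_abbrev_r(hex, oid, DEFAULT_ABBREV);

            /* hex now holds at least DEFAULT_ABBREV characters, more if needed
             * for uniqueness; len excludes the NUL terminator. */
            printf("%.*s\n", len, hex);
    }
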
diff --git a/chdir-notify.c b/chdir-notify.c
new file mode 100644 (file)
index 0000000..5f7f2c2
--- /dev/null
@@ -0,0 +1,93 @@
+#include "cache.h"
+#include "chdir-notify.h"
+#include "list.h"
+#include "strbuf.h"
+
+struct chdir_notify_entry {
+       const char *name;
+       chdir_notify_callback cb;
+       void *data;
+       struct list_head list;
+};
+static LIST_HEAD(chdir_notify_entries);
+
+void chdir_notify_register(const char *name,
+                          chdir_notify_callback cb,
+                          void *data)
+{
+       struct chdir_notify_entry *e = xmalloc(sizeof(*e));
+       e->name = name;
+       e->cb = cb;
+       e->data = data;
+       list_add_tail(&e->list, &chdir_notify_entries);
+}
+
+static void reparent_cb(const char *name,
+                       const char *old_cwd,
+                       const char *new_cwd,
+                       void *data)
+{
+       char **path = data;
+       char *tmp = *path;
+
+       if (!tmp)
+               return;
+
+       *path = reparent_relative_path(old_cwd, new_cwd, tmp);
+       free(tmp);
+
+       if (name) {
+               trace_printf_key(&trace_setup_key,
+                                "setup: reparent %s to '%s'",
+                                name, *path);
+       }
+}
+
+void chdir_notify_reparent(const char *name, char **path)
+{
+       chdir_notify_register(name, reparent_cb, path);
+}
+
+int chdir_notify(const char *new_cwd)
+{
+       struct strbuf old_cwd = STRBUF_INIT;
+       struct list_head *pos;
+
+       if (strbuf_getcwd(&old_cwd) < 0)
+               return -1;
+       if (chdir(new_cwd) < 0) {
+               int saved_errno = errno;
+               strbuf_release(&old_cwd);
+               errno = saved_errno;
+               return -1;
+       }
+
+       trace_printf_key(&trace_setup_key,
+                        "setup: chdir from '%s' to '%s'",
+                        old_cwd.buf, new_cwd);
+
+       list_for_each(pos, &chdir_notify_entries) {
+               struct chdir_notify_entry *e =
+                       list_entry(pos, struct chdir_notify_entry, list);
+               e->cb(e->name, old_cwd.buf, new_cwd, e->data);
+       }
+
+       strbuf_release(&old_cwd);
+       return 0;
+}
+
+char *reparent_relative_path(const char *old_cwd,
+                            const char *new_cwd,
+                            const char *path)
+{
+       char *ret, *full;
+
+       if (is_absolute_path(path))
+               return xstrdup(path);
+
+       full = xstrfmt("%s/%s", old_cwd, path);
+       ret = xstrdup(remove_leading_path(full, new_cwd));
+       free(full);
+
+       return ret;
+}
diff --git a/chdir-notify.h b/chdir-notify.h
new file mode 100644 (file)
index 0000000..366e4c1
--- /dev/null
@@ -0,0 +1,73 @@
+#ifndef CHDIR_NOTIFY_H
+#define CHDIR_NOTIFY_H
+
+/*
+ * An API to let code "subscribe" to changes to the current working directory.
+ * The general idea is that some code asks to be notified when the working
+ * directory changes, and other code that calls chdir uses a special wrapper
+ * that notifies everyone.
+ */
+
+/*
+ * Callers who need to know about changes can do:
+ *
+ *   void foo(const char *old_path, const char *new_path, void *data)
+ *   {
+ *     warning("switched from %s to %s!", old_path, new_path);
+ *   }
+ *   ...
+ *   chdir_notify_register("description", foo, data);
+ *
+ * In practice most callers will want to move a relative path to the new root;
+ * they can use the reparent_relative_path() helper for that. If that's all
+ * you're doing, you can also use the convenience function:
+ *
+ *   chdir_notify_reparent("description", &my_path);
+ *
+ * Whenever a chdir event occurs, that will update my_path (if it's relative)
+ * to adjust for the new cwd by freeing any existing string and allocating a
+ * new one.
+ *
+ * Registered functions are called in the order in which they were added. Note
+ * that there's currently no way to remove a function, so make sure that the
+ * data parameter remains valid for the rest of the program.
+ *
+ * The "name" argument is used only for printing trace output from
+ * $GIT_TRACE_SETUP. It may be NULL, but if non-NULL should point to
+ * storage which lasts as long as the registration is active.
+ */
+typedef void (*chdir_notify_callback)(const char *name,
+                                     const char *old_cwd,
+                                     const char *new_cwd,
+                                     void *data);
+void chdir_notify_register(const char *name, chdir_notify_callback cb, void *data);
+void chdir_notify_reparent(const char *name, char **path);
+
+/*
+ *
+ * Callers that want to chdir:
+ *
+ *   chdir_notify(new_path);
+ *
+ * to switch to the new path and notify any callbacks.
+ *
+ * Note that you don't need to chdir_notify() if you're just temporarily moving
+ * to a directory and back, as long as you don't call any subscribed code in
+ * between (but it should be safe to do so if you're unsure).
+ */
+int chdir_notify(const char *new_cwd);
+
+/*
+ * Reparent a relative path from old_root to new_root. For example:
+ *
+ *   reparent_relative_path("/a", "/a/b", "b/rel");
+ *
+ * would return the (newly allocated) string "rel". Note that we may return an
+ * absolute path in some cases (e.g., if the resulting path is not inside
+ * new_cwd).
+ */
+char *reparent_relative_path(const char *old_cwd,
+                            const char *new_cwd,
+                            const char *path);
+
+#endif /* CHDIR_NOTIFY_H */
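
The comments in the new header spell out the subscription model; a compressed caller sketch using only the functions declared above (hypothetical code, assuming chdir-notify.h from this patch):

    #include "cache.h"
    #include "chdir-notify.h"

    static char *object_dir;    /* possibly a relative path */

    static void setup_example(void)
    {
            object_dir = xstrdup(".git/objects");

            /* keep object_dir valid across any later chdir_notify() call */
            chdir_notify_reparent("object dir", &object_dir);
    }

    static int enter_worktree(const char *path)
    {
            /* switch directories and let every registered callback adjust */
            return chdir_notify(path);
    }
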
index 3735ce413f1835b3222fab05ba9ff5ab5205c2c0..4b04c75b7f81a749c0d48674b8e2042abe5769eb 100755 (executable)
@@ -11,7 +11,10 @@ make --jobs=2
 make --quiet test
 if test "$jobname" = "linux-gcc"
 then
-       GIT_TEST_SPLIT_INDEX=YesPlease make --quiet test
+       export GIT_TEST_SPLIT_INDEX=yes
+       export GIT_TEST_FULL_IN_PACK_ARRAY=true
+       export GIT_TEST_OE_SIZE=10
+       make --quiet test
 fi
 
 check_unignored_build_artifacts
diff --git a/color.c b/color.c
index f277e72e4ce04815f71c949dfdf7c89c9462c5b7..c6c6c4f580fe9bde55bd5f433b1bffd3932053f8 100644 (file)
--- a/color.c
+++ b/color.c
@@ -319,18 +319,20 @@ int git_config_colorbool(const char *var, const char *value)
        return GIT_COLOR_AUTO;
 }
 
-static int check_auto_color(void)
+static int check_auto_color(int fd)
 {
-       if (color_stdout_is_tty < 0)
-               color_stdout_is_tty = isatty(1);
-       if (color_stdout_is_tty || (pager_in_use() && pager_use_color)) {
+       static int color_stderr_is_tty = -1;
+       int *is_tty_p = fd == 1 ? &color_stdout_is_tty : &color_stderr_is_tty;
+       if (*is_tty_p < 0)
+               *is_tty_p = isatty(fd);
+       if (*is_tty_p || (fd == 1 && pager_in_use() && pager_use_color)) {
                if (!is_terminal_dumb())
                        return 1;
        }
        return 0;
 }
 
-int want_color(int var)
+int want_color_fd(int fd, int var)
 {
        /*
         * NEEDSWORK: This function is sometimes used from multiple threads, and
@@ -339,15 +341,15 @@ int want_color(int var)
         * is listed in .tsan-suppressions for the time being.
         */
 
-       static int want_auto = -1;
+       static int want_auto[3] = { -1, -1, -1 };
 
        if (var < 0)
                var = git_use_color_default;
 
        if (var == GIT_COLOR_AUTO) {
-               if (want_auto < 0)
-                       want_auto = check_auto_color();
-               return want_auto;
+               if (want_auto[fd] < 0)
+                       want_auto[fd] = check_auto_color(fd);
+               return want_auto[fd];
        }
        return var;
 }
diff --git a/color.h b/color.h
index cd0bcedd084f3741fad55569b18ec15e12d75cf8..5b744e1bc68617d196bdd864042e738cb4d75ebe 100644 (file)
--- a/color.h
+++ b/color.h
@@ -88,7 +88,9 @@ int git_config_colorbool(const char *var, const char *value);
  * Return a boolean whether to use color, where the argument 'var' is
  * one of GIT_COLOR_UNKNOWN, GIT_COLOR_NEVER, GIT_COLOR_ALWAYS, GIT_COLOR_AUTO.
  */
-int want_color(int var);
+int want_color_fd(int fd, int var);
+#define want_color(colorbool) want_color_fd(1, (colorbool))
+#define want_color_stderr(colorbool) want_color_fd(2, (colorbool))
 
 /*
  * Translate a Git color from 'value' into a string that the terminal can
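
want_color() keeps its old meaning (file descriptor 1), while the new want_color_fd()/want_color_stderr() let callers make the same auto-detection decision for stderr. A hypothetical caller sketch, assuming color.h as patched above:

    #include "cache.h"
    #include "color.h"

    static void warn_in_red(const char *msg)
    {
            /* colorize only when stderr is a terminal (or color is forced on) */
            if (want_color_stderr(GIT_COLOR_AUTO))
                    fprintf(stderr, "%s%s%s\n", GIT_COLOR_RED, msg, GIT_COLOR_RESET);
            else
                    fprintf(stderr, "%s\n", msg);
    }
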
index 1ec9af1f813968830ee153e23a4670014124683f..2ef495963fc1cf2e092778d35f1965912d7eaac0 100644 (file)
@@ -306,7 +306,7 @@ static char *grab_blob(const struct object_id *oid, unsigned int mode,
                *size = fill_textconv(textconv, df, &blob);
                free_filespec(df);
        } else {
-               blob = read_sha1_file(oid->hash, &type, size);
+               blob = read_object_file(oid, &type, size);
                if (type != OBJ_BLOB)
                        die("object '%s' is not a blob!", oid_to_hex(oid));
        }
@@ -915,11 +915,11 @@ static void show_combined_header(struct combine_diff_path *elem,
                         "", elem->path, line_prefix, c_meta, c_reset);
        printf("%s%sindex ", line_prefix, c_meta);
        for (i = 0; i < num_parent; i++) {
-               abb = find_unique_abbrev(elem->parent[i].oid.hash,
+               abb = find_unique_abbrev(&elem->parent[i].oid,
                                         abbrev);
                printf("%s%s", i ? "," : "", abb);
        }
-       abb = find_unique_abbrev(elem->oid.hash, abbrev);
+       abb = find_unique_abbrev(&elem->oid, abbrev);
        printf("..%s%s\n", abb, c_reset);
 
        if (mode_differs) {
index a1fad28fd82da18cc2b8f43e8eb26fed9864411b..835c5890be93abc1852dd0e1e19dbb627fee6041 100644 (file)
@@ -34,6 +34,7 @@ git-clean                               mainporcelain
 git-clone                               mainporcelain           init
 git-column                              purehelpers
 git-commit                              mainporcelain           history
+git-commit-graph                        plumbingmanipulators
 git-commit-tree                         plumbingmanipulators
 git-config                              ancillarymanipulators
 git-count-objects                       ancillaryinterrogators
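
The new commit-graph.c below begins by validating a fixed 8-byte header: a 4-byte signature ("CGPH"), a 1-byte file version, a 1-byte hash version, a 1-byte chunk count and one reserved byte, followed by a chunk lookup table of 4-byte IDs and 8-byte offsets. A standalone sketch of just that header check (a hypothetical parser mirroring load_commit_graph_one() below; the in-tree code uses get_be32() and friends on the mmapped file):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t be32(const unsigned char *p)
    {
            return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
    }

    /* Parse the fixed commit-graph header; return the number of chunks,
     * or -1 if the signature or versions are not what we expect. */
    static int parse_graph_header(const unsigned char *data, size_t len)
    {
            if (len < 8)
                    return -1;
            if (be32(data) != 0x43475048)       /* "CGPH" */
                    return -1;
            if (data[4] != 1 || data[5] != 1)   /* file version, hash version (SHA-1) */
                    return -1;
            return data[6];                     /* chunk count; data[7] is reserved */
    }

    int main(void)
    {
            unsigned char hdr[8] = { 'C', 'G', 'P', 'H', 1, 1, 3, 0 };
            printf("chunks: %d\n", parse_graph_header(hdr, sizeof(hdr)));
            return 0;
    }
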
diff --git a/commit-graph.c b/commit-graph.c
new file mode 100644 (file)
index 0000000..4c61270
--- /dev/null
@@ -0,0 +1,761 @@
+#include "cache.h"
+#include "config.h"
+#include "git-compat-util.h"
+#include "lockfile.h"
+#include "pack.h"
+#include "packfile.h"
+#include "commit.h"
+#include "object.h"
+#include "revision.h"
+#include "sha1-lookup.h"
+#include "commit-graph.h"
+#include "object-store.h"
+
+#define GRAPH_SIGNATURE 0x43475048 /* "CGPH" */
+#define GRAPH_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
+#define GRAPH_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
+#define GRAPH_CHUNKID_DATA 0x43444154 /* "CDAT" */
+#define GRAPH_CHUNKID_LARGEEDGES 0x45444745 /* "EDGE" */
+
+#define GRAPH_DATA_WIDTH 36
+
+#define GRAPH_VERSION_1 0x1
+#define GRAPH_VERSION GRAPH_VERSION_1
+
+#define GRAPH_OID_VERSION_SHA1 1
+#define GRAPH_OID_LEN_SHA1 GIT_SHA1_RAWSZ
+#define GRAPH_OID_VERSION GRAPH_OID_VERSION_SHA1
+#define GRAPH_OID_LEN GRAPH_OID_LEN_SHA1
+
+#define GRAPH_OCTOPUS_EDGES_NEEDED 0x80000000
+#define GRAPH_PARENT_MISSING 0x7fffffff
+#define GRAPH_EDGE_LAST_MASK 0x7fffffff
+#define GRAPH_PARENT_NONE 0x70000000
+
+#define GRAPH_LAST_EDGE 0x80000000
+
+#define GRAPH_FANOUT_SIZE (4 * 256)
+#define GRAPH_CHUNKLOOKUP_WIDTH 12
+#define GRAPH_MIN_SIZE (5 * GRAPH_CHUNKLOOKUP_WIDTH + GRAPH_FANOUT_SIZE + \
+                       GRAPH_OID_LEN + 8)
+
+char *get_commit_graph_filename(const char *obj_dir)
+{
+       return xstrfmt("%s/info/commit-graph", obj_dir);
+}
+
+static struct commit_graph *alloc_commit_graph(void)
+{
+       struct commit_graph *g = xcalloc(1, sizeof(*g));
+       g->graph_fd = -1;
+
+       return g;
+}
+
+struct commit_graph *load_commit_graph_one(const char *graph_file)
+{
+       void *graph_map;
+       const unsigned char *data, *chunk_lookup;
+       size_t graph_size;
+       struct stat st;
+       uint32_t i;
+       struct commit_graph *graph;
+       int fd = git_open(graph_file);
+       uint64_t last_chunk_offset;
+       uint32_t last_chunk_id;
+       uint32_t graph_signature;
+       unsigned char graph_version, hash_version;
+
+       if (fd < 0)
+               return NULL;
+       if (fstat(fd, &st)) {
+               close(fd);
+               return NULL;
+       }
+       graph_size = xsize_t(st.st_size);
+
+       if (graph_size < GRAPH_MIN_SIZE) {
+               close(fd);
+               die("graph file %s is too small", graph_file);
+       }
+       graph_map = xmmap(NULL, graph_size, PROT_READ, MAP_PRIVATE, fd, 0);
+       data = (const unsigned char *)graph_map;
+
+       graph_signature = get_be32(data);
+       if (graph_signature != GRAPH_SIGNATURE) {
+               error("graph signature %X does not match signature %X",
+                     graph_signature, GRAPH_SIGNATURE);
+               goto cleanup_fail;
+       }
+
+       graph_version = *(unsigned char*)(data + 4);
+       if (graph_version != GRAPH_VERSION) {
+               error("graph version %X does not match version %X",
+                     graph_version, GRAPH_VERSION);
+               goto cleanup_fail;
+       }
+
+       hash_version = *(unsigned char*)(data + 5);
+       if (hash_version != GRAPH_OID_VERSION) {
+               error("hash version %X does not match version %X",
+                     hash_version, GRAPH_OID_VERSION);
+               goto cleanup_fail;
+       }
+
+       graph = alloc_commit_graph();
+
+       graph->hash_len = GRAPH_OID_LEN;
+       graph->num_chunks = *(unsigned char*)(data + 6);
+       graph->graph_fd = fd;
+       graph->data = graph_map;
+       graph->data_len = graph_size;
+
+       last_chunk_id = 0;
+       last_chunk_offset = 8;
+       chunk_lookup = data + 8;
+       for (i = 0; i < graph->num_chunks; i++) {
+               uint32_t chunk_id = get_be32(chunk_lookup + 0);
+               uint64_t chunk_offset = get_be64(chunk_lookup + 4);
+               int chunk_repeated = 0;
+
+               chunk_lookup += GRAPH_CHUNKLOOKUP_WIDTH;
+
+               if (chunk_offset > graph_size - GIT_MAX_RAWSZ) {
+                       error("improper chunk offset %08x%08x", (uint32_t)(chunk_offset >> 32),
+                             (uint32_t)chunk_offset);
+                       goto cleanup_fail;
+               }
+
+               switch (chunk_id) {
+               case GRAPH_CHUNKID_OIDFANOUT:
+                       if (graph->chunk_oid_fanout)
+                               chunk_repeated = 1;
+                       else
+                               graph->chunk_oid_fanout = (uint32_t*)(data + chunk_offset);
+                       break;
+
+               case GRAPH_CHUNKID_OIDLOOKUP:
+                       if (graph->chunk_oid_lookup)
+                               chunk_repeated = 1;
+                       else
+                               graph->chunk_oid_lookup = data + chunk_offset;
+                       break;
+
+               case GRAPH_CHUNKID_DATA:
+                       if (graph->chunk_commit_data)
+                               chunk_repeated = 1;
+                       else
+                               graph->chunk_commit_data = data + chunk_offset;
+                       break;
+
+               case GRAPH_CHUNKID_LARGEEDGES:
+                       if (graph->chunk_large_edges)
+                               chunk_repeated = 1;
+                       else
+                               graph->chunk_large_edges = data + chunk_offset;
+                       break;
+               }
+
+               if (chunk_repeated) {
+                       error("chunk id %08x appears multiple times", chunk_id);
+                       goto cleanup_fail;
+               }
+
+               if (last_chunk_id == GRAPH_CHUNKID_OIDLOOKUP)
+               {
+                       graph->num_commits = (chunk_offset - last_chunk_offset)
+                                            / graph->hash_len;
+               }
+
+               last_chunk_id = chunk_id;
+               last_chunk_offset = chunk_offset;
+       }
+
+       return graph;
+
+cleanup_fail:
+       munmap(graph_map, graph_size);
+       close(fd);
+       exit(1);
+}
+
+/* global storage */
+static struct commit_graph *commit_graph = NULL;
+
+static void prepare_commit_graph_one(const char *obj_dir)
+{
+       char *graph_name;
+
+       if (commit_graph)
+               return;
+
+       graph_name = get_commit_graph_filename(obj_dir);
+       commit_graph = load_commit_graph_one(graph_name);
+
+       FREE_AND_NULL(graph_name);
+}
+
+static int prepare_commit_graph_run_once = 0;
+static void prepare_commit_graph(void)
+{
+       struct alternate_object_database *alt;
+       char *obj_dir;
+
+       if (prepare_commit_graph_run_once)
+               return;
+       prepare_commit_graph_run_once = 1;
+
+       obj_dir = get_object_directory();
+       prepare_commit_graph_one(obj_dir);
+       prepare_alt_odb(the_repository);
+       for (alt = the_repository->objects->alt_odb_list;
+            !commit_graph && alt;
+            alt = alt->next)
+               prepare_commit_graph_one(alt->path);
+}
+
+static void close_commit_graph(void)
+{
+       if (!commit_graph)
+               return;
+
+       if (commit_graph->graph_fd >= 0) {
+               munmap((void *)commit_graph->data, commit_graph->data_len);
+               commit_graph->data = NULL;
+               close(commit_graph->graph_fd);
+       }
+
+       FREE_AND_NULL(commit_graph);
+}
+
+static int bsearch_graph(struct commit_graph *g, struct object_id *oid, uint32_t *pos)
+{
+       return bsearch_hash(oid->hash, g->chunk_oid_fanout,
+                           g->chunk_oid_lookup, g->hash_len, pos);
+}
+
+static struct commit_list **insert_parent_or_die(struct commit_graph *g,
+                                                uint64_t pos,
+                                                struct commit_list **pptr)
+{
+       struct commit *c;
+       struct object_id oid;
+       hashcpy(oid.hash, g->chunk_oid_lookup + g->hash_len * pos);
+       c = lookup_commit(&oid);
+       if (!c)
+               die("could not find commit %s", oid_to_hex(&oid));
+       c->graph_pos = pos;
+       return &commit_list_insert(c, pptr)->next;
+}
+
+static int fill_commit_in_graph(struct commit *item, struct commit_graph *g, uint32_t pos)
+{
+       uint32_t edge_value;
+       uint32_t *parent_data_ptr;
+       uint64_t date_low, date_high;
+       struct commit_list **pptr;
+       const unsigned char *commit_data = g->chunk_commit_data + (g->hash_len + 16) * pos;
+
+       item->object.parsed = 1;
+       item->graph_pos = pos;
+
+       item->maybe_tree = NULL;
+
+       date_high = get_be32(commit_data + g->hash_len + 8) & 0x3;
+       date_low = get_be32(commit_data + g->hash_len + 12);
+       item->date = (timestamp_t)((date_high << 32) | date_low);
+
+       pptr = &item->parents;
+
+       edge_value = get_be32(commit_data + g->hash_len);
+       if (edge_value == GRAPH_PARENT_NONE)
+               return 1;
+       pptr = insert_parent_or_die(g, edge_value, pptr);
+
+       edge_value = get_be32(commit_data + g->hash_len + 4);
+       if (edge_value == GRAPH_PARENT_NONE)
+               return 1;
+       if (!(edge_value & GRAPH_OCTOPUS_EDGES_NEEDED)) {
+               pptr = insert_parent_or_die(g, edge_value, pptr);
+               return 1;
+       }
+
+       parent_data_ptr = (uint32_t*)(g->chunk_large_edges +
+                         4 * (uint64_t)(edge_value & GRAPH_EDGE_LAST_MASK));
+       do {
+               edge_value = get_be32(parent_data_ptr);
+               pptr = insert_parent_or_die(g,
+                                           edge_value & GRAPH_EDGE_LAST_MASK,
+                                           pptr);
+               parent_data_ptr++;
+       } while (!(edge_value & GRAPH_LAST_EDGE));
+
+       return 1;
+}
+
+int parse_commit_in_graph(struct commit *item)
+{
+       if (!core_commit_graph)
+               return 0;
+       if (item->object.parsed)
+               return 1;
+
+       prepare_commit_graph();
+       if (commit_graph) {
+               uint32_t pos;
+               int found;
+               if (item->graph_pos != COMMIT_NOT_FROM_GRAPH) {
+                       pos = item->graph_pos;
+                       found = 1;
+               } else {
+                       found = bsearch_graph(commit_graph, &(item->object.oid), &pos);
+               }
+
+               if (found)
+                       return fill_commit_in_graph(item, commit_graph, pos);
+       }
+
+       return 0;
+}
+
+static struct tree *load_tree_for_commit(struct commit_graph *g, struct commit *c)
+{
+       struct object_id oid;
+       const unsigned char *commit_data = g->chunk_commit_data +
+                                          GRAPH_DATA_WIDTH * (c->graph_pos);
+
+       hashcpy(oid.hash, commit_data);
+       c->maybe_tree = lookup_tree(&oid);
+
+       return c->maybe_tree;
+}
+
+struct tree *get_commit_tree_in_graph(const struct commit *c)
+{
+       if (c->maybe_tree)
+               return c->maybe_tree;
+       if (c->graph_pos == COMMIT_NOT_FROM_GRAPH)
+               BUG("get_commit_tree_in_graph called from non-commit-graph commit");
+
+       return load_tree_for_commit(commit_graph, (struct commit *)c);
+}
+
+static void write_graph_chunk_fanout(struct hashfile *f,
+                                    struct commit **commits,
+                                    int nr_commits)
+{
+       int i, count = 0;
+       struct commit **list = commits;
+
+       /*
+        * Write the first-level table (the list is sorted,
+        * but we use a 256-entry lookup to be able to avoid
+        * having to do eight extra binary search iterations).
+        */
+       for (i = 0; i < 256; i++) {
+               while (count < nr_commits) {
+                       if ((*list)->object.oid.hash[0] != i)
+                               break;
+                       count++;
+                       list++;
+               }
+
+               hashwrite_be32(f, count);
+       }
+}
+
+static void write_graph_chunk_oids(struct hashfile *f, int hash_len,
+                                  struct commit **commits, int nr_commits)
+{
+       struct commit **list = commits;
+       int count;
+       for (count = 0; count < nr_commits; count++, list++)
+               hashwrite(f, (*list)->object.oid.hash, (int)hash_len);
+}
+
+static const unsigned char *commit_to_sha1(size_t index, void *table)
+{
+       struct commit **commits = table;
+       return commits[index]->object.oid.hash;
+}
+
+static void write_graph_chunk_data(struct hashfile *f, int hash_len,
+                                  struct commit **commits, int nr_commits)
+{
+       struct commit **list = commits;
+       struct commit **last = commits + nr_commits;
+       uint32_t num_extra_edges = 0;
+
+       while (list < last) {
+               struct commit_list *parent;
+               int edge_value;
+               uint32_t packedDate[2];
+
+               parse_commit(*list);
+               hashwrite(f, get_commit_tree_oid(*list)->hash, hash_len);
+
+               parent = (*list)->parents;
+
+               if (!parent)
+                       edge_value = GRAPH_PARENT_NONE;
+               else {
+                       edge_value = sha1_pos(parent->item->object.oid.hash,
+                                             commits,
+                                             nr_commits,
+                                             commit_to_sha1);
+
+                       if (edge_value < 0)
+                               edge_value = GRAPH_PARENT_MISSING;
+               }
+
+               hashwrite_be32(f, edge_value);
+
+               if (parent)
+                       parent = parent->next;
+
+               if (!parent)
+                       edge_value = GRAPH_PARENT_NONE;
+               else if (parent->next)
+                       edge_value = GRAPH_OCTOPUS_EDGES_NEEDED | num_extra_edges;
+               else {
+                       edge_value = sha1_pos(parent->item->object.oid.hash,
+                                             commits,
+                                             nr_commits,
+                                             commit_to_sha1);
+                       if (edge_value < 0)
+                               edge_value = GRAPH_PARENT_MISSING;
+               }
+
+               hashwrite_be32(f, edge_value);
+
+               if (edge_value & GRAPH_OCTOPUS_EDGES_NEEDED) {
+                       do {
+                               num_extra_edges++;
+                               parent = parent->next;
+                       } while (parent);
+               }
+
+               if (sizeof((*list)->date) > 4)
+                       packedDate[0] = htonl(((*list)->date >> 32) & 0x3);
+               else
+                       packedDate[0] = 0;
+
+               packedDate[1] = htonl((*list)->date);
+               hashwrite(f, packedDate, 8);
+
+               list++;
+       }
+}
+
+static void write_graph_chunk_large_edges(struct hashfile *f,
+                                         struct commit **commits,
+                                         int nr_commits)
+{
+       struct commit **list = commits;
+       struct commit **last = commits + nr_commits;
+       struct commit_list *parent;
+
+       while (list < last) {
+               int num_parents = 0;
+               for (parent = (*list)->parents; num_parents < 3 && parent;
+                    parent = parent->next)
+                       num_parents++;
+
+               if (num_parents <= 2) {
+                       list++;
+                       continue;
+               }
+
+               /* Since num_parents > 2, this initializer is safe. */
+               for (parent = (*list)->parents->next; parent; parent = parent->next) {
+                       int edge_value = sha1_pos(parent->item->object.oid.hash,
+                                                 commits,
+                                                 nr_commits,
+                                                 commit_to_sha1);
+
+                       if (edge_value < 0)
+                               edge_value = GRAPH_PARENT_MISSING;
+                       else if (!parent->next)
+                               edge_value |= GRAPH_LAST_EDGE;
+
+                       hashwrite_be32(f, edge_value);
+               }
+
+               list++;
+       }
+}
+
+static int commit_compare(const void *_a, const void *_b)
+{
+       const struct object_id *a = (const struct object_id *)_a;
+       const struct object_id *b = (const struct object_id *)_b;
+       return oidcmp(a, b);
+}
+
+struct packed_commit_list {
+       struct commit **list;
+       int nr;
+       int alloc;
+};
+
+struct packed_oid_list {
+       struct object_id *list;
+       int nr;
+       int alloc;
+};
+
+static int add_packed_commits(const struct object_id *oid,
+                             struct packed_git *pack,
+                             uint32_t pos,
+                             void *data)
+{
+       struct packed_oid_list *list = (struct packed_oid_list*)data;
+       enum object_type type;
+       off_t offset = nth_packed_object_offset(pack, pos);
+       struct object_info oi = OBJECT_INFO_INIT;
+
+       oi.typep = &type;
+       if (packed_object_info(the_repository, pack, offset, &oi) < 0)
+               die("unable to get type of object %s", oid_to_hex(oid));
+
+       if (type != OBJ_COMMIT)
+               return 0;
+
+       ALLOC_GROW(list->list, list->nr + 1, list->alloc);
+       oidcpy(&(list->list[list->nr]), oid);
+       list->nr++;
+
+       return 0;
+}
+
+static void add_missing_parents(struct packed_oid_list *oids, struct commit *commit)
+{
+       struct commit_list *parent;
+       for (parent = commit->parents; parent; parent = parent->next) {
+               if (!(parent->item->object.flags & UNINTERESTING)) {
+                       ALLOC_GROW(oids->list, oids->nr + 1, oids->alloc);
+                       oidcpy(&oids->list[oids->nr], &(parent->item->object.oid));
+                       oids->nr++;
+                       parent->item->object.flags |= UNINTERESTING;
+               }
+       }
+}
+
+static void close_reachable(struct packed_oid_list *oids)
+{
+       int i;
+       struct commit *commit;
+
+       for (i = 0; i < oids->nr; i++) {
+               commit = lookup_commit(&oids->list[i]);
+               if (commit)
+                       commit->object.flags |= UNINTERESTING;
+       }
+
+       /*
+        * As this loop runs, oids->nr may grow, but not more
+        * than the number of missing commits in the reachable
+        * closure.
+        */
+       for (i = 0; i < oids->nr; i++) {
+               commit = lookup_commit(&oids->list[i]);
+
+               if (commit && !parse_commit(commit))
+                       add_missing_parents(oids, commit);
+       }
+
+       for (i = 0; i < oids->nr; i++) {
+               commit = lookup_commit(&oids->list[i]);
+
+               if (commit)
+                       commit->object.flags &= ~UNINTERESTING;
+       }
+}
+
+void write_commit_graph(const char *obj_dir,
+                       const char **pack_indexes,
+                       int nr_packs,
+                       const char **commit_hex,
+                       int nr_commits,
+                       int append)
+{
+       struct packed_oid_list oids;
+       struct packed_commit_list commits;
+       struct hashfile *f;
+       uint32_t i, count_distinct = 0;
+       char *graph_name;
+       int fd;
+       struct lock_file lk = LOCK_INIT;
+       uint32_t chunk_ids[5];
+       uint64_t chunk_offsets[5];
+       int num_chunks;
+       int num_extra_edges;
+       struct commit_list *parent;
+
+       oids.nr = 0;
+       oids.alloc = approximate_object_count() / 4;
+
+       if (append) {
+               prepare_commit_graph_one(obj_dir);
+               if (commit_graph)
+                       oids.alloc += commit_graph->num_commits;
+       }
+
+       if (oids.alloc < 1024)
+               oids.alloc = 1024;
+       ALLOC_ARRAY(oids.list, oids.alloc);
+
+       if (append && commit_graph) {
+               for (i = 0; i < commit_graph->num_commits; i++) {
+                       const unsigned char *hash = commit_graph->chunk_oid_lookup +
+                               commit_graph->hash_len * i;
+                       hashcpy(oids.list[oids.nr++].hash, hash);
+               }
+       }
+
+       if (pack_indexes) {
+               struct strbuf packname = STRBUF_INIT;
+               int dirlen;
+               strbuf_addf(&packname, "%s/pack/", obj_dir);
+               dirlen = packname.len;
+               for (i = 0; i < nr_packs; i++) {
+                       struct packed_git *p;
+                       strbuf_setlen(&packname, dirlen);
+                       strbuf_addstr(&packname, pack_indexes[i]);
+                       p = add_packed_git(packname.buf, packname.len, 1);
+                       if (!p)
+                               die("error adding pack %s", packname.buf);
+                       if (open_pack_index(p))
+                               die("error opening index for %s", packname.buf);
+                       for_each_object_in_pack(p, add_packed_commits, &oids);
+                       close_pack(p);
+               }
+               strbuf_release(&packname);
+       }
+
+       if (commit_hex) {
+               for (i = 0; i < nr_commits; i++) {
+                       const char *end;
+                       struct object_id oid;
+                       struct commit *result;
+
+                       if (commit_hex[i] && parse_oid_hex(commit_hex[i], &oid, &end))
+                               continue;
+
+                       result = lookup_commit_reference_gently(&oid, 1);
+
+                       if (result) {
+                               ALLOC_GROW(oids.list, oids.nr + 1, oids.alloc);
+                               oidcpy(&oids.list[oids.nr], &(result->object.oid));
+                               oids.nr++;
+                       }
+               }
+       }
+
+       if (!pack_indexes && !commit_hex)
+               for_each_packed_object(add_packed_commits, &oids, 0);
+
+       close_reachable(&oids);
+
+       QSORT(oids.list, oids.nr, commit_compare);
+
+       count_distinct = 1;
+       for (i = 1; i < oids.nr; i++) {
+               if (oidcmp(&oids.list[i-1], &oids.list[i]))
+                       count_distinct++;
+       }
+
+       if (count_distinct >= GRAPH_PARENT_MISSING)
+               die(_("the commit graph format cannot write %d commits"), count_distinct);
+
+       commits.nr = 0;
+       commits.alloc = count_distinct;
+       ALLOC_ARRAY(commits.list, commits.alloc);
+
+       num_extra_edges = 0;
+       for (i = 0; i < oids.nr; i++) {
+               int num_parents = 0;
+               if (i > 0 && !oidcmp(&oids.list[i-1], &oids.list[i]))
+                       continue;
+
+               commits.list[commits.nr] = lookup_commit(&oids.list[i]);
+               parse_commit(commits.list[commits.nr]);
+
+               for (parent = commits.list[commits.nr]->parents;
+                    parent; parent = parent->next)
+                       num_parents++;
+
+               if (num_parents > 2)
+                       num_extra_edges += num_parents - 1;
+
+               commits.nr++;
+       }
+       num_chunks = num_extra_edges ? 4 : 3;
+
+       if (commits.nr >= GRAPH_PARENT_MISSING)
+               die(_("too many commits to write graph"));
+
+       graph_name = get_commit_graph_filename(obj_dir);
+       fd = hold_lock_file_for_update(&lk, graph_name, 0);
+
+       if (fd < 0) {
+               struct strbuf folder = STRBUF_INIT;
+               strbuf_addstr(&folder, graph_name);
+               strbuf_setlen(&folder, strrchr(folder.buf, '/') - folder.buf);
+
+               if (mkdir(folder.buf, 0777) < 0)
+                       die_errno(_("cannot mkdir %s"), folder.buf);
+               strbuf_release(&folder);
+
+               fd = hold_lock_file_for_update(&lk, graph_name, LOCK_DIE_ON_ERROR);
+
+               if (fd < 0)
+                       die_errno("unable to create '%s'", graph_name);
+       }
+
+       f = hashfd(lk.tempfile->fd, lk.tempfile->filename.buf);
+
+       hashwrite_be32(f, GRAPH_SIGNATURE);
+
+       hashwrite_u8(f, GRAPH_VERSION);
+       hashwrite_u8(f, GRAPH_OID_VERSION);
+       hashwrite_u8(f, num_chunks);
+       hashwrite_u8(f, 0); /* unused padding byte */
+
+       chunk_ids[0] = GRAPH_CHUNKID_OIDFANOUT;
+       chunk_ids[1] = GRAPH_CHUNKID_OIDLOOKUP;
+       chunk_ids[2] = GRAPH_CHUNKID_DATA;
+       if (num_extra_edges)
+               chunk_ids[3] = GRAPH_CHUNKID_LARGEEDGES;
+       else
+               chunk_ids[3] = 0;
+       chunk_ids[4] = 0;
+
+       chunk_offsets[0] = 8 + (num_chunks + 1) * GRAPH_CHUNKLOOKUP_WIDTH;
+       chunk_offsets[1] = chunk_offsets[0] + GRAPH_FANOUT_SIZE;
+       chunk_offsets[2] = chunk_offsets[1] + GRAPH_OID_LEN * commits.nr;
+       chunk_offsets[3] = chunk_offsets[2] + (GRAPH_OID_LEN + 16) * commits.nr;
+       chunk_offsets[4] = chunk_offsets[3] + 4 * num_extra_edges;
+
+       for (i = 0; i <= num_chunks; i++) {
+               uint32_t chunk_write[3];
+
+               chunk_write[0] = htonl(chunk_ids[i]);
+               chunk_write[1] = htonl(chunk_offsets[i] >> 32);
+               chunk_write[2] = htonl(chunk_offsets[i] & 0xffffffff);
+               hashwrite(f, chunk_write, 12);
+       }
+
+       write_graph_chunk_fanout(f, commits.list, commits.nr);
+       write_graph_chunk_oids(f, GRAPH_OID_LEN, commits.list, commits.nr);
+       write_graph_chunk_data(f, GRAPH_OID_LEN, commits.list, commits.nr);
+       write_graph_chunk_large_edges(f, commits.list, commits.nr);
+
+       close_commit_graph();
+       finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_FSYNC);
+       commit_lock_file(&lk);
+
+       free(oids.list);
+       oids.alloc = 0;
+       oids.nr = 0;
+}
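
The write_commit_graph() entry point above accepts a list of pack index names, a
list of commit hexes, or neither; given neither, it falls back to scanning every
packed object. A minimal sketch, not the in-tree "git commit-graph write"
implementation, of a caller that builds a graph file covering all packed commits
under a given object directory:

    #include "cache.h"
    #include "commit-graph.h"

    /* Illustrative only: write a commit-graph file for every packed commit. */
    static void example_write_full_graph(const char *obj_dir)
    {
            /*
             * NULL pack_indexes and NULL commit_hex make write_commit_graph()
             * fall back to for_each_packed_object(); append == 0 starts from
             * scratch instead of merging in an existing graph file.
             */
            write_commit_graph(obj_dir, NULL, 0, NULL, 0, 0);
    }
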
diff --git a/commit-graph.h b/commit-graph.h
new file mode 100644 (file)
index 0000000..260a468
--- /dev/null
@@ -0,0 +1,48 @@
+#ifndef COMMIT_GRAPH_H
+#define COMMIT_GRAPH_H
+
+#include "git-compat-util.h"
+
+char *get_commit_graph_filename(const char *obj_dir);
+
+/*
+ * Given a commit struct, try to fill in its info from the commit graph, including:
+ *  1. tree object
+ *  2. date
+ *  3. parents.
+ *
+ * Returns 1 if and only if the commit was found in the packed graph.
+ *
+ * See parse_commit_buffer() for the fallback after this call.
+ */
+int parse_commit_in_graph(struct commit *item);
+
+struct tree *get_commit_tree_in_graph(const struct commit *c);
+
+struct commit_graph {
+       int graph_fd;
+
+       const unsigned char *data;
+       size_t data_len;
+
+       unsigned char hash_len;
+       unsigned char num_chunks;
+       uint32_t num_commits;
+       struct object_id oid;
+
+       const uint32_t *chunk_oid_fanout;
+       const unsigned char *chunk_oid_lookup;
+       const unsigned char *chunk_commit_data;
+       const unsigned char *chunk_large_edges;
+};
+
+struct commit_graph *load_commit_graph_one(const char *graph_file);
+
+void write_commit_graph(const char *obj_dir,
+                       const char **pack_indexes,
+                       int nr_packs,
+                       const char **commit_hex,
+                       int nr_commits,
+                       int append);
+
+#endif
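
On the read side, the OID fanout and OID lookup chunks work as a pair: fanout
entry b holds the number of commits whose object ID starts with a byte less than
or equal to b, which bounds the binary search over the sorted lookup chunk. A
minimal sketch of that lookup, assuming the struct fields declared above and
git's hashcmp(); the in-tree code does this in bsearch_graph(), and the fanout
entries are stored in network byte order:

    #include "cache.h"
    #include "commit-graph.h"

    static int example_graph_lookup(struct commit_graph *g,
                                    const struct object_id *oid,
                                    uint32_t *pos)
    {
            uint8_t b = oid->hash[0];
            uint32_t lo = b ? ntohl(g->chunk_oid_fanout[b - 1]) : 0;
            uint32_t hi = ntohl(g->chunk_oid_fanout[b]);

            while (lo < hi) {
                    uint32_t mi = lo + (hi - lo) / 2;
                    int cmp = hashcmp(oid->hash,
                                      g->chunk_oid_lookup + (size_t)g->hash_len * mi);

                    if (!cmp) {
                            *pos = mi; /* lexicographic position in the graph */
                            return 1;
                    }
                    if (cmp < 0)
                            hi = mi;
                    else
                            lo = mi + 1;
            }
            return 0;
    }
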
index 00c99c7272badc2d5a8f16c29db7d4040f5eb4d3..f9714ed74ccdccba4d9b5f00bc8ac31cb022dd02 100644 (file)
--- a/commit.c
+++ b/commit.c
@@ -1,6 +1,7 @@
 #include "cache.h"
 #include "tag.h"
 #include "commit.h"
+#include "commit-graph.h"
 #include "pkt-line.h"
 #include "utf8.h"
 #include "diff.h"
@@ -12,6 +13,7 @@
 #include "prio-queue.h"
 #include "sha1-lookup.h"
 #include "wt-status.h"
+#include "advice.h"
 
 static struct commit_extra_header *read_commit_extra_header_lines(const char *buf, size_t len, const char **);
 
@@ -176,6 +178,15 @@ static int read_graft_file(const char *graft_file)
        struct strbuf buf = STRBUF_INIT;
        if (!fp)
                return -1;
+       if (advice_graft_file_deprecated)
+               advise(_("Support for <GIT_DIR>/info/grafts is deprecated\n"
+                        "and will be removed in a future Git version.\n"
+                        "\n"
+                        "Please use \"git replace --convert-graft-file\"\n"
+                        "to convert the grafts into replace refs.\n"
+                        "\n"
+                        "Turn this message off by running\n"
+                        "\"git config advice.graftFileDeprecated false\""));
        while (!strbuf_getwholeline(&buf, fp, '\n')) {
                /* The format is just "Commit Parent1 Parent2 ...\n" */
                struct commit_graft *graft = read_graft_line(&buf);
@@ -266,7 +277,7 @@ const void *get_commit_buffer(const struct commit *commit, unsigned long *sizep)
        if (!ret) {
                enum object_type type;
                unsigned long size;
-               ret = read_sha1_file(commit->object.oid.hash, &type, &size);
+               ret = read_object_file(&commit->object.oid, &type, &size);
                if (!ret)
                        die("cannot read commit object %s",
                            oid_to_hex(&commit->object.oid));
@@ -295,6 +306,22 @@ void free_commit_buffer(struct commit *commit)
        }
 }
 
+struct tree *get_commit_tree(const struct commit *commit)
+{
+       if (commit->maybe_tree || !commit->object.parsed)
+               return commit->maybe_tree;
+
+       if (commit->graph_pos == COMMIT_NOT_FROM_GRAPH)
+               BUG("commit has NULL tree, but was not loaded from commit-graph");
+
+       return get_commit_tree_in_graph(commit);
+}
+
+struct object_id *get_commit_tree_oid(const struct commit *commit)
+{
+       return &get_commit_tree(commit)->object.oid;
+}
+
 const void *detach_commit_buffer(struct commit *commit, unsigned long *sizep)
 {
        struct commit_buffer *v = buffer_slab_peek(&buffer_slab, commit);
@@ -334,7 +361,7 @@ int parse_commit_buffer(struct commit *item, const void *buffer, unsigned long s
        if (get_sha1_hex(bufptr + 5, parent.hash) < 0)
                return error("bad tree pointer in commit %s",
                             oid_to_hex(&item->object.oid));
-       item->tree = lookup_tree(&parent);
+       item->maybe_tree = lookup_tree(&parent);
        bufptr += tree_entry_len + 1; /* "tree " + "hex sha1" + "\n" */
        pptr = &item->parents;
 
@@ -383,7 +410,9 @@ int parse_commit_gently(struct commit *item, int quiet_on_missing)
                return -1;
        if (item->object.parsed)
                return 0;
-       buffer = read_sha1_file(item->object.oid.hash, &type, &size);
+       if (parse_commit_in_graph(item))
+               return 0;
+       buffer = read_object_file(&item->object.oid, &type, &size);
        if (!buffer)
                return quiet_on_missing ? -1 :
                        error("Could not read %s",
@@ -1206,7 +1235,7 @@ static void handle_signed_tag(struct commit *parent, struct commit_extra_header
        desc = merge_remote_util(parent);
        if (!desc || !desc->obj)
                return;
-       buf = read_sha1_file(desc->obj->oid.hash, &type, &size);
+       buf = read_object_file(&desc->obj->oid, &type, &size);
        if (!buf || type != OBJ_TAG)
                goto free_return;
        len = parse_signature(buf, size);
@@ -1288,17 +1317,19 @@ struct commit_extra_header *read_commit_extra_headers(struct commit *commit,
        return extra;
 }
 
-void for_each_mergetag(each_mergetag_fn fn, struct commit *commit, void *data)
+int for_each_mergetag(each_mergetag_fn fn, struct commit *commit, void *data)
 {
        struct commit_extra_header *extra, *to_free;
+       int res = 0;
 
        to_free = read_commit_extra_headers(commit, NULL);
-       for (extra = to_free; extra; extra = extra->next) {
+       for (extra = to_free; !res && extra; extra = extra->next) {
                if (strcmp(extra->key, "mergetag"))
                        continue; /* not a merge tag */
-               fn(commit, extra, data);
+               res = fn(commit, extra, data);
        }
        free_commit_extra_headers(to_free);
+       return res;
 }
 
 static inline int standard_header_field(const char *field, size_t len)
@@ -1517,7 +1548,7 @@ int commit_tree_extended(const char *msg, size_t msg_len,
        int encoding_is_utf8;
        struct strbuf buffer;
 
-       assert_sha1_type(tree->hash, OBJ_TREE);
+       assert_oid_type(tree, OBJ_TREE);
 
        if (memchr(msg, '\0', msg_len))
                return error("a NUL byte in commit log message not allowed.");
index 0fb8271665c6c98ccca803fbe002327bf38fcfb3..10e34e1a18b726b0168a9fe2e4937a23d6cefd11 100644 (file)
--- a/commit.h
+++ b/commit.h
@@ -9,6 +9,8 @@
 #include "string-list.h"
 #include "pretty.h"
 
+#define COMMIT_NOT_FROM_GRAPH 0xFFFFFFFF
+
 struct commit_list {
        struct commit *item;
        struct commit_list *next;
@@ -20,7 +22,14 @@ struct commit {
        unsigned int index;
        timestamp_t date;
        struct commit_list *parents;
-       struct tree *tree;
+
+       /*
+        * If the commit is loaded from the commit-graph file, then this
+        * member may be NULL. Only access it through get_commit_tree()
+        * or get_commit_tree_oid().
+        */
+       struct tree *maybe_tree;
+       uint32_t graph_pos;
 };
 
 extern int save_commit_buffer;
@@ -99,6 +108,9 @@ void unuse_commit_buffer(const struct commit *, const void *buffer);
  */
 void free_commit_buffer(struct commit *);
 
+struct tree *get_commit_tree(const struct commit *);
+struct object_id *get_commit_tree_oid(const struct commit *);
+
 /*
  * Disassociate any cached object buffer from the commit, but do not free it.
  * The buffer (or NULL, if none) is returned.
@@ -291,10 +303,10 @@ extern const char *find_commit_header(const char *msg, const char *key,
 /* Find the end of the log message, the right place for a new trailer. */
 extern int ignore_non_trailer(const char *buf, size_t len);
 
-typedef void (*each_mergetag_fn)(struct commit *commit, struct commit_extra_header *extra,
+typedef int (*each_mergetag_fn)(struct commit *commit, struct commit_extra_header *extra,
                                 void *cb_data);
 
-extern void for_each_mergetag(each_mergetag_fn fn, struct commit *commit, void *data);
+extern int for_each_mergetag(each_mergetag_fn fn, struct commit *commit, void *data);
 
 struct merge_remote_desc {
        struct object *obj; /* the named object, could be a tag */
index 6a689007e7ce3fe08f148e8b82c0a1c618c513a5..3728f66b4cce80d298aab0e551a2d3c03e2c4357 100644 (file)
@@ -1,5 +1,5 @@
 #include "cache.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "attr.h"
 
 /*
@@ -32,11 +32,13 @@ int main(int argc, const char **argv)
         */
        sanitize_stdfds();
 
+       git_resolve_executable_dir(argv[0]);
+
        git_setup_gettext();
 
-       attr_start();
+       initialize_the_repository();
 
-       git_extract_argv0_path(argv[0]);
+       attr_start();
 
        restore_sigpipe_to_default();
 
index a67872babf332b7d8177e8477c2ee595d8cbbd3f..6ded1c859f1b5ae1ffe035ac228c0f8a5298097a 100644 (file)
@@ -2221,7 +2221,7 @@ void mingw_startup(void)
                die_startup();
 
        /* determine size of argv and environ conversion buffer */
-       maxlen = wcslen(_wpgmptr);
+       maxlen = wcslen(wargv[0]);
        for (i = 1; i < argc; i++)
                maxlen = max(maxlen, wcslen(wargv[i]));
        for (i = 0; wenv[i]; i++)
@@ -2241,8 +2241,7 @@ void mingw_startup(void)
        buffer = malloc_startup(maxlen);
 
        /* convert command line arguments and environment to UTF-8 */
-       __argv[0] = wcstoutfdup_startup(buffer, _wpgmptr, maxlen);
-       for (i = 1; i < argc; i++)
+       for (i = 0; i < argc; i++)
                __argv[i] = wcstoutfdup_startup(buffer, wargv[i], maxlen);
        for (i = 0; wenv[i]; i++)
                environ[i] = wcstoutfdup_startup(buffer, wenv[i], maxlen);
index b0c20e6cb8ab1f5dc3cd3b4573b7e9d6dbf8cd2b..6f8f1d8c1130f89ccf913cc7f7cafac3dd39e123 100644 (file)
--- a/config.c
+++ b/config.c
@@ -9,13 +9,14 @@
 #include "config.h"
 #include "repository.h"
 #include "lockfile.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "strbuf.h"
 #include "quote.h"
 #include "hashmap.h"
 #include "string-list.h"
 #include "utf8.h"
 #include "dir.h"
+#include "color.h"
 
 struct config_source {
        struct config_source *prev;
@@ -653,7 +654,45 @@ static int get_base_var(struct strbuf *name)
        }
 }
 
-static int git_parse_source(config_fn_t fn, void *data)
+struct parse_event_data {
+       enum config_event_t previous_type;
+       size_t previous_offset;
+       const struct config_options *opts;
+};
+
+static int do_event(enum config_event_t type, struct parse_event_data *data)
+{
+       size_t offset;
+
+       if (!data->opts || !data->opts->event_fn)
+               return 0;
+
+       if (type == CONFIG_EVENT_WHITESPACE &&
+           data->previous_type == type)
+               return 0;
+
+       offset = cf->do_ftell(cf);
+       /*
+        * At EOF, the parser always "inserts" an extra '\n', so the end
+        * offset of the event is the current file position; otherwise we
+        * have already advanced past the start of the next event.
+        */
+       if (type != CONFIG_EVENT_EOF)
+               offset--;
+
+       if (data->previous_type != CONFIG_EVENT_EOF &&
+           data->opts->event_fn(data->previous_type, data->previous_offset,
+                                offset, data->opts->event_fn_data) < 0)
+               return -1;
+
+       data->previous_type = type;
+       data->previous_offset = offset;
+
+       return 0;
+}
+
+static int git_parse_source(config_fn_t fn, void *data,
+                           const struct config_options *opts)
 {
        int comment = 0;
        int baselen = 0;
@@ -664,8 +703,15 @@ static int git_parse_source(config_fn_t fn, void *data)
        /* U+FEFF Byte Order Mark in UTF8 */
        const char *bomptr = utf8_bom;
 
+       /* For the parser event callback */
+       struct parse_event_data event_data = {
+               CONFIG_EVENT_EOF, 0, opts
+       };
+
        for (;;) {
-               int c = get_next_char();
+               int c;
+
+               c = get_next_char();
                if (bomptr && *bomptr) {
                        /* We are at the file beginning; skip UTF8-encoded BOM
                         * if present. Sane editors won't put this in on their
@@ -682,18 +728,33 @@ static int git_parse_source(config_fn_t fn, void *data)
                        }
                }
                if (c == '\n') {
-                       if (cf->eof)
+                       if (cf->eof) {
+                               if (do_event(CONFIG_EVENT_EOF, &event_data) < 0)
+                                       return -1;
                                return 0;
+                       }
+                       if (do_event(CONFIG_EVENT_WHITESPACE, &event_data) < 0)
+                               return -1;
                        comment = 0;
                        continue;
                }
-               if (comment || isspace(c))
+               if (comment)
                        continue;
+               if (isspace(c)) {
+                       if (do_event(CONFIG_EVENT_WHITESPACE, &event_data) < 0)
+                                       return -1;
+                       continue;
+               }
                if (c == '#' || c == ';') {
+                       if (do_event(CONFIG_EVENT_COMMENT, &event_data) < 0)
+                                       return -1;
                        comment = 1;
                        continue;
                }
                if (c == '[') {
+                       if (do_event(CONFIG_EVENT_SECTION, &event_data) < 0)
+                                       return -1;
+
                        /* Reset prior to determining a new stem */
                        strbuf_reset(var);
                        if (get_base_var(var) < 0 || var->len < 1)
@@ -704,6 +765,10 @@ static int git_parse_source(config_fn_t fn, void *data)
                }
                if (!isalpha(c))
                        break;
+
+               if (do_event(CONFIG_EVENT_ENTRY, &event_data) < 0)
+                       return -1;
+
                /*
                 * Truncate the var name back to the section header
                 * stem prior to grabbing the suffix part of the name
@@ -715,6 +780,9 @@ static int git_parse_source(config_fn_t fn, void *data)
                        break;
        }
 
+       if (do_event(CONFIG_EVENT_ERROR, &event_data) < 0)
+               return -1;
+
        switch (cf->origin_type) {
        case CONFIG_ORIGIN_BLOB:
                error_msg = xstrfmt(_("bad config line %d in blob %s"),
@@ -1000,6 +1068,15 @@ int git_config_expiry_date(timestamp_t *timestamp, const char *var, const char *
        return 0;
 }
 
+int git_config_color(char *dest, const char *var, const char *value)
+{
+       if (!value)
+               return config_error_nonbool(var);
+       if (color_parse(value, dest) < 0)
+               return -1;
+       return 0;
+}
+
 static int git_default_core_config(const char *var, const char *value)
 {
        /* This needs a better name */
@@ -1172,6 +1249,11 @@ static int git_default_core_config(const char *var, const char *value)
                return 0;
        }
 
+       if (!strcmp(var, "core.checkroundtripencoding")) {
+               check_roundtrip_encoding = xstrdup(value);
+               return 0;
+       }
+
        if (!strcmp(var, "core.notesref")) {
                notes_ref_name = xstrdup(value);
                return 0;
@@ -1226,6 +1308,11 @@ static int git_default_core_config(const char *var, const char *value)
                return 0;
        }
 
+       if (!strcmp(var, "core.commitgraph")) {
+               core_commit_graph = git_config_bool(var, value);
+               return 0;
+       }
+
        if (!strcmp(var, "core.sparsecheckout")) {
                core_apply_sparse_checkout = git_config_bool(var, value);
                return 0;
@@ -1365,7 +1452,7 @@ int git_default_config(const char *var, const char *value, void *dummy)
        if (starts_with(var, "mailmap."))
                return git_default_mailmap_config(var, value);
 
-       if (starts_with(var, "advice."))
+       if (starts_with(var, "advice.") || starts_with(var, "color.advice"))
                return git_default_advice_config(var, value);
 
        if (!strcmp(var, "pager.color") || !strcmp(var, "color.pager")) {
@@ -1398,7 +1485,8 @@ int git_default_config(const char *var, const char *value, void *dummy)
  * fgetc, ungetc, ftell of top need to be initialized before calling
  * this function.
  */
-static int do_config_from(struct config_source *top, config_fn_t fn, void *data)
+static int do_config_from(struct config_source *top, config_fn_t fn, void *data,
+                         const struct config_options *opts)
 {
        int ret;
 
@@ -1410,7 +1498,7 @@ static int do_config_from(struct config_source *top, config_fn_t fn, void *data)
        strbuf_init(&top->var, 1024);
        cf = top;
 
-       ret = git_parse_source(fn, data);
+       ret = git_parse_source(fn, data, opts);
 
        /* pop config-file parsing state stack */
        strbuf_release(&top->value);
@@ -1423,9 +1511,10 @@ static int do_config_from(struct config_source *top, config_fn_t fn, void *data)
 static int do_config_from_file(config_fn_t fn,
                const enum config_origin_type origin_type,
                const char *name, const char *path, FILE *f,
-               void *data)
+               void *data, const struct config_options *opts)
 {
        struct config_source top;
+       int ret;
 
        top.u.file = f;
        top.origin_type = origin_type;
@@ -1436,29 +1525,39 @@ static int do_config_from_file(config_fn_t fn,
        top.do_ungetc = config_file_ungetc;
        top.do_ftell = config_file_ftell;
 
-       return do_config_from(&top, fn, data);
+       flockfile(f);
+       ret = do_config_from(&top, fn, data, opts);
+       funlockfile(f);
+       return ret;
 }
 
 static int git_config_from_stdin(config_fn_t fn, void *data)
 {
-       return do_config_from_file(fn, CONFIG_ORIGIN_STDIN, "", NULL, stdin, data);
+       return do_config_from_file(fn, CONFIG_ORIGIN_STDIN, "", NULL, stdin,
+                                  data, NULL);
 }
 
-int git_config_from_file(config_fn_t fn, const char *filename, void *data)
+int git_config_from_file_with_options(config_fn_t fn, const char *filename,
+                                     void *data,
+                                     const struct config_options *opts)
 {
        int ret = -1;
        FILE *f;
 
        f = fopen_or_warn(filename, "r");
        if (f) {
-               flockfile(f);
-               ret = do_config_from_file(fn, CONFIG_ORIGIN_FILE, filename, filename, f, data);
-               funlockfile(f);
+               ret = do_config_from_file(fn, CONFIG_ORIGIN_FILE, filename,
+                                         filename, f, data, opts);
                fclose(f);
        }
        return ret;
 }
 
+int git_config_from_file(config_fn_t fn, const char *filename, void *data)
+{
+       return git_config_from_file_with_options(fn, filename, data, NULL);
+}
+
 int git_config_from_mem(config_fn_t fn, const enum config_origin_type origin_type,
                        const char *name, const char *buf, size_t len, void *data)
 {
@@ -1475,7 +1574,7 @@ int git_config_from_mem(config_fn_t fn, const enum config_origin_type origin_typ
        top.do_ungetc = config_buf_ungetc;
        top.do_ftell = config_buf_ftell;
 
-       return do_config_from(&top, fn, data);
+       return do_config_from(&top, fn, data, NULL);
 }
 
 int git_config_from_blob_oid(config_fn_t fn,
@@ -1488,7 +1587,7 @@ int git_config_from_blob_oid(config_fn_t fn,
        unsigned long size;
        int ret;
 
-       buf = read_sha1_file(oid->hash, &type, &size);
+       buf = read_object_file(oid, &type, &size);
        if (!buf)
                return error("unable to load config blob object '%s'", name);
        if (type != OBJ_BLOB) {
@@ -2219,96 +2318,98 @@ void git_die_config(const char *key, const char *err, ...)
  * Find all the stuff for git_config_set() below.
  */
 
-static struct {
+struct config_store_data {
        int baselen;
        char *key;
        int do_not_match;
        regex_t *value_regex;
        int multi_replace;
-       size_t *offset;
-       unsigned int offset_alloc;
-       enum { START, SECTION_SEEN, SECTION_END_SEEN, KEY_SEEN } state;
-       unsigned int seen;
-} store;
+       struct {
+               size_t begin, end;
+               enum config_event_t type;
+               int is_keys_section;
+       } *parsed;
+       unsigned int parsed_nr, parsed_alloc, *seen, seen_nr, seen_alloc;
+       unsigned int key_seen:1, section_seen:1, is_keys_section:1;
+};
 
-static int matches(const char *key, const char *value)
+static int matches(const char *key, const char *value,
+                  const struct config_store_data *store)
 {
-       if (strcmp(key, store.key))
+       if (strcmp(key, store->key))
                return 0; /* not ours */
-       if (!store.value_regex)
+       if (!store->value_regex)
                return 1; /* always matches */
-       if (store.value_regex == CONFIG_REGEX_NONE)
+       if (store->value_regex == CONFIG_REGEX_NONE)
                return 0; /* never matches */
 
-       return store.do_not_match ^
-               (value && !regexec(store.value_regex, value, 0, NULL, 0));
+       return store->do_not_match ^
+               (value && !regexec(store->value_regex, value, 0, NULL, 0));
+}
+
+static int store_aux_event(enum config_event_t type,
+                          size_t begin, size_t end, void *data)
+{
+       struct config_store_data *store = data;
+
+       ALLOC_GROW(store->parsed, store->parsed_nr + 1, store->parsed_alloc);
+       store->parsed[store->parsed_nr].begin = begin;
+       store->parsed[store->parsed_nr].end = end;
+       store->parsed[store->parsed_nr].type = type;
+
+       if (type == CONFIG_EVENT_SECTION) {
+               if (cf->var.len < 2 || cf->var.buf[cf->var.len - 1] != '.')
+                       BUG("Invalid section name '%s'", cf->var.buf);
+
+               /* Is this the section we were looking for? */
+               store->is_keys_section =
+                       store->parsed[store->parsed_nr].is_keys_section =
+                       cf->var.len - 1 == store->baselen &&
+                       !strncasecmp(cf->var.buf, store->key, store->baselen);
+               if (store->is_keys_section) {
+                       store->section_seen = 1;
+                       ALLOC_GROW(store->seen, store->seen_nr + 1,
+                                  store->seen_alloc);
+                       store->seen[store->seen_nr] = store->parsed_nr;
+               }
+       }
+
+       store->parsed_nr++;
+
+       return 0;
 }
 
 static int store_aux(const char *key, const char *value, void *cb)
 {
-       const char *ep;
-       size_t section_len;
+       struct config_store_data *store = cb;
 
-       switch (store.state) {
-       case KEY_SEEN:
-               if (matches(key, value)) {
-                       if (store.seen == 1 && store.multi_replace == 0) {
+       if (store->key_seen) {
+               if (matches(key, value, store)) {
+                       if (store->seen_nr == 1 && store->multi_replace == 0) {
                                warning(_("%s has multiple values"), key);
                        }
 
-                       ALLOC_GROW(store.offset, store.seen + 1,
-                                  store.offset_alloc);
+                       ALLOC_GROW(store->seen, store->seen_nr + 1,
+                                  store->seen_alloc);
 
-                       store.offset[store.seen] = cf->do_ftell(cf);
-                       store.seen++;
+                       store->seen[store->seen_nr] = store->parsed_nr;
+                       store->seen_nr++;
                }
-               break;
-       case SECTION_SEEN:
+       } else if (store->is_keys_section) {
                /*
-                * What we are looking for is in store.key (both
-                * section and var), and its section part is baselen
-                * long.  We found key (again, both section and var).
-                * We would want to know if this key is in the same
-                * section as what we are looking for.  We already
-                * know we are in the same section as what should
-                * hold store.key.
+                * Do not increment matches yet: this may not be a match, but we
+                * are in the desired section.
                 */
-               ep = strrchr(key, '.');
-               section_len = ep - key;
-
-               if ((section_len != store.baselen) ||
-                   memcmp(key, store.key, section_len+1)) {
-                       store.state = SECTION_END_SEEN;
-                       break;
-               }
+               ALLOC_GROW(store->seen, store->seen_nr + 1, store->seen_alloc);
+               store->seen[store->seen_nr] = store->parsed_nr;
+               store->section_seen = 1;
 
-               /*
-                * Do not increment matches: this is no match, but we
-                * just made sure we are in the desired section.
-                */
-               ALLOC_GROW(store.offset, store.seen + 1,
-                          store.offset_alloc);
-               store.offset[store.seen] = cf->do_ftell(cf);
-               /* fallthru */
-       case SECTION_END_SEEN:
-       case START:
-               if (matches(key, value)) {
-                       ALLOC_GROW(store.offset, store.seen + 1,
-                                  store.offset_alloc);
-                       store.offset[store.seen] = cf->do_ftell(cf);
-                       store.state = KEY_SEEN;
-                       store.seen++;
-               } else {
-                       if (strrchr(key, '.') - key == store.baselen &&
-                             !strncmp(key, store.key, store.baselen)) {
-                                       store.state = SECTION_SEEN;
-                                       ALLOC_GROW(store.offset,
-                                                  store.seen + 1,
-                                                  store.offset_alloc);
-                                       store.offset[store.seen] = cf->do_ftell(cf);
-                       }
+               if (matches(key, value, store)) {
+                       store->seen_nr++;
+                       store->key_seen = 1;
                }
        }
+
        return 0;
 }
 
@@ -2320,31 +2421,33 @@ static int write_error(const char *filename)
        return 4;
 }
 
-static struct strbuf store_create_section(const char *key)
+static struct strbuf store_create_section(const char *key,
+                                         const struct config_store_data *store)
 {
        const char *dot;
        int i;
        struct strbuf sb = STRBUF_INIT;
 
-       dot = memchr(key, '.', store.baselen);
+       dot = memchr(key, '.', store->baselen);
        if (dot) {
                strbuf_addf(&sb, "[%.*s \"", (int)(dot - key), key);
-               for (i = dot - key + 1; i < store.baselen; i++) {
+               for (i = dot - key + 1; i < store->baselen; i++) {
                        if (key[i] == '"' || key[i] == '\\')
                                strbuf_addch(&sb, '\\');
                        strbuf_addch(&sb, key[i]);
                }
                strbuf_addstr(&sb, "\"]\n");
        } else {
-               strbuf_addf(&sb, "[%.*s]\n", store.baselen, key);
+               strbuf_addf(&sb, "[%.*s]\n", store->baselen, key);
        }
 
        return sb;
 }
 
-static ssize_t write_section(int fd, const char *key)
+static ssize_t write_section(int fd, const char *key,
+                            const struct config_store_data *store)
 {
-       struct strbuf sb = store_create_section(key);
+       struct strbuf sb = store_create_section(key, store);
        ssize_t ret;
 
        ret = write_in_full(fd, sb.buf, sb.len);
@@ -2353,11 +2456,12 @@ static ssize_t write_section(int fd, const char *key)
        return ret;
 }
 
-static ssize_t write_pair(int fd, const char *key, const char *value)
+static ssize_t write_pair(int fd, const char *key, const char *value,
+                         const struct config_store_data *store)
 {
        int i;
        ssize_t ret;
-       int length = strlen(key + store.baselen + 1);
+       int length = strlen(key + store->baselen + 1);
        const char *quote = "";
        struct strbuf sb = STRBUF_INIT;
 
@@ -2377,7 +2481,7 @@ static ssize_t write_pair(int fd, const char *key, const char *value)
                quote = "\"";
 
        strbuf_addf(&sb, "\t%.*s = %s",
-                   length, key + store.baselen + 1, quote);
+                   length, key + store->baselen + 1, quote);
 
        for (i = 0; value[i]; i++)
                switch (value[i]) {
@@ -2403,30 +2507,85 @@ static ssize_t write_pair(int fd, const char *key, const char *value)
        return ret;
 }
 
-static ssize_t find_beginning_of_line(const char *contents, size_t size,
-       size_t offset_, int *found_bracket)
+/*
+ * If we are about to unset the last key(s) in a section, and if there are
+ * no comments surrounding (or included in) the section, we will want to
+ * extend begin/end to remove the entire section.
+ *
+ * Note: the parameter `seen_ptr` points to the index into the store.seen
+ * array. This index may be incremented if a section has more than one
+ * entry (all of which are to be removed).
+ */
+static void maybe_remove_section(struct config_store_data *store,
+                                const char *contents,
+                                size_t *begin_offset, size_t *end_offset,
+                                int *seen_ptr)
 {
-       size_t equal_offset = size, bracket_offset = size;
-       ssize_t offset;
+       size_t begin;
+       int i, seen, section_seen = 0;
 
-contline:
-       for (offset = offset_-2; offset > 0
-                       && contents[offset] != '\n'; offset--)
-               switch (contents[offset]) {
-                       case '=': equal_offset = offset; break;
-                       case ']': bracket_offset = offset; break;
+       /*
+        * First, ensure that this is the first key, and that there are no
+        * comments before the entry nor before the section header.
+        * comments before the entry or before the section header.
+       seen = *seen_ptr;
+       for (i = store->seen[seen]; i > 0; i--) {
+               enum config_event_t type = store->parsed[i - 1].type;
+
+               if (type == CONFIG_EVENT_COMMENT)
+                       /* There is a comment before this entry or section */
+                       return;
+               if (type == CONFIG_EVENT_ENTRY) {
+                       if (!section_seen)
+                               /* This is not the section's first entry. */
+                               return;
+                       /* We encountered no comment before the section. */
+                       break;
+               }
+               if (type == CONFIG_EVENT_SECTION) {
+                       if (!store->parsed[i - 1].is_keys_section)
+                               break;
+                       section_seen = 1;
                }
-       if (offset > 0 && contents[offset-1] == '\\') {
-               offset_ = offset;
-               goto contline;
        }
-       if (bracket_offset < equal_offset) {
-               *found_bracket = 1;
-               offset = bracket_offset+1;
-       } else
-               offset++;
+       begin = store->parsed[i].begin;
 
-       return offset;
+       /*
+        * Next, make sure that we are removing the last key(s) in the section,
+        * and that there are no comments that are possibly about the current
+        * section.
+        */
+       for (i = store->seen[seen] + 1; i < store->parsed_nr; i++) {
+               enum config_event_t type = store->parsed[i].type;
+
+               if (type == CONFIG_EVENT_COMMENT)
+                       return;
+               if (type == CONFIG_EVENT_SECTION) {
+                       if (store->parsed[i].is_keys_section)
+                               continue;
+                       break;
+               }
+               if (type == CONFIG_EVENT_ENTRY) {
+                       if (++seen < store->seen_nr &&
+                           i == store->seen[seen])
+                               /* We want to remove this entry, too */
+                               continue;
+                       /* There is another entry in this section. */
+                       return;
+               }
+       }
+
+       /*
+        * We are really removing the last entry/entries from this section, and
+        * there are no enclosed or surrounding comments. Remove the entire,
+        * now-empty section.
+        */
+       *seen_ptr = seen;
+       *begin_offset = begin;
+       if (i < store->parsed_nr)
+               *end_offset = store->parsed[i].begin;
+       else
+               *end_offset = store->parsed[store->parsed_nr - 1].end;
 }
 
 int git_config_set_in_file_gently(const char *config_filename,
@@ -2487,6 +2646,9 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
        char *filename_buf = NULL;
        char *contents = NULL;
        size_t contents_sz;
+       struct config_store_data store;
+
+       memset(&store, 0, sizeof(store));
 
        /* parse-key returns negative; flip the sign to feed exit(3) */
        ret = 0 - git_config_parse_key(key, &store.key, &store.baselen);
@@ -2529,13 +2691,14 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
                }
 
                store.key = (char *)key;
-               if (write_section(fd, key) < 0 ||
-                   write_pair(fd, key, value) < 0)
+               if (write_section(fd, key, &store) < 0 ||
+                   write_pair(fd, key, value, &store) < 0)
                        goto write_err_out;
        } else {
                struct stat st;
                size_t copy_begin, copy_end;
                int i, new_line = 0;
+               struct config_options opts;
 
                if (value_regex == NULL)
                        store.value_regex = NULL;
@@ -2558,18 +2721,24 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
                        }
                }
 
-               ALLOC_GROW(store.offset, 1, store.offset_alloc);
-               store.offset[0] = 0;
-               store.state = START;
-               store.seen = 0;
+               ALLOC_GROW(store.parsed, 1, store.parsed_alloc);
+               store.parsed[0].end = 0;
+
+               memset(&opts, 0, sizeof(opts));
+               opts.event_fn = store_aux_event;
+               opts.event_fn_data = &store;
 
                /*
-                * After this, store.offset will contain the *end* offset
-                * of the last match, or remain at 0 if no match was found.
+                * After this, store.parsed will contain offsets of all the
+                * parsed elements, and store.seen will contain a list of
+                * matches, as indices into store.parsed.
+                *
                 * As a side effect, we make sure to transform only a valid
                 * existing config file.
                 */
-               if (git_config_from_file(store_aux, config_filename, NULL)) {
+               if (git_config_from_file_with_options(store_aux,
+                                                     config_filename,
+                                                     &store, &opts)) {
                        error("invalid config file %s", config_filename);
                        free(store.key);
                        if (store.value_regex != NULL &&
@@ -2589,8 +2758,8 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
                }
 
                /* if nothing to unset, or too many matches, error out */
-               if ((store.seen == 0 && value == NULL) ||
-                               (store.seen > 1 && multi_replace == 0)) {
+               if ((store.seen_nr == 0 && value == NULL) ||
+                   (store.seen_nr > 1 && multi_replace == 0)) {
                        ret = CONFIG_NOTHING_SET;
                        goto out_free;
                }
@@ -2621,18 +2790,49 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
                        goto out_free;
                }
 
-               if (store.seen == 0)
-                       store.seen = 1;
+               if (store.seen_nr == 0) {
+                       if (!store.seen_alloc) {
+                               /* Did not see key nor section */
+                               ALLOC_GROW(store.seen, 1, store.seen_alloc);
+                               store.seen[0] = store.parsed_nr
+                                       - !!store.parsed_nr;
+                       }
+                       store.seen_nr = 1;
+               }
 
-               for (i = 0, copy_begin = 0; i < store.seen; i++) {
-                       if (store.offset[i] == 0) {
-                               store.offset[i] = copy_end = contents_sz;
-                       } else if (store.state != KEY_SEEN) {
-                               copy_end = store.offset[i];
-                       } else
-                               copy_end = find_beginning_of_line(
-                                       contents, contents_sz,
-                                       store.offset[i]-2, &new_line);
+               for (i = 0, copy_begin = 0; i < store.seen_nr; i++) {
+                       size_t replace_end;
+                       int j = store.seen[i];
+
+                       new_line = 0;
+                       if (!store.key_seen) {
+                               copy_end = store.parsed[j].end;
+                               /* include '\n' when copying section header */
+                               if (copy_end > 0 && copy_end < contents_sz &&
+                                   contents[copy_end - 1] != '\n' &&
+                                   contents[copy_end] == '\n')
+                                       copy_end++;
+                               replace_end = copy_end;
+                       } else {
+                               replace_end = store.parsed[j].end;
+                               copy_end = store.parsed[j].begin;
+                               if (!value)
+                                       maybe_remove_section(&store, contents,
+                                                            &copy_end,
+                                                            &replace_end, &i);
+                               /*
+                                * Swallow preceding white-space on the same
+                                * line.
+                                */
+                               while (copy_end > 0 ) {
+                                       char c = contents[copy_end - 1];
+
+                                       if (isspace(c) && c != '\n')
+                                               copy_end--;
+                                       else
+                                               break;
+                               }
+                       }
 
                        if (copy_end > 0 && contents[copy_end-1] != '\n')
                                new_line = 1;
@@ -2646,16 +2846,16 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
                                    write_str_in_full(fd, "\n") < 0)
                                        goto write_err_out;
                        }
-                       copy_begin = store.offset[i];
+                       copy_begin = replace_end;
                }
 
                /* write the pair (value == NULL means unset) */
                if (value != NULL) {
-                       if (store.state == START) {
-                               if (write_section(fd, key) < 0)
+                       if (!store.section_seen) {
+                               if (write_section(fd, key, &store) < 0)
                                        goto write_err_out;
                        }
-                       if (write_pair(fd, key, value) < 0)
+                       if (write_pair(fd, key, value, &store) < 0)
                                goto write_err_out;
                }
 
@@ -2779,7 +2979,8 @@ static int section_name_is_ok(const char *name)
 
 /* if new_name == NULL, the section is removed instead */
 static int git_config_copy_or_rename_section_in_file(const char *config_filename,
-                                     const char *old_name, const char *new_name, int copy)
+                                     const char *old_name,
+                                     const char *new_name, int copy)
 {
        int ret = 0, remove = 0;
        char *filename_buf = NULL;
@@ -2789,6 +2990,9 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
        FILE *config_file = NULL;
        struct stat st;
        struct strbuf copystr = STRBUF_INIT;
+       struct config_store_data store;
+
+       memset(&store, 0, sizeof(store));
 
        if (new_name && !section_name_is_ok(new_name)) {
                ret = error("invalid section name: %s", new_name);
@@ -2858,7 +3062,7 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
                                }
                                store.baselen = strlen(new_name);
                                if (!copy) {
-                                       if (write_section(out_fd, new_name) < 0) {
+                                       if (write_section(out_fd, new_name, &store) < 0) {
                                                ret = write_error(get_lock_file_path(&lock));
                                                goto out;
                                        }
@@ -2879,7 +3083,7 @@ static int git_config_copy_or_rename_section_in_file(const char *config_filename
                                                output[0] = '\t';
                                        }
                                } else {
-                                       copystr = store_create_section(new_name);
+                                       copystr = store_create_section(new_name, &store);
                                }
                        }
                        remove = 0;
index ef70a9cac1e6dc67df24d157a4e5de38edd8c984..cdac2fc73e6a2d0bc3230848425557a23e88d0bf 100644 (file)
--- a/config.h
+++ b/config.h
@@ -28,15 +28,40 @@ enum config_origin_type {
        CONFIG_ORIGIN_CMDLINE
 };
 
+enum config_event_t {
+       CONFIG_EVENT_SECTION,
+       CONFIG_EVENT_ENTRY,
+       CONFIG_EVENT_WHITESPACE,
+       CONFIG_EVENT_COMMENT,
+       CONFIG_EVENT_EOF,
+       CONFIG_EVENT_ERROR
+};
+
+/*
+ * The parser event function (if not NULL) is called with the event type and
+ * the begin/end offsets of the parsed elements.
+ *
+ * Note: for CONFIG_EVENT_ENTRY (i.e. config variables), the trailing newline
+ * character is considered part of the element.
+ */
+typedef int (*config_parser_event_fn_t)(enum config_event_t type,
+                                       size_t begin_offset, size_t end_offset,
+                                       void *event_fn_data);
+
 struct config_options {
        unsigned int respect_includes : 1;
        const char *commondir;
        const char *git_dir;
+       config_parser_event_fn_t event_fn;
+       void *event_fn_data;
 };
 
 typedef int (*config_fn_t)(const char *, const char *, void *);
 extern int git_default_config(const char *, const char *, void *);
 extern int git_config_from_file(config_fn_t fn, const char *, void *);
+extern int git_config_from_file_with_options(config_fn_t fn, const char *,
+                                            void *,
+                                            const struct config_options *);
 extern int git_config_from_mem(config_fn_t fn, const enum config_origin_type,
                                        const char *name, const char *buf, size_t len, void *data);
 extern int git_config_from_blob_oid(config_fn_t fn, const char *name,
@@ -59,6 +84,7 @@ extern int git_config_bool(const char *, const char *);
 extern int git_config_string(const char **, const char *, const char *);
 extern int git_config_pathname(const char **, const char *, const char *);
 extern int git_config_expiry_date(timestamp_t *, const char *, const char *);
+extern int git_config_color(char *, const char *, const char *);
 extern int git_config_set_in_file_gently(const char *, const char *, const char *);
 extern void git_config_set_in_file(const char *, const char *, const char *);
 extern int git_config_set_gently(const char *, const char *);
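
The event_fn/event_fn_data hooks added to struct config_options above let a caller observe raw parser events alongside the usual key/value callback. A minimal sketch of how such a caller might count comment lines, assuming a zero return from the event callback lets parsing continue (the counting helpers are hypothetical, not part of this patch):

	#include "cache.h"
	#include "config.h"

	/* Hypothetical event callback: tally comment lines seen by the parser. */
	static int count_comments(enum config_event_t type,
				  size_t begin_offset, size_t end_offset,
				  void *event_fn_data)
	{
		int *n = event_fn_data;
		if (type == CONFIG_EVENT_COMMENT)
			(*n)++;
		return 0;	/* assumed: zero lets the parser keep going */
	}

	static int ignore_entry(const char *key, const char *value, void *data)
	{
		return 0;	/* only the parser events matter here */
	}

	static int comment_lines_in(const char *path)
	{
		int n = 0;
		struct config_options opts;

		memset(&opts, 0, sizeof(opts));
		opts.event_fn = count_comments;
		opts.event_fn_data = &n;
		git_config_from_file_with_options(ignore_entry, path, NULL, &opts);
		return n;
	}
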
diff --git a/config.mak.dev b/config.mak.dev
new file mode 100644 (file)
index 0000000..2d244ca
--- /dev/null
@@ -0,0 +1,42 @@
+ifeq ($(filter no-error,$(DEVOPTS)),)
+CFLAGS += -Werror
+endif
+CFLAGS += -Wdeclaration-after-statement
+CFLAGS += -Wno-format-zero-length
+CFLAGS += -Wold-style-definition
+CFLAGS += -Woverflow
+CFLAGS += -Wpointer-arith
+CFLAGS += -Wstrict-prototypes
+CFLAGS += -Wunused
+CFLAGS += -Wvla
+
+ifndef COMPILER_FEATURES
+COMPILER_FEATURES := $(shell ./detect-compiler $(CC))
+endif
+
+ifneq ($(filter clang4,$(COMPILER_FEATURES)),)
+CFLAGS += -Wtautological-constant-out-of-range-compare
+endif
+
+ifneq ($(or $(filter gcc6,$(COMPILER_FEATURES)),$(filter clang4,$(COMPILER_FEATURES))),)
+CFLAGS += -Wextra
+# if a function is public, there should be a prototype and the right
+# header file should be included. If not, it should be static.
+CFLAGS += -Wmissing-prototypes
+ifeq ($(filter extra-all,$(DEVOPTS)),)
+# These are disabled because we have these all over the place.
+CFLAGS += -Wno-empty-body
+CFLAGS += -Wno-missing-field-initializers
+CFLAGS += -Wno-sign-compare
+CFLAGS += -Wno-unused-function
+CFLAGS += -Wno-unused-parameter
+endif
+endif
+
+# uninitialized warnings on gcc 4.9.2 in xdiff/xdiffi.c and config.c
+# not worth fixing since newer compilers correctly stop complaining
+ifneq ($(filter gcc4,$(COMPILER_FEATURES)),)
+ifeq ($(filter gcc5,$(COMPILER_FEATURES)),)
+CFLAGS += -Wno-uninitialized
+endif
+endif
index 6a1d0de0cc571f395eeeb8f149d4377a1a5e1602..684fc5bf02677bbaddd214f78b14fa55df7025c2 100644 (file)
@@ -37,6 +37,8 @@ ifeq ($(uname_S),Linux)
        HAVE_GETDELIM = YesPlease
        SANE_TEXT_GREP=-a
        FREAD_READS_DIRECTORIES = UnfortunatelyYes
+       BASIC_CFLAGS += -DHAVE_SYSINFO
+       PROCFS_EXECUTABLE_PATH = /proc/self/exe
 endif
 ifeq ($(uname_S),GNU/kFreeBSD)
        HAVE_ALLOCA_H = YesPlease
@@ -111,6 +113,7 @@ ifeq ($(uname_S),Darwin)
        BASIC_CFLAGS += -DPROTECT_HFS_DEFAULT=1
        HAVE_BSD_SYSCTL = YesPlease
        FREAD_READS_DIRECTORIES = UnfortunatelyYes
+       HAVE_NS_GET_EXECUTABLE_PATH = YesPlease
 endif
 ifeq ($(uname_S),SunOS)
        NEEDS_SOCKET = YesPlease
@@ -205,6 +208,7 @@ ifeq ($(uname_S),FreeBSD)
        HAVE_PATHS_H = YesPlease
        GMTIME_UNRELIABLE_ERRORS = UnfortunatelyYes
        HAVE_BSD_SYSCTL = YesPlease
+       HAVE_BSD_KERN_PROC_SYSCTL = YesPlease
        PAGER_ENV = LESS=FRX LV=-c MORE=FRX
        FREAD_READS_DIRECTORIES = UnfortunatelyYes
 endif
@@ -217,6 +221,8 @@ ifeq ($(uname_S),OpenBSD)
        BASIC_LDFLAGS += -L/usr/local/lib
        HAVE_PATHS_H = YesPlease
        HAVE_BSD_SYSCTL = YesPlease
+       HAVE_BSD_KERN_PROC_SYSCTL = YesPlease
+       PROCFS_EXECUTABLE_PATH = /proc/curproc/file
 endif
 ifeq ($(uname_S),MirBSD)
        NO_STRCASESTR = YesPlease
@@ -235,6 +241,8 @@ ifeq ($(uname_S),NetBSD)
        USE_ST_TIMESPEC = YesPlease
        HAVE_PATHS_H = YesPlease
        HAVE_BSD_SYSCTL = YesPlease
+       HAVE_BSD_KERN_PROC_SYSCTL = YesPlease
+       PROCFS_EXECUTABLE_PATH = /proc/curproc/exe
 endif
 ifeq ($(uname_S),AIX)
        DEFAULT_PAGER = more
@@ -350,6 +358,7 @@ ifeq ($(uname_S),Windows)
        SNPRINTF_RETURNS_BOGUS = YesPlease
        NO_SVN_TESTS = YesPlease
        RUNTIME_PREFIX = YesPlease
+       HAVE_WPGMPTR = YesWeDo
        NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
        NO_NSEC = YesPlease
        USE_WIN32_MMAP = YesPlease
@@ -499,6 +508,7 @@ ifneq (,$(findstring MINGW,$(uname_S)))
        NO_SVN_TESTS = YesPlease
        NO_PERL_MAKEMAKER = YesPlease
        RUNTIME_PREFIX = YesPlease
+       HAVE_WPGMPTR = YesWeDo
        NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
        NO_NSEC = YesPlease
        USE_WIN32_MMAP = YesPlease
index 7f8415140f309e0522310fac3160b5a01cefc7cd..e11b7976ab1c93d8ccec2e499d0093db42551059 100644 (file)
@@ -254,25 +254,25 @@ GIT_PARSE_WITH([openssl]))
 # Perl-compatible regular expressions instead of standard or extended
 # POSIX regular expressions.
 #
-# Currently USE_LIBPCRE is a synonym for USE_LIBPCRE1, define
-# USE_LIBPCRE2 instead if you'd like to use version 2 of the PCRE
-# library. The USE_LIBPCRE flag will likely be changed to mean v2 by
-# default in future releases.
+# USE_LIBPCRE is a synonym for USE_LIBPCRE2, define USE_LIBPCRE1
+# instead if you'd like to use the legacy version 1 of the PCRE
+# library. Support for version 1 will likely be removed in some future
+# release of Git, as upstream has all but abandoned it.
 #
 # Define LIBPCREDIR=/foo/bar if your PCRE header and library files are in
 # /foo/bar/include and /foo/bar/lib directories.
 #
 AC_ARG_WITH(libpcre,
-AS_HELP_STRING([--with-libpcre],[synonym for --with-libpcre1]),
+AS_HELP_STRING([--with-libpcre],[synonym for --with-libpcre2]),
     if test "$withval" = "no"; then
-       USE_LIBPCRE1=
+       USE_LIBPCRE2=
     elif test "$withval" = "yes"; then
-       USE_LIBPCRE1=YesPlease
+       USE_LIBPCRE2=YesPlease
     else
-       USE_LIBPCRE1=YesPlease
+       USE_LIBPCRE2=YesPlease
        LIBPCREDIR=$withval
        AC_MSG_NOTICE([Setting LIBPCREDIR to $LIBPCREDIR])
-        dnl USE_LIBPCRE1 can still be modified below, so don't substitute
+        dnl USE_LIBPCRE2 can still be modified below, so don't substitute
         dnl it yet.
        GIT_CONF_SUBST([LIBPCREDIR])
     fi)
@@ -296,6 +296,10 @@ AS_HELP_STRING([],           [ARG can be also prefix for libpcre library and hea
 AC_ARG_WITH(libpcre2,
 AS_HELP_STRING([--with-libpcre2],[support Perl-compatible regexes via libpcre2 (default is NO)])
 AS_HELP_STRING([],           [ARG can be also prefix for libpcre library and headers]),
+    if test -n "$USE_LIBPCRE2"; then
+        AC_MSG_ERROR([Only supply one of --with-libpcre or its synonym --with-libpcre2!])
+    fi
+
     if test -n "$USE_LIBPCRE1"; then
         AC_MSG_ERROR([Only supply one of --with-libpcre1 or --with-libpcre2!])
     fi
@@ -549,8 +553,8 @@ if test -n "$USE_LIBPCRE1"; then
 GIT_STASH_FLAGS($LIBPCREDIR)
 
 AC_CHECK_LIB([pcre], [pcre_version],
-[USE_LIBPCRE=YesPlease],
-[USE_LIBPCRE=])
+[USE_LIBPCRE1=YesPlease],
+[USE_LIBPCRE1=])
 
 GIT_UNSTASH_FLAGS($LIBPCREDIR)
 
@@ -923,7 +927,7 @@ AC_RUN_IFELSE(
        [AC_LANG_PROGRAM([AC_INCLUDES_DEFAULT],
                [[
                FILE *f = fopen(".", "r");
-               return f)]])],
+               return f != NULL;]])],
        [ac_cv_fread_reads_directories=no],
        [ac_cv_fread_reads_directories=yes])
 ])
index c3a014c5babf72ee4c0d135fec264afb37b040de..31aa9c843311b4e0b01e7378a85709083b071311 100644 (file)
--- a/connect.c
+++ b/connect.c
 #include "sha1-array.h"
 #include "transport.h"
 #include "strbuf.h"
+#include "version.h"
 #include "protocol.h"
 
-static char *server_capabilities;
+static char *server_capabilities_v1;
+static struct argv_array server_capabilities_v2 = ARGV_ARRAY_INIT;
 static const char *parse_feature_value(const char *, const char *, int *);
 
 static int check_ref(const char *name, unsigned int flags)
@@ -46,8 +48,14 @@ int check_ref_type(const struct ref *ref, int flags)
        return check_ref(ref->name, flags);
 }
 
-static void die_initial_contact(int unexpected)
+static NORETURN void die_initial_contact(int unexpected)
 {
+       /*
+        * A hang-up after seeing some response from the other end
+        * means that it is unexpected, as we know the other end is
+        * willing to talk to us.  A hang-up before seeing any
+        * response does not necessarily mean an ACL problem, though.
+        */
        if (unexpected)
                die(_("The remote end hung up upon initial contact"));
        else
@@ -56,6 +64,92 @@ static void die_initial_contact(int unexpected)
                      "and the repository exists."));
 }
 
+/* Checks if the server supports the capability 'c' */
+int server_supports_v2(const char *c, int die_on_error)
+{
+       int i;
+
+       for (i = 0; i < server_capabilities_v2.argc; i++) {
+               const char *out;
+               if (skip_prefix(server_capabilities_v2.argv[i], c, &out) &&
+                   (!*out || *out == '='))
+                       return 1;
+       }
+
+       if (die_on_error)
+               die("server doesn't support '%s'", c);
+
+       return 0;
+}
+
+int server_supports_feature(const char *c, const char *feature,
+                           int die_on_error)
+{
+       int i;
+
+       for (i = 0; i < server_capabilities_v2.argc; i++) {
+               const char *out;
+               if (skip_prefix(server_capabilities_v2.argv[i], c, &out) &&
+                   (!*out || *(out++) == '=')) {
+                       if (parse_feature_request(out, feature))
+                               return 1;
+                       else
+                               break;
+               }
+       }
+
+       if (die_on_error)
+               die("server doesn't support feature '%s'", feature);
+
+       return 0;
+}
+
+static void process_capabilities_v2(struct packet_reader *reader)
+{
+       while (packet_reader_read(reader) == PACKET_READ_NORMAL)
+               argv_array_push(&server_capabilities_v2, reader->line);
+
+       if (reader->status != PACKET_READ_FLUSH)
+               die("expected flush after capabilities");
+}
+
+enum protocol_version discover_version(struct packet_reader *reader)
+{
+       enum protocol_version version = protocol_unknown_version;
+
+       /*
+        * Peek the first line of the server's response to
+        * determine the protocol version the server is speaking.
+        */
+       switch (packet_reader_peek(reader)) {
+       case PACKET_READ_EOF:
+               die_initial_contact(0);
+       case PACKET_READ_FLUSH:
+       case PACKET_READ_DELIM:
+               version = protocol_v0;
+               break;
+       case PACKET_READ_NORMAL:
+               version = determine_protocol_version_client(reader->line);
+               break;
+       }
+
+       switch (version) {
+       case protocol_v2:
+               process_capabilities_v2(reader);
+               break;
+       case protocol_v1:
+               /* Read the peeked version line */
+               packet_reader_read(reader);
+               break;
+       case protocol_v0:
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
+       }
+
+       return version;
+}
+
 static void parse_one_symref_info(struct string_list *symref, const char *val, int len)
 {
        char *sym, *target;
@@ -85,7 +179,7 @@ static void parse_one_symref_info(struct string_list *symref, const char *val, i
 static void annotate_refs_with_symref_info(struct ref *ref)
 {
        struct string_list symref = STRING_LIST_INIT_DUP;
-       const char *feature_list = server_capabilities;
+       const char *feature_list = server_capabilities_v1;
 
        while (feature_list) {
                int len;
@@ -109,60 +203,21 @@ static void annotate_refs_with_symref_info(struct ref *ref)
        string_list_clear(&symref, 0);
 }
 
-/*
- * Read one line of a server's ref advertisement into packet_buffer.
- */
-static int read_remote_ref(int in, char **src_buf, size_t *src_len,
-                          int *responded)
-{
-       int len = packet_read(in, src_buf, src_len,
-                             packet_buffer, sizeof(packet_buffer),
-                             PACKET_READ_GENTLE_ON_EOF |
-                             PACKET_READ_CHOMP_NEWLINE);
-       const char *arg;
-       if (len < 0)
-               die_initial_contact(*responded);
-       if (len > 4 && skip_prefix(packet_buffer, "ERR ", &arg))
-               die("remote error: %s", arg);
-
-       *responded = 1;
-
-       return len;
-}
-
-#define EXPECTING_PROTOCOL_VERSION 0
-#define EXPECTING_FIRST_REF 1
-#define EXPECTING_REF 2
-#define EXPECTING_SHALLOW 3
-
-/* Returns 1 if packet_buffer is a protocol version pkt-line, 0 otherwise. */
-static int process_protocol_version(void)
+static void process_capabilities(const char *line, int *len)
 {
-       switch (determine_protocol_version_client(packet_buffer)) {
-       case protocol_v1:
-               return 1;
-       case protocol_v0:
-               return 0;
-       default:
-               die("server is speaking an unknown protocol");
-       }
-}
-
-static void process_capabilities(int *len)
-{
-       int nul_location = strlen(packet_buffer);
+       int nul_location = strlen(line);
        if (nul_location == *len)
                return;
-       server_capabilities = xstrdup(packet_buffer + nul_location + 1);
+       server_capabilities_v1 = xstrdup(line + nul_location + 1);
        *len = nul_location;
 }
 
-static int process_dummy_ref(void)
+static int process_dummy_ref(const char *line)
 {
        struct object_id oid;
        const char *name;
 
-       if (parse_oid_hex(packet_buffer, &oid, &name))
+       if (parse_oid_hex(line, &oid, &name))
                return 0;
        if (*name != ' ')
                return 0;
@@ -171,20 +226,20 @@ static int process_dummy_ref(void)
        return !oidcmp(&null_oid, &oid) && !strcmp(name, "capabilities^{}");
 }
 
-static void check_no_capabilities(int len)
+static void check_no_capabilities(const char *line, int len)
 {
-       if (strlen(packet_buffer) != len)
+       if (strlen(line) != len)
                warning("Ignoring capabilities after first line '%s'",
-                       packet_buffer + strlen(packet_buffer));
+                       line + strlen(line));
 }
 
-static int process_ref(int len, struct ref ***list, unsigned int flags,
-                      struct oid_array *extra_have)
+static int process_ref(const char *line, int len, struct ref ***list,
+                      unsigned int flags, struct oid_array *extra_have)
 {
        struct object_id old_oid;
        const char *name;
 
-       if (parse_oid_hex(packet_buffer, &old_oid, &name))
+       if (parse_oid_hex(line, &old_oid, &name))
                return 0;
        if (*name != ' ')
                return 0;
@@ -200,16 +255,17 @@ static int process_ref(int len, struct ref ***list, unsigned int flags,
                **list = ref;
                *list = &ref->next;
        }
-       check_no_capabilities(len);
+       check_no_capabilities(line, len);
        return 1;
 }
 
-static int process_shallow(int len, struct oid_array *shallow_points)
+static int process_shallow(const char *line, int len,
+                          struct oid_array *shallow_points)
 {
        const char *arg;
        struct object_id old_oid;
 
-       if (!skip_prefix(packet_buffer, "shallow ", &arg))
+       if (!skip_prefix(line, "shallow ", &arg))
                return 0;
 
        if (get_oid_hex(arg, &old_oid))
@@ -217,60 +273,68 @@ static int process_shallow(int len, struct oid_array *shallow_points)
        if (!shallow_points)
                die("repository on the other end cannot be shallow");
        oid_array_append(shallow_points, &old_oid);
-       check_no_capabilities(len);
+       check_no_capabilities(line, len);
        return 1;
 }
 
+enum get_remote_heads_state {
+       EXPECTING_FIRST_REF = 0,
+       EXPECTING_REF,
+       EXPECTING_SHALLOW,
+       EXPECTING_DONE,
+};
+
 /*
  * Read all the refs from the other end
  */
-struct ref **get_remote_heads(int in, char *src_buf, size_t src_len,
+struct ref **get_remote_heads(struct packet_reader *reader,
                              struct ref **list, unsigned int flags,
                              struct oid_array *extra_have,
                              struct oid_array *shallow_points)
 {
        struct ref **orig_list = list;
-
-       /*
-        * A hang-up after seeing some response from the other end
-        * means that it is unexpected, as we know the other end is
-        * willing to talk to us.  A hang-up before seeing any
-        * response does not necessarily mean an ACL problem, though.
-        */
-       int responded = 0;
-       int len;
-       int state = EXPECTING_PROTOCOL_VERSION;
+       int len = 0;
+       enum get_remote_heads_state state = EXPECTING_FIRST_REF;
+       const char *arg;
 
        *list = NULL;
 
-       while ((len = read_remote_ref(in, &src_buf, &src_len, &responded))) {
+       while (state != EXPECTING_DONE) {
+               switch (packet_reader_read(reader)) {
+               case PACKET_READ_EOF:
+                       die_initial_contact(1);
+               case PACKET_READ_NORMAL:
+                       len = reader->pktlen;
+                       if (len > 4 && skip_prefix(reader->line, "ERR ", &arg))
+                               die("remote error: %s", arg);
+                       break;
+               case PACKET_READ_FLUSH:
+                       state = EXPECTING_DONE;
+                       break;
+               case PACKET_READ_DELIM:
+                       die("invalid packet");
+               }
+
                switch (state) {
-               case EXPECTING_PROTOCOL_VERSION:
-                       if (process_protocol_version()) {
-                               state = EXPECTING_FIRST_REF;
-                               break;
-                       }
-                       state = EXPECTING_FIRST_REF;
-                       /* fallthrough */
                case EXPECTING_FIRST_REF:
-                       process_capabilities(&len);
-                       if (process_dummy_ref()) {
+                       process_capabilities(reader->line, &len);
+                       if (process_dummy_ref(reader->line)) {
                                state = EXPECTING_SHALLOW;
                                break;
                        }
                        state = EXPECTING_REF;
                        /* fallthrough */
                case EXPECTING_REF:
-                       if (process_ref(len, &list, flags, extra_have))
+                       if (process_ref(reader->line, len, &list, flags, extra_have))
                                break;
                        state = EXPECTING_SHALLOW;
                        /* fallthrough */
                case EXPECTING_SHALLOW:
-                       if (process_shallow(len, shallow_points))
+                       if (process_shallow(reader->line, len, shallow_points))
                                break;
-                       die("protocol error: unexpected '%s'", packet_buffer);
-               default:
-                       die("unexpected state %d", state);
+                       die("protocol error: unexpected '%s'", reader->line);
+               case EXPECTING_DONE:
+                       break;
                }
        }
 
@@ -279,6 +343,112 @@ struct ref **get_remote_heads(int in, char *src_buf, size_t src_len,
        return list;
 }
 
+/* Returns 1 when a valid ref has been added to `list`, 0 otherwise */
+static int process_ref_v2(const char *line, struct ref ***list)
+{
+       int ret = 1;
+       int i = 0;
+       struct object_id old_oid;
+       struct ref *ref;
+       struct string_list line_sections = STRING_LIST_INIT_DUP;
+       const char *end;
+
+       /*
+        * Ref lines have a number of fields which are space delimited.  The
+        * first field is the OID of the ref.  The second field is the ref
+        * name.  Subsequent fields (symref-target and peeled) are optional and
+        * don't have a particular order.
+        */
+       if (string_list_split(&line_sections, line, ' ', -1) < 2) {
+               ret = 0;
+               goto out;
+       }
+
+       if (parse_oid_hex(line_sections.items[i++].string, &old_oid, &end) ||
+           *end) {
+               ret = 0;
+               goto out;
+       }
+
+       ref = alloc_ref(line_sections.items[i++].string);
+
+       oidcpy(&ref->old_oid, &old_oid);
+       **list = ref;
+       *list = &ref->next;
+
+       for (; i < line_sections.nr; i++) {
+               const char *arg = line_sections.items[i].string;
+               if (skip_prefix(arg, "symref-target:", &arg))
+                       ref->symref = xstrdup(arg);
+
+               if (skip_prefix(arg, "peeled:", &arg)) {
+                       struct object_id peeled_oid;
+                       char *peeled_name;
+                       struct ref *peeled;
+                       if (parse_oid_hex(arg, &peeled_oid, &end) || *end) {
+                               ret = 0;
+                               goto out;
+                       }
+
+                       peeled_name = xstrfmt("%s^{}", ref->name);
+                       peeled = alloc_ref(peeled_name);
+
+                       oidcpy(&peeled->old_oid, &peeled_oid);
+                       **list = peeled;
+                       *list = &peeled->next;
+
+                       free(peeled_name);
+               }
+       }
+
+out:
+       string_list_clear(&line_sections, 0);
+       return ret;
+}
+
+struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
+                            struct ref **list, int for_push,
+                            const struct argv_array *ref_prefixes,
+                            const struct string_list *server_options)
+{
+       int i;
+       *list = NULL;
+
+       if (server_supports_v2("ls-refs", 1))
+               packet_write_fmt(fd_out, "command=ls-refs\n");
+
+       if (server_supports_v2("agent", 0))
+               packet_write_fmt(fd_out, "agent=%s", git_user_agent_sanitized());
+
+       if (server_options && server_options->nr &&
+           server_supports_v2("server-option", 1))
+               for (i = 0; i < server_options->nr; i++)
+                       packet_write_fmt(fd_out, "server-option=%s",
+                                        server_options->items[i].string);
+
+       packet_delim(fd_out);
+       /* When pushing we don't want to request the peeled tags */
+       if (!for_push)
+               packet_write_fmt(fd_out, "peel\n");
+       packet_write_fmt(fd_out, "symrefs\n");
+       for (i = 0; ref_prefixes && i < ref_prefixes->argc; i++) {
+               packet_write_fmt(fd_out, "ref-prefix %s\n",
+                                ref_prefixes->argv[i]);
+       }
+       packet_flush(fd_out);
+
+       /* Process response from server */
+       while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+               if (!process_ref_v2(reader->line, &list))
+                       die("invalid ls-refs response: %s", reader->line);
+       }
+
+       if (reader->status != PACKET_READ_FLUSH)
+               die("expected flush after ref listing");
+
+       return list;
+}
+
 static const char *parse_feature_value(const char *feature_list, const char *feature, int *lenp)
 {
        int len;
@@ -323,7 +493,7 @@ int parse_feature_request(const char *feature_list, const char *feature)
 
 const char *server_feature_value(const char *feature, int *len)
 {
-       return parse_feature_value(server_capabilities, feature, len);
+       return parse_feature_value(server_capabilities_v1, feature, len);
 }
 
 int server_supports(const char *feature)
@@ -872,6 +1042,7 @@ static enum ssh_variant determine_ssh_variant(const char *ssh_command,
  */
 static struct child_process *git_connect_git(int fd[2], char *hostandport,
                                             const char *path, const char *prog,
+                                            enum protocol_version version,
                                             int flags)
 {
        struct child_process *conn;
@@ -910,10 +1081,10 @@ static struct child_process *git_connect_git(int fd[2], char *hostandport,
                    target_host, 0);
 
        /* If using a new version put that stuff here after a second null byte */
-       if (get_protocol_version_config() > 0) {
+       if (version > 0) {
                strbuf_addch(&request, '\0');
                strbuf_addf(&request, "version=%d%c",
-                           get_protocol_version_config(), '\0');
+                           version, '\0');
        }
 
        packet_write(fd[1], request.buf, request.len);
@@ -929,14 +1100,14 @@ static struct child_process *git_connect_git(int fd[2], char *hostandport,
  */
 static void push_ssh_options(struct argv_array *args, struct argv_array *env,
                             enum ssh_variant variant, const char *port,
-                            int flags)
+                            enum protocol_version version, int flags)
 {
        if (variant == VARIANT_SSH &&
-           get_protocol_version_config() > 0) {
+           version > 0) {
                argv_array_push(args, "-o");
                argv_array_push(args, "SendEnv=" GIT_PROTOCOL_ENVIRONMENT);
                argv_array_pushf(env, GIT_PROTOCOL_ENVIRONMENT "=version=%d",
-                                get_protocol_version_config());
+                                version);
        }
 
        if (flags & CONNECT_IPV4) {
@@ -989,7 +1160,8 @@ static void push_ssh_options(struct argv_array *args, struct argv_array *env,
 
 /* Prepare a child_process for use by Git's SSH-tunneled transport. */
 static void fill_ssh_args(struct child_process *conn, const char *ssh_host,
-                         const char *port, int flags)
+                         const char *port, enum protocol_version version,
+                         int flags)
 {
        const char *ssh;
        enum ssh_variant variant;
@@ -1023,14 +1195,14 @@ static void fill_ssh_args(struct child_process *conn, const char *ssh_host,
                argv_array_push(&detect.args, ssh);
                argv_array_push(&detect.args, "-G");
                push_ssh_options(&detect.args, &detect.env_array,
-                                VARIANT_SSH, port, flags);
+                                VARIANT_SSH, port, version, flags);
                argv_array_push(&detect.args, ssh_host);
 
                variant = run_command(&detect) ? VARIANT_SIMPLE : VARIANT_SSH;
        }
 
        argv_array_push(&conn->args, ssh);
-       push_ssh_options(&conn->args, &conn->env_array, variant, port, flags);
+       push_ssh_options(&conn->args, &conn->env_array, variant, port, version, flags);
        argv_array_push(&conn->args, ssh_host);
 }
 
@@ -1051,6 +1223,15 @@ struct child_process *git_connect(int fd[2], const char *url,
        char *hostandport, *path;
        struct child_process *conn;
        enum protocol protocol;
+       enum protocol_version version = get_protocol_version_config();
+
+       /*
+        * NEEDSWORK: If we are trying to use protocol v2 and we are planning
+        * to perform a push, then fallback to v0 since the client doesn't know
+        * how to push yet using v2.
+        */
+       if (version == protocol_v2 && !strcmp("git-receive-pack", prog))
+               version = protocol_v0;
 
        /* Without this we cannot rely on waitpid() to tell
         * what happened to our children.
@@ -1065,7 +1246,7 @@ struct child_process *git_connect(int fd[2], const char *url,
                printf("Diag: path=%s\n", path ? path : "NULL");
                conn = NULL;
        } else if (protocol == PROTO_GIT) {
-               conn = git_connect_git(fd, hostandport, path, prog, flags);
+               conn = git_connect_git(fd, hostandport, path, prog, version, flags);
        } else {
                struct strbuf cmd = STRBUF_INIT;
                const char *const *var;
@@ -1108,12 +1289,12 @@ struct child_process *git_connect(int fd[2], const char *url,
                                strbuf_release(&cmd);
                                return NULL;
                        }
-                       fill_ssh_args(conn, ssh_host, port, flags);
+                       fill_ssh_args(conn, ssh_host, port, version, flags);
                } else {
                        transport_check_allowed("file");
-                       if (get_protocol_version_config() > 0) {
+                       if (version > 0) {
                                argv_array_pushf(&conn->env_array, GIT_PROTOCOL_ENVIRONMENT "=version=%d",
-                                                get_protocol_version_config());
+                                                version);
                        }
                }
                argv_array_push(&conn->args, cmd.buf);
index 01f14cdf3fa4e6b6c8cd3b4c9ec3c3d55e7fc04f..0e69c6709c9fdb83b4888443f490d79ef504c8a3 100644 (file)
--- a/connect.h
+++ b/connect.h
@@ -13,4 +13,11 @@ extern int parse_feature_request(const char *features, const char *feature);
 extern const char *server_feature_value(const char *feature, int *len_ret);
 extern int url_is_local_not_ssh(const char *url);
 
+struct packet_reader;
+extern enum protocol_version discover_version(struct packet_reader *reader);
+
+extern int server_supports_v2(const char *c, int die_on_error);
+extern int server_supports_feature(const char *c, const char *feature,
+                                  int die_on_error);
+
 #endif
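
Taken together with the connect.c changes above, these declarations support a client flow roughly like the sketch below. This is only an illustration: the packet_reader_init() flags and the visibility of get_remote_refs() from the included headers are assumptions, not something this patch spells out.

	#include "cache.h"
	#include "pkt-line.h"
	#include "protocol.h"
	#include "connect.h"
	#include "remote.h"

	/* Sketch: list refs from a v2-capable server over fd_in/fd_out. */
	static struct ref *list_refs_v2(int fd_in, int fd_out)
	{
		struct packet_reader reader;
		struct ref *refs = NULL;

		packet_reader_init(&reader, fd_in, NULL, 0,
				   PACKET_READ_CHOMP_NEWLINE |
				   PACKET_READ_GENTLE_ON_EOF);

		if (discover_version(&reader) == protocol_v2) {
			/* dies if the server did not advertise ls-refs */
			server_supports_v2("ls-refs", 1);
			get_remote_refs(fd_out, &reader, &refs, 0, NULL, NULL);
		}
		return refs;
	}
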
diff --git a/contrib/coccinelle/commit.cocci b/contrib/coccinelle/commit.cocci
new file mode 100644 (file)
index 0000000..a7e9215
--- /dev/null
@@ -0,0 +1,28 @@
+@@
+expression c;
+@@
+- &c->maybe_tree->object.oid
++ get_commit_tree_oid(c)
+
+@@
+expression c;
+@@
+- c->maybe_tree->object.oid.hash
++ get_commit_tree_oid(c)->hash
+
+// These excluded functions must access c->maybe_tree directly.
+@@
+identifier f !~ "^(get_commit_tree|get_commit_tree_in_graph|load_tree_for_commit)$";
+expression c;
+@@
+  f(...) {...
+- c->maybe_tree
++ get_commit_tree(c)
+  ...}
+
+@@
+expression c;
+expression s;
+@@
+- get_commit_tree(c) = s
++ c->maybe_tree = s
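
For illustration, the rewrite this semantic patch performs on ordinary callers looks roughly like the following; the caller is made up, and only the accessor names come from the patch.

	#include "cache.h"
	#include "commit.h"

	/* Hypothetical caller, before and after commit.cocci is applied. */
	static void report_tree(struct commit *c)
	{
		/* before: printf("%s\n", oid_to_hex(&c->maybe_tree->object.oid)); */
		printf("%s\n", oid_to_hex(get_commit_tree_oid(c)));
	}
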
index b09c8a23626b431a0cb97f6f7f930cccce25bf07..f2f331120bd2c132f90dcf314a9c5065781659b7 100644 (file)
@@ -29,6 +29,8 @@
 # tell the completion to use commit completion.  This also works with aliases
 # of form "!sh -c '...'".  For example, "!sh -c ': git commit ; ... '".
 #
+# Compatible with bash 3.2.57.
+#
 # You can set the following environment variables to influence the behavior of
 # the completion routines:
 #
@@ -282,7 +284,11 @@ __gitcomp ()
 
 # Clear the variables caching builtins' options when (re-)sourcing
 # the completion script.
-unset $(set |sed -ne 's/^\(__gitcomp_builtin_[a-zA-Z0-9_][a-zA-Z0-9_]*\)=.*/\1/p') 2>/dev/null
+if [[ -n ${ZSH_VERSION-} ]]; then
+       unset $(set |sed -ne 's/^\(__gitcomp_builtin_[a-zA-Z0-9_][a-zA-Z0-9_]*\)=.*/\1/p') 2>/dev/null
+else
+       unset $(compgen -v __gitcomp_builtin_)
+fi
 
 # This function is equivalent to
 #
@@ -388,12 +394,7 @@ __git_index_files ()
        local root="${2-.}" file
 
        __git_ls_files_helper "$root" "$1" |
-       while read -r file; do
-               case "$file" in
-               ?*/*) echo "${file%%/*}" ;;
-               *) echo "$file" ;;
-               esac
-       done | sort | uniq
+       cut -f1 -d/ | sort | uniq
 }
 
 # Lists branches from the local repository.
@@ -878,6 +879,7 @@ __git_list_porcelain_commands ()
                check-ref-format) : plumbing;;
                checkout-index)   : plumbing;;
                column)           : internal helper;;
+               commit-graph)     : plumbing;;
                commit-tree)      : plumbing;;
                count-objects)    : infrequent;;
                credential)       : credentials;;
@@ -1284,6 +1286,12 @@ _git_checkout ()
 
 _git_cherry ()
 {
+       case "$cur" in
+       --*)
+               __gitcomp_builtin cherry
+               return
+       esac
+
        __git_complete_refs
 }
 
@@ -1503,16 +1511,6 @@ _git_fsck ()
        esac
 }
 
-_git_gc ()
-{
-       case "$cur" in
-       --*)
-               __gitcomp_builtin gc
-               return
-               ;;
-       esac
-}
-
 _git_gitk ()
 {
        _gitk
@@ -1637,6 +1635,13 @@ _git_ls_remote ()
 
 _git_ls_tree ()
 {
+       case "$cur" in
+       --*)
+               __gitcomp_builtin ls-tree
+               return
+               ;;
+       esac
+
        __git_complete_file
 }
 
@@ -1812,11 +1817,6 @@ _git_mv ()
        fi
 }
 
-_git_name_rev ()
-{
-       __gitcomp_builtin name-rev
-}
-
 _git_notes ()
 {
        local subcommands='add append copy edit get-ref list merge prune remove show'
@@ -1949,7 +1949,7 @@ _git_rebase ()
        --*)
                __gitcomp "
                        --onto --merge --strategy --interactive
-                       --preserve-merges --stat --no-stat
+                       --rebase-merges --preserve-merges --stat --no-stat
                        --committer-date-is-author-date --ignore-date
                        --ignore-whitespace --whitespace=
                        --autosquash --no-autosquash
@@ -2120,7 +2120,7 @@ _git_config ()
                return
                ;;
        branch.*.rebase)
-               __gitcomp "false true preserve interactive"
+               __gitcomp "false true merges preserve interactive"
                return
                ;;
        remote.pushdefault)
@@ -2177,7 +2177,7 @@ _git_config ()
                __gitcomp "$__git_log_date_formats"
                return
                ;;
-       sendemail.aliasesfiletype)
+       sendemail.aliasfiletype)
                __gitcomp "mutt mailrc pine elm gnus"
                return
                ;;
@@ -2350,6 +2350,7 @@ _git_config ()
                core.bigFileThreshold
                core.checkStat
                core.commentChar
+               core.commitGraph
                core.compression
                core.createObject
                core.deltaBaseCacheLimit
@@ -2774,13 +2775,21 @@ _git_show_branch ()
 _git_stash ()
 {
        local save_opts='--all --keep-index --no-keep-index --quiet --patch --include-untracked'
-       local subcommands='push save list show apply clear drop pop create branch'
-       local subcommand="$(__git_find_on_cmdline "$subcommands")"
+       local subcommands='push list show apply clear drop pop create branch'
+       local subcommand="$(__git_find_on_cmdline "$subcommands save")"
+       if [ -n "$(__git_find_on_cmdline "-p")" ]; then
+               subcommand="push"
+       fi
        if [ -z "$subcommand" ]; then
                case "$cur" in
                --*)
                        __gitcomp "$save_opts"
                        ;;
+               sa*)
+                       if [ -z "$(__git_find_on_cmdline "$save_opts")" ]; then
+                               __gitcomp "save"
+                       fi
+                       ;;
                *)
                        if [ -z "$(__git_find_on_cmdline "$save_opts")" ]; then
                                __gitcomp "$subcommands"
@@ -3036,6 +3045,45 @@ _git_worktree ()
        fi
 }
 
+__git_complete_common () {
+       local command="$1"
+
+       case "$cur" in
+       --*)
+               __gitcomp_builtin "$command"
+               ;;
+       esac
+}
+
+__git_cmds_with_parseopt_helper=
+__git_support_parseopt_helper () {
+       test -n "$__git_cmds_with_parseopt_helper" ||
+               __git_cmds_with_parseopt_helper="$(__git --list-parseopt-builtins)"
+
+       case " $__git_cmds_with_parseopt_helper " in
+       *" $1 "*)
+               return 0
+               ;;
+       *)
+               return 1
+               ;;
+       esac
+}
+
+__git_complete_command () {
+       local command="$1"
+       local completion_func="_git_${command//-/_}"
+       if declare -f $completion_func >/dev/null 2>/dev/null; then
+               $completion_func
+               return 0
+       elif __git_support_parseopt_helper "$command"; then
+               __git_complete_common "$command"
+               return 0
+       else
+               return 1
+       fi
+}
+
 __git_main ()
 {
        local i c=1 command __git_dir __git_repo_path
@@ -3095,14 +3143,12 @@ __git_main ()
                return
        fi
 
-       local completion_func="_git_${command//-/_}"
-       declare -f $completion_func >/dev/null 2>/dev/null && $completion_func && return
+       __git_complete_command "$command" && return
 
        local expansion=$(__git_aliased_command "$command")
        if [ -n "$expansion" ]; then
                words[1]=$expansion
-               completion_func="_git_${expansion//-/_}"
-               declare -f $completion_func >/dev/null 2>/dev/null && $completion_func
+               __git_complete_command "$expansion"
        fi
 }
 
diff --git a/contrib/convert-grafts-to-replace-refs.sh b/contrib/convert-grafts-to-replace-refs.sh
deleted file mode 100755 (executable)
index 0cbc917..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh
-
-# You should execute this script in the repository where you
-# want to convert grafts to replace refs.
-
-GRAFTS_FILE="${GIT_DIR:-.git}/info/grafts"
-
-. $(git --exec-path)/git-sh-setup
-
-test -f "$GRAFTS_FILE" || die "Could not find graft file: '$GRAFTS_FILE'"
-
-grep '^[^# ]' "$GRAFTS_FILE" |
-while read definition
-do
-       if test -n "$definition"
-       then
-               echo "Converting: $definition"
-               git replace --graft $definition ||
-                       die "Conversion failed for: $definition"
-       fi
-done
-
-mv "$GRAFTS_FILE" "$GRAFTS_FILE.bak" ||
-       die "Could not rename '$GRAFTS_FILE' to '$GRAFTS_FILE.bak'"
-
-echo "Success!"
-echo "All the grafts in '$GRAFTS_FILE' have been converted to replace refs!"
-echo "The grafts file '$GRAFTS_FILE' has been renamed: '$GRAFTS_FILE.bak'"
index 663992e530c82f891361ccc9b952099aff3d56ec..536754583b59945e984ad487b6e524e5d1de7056 100644 (file)
@@ -21,37 +21,82 @@ package DiffHighlight;
 my $COLOR = qr/\x1b\[[0-9;]*m/;
 my $BORING = qr/$COLOR|\s/;
 
-# The patch portion of git log -p --graph should only ever have preceding | and
-# not / or \ as merge history only shows up on the commit line.
-my $GRAPH = qr/$COLOR?\|$COLOR?\s+/;
-
 my @removed;
 my @added;
 my $in_hunk;
+my $graph_indent = 0;
 
 our $line_cb = sub { print @_ };
 our $flush_cb = sub { local $| = 1 };
 
-sub handle_line {
+# Count the visible width of a string, excluding any terminal color sequences.
+sub visible_width {
        local $_ = shift;
+       my $ret = 0;
+       while (length) {
+               if (s/^$COLOR//) {
+                       # skip colors
+               } elsif (s/^.//) {
+                       $ret++;
+               }
+       }
+       return $ret;
+}
+
+# Return a substring of $str, omitting $len visible characters from the
+# beginning, where terminal color sequences do not count as visible.
+sub visible_substr {
+       my ($str, $len) = @_;
+       while ($len > 0) {
+               if ($str =~ s/^$COLOR//) {
+                       next
+               }
+               $str =~ s/^.//;
+               $len--;
+       }
+       return $str;
+}
+
+sub handle_line {
+       my $orig = shift;
+       local $_ = $orig;
+
+       # match a graph line that begins a commit
+       if (/^(?:$COLOR?\|$COLOR?[ ])* # zero or more leading "|" with space
+                $COLOR?\*$COLOR?[ ]   # a "*" with its trailing space
+             (?:$COLOR?\|$COLOR?[ ])* # zero or more trailing "|"
+                                [ ]*  # trailing whitespace for merges
+           /x) {
+               my $graph_prefix = $&;
+
+               # We must flush before setting graph indent, since the
+               # new commit may be indented differently from what we
+               # queued.
+               flush();
+               $graph_indent = visible_width($graph_prefix);
+
+       } elsif ($graph_indent) {
+               if (length($_) < $graph_indent) {
+                       $graph_indent = 0;
+               } else {
+                       $_ = visible_substr($_, $graph_indent);
+               }
+       }
 
        if (!$in_hunk) {
-               $line_cb->($_);
-               $in_hunk = /^$GRAPH*$COLOR*\@\@ /;
+               $line_cb->($orig);
+               $in_hunk = /^$COLOR*\@\@ /;
        }
-       elsif (/^$GRAPH*$COLOR*-/) {
-               push @removed, $_;
+       elsif (/^$COLOR*-/) {
+               push @removed, $orig;
        }
-       elsif (/^$GRAPH*$COLOR*\+/) {
-               push @added, $_;
+       elsif (/^$COLOR*\+/) {
+               push @added, $orig;
        }
        else {
-               show_hunk(\@removed, \@added);
-               @removed = ();
-               @added = ();
-
-               $line_cb->($_);
-               $in_hunk = /^$GRAPH*$COLOR*[\@ ]/;
+               flush();
+               $line_cb->($orig);
+               $in_hunk = /^$COLOR*[\@ ]/;
        }
 
        # Most of the time there is enough output to keep things streaming,
@@ -71,6 +116,8 @@ sub flush {
        # Flush any queued hunk (this can happen when there is no trailing
        # context in the final diff of the input).
        show_hunk(\@removed, \@added);
+       @removed = ();
+       @added = ();
 }
 
 sub highlight_stdin {
@@ -226,8 +273,8 @@ sub is_pair_interesting {
        my $suffix_a = join('', @$a[($sa+1)..$#$a]);
        my $suffix_b = join('', @$b[($sb+1)..$#$b]);
 
-       return $prefix_a !~ /^$GRAPH*$COLOR*-$BORING*$/ ||
-              $prefix_b !~ /^$GRAPH*$COLOR*\+$BORING*$/ ||
+       return visible_substr($prefix_a, $graph_indent) !~ /^$COLOR*-$BORING*$/ ||
+              visible_substr($prefix_b, $graph_indent) !~ /^$COLOR*\+$BORING*$/ ||
               $suffix_a !~ /^$BORING*$/ ||
               $suffix_b !~ /^$BORING*$/;
 }
index 3b43dbed7488c5f4a5f05809725ffd0bcd7e61b5..f6f5195d00f6ca01b0751fc1c1b58055f0ef25ee 100755 (executable)
@@ -52,15 +52,17 @@ test_strip_patch_header () {
 # dh_test_setup_history generates a contrived graph such that we have at least
 # 1 nesting (E) and 2 nestings (F).
 #
-#            A branch
-#           /
-#      D---E---F master
+#        A---B master
+#       /
+#      D---E---F branch
 #
 #      git log --all --graph
 #      * commit
-#      |    A
+#      |    B
 #      | * commit
 #      | |    F
+#      * | commit
+#      | |    A
 #      | * commit
 #      |/
 #      |    E
@@ -68,24 +70,30 @@ test_strip_patch_header () {
 #           D
 #
 dh_test_setup_history () {
-       echo "file1" >file1 &&
-       echo "file2" >file2 &&
-       echo "file3" >file3 &&
-
-       cat file1 >file &&
+       echo file1 >file &&
        git add file &&
+       test_tick &&
        git commit -m "D" &&
 
        git checkout -b branch &&
-       cat file2 >file &&
-       git commit -a -m "A" &&
+       echo file2 >file &&
+       test_tick &&
+       git commit -a -m "E" &&
 
        git checkout master &&
-       cat file2 >file &&
-       git commit -a -m "E" &&
+       echo file2 >file &&
+       test_tick &&
+       git commit -a -m "A" &&
 
-       cat file3 >file &&
-       git commit -a -m "F"
+       git checkout branch &&
+       echo file3 >file &&
+       test_tick &&
+       git commit -a -m "F" &&
+
+       git checkout master &&
+       echo file3 >file &&
+       test_tick &&
+       git commit -a -m "B"
 }
 
 left_trim () {
@@ -246,16 +254,25 @@ test_expect_failure 'diff-highlight treats combining code points as a unit' '
 test_expect_success 'diff-highlight works with the --graph option' '
        dh_test_setup_history &&
 
-       # topo-order so that the order of the commits is the same as with --graph
+       # date-order so that the commits are interleaved for both
        # trim graph elements so we can do a diff
        # trim leading space because our trim_graph is not perfect
-       git log --branches -p --topo-order |
+       git log --branches -p --date-order |
                "$DIFF_HIGHLIGHT" | left_trim >graph.exp &&
-       git log --branches -p --graph |
+       git log --branches -p --date-order --graph |
                "$DIFF_HIGHLIGHT" | trim_graph | left_trim >graph.act &&
        test_cmp graph.exp graph.act
 '
 
+# Just reuse the previous graph test, but with --color.  Our trimming
+# doesn't know about color, so just sanity check that something got
+# highlighted.
+test_expect_success 'diff-highlight works with color graph' '
+       git log --branches -p --date-order --graph --color |
+               "$DIFF_HIGHLIGHT" | trim_graph | left_trim >graph &&
+       grep "\[7m" graph
+'
+
 # Most combined diffs won't meet diff-highlight's line-number filter. So we
 # create one here where one side drops a line and the other modifies it. That
 # should result in a diff like:
@@ -293,4 +310,32 @@ test_expect_success 'diff-highlight ignores combined diffs' '
        test_cmp expect actual
 '
 
+test_expect_success 'diff-highlight handles --graph with leading dash' '
+       cat >file <<-\EOF &&
+       before
+       the old line
+       -leading dash
+       EOF
+       git add file &&
+       git commit -m before &&
+
+       sed s/old/new/ <file >file.tmp &&
+       mv file.tmp file &&
+       git add file &&
+       git commit -m after &&
+
+       cat >expect <<-EOF &&
+       --- a/file
+       +++ b/file
+       @@ -1,3 +1,3 @@
+        before
+       -the ${CW}old${CR} line
+       +the ${CW}new${CR} line
+        -leading dash
+       EOF
+       git log --graph -p -1 | "$DIFF_HIGHLIGHT" >actual.raw &&
+       trim_graph <actual.raw | sed -n "/^---/,\$p" >actual &&
+       test_cmp expect actual
+'
+
 test_done
diff --git a/contrib/emacs/.gitignore b/contrib/emacs/.gitignore
deleted file mode 100644 (file)
index c531d98..0000000
+++ /dev/null
@@ -1 +0,0 @@
-*.elc
diff --git a/contrib/emacs/Makefile b/contrib/emacs/Makefile
deleted file mode 100644 (file)
index 24d9312..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-## Build and install stuff
-
-EMACS = emacs
-
-ELC = git.elc git-blame.elc
-INSTALL ?= install
-INSTALL_ELC = $(INSTALL) -m 644
-prefix ?= $(HOME)
-emacsdir = $(prefix)/share/emacs/site-lisp
-RM ?= rm -f
-
-all: $(ELC)
-
-install: all
-       $(INSTALL) -d $(DESTDIR)$(emacsdir)
-       $(INSTALL_ELC) $(ELC:.elc=.el) $(ELC) $(DESTDIR)$(emacsdir)
-
-%.elc: %.el
-       $(EMACS) -batch -f batch-byte-compile $<
-
-clean:; $(RM) $(ELC)
index 82368bdbfff199465ff8e8cbea49d99e5485e1d7..977a16f1e339faca937dfd1a60bb10395bfd59c4 100644 (file)
@@ -1,30 +1,24 @@
-This directory contains various modules for Emacs support.
+This directory used to contain various modules for Emacs support.
 
-To make the modules available to Emacs, you should add this directory
-to your load-path, and then require the modules you want. This can be
-done by adding to your .emacs something like this:
+These were added shortly after Git was first released. Since then
+Emacs's own support for Git got better than what was offered by these
+modes. There are also popular 3rd-party Git modes such as Magit which
+offer replacements for these.
 
-  (add-to-list 'load-path ".../git/contrib/emacs")
-  (require 'git)
-  (require 'git-blame)
-
-
-The following modules are available:
+The following modules were available, and can be dug up from the Git
+history:
 
 * git.el:
 
-  Status manager that displays the state of all the files of the
-  project, and provides easy access to the most frequently used git
-  commands. The user interface is as far as possible compatible with
-  the pcl-cvs mode. It can be started with `M-x git-status'.
+  Wrapper for "git status" that provided access to other git commands.
+
+  Modern alternatives to this include Magit, and VC mode that ships
+  with Emacs.
 
 * git-blame.el:
 
-  Emacs implementation of incremental git-blame.  When you turn it on
-  while viewing a file, the editor buffer will be updated by setting
-  the background of individual lines to a color that reflects which
-  commit it comes from.  And when you move around the buffer, a
-  one-line summary will be shown in the echo area.
+  A wrapper for "git blame" written before Emacs's own vc-annotate
+  mode learned to invoke git-blame, which can be done via C-x v g.
 
 * vc-git.el:
 
index 510e0f710374cfb06d5875108acdad9e877eaf2d..6a8a2b8ff190842f2dfc2a84f5283de12d2f5f9f 100644 (file)
@@ -1,483 +1,6 @@
-;;; git-blame.el --- Minor mode for incremental blame for Git  -*- coding: utf-8 -*-
-;;
-;; Copyright (C) 2007  David Kågedal
-;;
-;; Authors:    David Kågedal <davidk@lysator.liu.se>
-;; Created:    31 Jan 2007
-;; Message-ID: <87iren2vqx.fsf@morpheus.local>
-;; License:    GPL
-;; Keywords:   git, version control, release management
-;;
-;; Compatibility: Emacs21, Emacs22 and EmacsCVS
-;;                Git 1.5 and up
-
-;; This file is *NOT* part of GNU Emacs.
-;; This file is distributed under the same terms as GNU Emacs.
-
-;; This program is free software; you can redistribute it and/or
-;; modify it under the terms of the GNU General Public License as
-;; published by the Free Software Foundation; either version 2 of
-;; the License, or (at your option) any later version.
-
-;; This program is distributed in the hope that it will be
-;; useful, but WITHOUT ANY WARRANTY; without even the implied
-;; warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-;; PURPOSE.  See the GNU General Public License for more details.
-
-;; You should have received a copy of the GNU General Public
-;; License along with this program; if not, see
-;; <http://www.gnu.org/licenses/>.
-
-;; http://www.fsf.org/copyleft/gpl.html
-
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-;;
-;;; Commentary:
-;;
-;; Here is an Emacs implementation of incremental git-blame.  When you
-;; turn it on while viewing a file, the editor buffer will be updated by
-;; setting the background of individual lines to a color that reflects
-;; which commit it comes from.  And when you move around the buffer, a
-;; one-line summary will be shown in the echo area.
-
-;;; Installation:
-;;
-;; To use this package, put it somewhere in `load-path' (or add
-;; directory with git-blame.el to `load-path'), and add the following
-;; line to your .emacs:
-;;
-;;    (require 'git-blame)
-;;
-;; If you do not want to load this package before it is necessary, you
-;; can make use of the `autoload' feature, e.g. by adding to your .emacs
-;; the following lines
-;;
-;;    (autoload 'git-blame-mode "git-blame"
-;;              "Minor mode for incremental blame for Git." t)
-;;
-;; Then first use of `M-x git-blame-mode' would load the package.
-
-;;; Compatibility:
-;;
-;; It requires GNU Emacs 21 or later and Git 1.5.0 and up
-;;
-;; If you'are using Emacs 20, try changing this:
-;;
-;;            (overlay-put ovl 'face (list :background
-;;                                         (cdr (assq 'color (cddddr info)))))
-;;
-;; to
-;;
-;;            (overlay-put ovl 'face (cons 'background-color
-;;                                         (cdr (assq 'color (cddddr info)))))
-
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-;;
-;;; Code:
-
-(eval-when-compile (require 'cl))                            ; to use `push', `pop'
-(require 'format-spec)
-
-(defface git-blame-prefix-face
-  '((((background dark)) (:foreground "gray"
-                          :background "black"))
-    (((background light)) (:foreground "gray"
-                           :background "white"))
-    (t (:weight bold)))
-  "The face used for the hash prefix."
-  :group 'git-blame)
-
-(defgroup git-blame nil
-  "A minor mode showing Git blame information."
-  :group 'git
-  :link '(function-link git-blame-mode))
-
-
-(defcustom git-blame-use-colors t
-  "Use colors to indicate commits in `git-blame-mode'."
-  :type 'boolean
-  :group 'git-blame)
-
-(defcustom git-blame-prefix-format
-  "%h %20A:"
-  "The format of the prefix added to each line in `git-blame'
-mode. The format is passed to `format-spec' with the following format keys:
-
-  %h - the abbreviated hash
-  %H - the full hash
-  %a - the author name
-  %A - the author email
-  %c - the committer name
-  %C - the committer email
-  %s - the commit summary
-"
-  :group 'git-blame)
-
-(defcustom git-blame-mouseover-format
-  "%h %a %A: %s"
-  "The format of the description shown when pointing at a line in
-`git-blame' mode. The format string is passed to `format-spec'
-with the following format keys:
-
-  %h - the abbreviated hash
-  %H - the full hash
-  %a - the author name
-  %A - the author email
-  %c - the committer name
-  %C - the committer email
-  %s - the commit summary
-"
-  :group 'git-blame)
-
-
-(defun git-blame-color-scale (&rest elements)
-  "Given a list, returns a list of triples formed with each
-elements of the list.
-
-a b => bbb bba bab baa abb aba aaa aab"
-  (let (result)
-    (dolist (a elements)
-      (dolist (b elements)
-        (dolist (c elements)
-          (setq result (cons (format "#%s%s%s" a b c) result)))))
-    result))
-
-;; (git-blame-color-scale "0c" "04" "24" "1c" "2c" "34" "14" "3c") =>
-;; ("#3c3c3c" "#3c3c14" "#3c3c34" "#3c3c2c" "#3c3c1c" "#3c3c24"
-;; "#3c3c04" "#3c3c0c" "#3c143c" "#3c1414" "#3c1434" "#3c142c" ...)
-
-(defmacro git-blame-random-pop (l)
-  "Select a random element from L and returns it. Also remove
-selected element from l."
-  ;; only works on lists with unique elements
-  `(let ((e (elt ,l (random (length ,l)))))
-     (setq ,l (remove e ,l))
-     e))
-
-(defvar git-blame-log-oneline-format
-  "format:[%cr] %cn: %s"
-  "*Formatting option used for describing current line in the minibuffer.
-
-This option is used to pass to git log --pretty= command-line option,
-and describe which commit the current line was made.")
-
-(defvar git-blame-dark-colors
-  (git-blame-color-scale "0c" "04" "24" "1c" "2c" "34" "14" "3c")
-  "*List of colors (format #RGB) to use in a dark environment.
-
-To check out the list, evaluate (list-colors-display git-blame-dark-colors).")
-
-(defvar git-blame-light-colors
-  (git-blame-color-scale "c4" "d4" "cc" "dc" "f4" "e4" "fc" "ec")
-  "*List of colors (format #RGB) to use in a light environment.
-
-To check out the list, evaluate (list-colors-display git-blame-light-colors).")
-
-(defvar git-blame-colors '()
-  "Colors used by git-blame. The list is built once when activating git-blame
-minor mode.")
-
-(defvar git-blame-ancient-color "dark green"
-  "*Color to be used for ancient commit.")
-
-(defvar git-blame-autoupdate t
-  "*Automatically update the blame display while editing")
-
-(defvar git-blame-proc nil
-  "The running git-blame process")
-(make-variable-buffer-local 'git-blame-proc)
-
-(defvar git-blame-overlays nil
-  "The git-blame overlays used in the current buffer.")
-(make-variable-buffer-local 'git-blame-overlays)
-
-(defvar git-blame-cache nil
-  "A cache of git-blame information for the current buffer")
-(make-variable-buffer-local 'git-blame-cache)
-
-(defvar git-blame-idle-timer nil
-  "An idle timer that updates the blame")
-(make-variable-buffer-local 'git-blame-cache)
-
-(defvar git-blame-update-queue nil
-  "A queue of update requests")
-(make-variable-buffer-local 'git-blame-update-queue)
-
-;; FIXME: docstrings
-(defvar git-blame-file nil)
-(defvar git-blame-current nil)
-
-(defvar git-blame-mode nil)
-(make-variable-buffer-local 'git-blame-mode)
-
-(defvar git-blame-mode-line-string " blame"
-  "String to display on the mode line when git-blame is active.")
-
-(or (assq 'git-blame-mode minor-mode-alist)
-    (setq minor-mode-alist
-         (cons '(git-blame-mode git-blame-mode-line-string) minor-mode-alist)))
-
-;;;###autoload
-(defun git-blame-mode (&optional arg)
-  "Toggle minor mode for displaying Git blame
-
-With prefix ARG, turn the mode on if ARG is positive."
-  (interactive "P")
-  (cond
-   ((null arg)
-    (if git-blame-mode (git-blame-mode-off) (git-blame-mode-on)))
-   ((> (prefix-numeric-value arg) 0) (git-blame-mode-on))
-   (t (git-blame-mode-off))))
-
-(defun git-blame-mode-on ()
-  "Turn on git-blame mode.
-
-See also function `git-blame-mode'."
-  (make-local-variable 'git-blame-colors)
-  (if git-blame-autoupdate
-      (add-hook 'after-change-functions 'git-blame-after-change nil t)
-    (remove-hook 'after-change-functions 'git-blame-after-change t))
-  (git-blame-cleanup)
-  (let ((bgmode (cdr (assoc 'background-mode (frame-parameters)))))
-    (if (eq bgmode 'dark)
-       (setq git-blame-colors git-blame-dark-colors)
-      (setq git-blame-colors git-blame-light-colors)))
-  (setq git-blame-cache (make-hash-table :test 'equal))
-  (setq git-blame-mode t)
-  (git-blame-run))
-
-(defun git-blame-mode-off ()
-  "Turn off git-blame mode.
-
-See also function `git-blame-mode'."
-  (git-blame-cleanup)
-  (if git-blame-idle-timer (cancel-timer git-blame-idle-timer))
-  (setq git-blame-mode nil))
-
-;;;###autoload
-(defun git-reblame ()
-  "Recalculate all blame information in the current buffer"
-  (interactive)
-  (unless git-blame-mode
-    (error "Git-blame is not active"))
-
-  (git-blame-cleanup)
-  (git-blame-run))
-
-(defun git-blame-run (&optional startline endline)
-  (if git-blame-proc
-      ;; Should maybe queue up a new run here
-      (message "Already running git blame")
-    (let ((display-buf (current-buffer))
-          (blame-buf (get-buffer-create
-                      (concat " git blame for " (buffer-name))))
-          (args '("--incremental" "--contents" "-")))
-      (if startline
-          (setq args (append args
-                             (list "-L" (format "%d,%d" startline endline)))))
-      (setq args (append args
-                         (list (file-name-nondirectory buffer-file-name))))
-      (setq git-blame-proc
-            (apply 'start-process
-                   "git-blame" blame-buf
-                   "git" "blame"
-                   args))
-      (with-current-buffer blame-buf
-        (erase-buffer)
-        (make-local-variable 'git-blame-file)
-        (make-local-variable 'git-blame-current)
-        (setq git-blame-file display-buf)
-        (setq git-blame-current nil))
-      (set-process-filter git-blame-proc 'git-blame-filter)
-      (set-process-sentinel git-blame-proc 'git-blame-sentinel)
-      (process-send-region git-blame-proc (point-min) (point-max))
-      (process-send-eof git-blame-proc))))
-
-(defun remove-git-blame-text-properties (start end)
-  (let ((modified (buffer-modified-p))
-        (inhibit-read-only t))
-    (remove-text-properties start end '(point-entered nil))
-    (set-buffer-modified-p modified)))
-
-(defun git-blame-cleanup ()
-  "Remove all blame properties."
-  (mapc 'delete-overlay git-blame-overlays)
-  (setq git-blame-overlays nil)
-  (remove-git-blame-text-properties (point-min) (point-max)))
-
-(defun git-blame-update-region (start end)
-  "Rerun blame to get updates between START and END"
-  (let ((overlays (overlays-in start end)))
-    (while overlays
-      (let ((overlay (pop overlays)))
-        (if (< (overlay-start overlay) start)
-            (setq start (overlay-start overlay)))
-        (if (> (overlay-end overlay) end)
-            (setq end (overlay-end overlay)))
-        (setq git-blame-overlays (delete overlay git-blame-overlays))
-        (delete-overlay overlay))))
-  (remove-git-blame-text-properties start end)
-  ;; We can be sure that start and end are at line breaks
-  (git-blame-run (1+ (count-lines (point-min) start))
-                 (count-lines (point-min) end)))
-
-(defun git-blame-sentinel (proc status)
-  (with-current-buffer (process-buffer proc)
-    (with-current-buffer git-blame-file
-      (setq git-blame-proc nil)
-      (if git-blame-update-queue
-          (git-blame-delayed-update))))
-  ;;(kill-buffer (process-buffer proc))
-  ;;(message "git blame finished")
-  )
-
-(defvar in-blame-filter nil)
-
-(defun git-blame-filter (proc str)
-  (with-current-buffer (process-buffer proc)
-    (save-excursion
-      (goto-char (process-mark proc))
-      (insert-before-markers str)
-      (goto-char (point-min))
-      (unless in-blame-filter
-        (let ((more t)
-              (in-blame-filter t))
-          (while more
-            (setq more (git-blame-parse))))))))
-
-(defun git-blame-parse ()
-  (cond ((looking-at "\\([0-9a-f]\\{40\\}\\) \\([0-9]+\\) \\([0-9]+\\) \\([0-9]+\\)\n")
-         (let ((hash (match-string 1))
-               (src-line (string-to-number (match-string 2)))
-               (res-line (string-to-number (match-string 3)))
-               (num-lines (string-to-number (match-string 4))))
-           (delete-region (point) (match-end 0))
-           (setq git-blame-current (list (git-blame-new-commit hash)
-                                         src-line res-line num-lines)))
-         t)
-        ((looking-at "\\([a-z-]+\\) \\(.+\\)\n")
-         (let ((key (match-string 1))
-               (value (match-string 2)))
-           (delete-region (point) (match-end 0))
-           (git-blame-add-info (car git-blame-current) key value)
-           (when (string= key "filename")
-             (git-blame-create-overlay (car git-blame-current)
-                                       (caddr git-blame-current)
-                                       (cadddr git-blame-current))
-             (setq git-blame-current nil)))
-         t)
-        (t
-         nil)))
-
-(defun git-blame-new-commit (hash)
-  (with-current-buffer git-blame-file
-    (or (gethash hash git-blame-cache)
-        ;; Assign a random color to each new commit info
-        ;; Take care not to select the same color multiple times
-        (let* ((color (if git-blame-colors
-                          (git-blame-random-pop git-blame-colors)
-                        git-blame-ancient-color))
-               (info `(,hash (color . ,color))))
-          (puthash hash info git-blame-cache)
-          info))))
-
-(defun git-blame-create-overlay (info start-line num-lines)
-  (with-current-buffer git-blame-file
-    (save-excursion
-      (let ((inhibit-point-motion-hooks t)
-            (inhibit-modification-hooks t))
-        (goto-char (point-min))
-        (forward-line (1- start-line))
-        (let* ((start (point))
-               (end (progn (forward-line num-lines) (point)))
-               (ovl (make-overlay start end))
-               (hash (car info))
-               (spec `((?h . ,(substring hash 0 6))
-                       (?H . ,hash)
-                       (?a . ,(git-blame-get-info info 'author))
-                       (?A . ,(git-blame-get-info info 'author-mail))
-                       (?c . ,(git-blame-get-info info 'committer))
-                       (?C . ,(git-blame-get-info info 'committer-mail))
-                       (?s . ,(git-blame-get-info info 'summary)))))
-          (push ovl git-blame-overlays)
-          (overlay-put ovl 'git-blame info)
-          (overlay-put ovl 'help-echo
-                       (format-spec git-blame-mouseover-format spec))
-          (if git-blame-use-colors
-              (overlay-put ovl 'face (list :background
-                                           (cdr (assq 'color (cdr info))))))
-          (overlay-put ovl 'line-prefix
-                       (propertize (format-spec git-blame-prefix-format spec)
-                                   'face 'git-blame-prefix-face)))))))
-
-(defun git-blame-add-info (info key value)
-  (nconc info (list (cons (intern key) value))))
-
-(defun git-blame-get-info (info key)
-  (cdr (assq key (cdr info))))
-
-(defun git-blame-current-commit ()
-  (let ((info (get-char-property (point) 'git-blame)))
-    (if info
-        (car info)
-      (error "No commit info"))))
-
-(defun git-describe-commit (hash)
-  (with-temp-buffer
-    (call-process "git" nil t nil
-                  "log" "-1"
-                 (concat "--pretty=" git-blame-log-oneline-format)
-                  hash)
-    (buffer-substring (point-min) (point-max))))
-
-(defvar git-blame-last-identification nil)
-(make-variable-buffer-local 'git-blame-last-identification)
-(defun git-blame-identify (&optional hash)
-  (interactive)
-  (let ((info (gethash (or hash (git-blame-current-commit)) git-blame-cache)))
-    (when (and info (not (eq info git-blame-last-identification)))
-      (message "%s" (nth 4 info))
-      (setq git-blame-last-identification info))))
-
-;; (defun git-blame-after-save ()
-;;   (when git-blame-mode
-;;     (git-blame-cleanup)
-;;     (git-blame-run)))
-;; (add-hook 'after-save-hook 'git-blame-after-save)
-
-(defun git-blame-after-change (start end length)
-  (when git-blame-mode
-    (git-blame-enq-update start end)))
-
-(defvar git-blame-last-update nil)
-(make-variable-buffer-local 'git-blame-last-update)
-(defun git-blame-enq-update (start end)
-  "Mark the region between START and END as needing blame update"
-  ;; Try to be smart and avoid multiple callouts for sequential
-  ;; editing
-  (cond ((and git-blame-last-update
-              (= start (cdr git-blame-last-update)))
-         (setcdr git-blame-last-update end))
-        ((and git-blame-last-update
-              (= end (car git-blame-last-update)))
-         (setcar git-blame-last-update start))
-        (t
-         (setq git-blame-last-update (cons start end))
-         (setq git-blame-update-queue (nconc git-blame-update-queue
-                                             (list git-blame-last-update)))))
-  (unless (or git-blame-proc git-blame-idle-timer)
-    (setq git-blame-idle-timer
-          (run-with-idle-timer 0.5 nil 'git-blame-delayed-update))))
-
-(defun git-blame-delayed-update ()
-  (setq git-blame-idle-timer nil)
-  (if git-blame-update-queue
-      (let ((first (pop git-blame-update-queue))
-            (inhibit-point-motion-hooks t))
-        (git-blame-update-region (car first) (cdr first)))))
-
-(provide 'git-blame)
-
-;;; git-blame.el ends here
+(error "git-blame.el no longer ships with git. It's recommended
+to replace its use with Emacs's own vc-annotate. See
+contrib/emacs/README in git's
+sources (https://github.com/git/git/blob/master/contrib/emacs/README)
+for more info on suggested alternatives and for why this
+happened.")
index 97919f2d73a73d06bf4e23831fddd473ed75022d..03f926281fb16128a68969acdd97f32b7d6a1e03 100644 (file)
-;;; git.el --- A user interface for git
-
-;; Copyright (C) 2005, 2006, 2007, 2008, 2009 Alexandre Julliard <julliard@winehq.org>
-
-;; Version: 1.0
-
-;; This program is free software; you can redistribute it and/or
-;; modify it under the terms of the GNU General Public License as
-;; published by the Free Software Foundation; either version 2 of
-;; the License, or (at your option) any later version.
-;;
-;; This program is distributed in the hope that it will be
-;; useful, but WITHOUT ANY WARRANTY; without even the implied
-;; warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-;; PURPOSE.  See the GNU General Public License for more details.
-;;
-;; You should have received a copy of the GNU General Public
-;; License along with this program; if not, see
-;; <http://www.gnu.org/licenses/>.
-
-;;; Commentary:
-
-;; This file contains an interface for the git version control
-;; system. It provides easy access to the most frequently used git
-;; commands. The user interface is, as far as possible, identical to
-;; that of PCL-CVS mode.
-;;
-;; To install: put this file on the load-path and place the following
-;; in your .emacs file:
-;;
-;;    (require 'git)
-;;
-;; To start: `M-x git-status'
-;;
-;; TODO
-;;  - diff against other branch
-;;  - renaming files from the status buffer
-;;  - creating tags
-;;  - fetch/pull
-;;  - revlist browser
-;;  - git-show-branch browser
-;;
-
-;;; Compatibility:
-;;
-;; This file works on GNU Emacs 21 or later. It may work on older
-;; versions but this is not guaranteed.
-;;
-;; It may work on XEmacs 21, provided that you first install the ewoc
-;; and log-edit packages.
-;;
-
-(eval-when-compile (require 'cl))
-(require 'ewoc)
-(require 'log-edit)
-(require 'easymenu)
-
-
-;;;; Customizations
-;;;; ------------------------------------------------------------
-
-(defgroup git nil
-  "A user interface for the git versioning system."
-  :group 'tools)
-
-(defcustom git-committer-name nil
-  "User name to use for commits.
-The default is to fall back to the repository config,
-then to `add-log-full-name' and then to `user-full-name'."
-  :group 'git
-  :type '(choice (const :tag "Default" nil)
-                 (string :tag "Name")))
-
-(defcustom git-committer-email nil
-  "Email address to use for commits.
-The default is to fall back to the git repository config,
-then to `add-log-mailing-address' and then to `user-mail-address'."
-  :group 'git
-  :type '(choice (const :tag "Default" nil)
-                 (string :tag "Email")))
-
-(defcustom git-commits-coding-system nil
-  "Default coding system for the log message of git commits."
-  :group 'git
-  :type '(choice (const :tag "From repository config" nil)
-                 (coding-system)))
-
-(defcustom git-append-signed-off-by nil
-  "Whether to append a Signed-off-by line to the commit message before editing."
-  :group 'git
-  :type 'boolean)
-
-(defcustom git-reuse-status-buffer t
-  "Whether `git-status' should try to reuse an existing buffer
-if there is already one that displays the same directory."
-  :group 'git
-  :type 'boolean)
-
-(defcustom git-per-dir-ignore-file ".gitignore"
-  "Name of the per-directory ignore file."
-  :group 'git
-  :type 'string)
-
-(defcustom git-show-uptodate nil
-  "Whether to display up-to-date files."
-  :group 'git
-  :type 'boolean)
-
-(defcustom git-show-ignored nil
-  "Whether to display ignored files."
-  :group 'git
-  :type 'boolean)
-
-(defcustom git-show-unknown t
-  "Whether to display unknown files."
-  :group 'git
-  :type 'boolean)
-
-
-(defface git-status-face
-  '((((class color) (background light)) (:foreground "purple"))
-    (((class color) (background dark)) (:foreground "salmon")))
-  "Git mode face used to highlight added and modified files."
-  :group 'git)
-
-(defface git-unmerged-face
-  '((((class color) (background light)) (:foreground "red" :bold t))
-    (((class color) (background dark)) (:foreground "red" :bold t)))
-  "Git mode face used to highlight unmerged files."
-  :group 'git)
-
-(defface git-unknown-face
-  '((((class color) (background light)) (:foreground "goldenrod" :bold t))
-    (((class color) (background dark)) (:foreground "goldenrod" :bold t)))
-  "Git mode face used to highlight unknown files."
-  :group 'git)
-
-(defface git-uptodate-face
-  '((((class color) (background light)) (:foreground "grey60"))
-    (((class color) (background dark)) (:foreground "grey40")))
-  "Git mode face used to highlight up-to-date files."
-  :group 'git)
-
-(defface git-ignored-face
-  '((((class color) (background light)) (:foreground "grey60"))
-    (((class color) (background dark)) (:foreground "grey40")))
-  "Git mode face used to highlight ignored files."
-  :group 'git)
-
-(defface git-mark-face
-  '((((class color) (background light)) (:foreground "red" :bold t))
-    (((class color) (background dark)) (:foreground "tomato" :bold t)))
-  "Git mode face used for the file marks."
-  :group 'git)
-
-(defface git-header-face
-  '((((class color) (background light)) (:foreground "blue"))
-    (((class color) (background dark)) (:foreground "blue")))
-  "Git mode face used for commit headers."
-  :group 'git)
-
-(defface git-separator-face
-  '((((class color) (background light)) (:foreground "brown"))
-    (((class color) (background dark)) (:foreground "brown")))
-  "Git mode face used for commit separator."
-  :group 'git)
-
-(defface git-permission-face
-  '((((class color) (background light)) (:foreground "green" :bold t))
-    (((class color) (background dark)) (:foreground "green" :bold t)))
-  "Git mode face used for permission changes."
-  :group 'git)
-
-
-;;;; Utilities
-;;;; ------------------------------------------------------------
-
-(defconst git-log-msg-separator "--- log message follows this line ---")
-
-(defvar git-log-edit-font-lock-keywords
-  `(("^\\(Author:\\|Date:\\|Merge:\\|Signed-off-by:\\)\\(.*\\)$"
-     (1 font-lock-keyword-face)
-     (2 font-lock-function-name-face))
-    (,(concat "^\\(" (regexp-quote git-log-msg-separator) "\\)$")
-     (1 font-lock-comment-face))))
-
-(defun git-get-env-strings (env)
-  "Build a list of NAME=VALUE strings from a list of environment strings."
-  (mapcar (lambda (entry) (concat (car entry) "=" (cdr entry))) env))
-
-(defun git-call-process (buffer &rest args)
-  "Wrapper for call-process that sets environment strings."
-  (apply #'call-process "git" nil buffer nil args))
-
-(defun git-call-process-display-error (&rest args)
-  "Wrapper for call-process that displays error messages."
-  (let* ((dir default-directory)
-         (buffer (get-buffer-create "*Git Command Output*"))
-         (ok (with-current-buffer buffer
-               (let ((default-directory dir)
-                     (buffer-read-only nil))
-                 (erase-buffer)
-                 (eq 0 (apply #'git-call-process (list buffer t) args))))))
-    (unless ok (display-message-or-buffer buffer))
-    ok))
-
-(defun git-call-process-string (&rest args)
-  "Wrapper for call-process that returns the process output as a string,
-or nil if the git command failed."
-  (with-temp-buffer
-    (and (eq 0 (apply #'git-call-process t args))
-         (buffer-string))))
-
-(defun git-call-process-string-display-error (&rest args)
-  "Wrapper for call-process that displays error message and returns
-the process output as a string, or nil if the git command failed."
-  (with-temp-buffer
-    (if (eq 0 (apply #'git-call-process (list t t) args))
-        (buffer-string)
-      (display-message-or-buffer (current-buffer))
-      nil)))
-
-(defun git-run-process-region (buffer start end program args)
-  "Run a git process with a buffer region as input."
-  (let ((output-buffer (current-buffer))
-        (dir default-directory))
-    (with-current-buffer buffer
-      (cd dir)
-      (apply #'call-process-region start end program
-             nil (list output-buffer t) nil args))))
-
-(defun git-run-command-buffer (buffer-name &rest args)
-  "Run a git command, sending the output to a buffer named BUFFER-NAME."
-  (let ((dir default-directory)
-        (buffer (get-buffer-create buffer-name)))
-    (message "Running git %s..." (car args))
-    (with-current-buffer buffer
-      (let ((default-directory dir)
-            (buffer-read-only nil))
-        (erase-buffer)
-        (apply #'git-call-process buffer args)))
-    (message "Running git %s...done" (car args))
-    buffer))
-
-(defun git-run-command-region (buffer start end env &rest args)
-  "Run a git command with specified buffer region as input."
-  (with-temp-buffer
-    (if (eq 0 (if env
-                  (git-run-process-region
-                   buffer start end "env"
-                   (append (git-get-env-strings env) (list "git") args))
-                (git-run-process-region buffer start end "git" args)))
-        (buffer-string)
-      (display-message-or-buffer (current-buffer))
-      nil)))
-
-(defun git-run-hook (hook env &rest args)
-  "Run a git hook and display its output if any."
-  (let ((dir default-directory)
-        (hook-name (expand-file-name (concat ".git/hooks/" hook))))
-    (or (not (file-executable-p hook-name))
-        (let (status (buffer (get-buffer-create "*Git Hook Output*")))
-          (with-current-buffer buffer
-            (erase-buffer)
-            (cd dir)
-            (setq status
-                  (if env
-                      (apply #'call-process "env" nil (list buffer t) nil
-                             (append (git-get-env-strings env) (list hook-name) args))
-                    (apply #'call-process hook-name nil (list buffer t) nil args))))
-          (display-message-or-buffer buffer)
-          (eq 0 status)))))
-
-(defun git-get-string-sha1 (string)
-  "Read a SHA1 from the specified string."
-  (and string
-       (string-match "[0-9a-f]\\{40\\}" string)
-       (match-string 0 string)))
-
-(defun git-get-committer-name ()
-  "Return the name to use as GIT_COMMITTER_NAME."
-  ; copied from log-edit
-  (or git-committer-name
-      (git-config "user.name")
-      (and (boundp 'add-log-full-name) add-log-full-name)
-      (and (fboundp 'user-full-name) (user-full-name))
-      (and (boundp 'user-full-name) user-full-name)))
-
-(defun git-get-committer-email ()
-  "Return the email address to use as GIT_COMMITTER_EMAIL."
-  ; copied from log-edit
-  (or git-committer-email
-      (git-config "user.email")
-      (and (boundp 'add-log-mailing-address) add-log-mailing-address)
-      (and (fboundp 'user-mail-address) (user-mail-address))
-      (and (boundp 'user-mail-address) user-mail-address)))
-
-(defun git-get-commits-coding-system ()
-  "Return the coding system to use for commits."
-  (let ((repo-config (git-config "i18n.commitencoding")))
-    (or git-commits-coding-system
-        (and repo-config
-             (fboundp 'locale-charset-to-coding-system)
-             (locale-charset-to-coding-system repo-config))
-      'utf-8)))
-
-(defun git-get-logoutput-coding-system ()
-  "Return the coding system used for git-log output."
-  (let ((repo-config (or (git-config "i18n.logoutputencoding")
-                         (git-config "i18n.commitencoding"))))
-    (or git-commits-coding-system
-        (and repo-config
-             (fboundp 'locale-charset-to-coding-system)
-             (locale-charset-to-coding-system repo-config))
-      'utf-8)))
-
-(defun git-escape-file-name (name)
-  "Escape a file name if necessary."
-  (if (string-match "[\n\t\"\\]" name)
-      (concat "\""
-              (mapconcat (lambda (c)
-                   (case c
-                     (?\n "\\n")
-                     (?\t "\\t")
-                     (?\\ "\\\\")
-                     (?\" "\\\"")
-                     (t (char-to-string c))))
-                 name "")
-              "\"")
-    name))
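-
-;; A quick sketch of the behaviour:
-;;   (git-escape-file-name "plain-name.txt") => "plain-name.txt"
-;;   (git-escape-file-name "with\ttab")      => "\"with\\ttab\""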
-
-(defun git-success-message (text files)
-  "Print a success message after having handled FILES."
-  (let ((n (length files)))
-    (if (equal n 1)
-        (message "%s %s" text (car files))
-      (message "%s %d files" text n))))
-
-(defun git-get-top-dir (dir)
-  "Retrieve the top-level directory of a git tree."
-  (let ((cdup (with-output-to-string
-                (with-current-buffer standard-output
-                  (cd dir)
-                  (unless (eq 0 (git-call-process t "rev-parse" "--show-cdup"))
-                    (error "cannot find top-level git tree for %s." dir))))))
-    (expand-file-name (concat (file-name-as-directory dir)
-                              (car (split-string cdup "\n"))))))
-
-;stolen from pcl-cvs
-(defun git-append-to-ignore (file)
-  "Add a file name to the ignore file in its directory."
-  (let* ((fullname (expand-file-name file))
-         (dir (file-name-directory fullname))
-         (name (file-name-nondirectory fullname))
-         (ignore-name (expand-file-name git-per-dir-ignore-file dir))
-         (created (not (file-exists-p ignore-name))))
-  (save-window-excursion
-    (set-buffer (find-file-noselect ignore-name))
-    (goto-char (point-max))
-    (unless (zerop (current-column)) (insert "\n"))
-    (insert "/" name "\n")
-    (sort-lines nil (point-min) (point-max))
-    (save-buffer))
-  (when created
-    (git-call-process nil "update-index" "--add" "--" (file-relative-name ignore-name)))
-  (git-update-status-files (list (file-relative-name ignore-name)))))
-
-; propertize definition for XEmacs, stolen from erc-compat
-(eval-when-compile
-  (unless (fboundp 'propertize)
-    (defun propertize (string &rest props)
-      (let ((string (copy-sequence string)))
-        (while props
-          (put-text-property 0 (length string) (nth 0 props) (nth 1 props) string)
-          (setq props (cddr props)))
-        string))))
-
-;;;; Wrappers for basic git commands
-;;;; ------------------------------------------------------------
-
-(defun git-rev-parse (rev)
-  "Parse a revision name and return its SHA1."
-  (git-get-string-sha1
-   (git-call-process-string "rev-parse" rev)))
-
-(defun git-config (key)
-  "Retrieve the value associated to KEY in the git repository config file."
-  (let ((str (git-call-process-string "config" key)))
-    (and str (car (split-string str "\n")))))
-
-(defun git-symbolic-ref (ref)
-  "Wrapper for the git-symbolic-ref command."
-  (let ((str (git-call-process-string "symbolic-ref" ref)))
-    (and str (car (split-string str "\n")))))
-
-(defun git-update-ref (ref newval &optional oldval reason)
-  "Update a reference by calling git-update-ref."
-  (let ((args (and oldval (list oldval))))
-    (when newval (push newval args))
-    (push ref args)
-    (when reason
-     (push reason args)
-     (push "-m" args))
-    (unless newval (push "-d" args))
-    (apply 'git-call-process-display-error "update-ref" args)))
-
-(defun git-for-each-ref (&rest specs)
-  "Return a list of refs using git-for-each-ref.
-Each entry is a cons of (SHORT-NAME . FULL-NAME)."
-  (let (refs)
-    (with-temp-buffer
-      (apply #'git-call-process t "for-each-ref" "--format=%(refname)" specs)
-      (goto-char (point-min))
-      (while (re-search-forward "^[^/\n]+/[^/\n]+/\\(.+\\)$" nil t)
-       (push (cons (match-string 1) (match-string 0)) refs)))
-    (nreverse refs)))
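-
-;; For example (branch names here are illustrative):
-;;   (git-for-each-ref "refs/heads")
-;;   => (("master" . "refs/heads/master") ("next" . "refs/heads/next"))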
-
-(defun git-read-tree (tree &optional index-file)
-  "Read a tree into the index file."
-  (let ((process-environment
-         (append (and index-file (list (concat "GIT_INDEX_FILE=" index-file))) process-environment)))
-    (apply 'git-call-process-display-error "read-tree" (if tree (list tree)))))
-
-(defun git-write-tree (&optional index-file)
-  "Call git-write-tree and return the resulting tree SHA1 as a string."
-  (let ((process-environment
-         (append (and index-file (list (concat "GIT_INDEX_FILE=" index-file))) process-environment)))
-    (git-get-string-sha1
-     (git-call-process-string-display-error "write-tree"))))
-
-(defun git-commit-tree (buffer tree parent)
-  "Create a commit and possibly update HEAD.
-Create a commit with the message in BUFFER using the tree with hash TREE.
-Use PARENT as the parent of the new commit. If PARENT is the current \"HEAD\",
-update the \"HEAD\" reference to the new commit."
-  (let ((author-name (git-get-committer-name))
-        (author-email (git-get-committer-email))
-        (subject "commit (initial): ")
-        author-date log-start log-end args coding-system-for-write)
-    (when parent
-      (setq subject "commit: ")
-      (push "-p" args)
-      (push parent args))
-    (with-current-buffer buffer
-      (goto-char (point-min))
-      (if
-          (setq log-start (re-search-forward (concat "^" (regexp-quote git-log-msg-separator) "\n") nil t))
-          (save-restriction
-            (narrow-to-region (point-min) log-start)
-            (goto-char (point-min))
-            (when (re-search-forward "^Author: +\\(.*?\\) *<\\(.*\\)> *$" nil t)
-              (setq author-name (match-string 1)
-                    author-email (match-string 2)))
-            (goto-char (point-min))
-            (when (re-search-forward "^Date: +\\(.*\\)$" nil t)
-              (setq author-date (match-string 1)))
-            (goto-char (point-min))
-            (when (re-search-forward "^Merge: +\\(.*\\)" nil t)
-              (setq subject "commit (merge): ")
-              (dolist (parent (split-string (match-string 1) " +" t))
-                (push "-p" args)
-                (push parent args))))
-        (setq log-start (point-min)))
-      (setq log-end (point-max))
-      (goto-char log-start)
-      (when (re-search-forward ".*$" nil t)
-        (setq subject (concat subject (match-string 0))))
-      (setq coding-system-for-write buffer-file-coding-system))
-    (let ((commit
-           (git-get-string-sha1
-            (let ((env `(("GIT_AUTHOR_NAME" . ,author-name)
-                         ("GIT_AUTHOR_EMAIL" . ,author-email)
-                         ("GIT_COMMITTER_NAME" . ,(git-get-committer-name))
-                         ("GIT_COMMITTER_EMAIL" . ,(git-get-committer-email)))))
-              (when author-date (push `("GIT_AUTHOR_DATE" . ,author-date) env))
-              (apply #'git-run-command-region
-                     buffer log-start log-end env
-                     "commit-tree" tree (nreverse args))))))
-      (when commit (git-update-ref "HEAD" commit parent subject))
-      commit)))
-
-(defun git-empty-db-p ()
-  "Check if the git db is empty (no commit done yet)."
-  (not (eq 0 (git-call-process nil "rev-parse" "--verify" "HEAD"))))
-
-(defun git-get-merge-heads ()
-  "Retrieve the merge heads from the MERGE_HEAD file if present."
-  (let (heads)
-    (when (file-readable-p ".git/MERGE_HEAD")
-      (with-temp-buffer
-        (insert-file-contents ".git/MERGE_HEAD" nil nil nil t)
-        (goto-char (point-min))
-        (while (re-search-forward "[0-9a-f]\\{40\\}" nil t)
-          (push (match-string 0) heads))))
-    (nreverse heads)))
-
-(defun git-get-commit-description (commit)
-  "Get a one-line description of COMMIT."
-  (let ((coding-system-for-read (git-get-logoutput-coding-system)))
-    (let ((descr (git-call-process-string "log" "--max-count=1" "--pretty=oneline" commit)))
-      (if (and descr (string-match "\\`\\([0-9a-f]\\{40\\}\\) *\\(.*\\)$" descr))
-          (concat (substring (match-string 1 descr) 0 10) " - " (match-string 2 descr))
-        descr))))
-
-;;;; File info structure
-;;;; ------------------------------------------------------------
-
-; fileinfo structure stolen from pcl-cvs
-(defstruct (git-fileinfo
-            (:copier nil)
-            (:constructor git-create-fileinfo (state name &optional old-perm new-perm rename-state orig-name marked))
-            (:conc-name git-fileinfo->))
-  marked              ;; t/nil
-  state               ;; current state
-  name                ;; file name
-  old-perm new-perm   ;; permission flags
-  rename-state        ;; rename or copy state
-  orig-name           ;; original name for renames or copies
-  needs-update        ;; whether file needs to be updated
-  needs-refresh)      ;; whether file needs to be refreshed
-
-(defvar git-status nil)
-
-(defun git-set-fileinfo-state (info state)
-  "Set the state of a file info."
-  (unless (eq (git-fileinfo->state info) state)
-    (setf (git-fileinfo->state info) state
-         (git-fileinfo->new-perm info) (git-fileinfo->old-perm info)
-          (git-fileinfo->rename-state info) nil
-          (git-fileinfo->orig-name info) nil
-          (git-fileinfo->needs-update info) nil
-          (git-fileinfo->needs-refresh info) t)))
-
-(defun git-status-filenames-map (status func files &rest args)
-  "Apply FUNC to the status files names in the FILES list.
-The list must be sorted."
-  (when files
-    (let ((file (pop files))
-          (node (ewoc-nth status 0)))
-      (while (and file node)
-        (let* ((info (ewoc-data node))
-               (name (git-fileinfo->name info)))
-          (if (string-lessp name file)
-              (setq node (ewoc-next status node))
-            (if (string-equal name file)
-                (apply func info args))
-            (setq file (pop files))))))))
-
-(defun git-set-filenames-state (status files state)
-  "Set the state of a list of named files. The list must be sorted"
-  (when files
-    (git-status-filenames-map status #'git-set-fileinfo-state files state)
-    (unless state  ;; delete files whose state has been set to nil
-      (ewoc-filter status (lambda (info) (git-fileinfo->state info))))))
-
-(defun git-state-code (code)
-  "Convert from a string to a added/deleted/modified state."
-  (case (string-to-char code)
-    (?M 'modified)
-    (?? 'unknown)
-    (?A 'added)
-    (?D 'deleted)
-    (?U 'unmerged)
-    (?T 'modified)
-    (t nil)))
-
-(defun git-status-code-as-string (code)
-  "Format a git status code as string."
-  (case code
-    ('modified (propertize "Modified" 'face 'git-status-face))
-    ('unknown  (propertize "Unknown " 'face 'git-unknown-face))
-    ('added    (propertize "Added   " 'face 'git-status-face))
-    ('deleted  (propertize "Deleted " 'face 'git-status-face))
-    ('unmerged (propertize "Unmerged" 'face 'git-unmerged-face))
-    ('uptodate (propertize "Uptodate" 'face 'git-uptodate-face))
-    ('ignored  (propertize "Ignored " 'face 'git-ignored-face))
-    (t "?       ")))
-
-(defun git-file-type-as-string (old-perm new-perm)
-  "Return a string describing the file type based on its permissions."
-  (let* ((old-type (lsh (or old-perm 0) -9))
-        (new-type (lsh (or new-perm 0) -9))
-        (str (case new-type
-               (64  ;; file
-                (case old-type
-                  (64 nil)
-                  (80 "   (type change symlink -> file)")
-                  (112 "   (type change subproject -> file)")))
-                (80  ;; symlink
-                 (case old-type
-                   (64 "   (type change file -> symlink)")
-                   (112 "   (type change subproject -> symlink)")
-                   (t "   (symlink)")))
-                 (112  ;; subproject
-                  (case old-type
-                    (64 "   (type change file -> subproject)")
-                    (80 "   (type change symlink -> subproject)")
-                    (t "   (subproject)")))
-                  (72 nil)  ;; directory (internal, not a real git state)
-                 (0  ;; deleted or unknown
-                  (case old-type
-                    (80 "   (symlink)")
-                    (112 "   (subproject)")))
-                 (t (format "   (unknown type %o)" new-type)))))
-    (cond (str (propertize str 'face 'git-status-face))
-          ((eq new-type 72) "/")
-          (t ""))))
-
-(defun git-rename-as-string (info)
-  "Return a string describing the copy or rename associated with INFO, or an empty string if none."
-  (let ((state (git-fileinfo->rename-state info)))
-    (if state
-        (propertize
-         (concat "   ("
-                 (if (eq state 'copy) "copied from "
-                   (if (eq (git-fileinfo->state info) 'added) "renamed from "
-                     "renamed to "))
-                 (git-escape-file-name (git-fileinfo->orig-name info))
-                 ")") 'face 'git-status-face)
-      "")))
-
-(defun git-permissions-as-string (old-perm new-perm)
-  "Format a permission change as string."
-  (propertize
-   (if (or (not old-perm)
-           (not new-perm)
-           (eq 0 (logand ?\111 (logxor old-perm new-perm))))
-       "  "
-     (if (eq 0 (logand ?\111 old-perm)) "+x" "-x"))
-  'face 'git-permission-face))
-
-(defun git-fileinfo-prettyprint (info)
-  "Pretty-printer for the git-fileinfo structure."
-  (let ((old-perm (git-fileinfo->old-perm info))
-       (new-perm (git-fileinfo->new-perm info)))
-    (insert (concat "   " (if (git-fileinfo->marked info) (propertize "*" 'face 'git-mark-face) " ")
-                   " " (git-status-code-as-string (git-fileinfo->state info))
-                   " " (git-permissions-as-string old-perm new-perm)
-                   "  " (git-escape-file-name (git-fileinfo->name info))
-                   (git-file-type-as-string old-perm new-perm)
-                   (git-rename-as-string info)))))
-
-(defun git-update-node-fileinfo (node info)
-  "Update the fileinfo of the specified node. The names are assumed to match already."
-  (let ((data (ewoc-data node)))
-    (setf
-     ;; preserve the marked flag
-     (git-fileinfo->marked info) (git-fileinfo->marked data)
-     (git-fileinfo->needs-update data) nil)
-    (when (not (equal info data))
-      (setf (git-fileinfo->needs-refresh info) t
-            (ewoc-data node) info))))
-
-(defun git-insert-info-list (status infolist files)
-  "Insert a sorted list of file infos in the status buffer, replacing existing ones if any."
-  (let* ((info (pop infolist))
-         (node (ewoc-nth status 0))
-         (name (and info (git-fileinfo->name info)))
-         remaining)
-    (while info
-      (let ((nodename (and node (git-fileinfo->name (ewoc-data node)))))
-        (while (and files (string-lessp (car files) name))
-          (push (pop files) remaining))
-        (when (and files (string-equal (car files) name))
-          (setq files (cdr files)))
-        (cond ((not nodename)
-               (setq node (ewoc-enter-last status info))
-               (setq info (pop infolist))
-               (setq name (and info (git-fileinfo->name info))))
-              ((string-lessp nodename name)
-               (setq node (ewoc-next status node)))
-              ((string-equal nodename name)
-               ;; preserve the marked flag
-               (git-update-node-fileinfo node info)
-               (setq info (pop infolist))
-               (setq name (and info (git-fileinfo->name info))))
-              (t
-               (setq node (ewoc-enter-before status node info))
-               (setq info (pop infolist))
-               (setq name (and info (git-fileinfo->name info)))))))
-    (nconc (nreverse remaining) files)))
-
-(defun git-run-diff-index (status files)
-  "Run git-diff-index on FILES and parse the results into STATUS.
-Return the list of files that haven't been handled."
-  (let (infolist)
-    (with-temp-buffer
-      (apply #'git-call-process t "diff-index" "-z" "-M" "HEAD" "--" files)
-      (goto-char (point-min))
-      (while (re-search-forward
-             ":\\([0-7]\\{6\\}\\) \\([0-7]\\{6\\}\\) [0-9a-f]\\{40\\} [0-9a-f]\\{40\\} \\(\\([ADMUT]\\)\0\\([^\0]+\\)\\|\\([CR]\\)[0-9]*\0\\([^\0]+\\)\0\\([^\0]+\\)\\)\0"
-              nil t 1)
-        (let ((old-perm (string-to-number (match-string 1) 8))
-              (new-perm (string-to-number (match-string 2) 8))
-              (state (or (match-string 4) (match-string 6)))
-              (name (or (match-string 5) (match-string 7)))
-              (new-name (match-string 8)))
-          (if new-name  ; copy or rename
-              (if (eq ?C (string-to-char state))
-                  (push (git-create-fileinfo 'added new-name old-perm new-perm 'copy name) infolist)
-                (push (git-create-fileinfo 'deleted name 0 0 'rename new-name) infolist)
-                (push (git-create-fileinfo 'added new-name old-perm new-perm 'rename name) infolist))
-            (push (git-create-fileinfo (git-state-code state) name old-perm new-perm) infolist)))))
-    (setq infolist (sort (nreverse infolist)
-                         (lambda (info1 info2)
-                           (string-lessp (git-fileinfo->name info1)
-                                         (git-fileinfo->name info2)))))
-    (git-insert-info-list status infolist files)))
-
-(defun git-find-status-file (status file)
-  "Find a given file in the status ewoc and return its node."
-  (let ((node (ewoc-nth status 0)))
-    (while (and node (not (string= file (git-fileinfo->name (ewoc-data node)))))
-      (setq node (ewoc-next status node)))
-    node))
-
-(defun git-run-ls-files (status files default-state &rest options)
-  "Run git-ls-files on FILES and parse the results into STATUS.
-Return the list of files that haven't been handled."
-  (let (infolist)
-    (with-temp-buffer
-      (apply #'git-call-process t "ls-files" "-z" (append options (list "--") files))
-      (goto-char (point-min))
-      (while (re-search-forward "\\([^\0]*?\\)\\(/?\\)\0" nil t 1)
-        (let ((name (match-string 1)))
-          (push (git-create-fileinfo default-state name 0
-                                     (if (string-equal "/" (match-string 2)) (lsh ?\110 9) 0))
-                infolist))))
-    (setq infolist (nreverse infolist))  ;; assume it is sorted already
-    (git-insert-info-list status infolist files)))
-
-(defun git-run-ls-files-cached (status files default-state)
-  "Run git-ls-files -c on FILES and parse the results into STATUS.
-Return the list of files that haven't been handled."
-  (let (infolist)
-    (with-temp-buffer
-      (apply #'git-call-process t "ls-files" "-z" "-s" "-c" "--" files)
-      (goto-char (point-min))
-      (while (re-search-forward "\\([0-7]\\{6\\}\\) [0-9a-f]\\{40\\} 0\t\\([^\0]+\\)\0" nil t)
-       (let* ((new-perm (string-to-number (match-string 1) 8))
-              (old-perm (if (eq default-state 'added) 0 new-perm))
-              (name (match-string 2)))
-         (push (git-create-fileinfo default-state name old-perm new-perm) infolist))))
-    (setq infolist (nreverse infolist))  ;; assume it is sorted already
-    (git-insert-info-list status infolist files)))
-
-(defun git-run-ls-unmerged (status files)
-  "Run git-ls-files -u on FILES and parse the results into STATUS."
-  (with-temp-buffer
-    (apply #'git-call-process t "ls-files" "-z" "-u" "--" files)
-    (goto-char (point-min))
-    (let (unmerged-files)
-      (while (re-search-forward "[0-7]\\{6\\} [0-9a-f]\\{40\\} [123]\t\\([^\0]+\\)\0" nil t)
-        (push (match-string 1) unmerged-files))
-      (setq unmerged-files (nreverse unmerged-files))  ;; assume it is sorted already
-      (git-set-filenames-state status unmerged-files 'unmerged))))
-
-(defun git-get-exclude-files ()
-  "Get the list of exclude files to pass to git-ls-files."
-  (let (files
-        (config (git-config "core.excludesfile")))
-    (when (file-readable-p ".git/info/exclude")
-      (push ".git/info/exclude" files))
-    (when (and config (file-readable-p config))
-      (push config files))
-    files))
-
-(defun git-run-ls-files-with-excludes (status files default-state &rest options)
-  "Run git-ls-files on FILES with appropriate --exclude-from options."
-  (let ((exclude-files (git-get-exclude-files)))
-    (apply #'git-run-ls-files status files default-state "--directory" "--no-empty-directory"
-           (concat "--exclude-per-directory=" git-per-dir-ignore-file)
-           (append options (mapcar (lambda (f) (concat "--exclude-from=" f)) exclude-files)))))
-
-(defun git-update-status-files (&optional files mark-files)
-  "Update the status of FILES from the index.
-The FILES list must be sorted."
-  (unless git-status (error "Not in git-status buffer."))
-  ;; set the needs-update flag on existing files
-  (if files
-      (git-status-filenames-map
-       git-status (lambda (info) (setf (git-fileinfo->needs-update info) t)) files)
-    (ewoc-map (lambda (info) (setf (git-fileinfo->needs-update info) t) nil) git-status)
-    (git-call-process nil "update-index" "--refresh")
-    (when git-show-uptodate
-      (git-run-ls-files-cached git-status nil 'uptodate)))
-  (let ((remaining-files
-          (if (git-empty-db-p) ; we need some special handling for an empty db
-             (git-run-ls-files-cached git-status files 'added)
-            (git-run-diff-index git-status files))))
-    (git-run-ls-unmerged git-status files)
-    (when (or remaining-files (and git-show-unknown (not files)))
-      (setq remaining-files (git-run-ls-files-with-excludes git-status remaining-files 'unknown "-o")))
-    (when (or remaining-files (and git-show-ignored (not files)))
-      (setq remaining-files (git-run-ls-files-with-excludes git-status remaining-files 'ignored "-o" "-i")))
-    (unless files
-      (setq remaining-files (git-get-filenames (ewoc-collect git-status #'git-fileinfo->needs-update))))
-    (when remaining-files
-      (setq remaining-files (git-run-ls-files-cached git-status remaining-files 'uptodate)))
-    (git-set-filenames-state git-status remaining-files nil)
-    (when mark-files (git-mark-files git-status files))
-    (git-refresh-files)
-    (git-refresh-ewoc-hf git-status)))
-
-(defun git-mark-files (status files)
-  "Mark all the specified FILES, and unmark the others."
-  (let ((file (and files (pop files)))
-        (node (ewoc-nth status 0)))
-    (while node
-      (let ((info (ewoc-data node)))
-        (if (and file (string-equal (git-fileinfo->name info) file))
-            (progn
-              (unless (git-fileinfo->marked info)
-                (setf (git-fileinfo->marked info) t)
-                (setf (git-fileinfo->needs-refresh info) t))
-              (setq file (pop files))
-              (setq node (ewoc-next status node)))
-          (when (git-fileinfo->marked info)
-            (setf (git-fileinfo->marked info) nil)
-            (setf (git-fileinfo->needs-refresh info) t))
-          (if (and file (string-lessp file (git-fileinfo->name info)))
-              (setq file (pop files))
-            (setq node (ewoc-next status node))))))))
-
-(defun git-marked-files ()
-  "Return a list of all marked files, or if none a list containing just the file at cursor position."
-  (unless git-status (error "Not in git-status buffer."))
-  (or (ewoc-collect git-status (lambda (info) (git-fileinfo->marked info)))
-      (list (ewoc-data (ewoc-locate git-status)))))
-
-(defun git-marked-files-state (&rest states)
-  "Return a sorted list of marked files that are in the specified states."
-  (let ((files (git-marked-files))
-        result)
-    (dolist (info files)
-      (when (memq (git-fileinfo->state info) states)
-        (push info result)))
-    (nreverse result)))
-
-(defun git-refresh-files ()
-  "Refresh all files that need it and clear the needs-refresh flag."
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-map
-   (lambda (info)
-     (let ((refresh (git-fileinfo->needs-refresh info)))
-       (setf (git-fileinfo->needs-refresh info) nil)
-       refresh))
-   git-status)
-  ; move back to goal column
-  (when goal-column (move-to-column goal-column)))
-
-(defun git-refresh-ewoc-hf (status)
-  "Refresh the ewoc header and footer."
-  (let ((branch (git-symbolic-ref "HEAD"))
-        (head (if (git-empty-db-p) "Nothing committed yet"
-                (git-get-commit-description "HEAD")))
-        (merge-heads (git-get-merge-heads)))
-    (ewoc-set-hf status
-                 (format "Directory:  %s\nBranch:     %s\nHead:       %s%s\n"
-                         default-directory
-                         (if branch
-                             (if (string-match "^refs/heads/" branch)
-                                 (substring branch (match-end 0))
-                               branch)
-                           "none (detached HEAD)")
-                         head
-                         (if merge-heads
-                             (concat "\nMerging:    "
-                                     (mapconcat (lambda (str) (git-get-commit-description str)) merge-heads "\n            "))
-                           ""))
-                 (if (ewoc-nth status 0) "" "    No changes."))))
-
-(defun git-get-filenames (files)
-  (mapcar (lambda (info) (git-fileinfo->name info)) files))
-
-(defun git-update-index (index-file files)
-  "Run git-update-index on a list of files."
-  (let ((process-environment (append (and index-file (list (concat "GIT_INDEX_FILE=" index-file)))
-                                     process-environment))
-        added deleted modified)
-    (dolist (info files)
-      (case (git-fileinfo->state info)
-        ('added (push info added))
-        ('deleted (push info deleted))
-        ('modified (push info modified))))
-    (and
-     (or (not added) (apply #'git-call-process-display-error "update-index" "--add" "--" (git-get-filenames added)))
-     (or (not deleted) (apply #'git-call-process-display-error "update-index" "--remove" "--" (git-get-filenames deleted)))
-     (or (not modified) (apply #'git-call-process-display-error "update-index" "--" (git-get-filenames modified))))))
-
-(defun git-run-pre-commit-hook ()
-  "Run the pre-commit hook if any."
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((files (git-marked-files-state 'added 'deleted 'modified)))
-    (or (not files)
-        (not (file-executable-p ".git/hooks/pre-commit"))
-        (let ((index-file (make-temp-file "gitidx")))
-          (unwind-protect
-            (let ((head-tree (unless (git-empty-db-p) (git-rev-parse "HEAD^{tree}"))))
-              (git-read-tree head-tree index-file)
-              (git-update-index index-file files)
-              (git-run-hook "pre-commit" `(("GIT_INDEX_FILE" . ,index-file))))
-          (delete-file index-file))))))
-
-(defun git-do-commit ()
-  "Perform the actual commit using the current buffer as log message."
-  (interactive)
-  (let ((buffer (current-buffer))
-        (index-file (make-temp-file "gitidx")))
-    (with-current-buffer log-edit-parent-buffer
-      (if (git-marked-files-state 'unmerged)
-          (message "You cannot commit unmerged files, resolve them first.")
-        (unwind-protect
-            (let ((files (git-marked-files-state 'added 'deleted 'modified))
-                  head tree head-tree)
-              (unless (git-empty-db-p)
-                (setq head (git-rev-parse "HEAD")
-                      head-tree (git-rev-parse "HEAD^{tree}")))
-              (message "Running git commit...")
-              (when
-                  (and
-                   (git-read-tree head-tree index-file)
-                   (git-update-index nil files)         ;update both the default index
-                   (git-update-index index-file files)  ;and the temporary one
-                   (setq tree (git-write-tree index-file)))
-                (if (or (not (string-equal tree head-tree))
-                        (yes-or-no-p "The tree was not modified, do you really want to perform an empty commit? "))
-                    (let ((commit (git-commit-tree buffer tree head)))
-                      (when commit
-                        (condition-case nil (delete-file ".git/MERGE_HEAD") (error nil))
-                        (condition-case nil (delete-file ".git/MERGE_MSG") (error nil))
-                        (with-current-buffer buffer (erase-buffer))
-                        (git-update-status-files (git-get-filenames files))
-                        (git-call-process nil "rerere")
-                        (git-call-process nil "gc" "--auto")
-                        (message "Committed %s." commit)
-                        (git-run-hook "post-commit" nil)))
-                  (message "Commit aborted."))))
-          (delete-file index-file))))))
-
-
-;;;; Interactive functions
-;;;; ------------------------------------------------------------
-
-(defun git-mark-file ()
-  "Mark the file that the cursor is on and move to the next one."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let* ((pos (ewoc-locate git-status))
-         (info (ewoc-data pos)))
-    (setf (git-fileinfo->marked info) t)
-    (ewoc-invalidate git-status pos)
-    (ewoc-goto-next git-status 1)))
-
-(defun git-unmark-file ()
-  "Unmark the file that the cursor is on and move to the next one."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let* ((pos (ewoc-locate git-status))
-         (info (ewoc-data pos)))
-    (setf (git-fileinfo->marked info) nil)
-    (ewoc-invalidate git-status pos)
-    (ewoc-goto-next git-status 1)))
-
-(defun git-unmark-file-up ()
-  "Unmark the file that the cursor is on and move to the previous one."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let* ((pos (ewoc-locate git-status))
-         (info (ewoc-data pos)))
-    (setf (git-fileinfo->marked info) nil)
-    (ewoc-invalidate git-status pos)
-    (ewoc-goto-prev git-status 1)))
-
-(defun git-mark-all ()
-  "Mark all files."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-map (lambda (info) (unless (git-fileinfo->marked info)
-                             (setf (git-fileinfo->marked info) t))) git-status)
-  ; move back to goal column after invalidate
-  (when goal-column (move-to-column goal-column)))
-
-(defun git-unmark-all ()
-  "Unmark all files."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-map (lambda (info) (when (git-fileinfo->marked info)
-                             (setf (git-fileinfo->marked info) nil)
-                             t)) git-status)
-  ; move back to goal column after invalidate
-  (when goal-column (move-to-column goal-column)))
-
-(defun git-toggle-all-marks ()
-  "Toggle all file marks."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-map (lambda (info) (setf (git-fileinfo->marked info) (not (git-fileinfo->marked info))) t) git-status)
-  ; move back to goal column after invalidate
-  (when goal-column (move-to-column goal-column)))
-
-(defun git-next-file (&optional n)
-  "Move the selection down N files."
-  (interactive "p")
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-goto-next git-status n))
-
-(defun git-prev-file (&optional n)
-  "Move the selection up N files."
-  (interactive "p")
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-goto-prev git-status n))
-
-(defun git-next-unmerged-file (&optional n)
-  "Move the selection down N unmerged files."
-  (interactive "p")
-  (unless git-status (error "Not in git-status buffer."))
-  (let* ((last (ewoc-locate git-status))
-         (node (ewoc-next git-status last)))
-    (while (and node (> n 0))
-      (when (eq 'unmerged (git-fileinfo->state (ewoc-data node)))
-        (setq n (1- n))
-        (setq last node))
-      (setq node (ewoc-next git-status node)))
-    (ewoc-goto-node git-status last)))
-
-(defun git-prev-unmerged-file (&optional n)
-  "Move the selection up N unmerged files."
-  (interactive "p")
-  (unless git-status (error "Not in git-status buffer."))
-  (let* ((last (ewoc-locate git-status))
-         (node (ewoc-prev git-status last)))
-    (while (and node (> n 0))
-      (when (eq 'unmerged (git-fileinfo->state (ewoc-data node)))
-        (setq n (1- n))
-        (setq last node))
-      (setq node (ewoc-prev git-status node)))
-    (ewoc-goto-node git-status last)))
-
-(defun git-insert-file (file)
-  "Insert file(s) into the git-status buffer."
-  (interactive "fInsert file: ")
-  (git-update-status-files (list (file-relative-name file))))
-
-(defun git-add-file ()
-  "Add marked file(s) to the index cache."
-  (interactive)
-  (let ((files (git-get-filenames (git-marked-files-state 'unknown 'ignored 'unmerged))))
-    ;; FIXME: add support for directories
-    (unless files
-      (push (file-relative-name (read-file-name "File to add: " nil nil t)) files))
-    (when (apply 'git-call-process-display-error "update-index" "--add" "--" files)
-      (git-update-status-files files)
-      (git-success-message "Added" files))))
-
-(defun git-ignore-file ()
-  "Add marked file(s) to the ignore list."
-  (interactive)
-  (let ((files (git-get-filenames (git-marked-files-state 'unknown))))
-    (unless files
-      (push (file-relative-name (read-file-name "File to ignore: " nil nil t)) files))
-    (dolist (f files) (git-append-to-ignore f))
-    (git-update-status-files files)
-    (git-success-message "Ignored" files)))
-
-(defun git-remove-file ()
-  "Remove the marked file(s)."
-  (interactive)
-  (let ((files (git-get-filenames (git-marked-files-state 'added 'modified 'unknown 'uptodate 'ignored))))
-    (unless files
-      (push (file-relative-name (read-file-name "File to remove: " nil nil t)) files))
-    (if (yes-or-no-p
-         (if (cdr files)
-             (format "Remove %d files? " (length files))
-           (format "Remove %s? " (car files))))
-        (progn
-          (dolist (name files)
-            (ignore-errors
-              (if (file-directory-p name)
-                  (delete-directory name)
-                (delete-file name))))
-          (when (apply 'git-call-process-display-error "update-index" "--remove" "--" files)
-            (git-update-status-files files)
-            (git-success-message "Removed" files)))
-      (message "Aborting"))))
-
-(defun git-revert-file ()
-  "Revert changes to the marked file(s)."
-  (interactive)
-  (let ((files (git-marked-files-state 'added 'deleted 'modified 'unmerged))
-        added modified)
-    (when (and files
-               (yes-or-no-p
-                (if (cdr files)
-                    (format "Revert %d files? " (length files))
-                  (format "Revert %s? " (git-fileinfo->name (car files))))))
-      (dolist (info files)
-        (case (git-fileinfo->state info)
-          ('added (push (git-fileinfo->name info) added))
-          ('deleted (push (git-fileinfo->name info) modified))
-          ('unmerged (push (git-fileinfo->name info) modified))
-          ('modified (push (git-fileinfo->name info) modified))))
-      ;; check if a buffer contains one of the files and isn't saved
-      (dolist (file modified)
-        (let ((buffer (get-file-buffer file)))
-          (when (and buffer (buffer-modified-p buffer))
-            (error "Buffer %s is modified. Please kill or save modified buffers before reverting." (buffer-name buffer)))))
-      (let ((ok (and
-                 (or (not added)
-                     (apply 'git-call-process-display-error "update-index" "--force-remove" "--" added))
-                 (or (not modified)
-                     (apply 'git-call-process-display-error "checkout" "HEAD" modified))))
-            (names (git-get-filenames files)))
-        (git-update-status-files names)
-        (when ok
-          (dolist (file modified)
-            (let ((buffer (get-file-buffer file)))
-              (when buffer (with-current-buffer buffer (revert-buffer t t t)))))
-          (git-success-message "Reverted" names))))))
-
-(defun git-remove-handled ()
-  "Remove handled files from the status list."
-  (interactive)
-  (ewoc-filter git-status
-               (lambda (info)
-                 (case (git-fileinfo->state info)
-                   ('ignored git-show-ignored)
-                   ('uptodate git-show-uptodate)
-                   ('unknown git-show-unknown)
-                   (t t))))
-  (unless (ewoc-nth git-status 0)  ; refresh header if list is empty
-    (git-refresh-ewoc-hf git-status)))
-
-(defun git-toggle-show-uptodate ()
-  "Toogle the option for showing up-to-date files."
-  (interactive)
-  (if (setq git-show-uptodate (not git-show-uptodate))
-      (git-refresh-status)
-    (git-remove-handled)))
-
-(defun git-toggle-show-ignored ()
-  "Toogle the option for showing ignored files."
-  (interactive)
-  (if (setq git-show-ignored (not git-show-ignored))
-      (progn
-        (message "Inserting ignored files...")
-        (git-run-ls-files-with-excludes git-status nil 'ignored "-o" "-i")
-        (git-refresh-files)
-        (git-refresh-ewoc-hf git-status)
-        (message "Inserting ignored files...done"))
-    (git-remove-handled)))
-
-(defun git-toggle-show-unknown ()
-  "Toogle the option for showing unknown files."
-  (interactive)
-  (if (setq git-show-unknown (not git-show-unknown))
-      (progn
-        (message "Inserting unknown files...")
-        (git-run-ls-files-with-excludes git-status nil 'unknown "-o")
-        (git-refresh-files)
-        (git-refresh-ewoc-hf git-status)
-        (message "Inserting unknown files...done"))
-    (git-remove-handled)))
-
-(defun git-expand-directory (info)
-  "Expand the directory represented by INFO to list its files."
-  (when (eq (lsh (git-fileinfo->new-perm info) -9) ?\110)
-    (let ((dir (git-fileinfo->name info)))
-      (git-set-filenames-state git-status (list dir) nil)
-      (git-run-ls-files-with-excludes git-status (list (concat dir "/")) 'unknown "-o")
-      (git-refresh-files)
-      (git-refresh-ewoc-hf git-status)
-      t)))
-
-(defun git-setup-diff-buffer (buffer)
-  "Setup a buffer for displaying a diff."
-  (let ((dir default-directory))
-    (with-current-buffer buffer
-      (diff-mode)
-      (goto-char (point-min))
-      (setq default-directory dir)
-      (setq buffer-read-only t)))
-  (display-buffer buffer)
-  ; shrink window only if it displays the status buffer
-  (when (eq (window-buffer) (current-buffer))
-    (shrink-window-if-larger-than-buffer)))
-
-(defun git-diff-file ()
-  "Diff the marked file(s) against HEAD."
-  (interactive)
-  (let ((files (git-marked-files)))
-    (git-setup-diff-buffer
-     (apply #'git-run-command-buffer "*git-diff*" "diff-index" "-p" "-M" "HEAD" "--" (git-get-filenames files)))))
-
-(defun git-diff-file-merge-head (arg)
-  "Diff the marked file(s) against the first merge head (or the nth one with a numeric prefix)."
-  (interactive "p")
-  (let ((files (git-marked-files))
-        (merge-heads (git-get-merge-heads)))
-    (unless merge-heads (error "No merge in progress"))
-    (git-setup-diff-buffer
-     (apply #'git-run-command-buffer "*git-diff*" "diff-index" "-p" "-M"
-            (or (nth (1- arg) merge-heads) "HEAD") "--" (git-get-filenames files)))))
-
-(defun git-diff-unmerged-file (stage)
-  "Diff the marked unmerged file(s) against the specified stage."
-  (let ((files (git-marked-files)))
-    (git-setup-diff-buffer
-     (apply #'git-run-command-buffer "*git-diff*" "diff-files" "-p" stage "--" (git-get-filenames files)))))
-
-(defun git-diff-file-base ()
-  "Diff the marked unmerged file(s) against the common base file."
-  (interactive)
-  (git-diff-unmerged-file "-1"))
-
-(defun git-diff-file-mine ()
-  "Diff the marked unmerged file(s) against my pre-merge version."
-  (interactive)
-  (git-diff-unmerged-file "-2"))
-
-(defun git-diff-file-other ()
-  "Diff the marked unmerged file(s) against the other's pre-merge version."
-  (interactive)
-  (git-diff-unmerged-file "-3"))
-
-(defun git-diff-file-combined ()
-  "Do a combined diff of the marked unmerged file(s)."
-  (interactive)
-  (git-diff-unmerged-file "-c"))
-
-(defun git-diff-file-idiff ()
-  "Perform an interactive diff on the current file."
-  (interactive)
-  (let ((files (git-marked-files-state 'added 'deleted 'modified)))
-    (unless (eq 1 (length files))
-      (error "Cannot perform an interactive diff on multiple files."))
-    (let* ((filename (car (git-get-filenames files)))
-           (buff1 (find-file-noselect filename))
-           (buff2 (git-run-command-buffer (concat filename ".~HEAD~") "cat-file" "blob" (concat "HEAD:" filename))))
-      (ediff-buffers buff1 buff2))))
-
-(defun git-log-file ()
-  "Display a log of changes to the marked file(s)."
-  (interactive)
-  (let* ((files (git-marked-files))
-         (coding-system-for-read git-commits-coding-system)
-         (buffer (apply #'git-run-command-buffer "*git-log*" "rev-list" "--pretty" "HEAD" "--" (git-get-filenames files))))
-    (with-current-buffer buffer
-      ; (git-log-mode)  FIXME: implement log mode
-      (goto-char (point-min))
-      (setq buffer-read-only t))
-    (display-buffer buffer)))
-
-(defun git-log-edit-files ()
-  "Return a list of marked files for use in the log-edit buffer."
-  (with-current-buffer log-edit-parent-buffer
-    (git-get-filenames (git-marked-files-state 'added 'deleted 'modified))))
-
-(defun git-log-edit-diff ()
-  "Run a diff of the current files being committed from a log-edit buffer."
-  (with-current-buffer log-edit-parent-buffer
-    (git-diff-file)))
-
-(defun git-append-sign-off (name email)
-  "Append a Signed-off-by entry to the current buffer, avoiding duplicates."
-  (let ((sign-off (format "Signed-off-by: %s <%s>" name email))
-        (case-fold-search t))
-    (goto-char (point-min))
-    (unless (re-search-forward (concat "^" (regexp-quote sign-off)) nil t)
-      (goto-char (point-min))
-      (unless (re-search-forward "^Signed-off-by: " nil t)
-        (setq sign-off (concat "\n" sign-off)))
-      (goto-char (point-max))
-      (insert sign-off "\n"))))
-
-(defun git-setup-log-buffer (buffer &optional merge-heads author-name author-email subject date msg)
-  "Setup the log buffer for a commit."
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((dir default-directory)
-        (committer-name (git-get-committer-name))
-        (committer-email (git-get-committer-email))
-        (sign-off git-append-signed-off-by))
-    (with-current-buffer buffer
-      (cd dir)
-      (erase-buffer)
-      (insert
-       (propertize
-        (format "Author: %s <%s>\n%s%s"
-                (or author-name committer-name)
-                (or author-email committer-email)
-                (if date (format "Date: %s\n" date) "")
-                (if merge-heads
-                    (format "Merge: %s\n"
-                            (mapconcat 'identity merge-heads " "))
-                  ""))
-        'face 'git-header-face)
-       (propertize git-log-msg-separator 'face 'git-separator-face)
-       "\n")
-      (when subject (insert subject "\n\n"))
-      (cond (msg (insert msg "\n"))
-            ((file-readable-p ".git/rebase-apply/msg")
-             (insert-file-contents ".git/rebase-apply/msg"))
-            ((file-readable-p ".git/MERGE_MSG")
-             (insert-file-contents ".git/MERGE_MSG")))
-      ; delete empty lines at end
-      (goto-char (point-min))
-      (when (re-search-forward "\n+\\'" nil t)
-        (replace-match "\n" t t))
-      (when sign-off (git-append-sign-off committer-name committer-email)))
-    buffer))
-
-(define-derived-mode git-log-edit-mode log-edit-mode "Git-Log-Edit"
-  "Major mode for editing git log messages.
-
-Set up git-specific `font-lock-keywords' for `log-edit-mode'."
-  (set (make-local-variable 'font-lock-defaults)
-       '(git-log-edit-font-lock-keywords t t)))
-
-(defun git-commit-file ()
-  "Commit the marked file(s), asking for a commit message."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (when (git-run-pre-commit-hook)
-    (let ((buffer (get-buffer-create "*git-commit*"))
-          (coding-system (git-get-commits-coding-system))
-          author-name author-email subject date)
-      (when (eq 0 (buffer-size buffer))
-        (when (file-readable-p ".git/rebase-apply/info")
-          (with-temp-buffer
-            (insert-file-contents ".git/rebase-apply/info")
-            (goto-char (point-min))
-            (when (re-search-forward "^Author: \\(.*\\)\nEmail: \\(.*\\)$" nil t)
-              (setq author-name (match-string 1))
-              (setq author-email (match-string 2)))
-            (goto-char (point-min))
-            (when (re-search-forward "^Subject: \\(.*\\)$" nil t)
-              (setq subject (match-string 1)))
-            (goto-char (point-min))
-            (when (re-search-forward "^Date: \\(.*\\)$" nil t)
-              (setq date (match-string 1)))))
-        (git-setup-log-buffer buffer (git-get-merge-heads) author-name author-email subject date))
-      (if (boundp 'log-edit-diff-function)
-         (log-edit 'git-do-commit nil '((log-edit-listfun . git-log-edit-files)
-                                        (log-edit-diff-function . git-log-edit-diff)) buffer 'git-log-edit-mode)
-       (log-edit 'git-do-commit nil 'git-log-edit-files buffer
-                  'git-log-edit-mode))
-      (setq paragraph-separate (concat (regexp-quote git-log-msg-separator) "$\\|Author: \\|Date: \\|Merge: \\|Signed-off-by: \\|\f\\|[        ]*$"))
-      (setq buffer-file-coding-system coding-system)
-      (re-search-forward (regexp-quote (concat git-log-msg-separator "\n")) nil t))))
-
-(defun git-setup-commit-buffer (commit)
-  "Setup the commit buffer with the contents of COMMIT."
-  (let (parents author-name author-email subject date msg)
-    (with-temp-buffer
-      (let ((coding-system (git-get-logoutput-coding-system)))
-        (git-call-process t "log" "-1" "--pretty=medium" "--abbrev=40" commit)
-        (goto-char (point-min))
-        (when (re-search-forward "^Merge: *\\(.*\\)$" nil t)
-          (setq parents (cdr (split-string (match-string 1) " +"))))
-        (when (re-search-forward "^Author: *\\(.*\\) <\\(.*\\)>$" nil t)
-          (setq author-name (match-string 1))
-          (setq author-email (match-string 2)))
-        (when (re-search-forward "^Date: *\\(.*\\)$" nil t)
-          (setq date (match-string 1)))
-        (while (re-search-forward "^    \\(.*\\)$" nil t)
-          (push (match-string 1) msg))
-        (setq msg (nreverse msg))
-        (setq subject (pop msg))
-        (while (and msg (zerop (length (car msg))) (pop msg)))))
-    (git-setup-log-buffer (get-buffer-create "*git-commit*")
-                          parents author-name author-email subject date
-                          (mapconcat #'identity msg "\n"))))
-
-(defun git-get-commit-files (commit)
-  "Retrieve a sorted list of files modified by COMMIT."
-  (let (files)
-    (with-temp-buffer
-      (git-call-process t "diff-tree" "-m" "-r" "-z" "--name-only" "--no-commit-id" "--root" commit)
-      (goto-char (point-min))
-      (while (re-search-forward "\\([^\0]*\\)\0" nil t 1)
-        (push (match-string 1) files)))
-    (sort files #'string-lessp)))
-
-(defun git-read-commit-name (prompt &optional default)
-  "Ask for a commit name, with completion for local branch, remote branch and tag."
-  (completing-read prompt
-                   (list* "HEAD" "ORIG_HEAD" "FETCH_HEAD" (mapcar #'car (git-for-each-ref)))
-                  nil nil nil nil default))
-
-(defun git-checkout (branch &optional merge)
-  "Checkout a branch, tag, or any commit.
-Use a prefix arg if git should merge while checking out."
-  (interactive
-   (list (git-read-commit-name "Checkout: ")
-         current-prefix-arg))
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((args (list branch "--")))
-    (when merge (push "-m" args))
-    (when (apply #'git-call-process-display-error "checkout" args)
-      (git-update-status-files))))
-
-(defun git-branch (branch)
-  "Create a branch from the current HEAD and switch to it."
-  (interactive (list (git-read-commit-name "Branch: ")))
-  (unless git-status (error "Not in git-status buffer."))
-  (if (git-rev-parse (concat "refs/heads/" branch))
-      (if (yes-or-no-p (format "Branch %s already exists, replace it? " branch))
-          (and (git-call-process-display-error "branch" "-f" branch)
-               (git-call-process-display-error "checkout" branch))
-        (message "Canceled."))
-    (git-call-process-display-error "checkout" "-b" branch))
-    (git-refresh-ewoc-hf git-status))
-
-(defun git-amend-commit ()
-  "Undo the last commit on HEAD, and set things up to commit an
-amended version of it."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (when (git-empty-db-p) (error "No commit to amend."))
-  (let* ((commit (git-rev-parse "HEAD"))
-         (files (git-get-commit-files commit)))
-    (when (if (git-rev-parse "HEAD^")
-              (git-call-process-display-error "reset" "--soft" "HEAD^")
-            (and (git-update-ref "ORIG_HEAD" commit)
-                 (git-update-ref "HEAD" nil commit)))
-      (git-update-status-files files t)
-      (git-setup-commit-buffer commit)
-      (git-commit-file))))
-
-(defun git-cherry-pick-commit (arg)
-  "Cherry-pick a commit."
-  (interactive (list (git-read-commit-name "Cherry-pick commit: ")))
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((commit (git-rev-parse (concat arg "^0"))))
-    (unless commit (error "Not a valid commit '%s'." arg))
-    (when (git-rev-parse (concat commit "^2"))
-      (error "Cannot cherry-pick a merge commit."))
-    (let ((files (git-get-commit-files commit))
-          (ok (git-call-process-display-error "cherry-pick" "-n" commit)))
-      (git-update-status-files files ok)
-      (with-current-buffer (git-setup-commit-buffer commit)
-        (goto-char (point-min))
-        (if (re-search-forward "^\n*Signed-off-by:" nil t 1)
-            (goto-char (match-beginning 0))
-          (goto-char (point-max)))
-        (insert "(cherry picked from commit " commit ")\n"))
-      (when ok (git-commit-file)))))
-
-(defun git-revert-commit (arg)
-  "Revert a commit."
-  (interactive (list (git-read-commit-name "Revert commit: ")))
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((commit (git-rev-parse (concat arg "^0"))))
-    (unless commit (error "Not a valid commit '%s'." arg))
-    (when (git-rev-parse (concat commit "^2"))
-      (error "Cannot revert a merge commit."))
-    (let ((files (git-get-commit-files commit))
-          (subject (git-get-commit-description commit))
-          (ok (git-call-process-display-error "revert" "-n" commit)))
-      (git-update-status-files files ok)
-      (when (string-match "^[0-9a-f]+ - \\(.*\\)$" subject)
-        (setq subject (match-string 1 subject)))
-      (git-setup-log-buffer (get-buffer-create "*git-commit*")
-                            (git-get-merge-heads) nil nil (format "Revert \"%s\"" subject) nil
-                            (format "This reverts commit %s.\n" commit))
-      (when ok (git-commit-file)))))
-
-(defun git-find-file ()
-  "Visit the current file in its own buffer."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((info (ewoc-data (ewoc-locate git-status))))
-    (unless (git-expand-directory info)
-      (find-file (git-fileinfo->name info))
-      (when (eq 'unmerged (git-fileinfo->state info))
-        (smerge-mode 1)))))
-
-(defun git-find-file-other-window ()
-  "Visit the current file in its own buffer in another window."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((info (ewoc-data (ewoc-locate git-status))))
-    (find-file-other-window (git-fileinfo->name info))
-    (when (eq 'unmerged (git-fileinfo->state info))
-      (smerge-mode))))
-
-(defun git-find-file-imerge ()
-  "Visit the current file in interactive merge mode."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((info (ewoc-data (ewoc-locate git-status))))
-    (find-file (git-fileinfo->name info))
-    (smerge-ediff)))
-
-(defun git-view-file ()
-  "View the current file in its own buffer."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((info (ewoc-data (ewoc-locate git-status))))
-    (view-file (git-fileinfo->name info))))
-
-(defun git-refresh-status ()
-  "Refresh the git status buffer."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (message "Refreshing git status...")
-  (git-update-status-files)
-  (message "Refreshing git status...done"))
-
-(defun git-status-quit ()
-  "Quit git-status mode."
-  (interactive)
-  (bury-buffer))
-
-;;;; Major Mode
-;;;; ------------------------------------------------------------
-
-(defvar git-status-mode-hook nil
-  "Run after `git-status-mode' is setup.")
-
-(defvar git-status-mode-map nil
-  "Keymap for git major mode.")
-
-(defvar git-status nil
-  "List of all files managed by the git-status mode.")
-
-(unless git-status-mode-map
-  (let ((map (make-keymap))
-        (commit-map (make-sparse-keymap))
-        (diff-map (make-sparse-keymap))
-        (toggle-map (make-sparse-keymap)))
-    (suppress-keymap map)
-    (define-key map "?"   'git-help)
-    (define-key map "h"   'git-help)
-    (define-key map " "   'git-next-file)
-    (define-key map "a"   'git-add-file)
-    (define-key map "c"   'git-commit-file)
-    (define-key map "\C-c" commit-map)
-    (define-key map "d"    diff-map)
-    (define-key map "="   'git-diff-file)
-    (define-key map "f"   'git-find-file)
-    (define-key map "\r"  'git-find-file)
-    (define-key map "g"   'git-refresh-status)
-    (define-key map "i"   'git-ignore-file)
-    (define-key map "I"   'git-insert-file)
-    (define-key map "l"   'git-log-file)
-    (define-key map "m"   'git-mark-file)
-    (define-key map "M"   'git-mark-all)
-    (define-key map "n"   'git-next-file)
-    (define-key map "N"   'git-next-unmerged-file)
-    (define-key map "o"   'git-find-file-other-window)
-    (define-key map "p"   'git-prev-file)
-    (define-key map "P"   'git-prev-unmerged-file)
-    (define-key map "q"   'git-status-quit)
-    (define-key map "r"   'git-remove-file)
-    (define-key map "t"    toggle-map)
-    (define-key map "T"   'git-toggle-all-marks)
-    (define-key map "u"   'git-unmark-file)
-    (define-key map "U"   'git-revert-file)
-    (define-key map "v"   'git-view-file)
-    (define-key map "x"   'git-remove-handled)
-    (define-key map "\C-?" 'git-unmark-file-up)
-    (define-key map "\M-\C-?" 'git-unmark-all)
-    ; the commit submap
-    (define-key commit-map "\C-a" 'git-amend-commit)
-    (define-key commit-map "\C-b" 'git-branch)
-    (define-key commit-map "\C-o" 'git-checkout)
-    (define-key commit-map "\C-p" 'git-cherry-pick-commit)
-    (define-key commit-map "\C-v" 'git-revert-commit)
-    ; the diff submap
-    (define-key diff-map "b" 'git-diff-file-base)
-    (define-key diff-map "c" 'git-diff-file-combined)
-    (define-key diff-map "=" 'git-diff-file)
-    (define-key diff-map "e" 'git-diff-file-idiff)
-    (define-key diff-map "E" 'git-find-file-imerge)
-    (define-key diff-map "h" 'git-diff-file-merge-head)
-    (define-key diff-map "m" 'git-diff-file-mine)
-    (define-key diff-map "o" 'git-diff-file-other)
-    ; the toggle submap
-    (define-key toggle-map "u" 'git-toggle-show-uptodate)
-    (define-key toggle-map "i" 'git-toggle-show-ignored)
-    (define-key toggle-map "k" 'git-toggle-show-unknown)
-    (define-key toggle-map "m" 'git-toggle-all-marks)
-    (setq git-status-mode-map map))
-  (easy-menu-define git-menu git-status-mode-map
-    "Git Menu"
-    `("Git"
-      ["Refresh" git-refresh-status t]
-      ["Commit" git-commit-file t]
-      ["Checkout..." git-checkout t]
-      ["New Branch..." git-branch t]
-      ["Cherry-pick Commit..." git-cherry-pick-commit t]
-      ["Revert Commit..." git-revert-commit t]
-      ("Merge"
-       ["Next Unmerged File" git-next-unmerged-file t]
-       ["Prev Unmerged File" git-prev-unmerged-file t]
-       ["Interactive Merge File" git-find-file-imerge t]
-       ["Diff Against Common Base File" git-diff-file-base t]
-       ["Diff Combined" git-diff-file-combined t]
-       ["Diff Against Merge Head" git-diff-file-merge-head t]
-       ["Diff Against Mine" git-diff-file-mine t]
-       ["Diff Against Other" git-diff-file-other t])
-      "--------"
-      ["Add File" git-add-file t]
-      ["Revert File" git-revert-file t]
-      ["Ignore File" git-ignore-file t]
-      ["Remove File" git-remove-file t]
-      ["Insert File" git-insert-file t]
-      "--------"
-      ["Find File" git-find-file t]
-      ["View File" git-view-file t]
-      ["Diff File" git-diff-file t]
-      ["Interactive Diff File" git-diff-file-idiff t]
-      ["Log" git-log-file t]
-      "--------"
-      ["Mark" git-mark-file t]
-      ["Mark All" git-mark-all t]
-      ["Unmark" git-unmark-file t]
-      ["Unmark All" git-unmark-all t]
-      ["Toggle All Marks" git-toggle-all-marks t]
-      ["Hide Handled Files" git-remove-handled t]
-      "--------"
-      ["Show Uptodate Files" git-toggle-show-uptodate :style toggle :selected git-show-uptodate]
-      ["Show Ignored Files" git-toggle-show-ignored :style toggle :selected git-show-ignored]
-      ["Show Unknown Files" git-toggle-show-unknown :style toggle :selected git-show-unknown]
-      "--------"
-      ["Quit" git-status-quit t])))
-
-
-;; git mode should only run in the *git status* buffer
-(put 'git-status-mode 'mode-class 'special)
-
-(defun git-status-mode ()
-  "Major mode for interacting with Git.
-Commands:
-\\{git-status-mode-map}"
-  (kill-all-local-variables)
-  (buffer-disable-undo)
-  (setq mode-name "git status"
-        major-mode 'git-status-mode
-        goal-column 17
-        buffer-read-only t)
-  (use-local-map git-status-mode-map)
-  (let ((buffer-read-only nil))
-    (erase-buffer)
-  (let ((status (ewoc-create 'git-fileinfo-prettyprint "" "")))
-    (set (make-local-variable 'git-status) status))
-  (set (make-local-variable 'list-buffers-directory) default-directory)
-  (make-local-variable 'git-show-uptodate)
-  (make-local-variable 'git-show-ignored)
-  (make-local-variable 'git-show-unknown)
-  (run-hooks 'git-status-mode-hook)))
-
-(defun git-find-status-buffer (dir)
-  "Find the git status buffer handling a specified directory."
-  (let ((list (buffer-list))
-        (fulldir (expand-file-name dir))
-        found)
-    (while (and list (not found))
-      (let ((buffer (car list)))
-        (with-current-buffer buffer
-          (when (and list-buffers-directory
-                     (string-equal fulldir (expand-file-name list-buffers-directory))
-                    (eq major-mode 'git-status-mode))
-            (setq found buffer))))
-      (setq list (cdr list)))
-    found))
-
-(defun git-status (dir)
-  "Entry point into git-status mode."
-  (interactive "DSelect directory: ")
-  (setq dir (git-get-top-dir dir))
-  (if (file-exists-p (concat (file-name-as-directory dir) ".git"))
-      (let ((buffer (or (and git-reuse-status-buffer (git-find-status-buffer dir))
-                        (create-file-buffer (expand-file-name "*git-status*" dir)))))
-        (switch-to-buffer buffer)
-        (cd dir)
-        (git-status-mode)
-        (git-refresh-status)
-        (goto-char (point-min))
-        (add-hook 'after-save-hook 'git-update-saved-file))
-    (message "%s is not a git working tree." dir)))
-
-(defun git-update-saved-file ()
-  "Update the corresponding git-status buffer when a file is saved.
-Meant to be used in `after-save-hook'."
-  (let* ((file (expand-file-name buffer-file-name))
-         (dir (condition-case nil (git-get-top-dir (file-name-directory file)) (error nil)))
-         (buffer (and dir (git-find-status-buffer dir))))
-    (when buffer
-      (with-current-buffer buffer
-        (let ((filename (file-relative-name file dir)))
-          ; skip files located inside the .git directory
-          (unless (string-match "^\\.git/" filename)
-            (git-call-process nil "add" "--refresh" "--" filename)
-            (git-update-status-files (list filename))))))))
-
-(defun git-help ()
-  "Display help for Git mode."
-  (interactive)
-  (describe-function 'git-status-mode))
-
-(provide 'git)
-;;; git.el ends here
+(error "git.el no longer ships with git. It's recommended to
+replace its use with Magit, or simply delete references to git.el
+in your initialization file(s). See contrib/emacs/README in git's
+sources (https://github.com/git/git/blob/master/contrib/emacs/README)
+for suggested alternatives and for why this happened. Emacs's own
+VC mode and Magit are viable alternatives.")
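For users who hit the stub above, a quick way to locate leftover git.el references is a plain grep over the usual Emacs init files (a minimal sketch; the init-file paths are assumptions and vary between setups):

    # Illustrative only: search common init files for loads of the removed git.el
    grep -n -e "(require 'git)" -e "git\.el" ~/.emacs ~/.emacs.d/init.el 2>/dev/null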
index 6946f3dd2ad924639f03b3779635c759c938c1e4..18bc60b021be00f69d2b5a784996bbc1e98c82c1 100644 (file)
@@ -1,3 +1,20 @@
-These are original scripted implementations, kept primarily for their
-reference value to any aspiring plumbing users who want to learn how
-pieces can be fit together.
+This directory used to contain scripted implementations of builtins
+that have since been rewritten in C.
+
+They have now been removed, but can still be retrieved from the
+history, just before the commit that removed them from this directory.
+
+They're interesting for their reference value to any aspiring plumbing
+users who want to learn how pieces can be fit together, but in many
+cases have drifted enough from the actual implementations Git uses to
+no longer be instructive.
+
+Other things that can be useful:
+
+ * Some commands such as git-gc wrap other commands, and what they're
+   doing behind the scenes can be seen by running them under
+   GIT_TRACE=1
+
+ * Doing `git log` on paths matching '*--helper.c' will show
+   incremental effort in the direction of moving existing shell
+   scripts to C.
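The two tips above map to ordinary invocations, for example (illustrative commands; output depends on the repository):

    # Trace the plumbing that 'git gc' runs behind the scenes
    GIT_TRACE=1 git gc --auto

    # Follow the incremental moves of scripted builtins into C helpers
    git log --oneline -- '*--helper.c'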
diff --git a/contrib/examples/builtin-fetch--tool.c b/contrib/examples/builtin-fetch--tool.c
deleted file mode 100644 (file)
index 22648c3..0000000
+++ /dev/null
@@ -1,575 +0,0 @@
-#include "builtin.h"
-#include "cache.h"
-#include "refs.h"
-#include "commit.h"
-#include "sigchain.h"
-
-static char *get_stdin(void)
-{
-       struct strbuf buf = STRBUF_INIT;
-       if (strbuf_read(&buf, 0, 1024) < 0) {
-               die_errno("error reading standard input");
-       }
-       return strbuf_detach(&buf, NULL);
-}
-
-static void show_new(enum object_type type, unsigned char *sha1_new)
-{
-       fprintf(stderr, "  %s: %s\n", type_name(type),
-               find_unique_abbrev(sha1_new, DEFAULT_ABBREV));
-}
-
-static int update_ref_env(const char *action,
-                     const char *refname,
-                     unsigned char *sha1,
-                     unsigned char *oldval)
-{
-       char msg[1024];
-       const char *rla = getenv("GIT_REFLOG_ACTION");
-
-       if (!rla)
-               rla = "(reflog update)";
-       if (snprintf(msg, sizeof(msg), "%s: %s", rla, action) >= sizeof(msg))
-               warning("reflog message too long: %.*s...", 50, msg);
-       return update_ref(msg, refname, sha1, oldval, 0,
-                         UPDATE_REFS_QUIET_ON_ERR);
-}
-
-static int update_local_ref(const char *name,
-                           const char *new_head,
-                           const char *note,
-                           int verbose, int force)
-{
-       unsigned char sha1_old[20], sha1_new[20];
-       char oldh[41], newh[41];
-       struct commit *current, *updated;
-       enum object_type type;
-
-       if (get_sha1_hex(new_head, sha1_new))
-               die("malformed object name %s", new_head);
-
-       type = sha1_object_info(sha1_new, NULL);
-       if (type < 0)
-               die("object %s not found", new_head);
-
-       if (!*name) {
-               /* Not storing */
-               if (verbose) {
-                       fprintf(stderr, "* fetched %s\n", note);
-                       show_new(type, sha1_new);
-               }
-               return 0;
-       }
-
-       if (get_sha1(name, sha1_old)) {
-               const char *msg;
-       just_store:
-               /* new ref */
-               if (!strncmp(name, "refs/tags/", 10))
-                       msg = "storing tag";
-               else
-                       msg = "storing head";
-               fprintf(stderr, "* %s: storing %s\n",
-                       name, note);
-               show_new(type, sha1_new);
-               return update_ref_env(msg, name, sha1_new, NULL);
-       }
-
-       if (!hashcmp(sha1_old, sha1_new)) {
-               if (verbose) {
-                       fprintf(stderr, "* %s: same as %s\n", name, note);
-                       show_new(type, sha1_new);
-               }
-               return 0;
-       }
-
-       if (!strncmp(name, "refs/tags/", 10)) {
-               fprintf(stderr, "* %s: updating with %s\n", name, note);
-               show_new(type, sha1_new);
-               return update_ref_env("updating tag", name, sha1_new, NULL);
-       }
-
-       current = lookup_commit_reference(sha1_old);
-       updated = lookup_commit_reference(sha1_new);
-       if (!current || !updated)
-               goto just_store;
-
-       strcpy(oldh, find_unique_abbrev(current->object.sha1, DEFAULT_ABBREV));
-       strcpy(newh, find_unique_abbrev(sha1_new, DEFAULT_ABBREV));
-
-       if (in_merge_bases(current, updated)) {
-               fprintf(stderr, "* %s: fast-forward to %s\n",
-                       name, note);
-               fprintf(stderr, "  old..new: %s..%s\n", oldh, newh);
-               return update_ref_env("fast-forward", name, sha1_new, sha1_old);
-       }
-       if (!force) {
-               fprintf(stderr,
-                       "* %s: not updating to non-fast-forward %s\n",
-                       name, note);
-               fprintf(stderr,
-                       "  old...new: %s...%s\n", oldh, newh);
-               return 1;
-       }
-       fprintf(stderr,
-               "* %s: forcing update to non-fast-forward %s\n",
-               name, note);
-       fprintf(stderr, "  old...new: %s...%s\n", oldh, newh);
-       return update_ref_env("forced-update", name, sha1_new, sha1_old);
-}
-
-static int append_fetch_head(FILE *fp,
-                            const char *head, const char *remote,
-                            const char *remote_name, const char *remote_nick,
-                            const char *local_name, int not_for_merge,
-                            int verbose, int force)
-{
-       struct commit *commit;
-       int remote_len, i, note_len;
-       unsigned char sha1[20];
-       char note[1024];
-       const char *what, *kind;
-
-       if (get_sha1(head, sha1))
-               return error("Not a valid object name: %s", head);
-       commit = lookup_commit_reference_gently(sha1, 1);
-       if (!commit)
-               not_for_merge = 1;
-
-       if (!strcmp(remote_name, "HEAD")) {
-               kind = "";
-               what = "";
-       }
-       else if (!strncmp(remote_name, "refs/heads/", 11)) {
-               kind = "branch";
-               what = remote_name + 11;
-       }
-       else if (!strncmp(remote_name, "refs/tags/", 10)) {
-               kind = "tag";
-               what = remote_name + 10;
-       }
-       else if (!strncmp(remote_name, "refs/remotes/", 13)) {
-               kind = "remote-tracking branch";
-               what = remote_name + 13;
-       }
-       else {
-               kind = "";
-               what = remote_name;
-       }
-
-       remote_len = strlen(remote);
-       for (i = remote_len - 1; remote[i] == '/' && 0 <= i; i--)
-               ;
-       remote_len = i + 1;
-       if (4 < i && !strncmp(".git", remote + i - 3, 4))
-               remote_len = i - 3;
-
-       note_len = 0;
-       if (*what) {
-               if (*kind)
-                       note_len += sprintf(note + note_len, "%s ", kind);
-               note_len += sprintf(note + note_len, "'%s' of ", what);
-       }
-       note_len += sprintf(note + note_len, "%.*s", remote_len, remote);
-       fprintf(fp, "%s\t%s\t%s\n",
-               sha1_to_hex(commit ? commit->object.sha1 : sha1),
-               not_for_merge ? "not-for-merge" : "",
-               note);
-       return update_local_ref(local_name, head, note, verbose, force);
-}
-
-static char *keep;
-static void remove_keep(void)
-{
-       if (keep && *keep)
-               unlink(keep);
-}
-
-static void remove_keep_on_signal(int signo)
-{
-       remove_keep();
-       sigchain_pop(signo);
-       raise(signo);
-}
-
-static char *find_local_name(const char *remote_name, const char *refs,
-                            int *force_p, int *not_for_merge_p)
-{
-       const char *ref = refs;
-       int len = strlen(remote_name);
-
-       while (ref) {
-               const char *next;
-               int single_force, not_for_merge;
-
-               while (*ref == '\n')
-                       ref++;
-               if (!*ref)
-                       break;
-               next = strchr(ref, '\n');
-
-               single_force = not_for_merge = 0;
-               if (*ref == '+') {
-                       single_force = 1;
-                       ref++;
-               }
-               if (*ref == '.') {
-                       not_for_merge = 1;
-                       ref++;
-                       if (*ref == '+') {
-                               single_force = 1;
-                               ref++;
-                       }
-               }
-               if (!strncmp(remote_name, ref, len) && ref[len] == ':') {
-                       const char *local_part = ref + len + 1;
-                       int retlen;
-
-                       if (!next)
-                               retlen = strlen(local_part);
-                       else
-                               retlen = next - local_part;
-                       *force_p = single_force;
-                       *not_for_merge_p = not_for_merge;
-                       return xmemdupz(local_part, retlen);
-               }
-               ref = next;
-       }
-       return NULL;
-}
-
-static int fetch_native_store(FILE *fp,
-                             const char *remote,
-                             const char *remote_nick,
-                             const char *refs,
-                             int verbose, int force)
-{
-       char buffer[1024];
-       int err = 0;
-
-       sigchain_push_common(remove_keep_on_signal);
-       atexit(remove_keep);
-
-       while (fgets(buffer, sizeof(buffer), stdin)) {
-               int len;
-               char *cp;
-               char *local_name;
-               int single_force, not_for_merge;
-
-               for (cp = buffer; *cp && !isspace(*cp); cp++)
-                       ;
-               if (*cp)
-                       *cp++ = 0;
-               len = strlen(cp);
-               if (len && cp[len-1] == '\n')
-                       cp[--len] = 0;
-               if (!strcmp(buffer, "failed"))
-                       die("Fetch failure: %s", remote);
-               if (!strcmp(buffer, "pack"))
-                       continue;
-               if (!strcmp(buffer, "keep")) {
-                       char *od = get_object_directory();
-                       int len = strlen(od) + strlen(cp) + 50;
-                       keep = xmalloc(len);
-                       sprintf(keep, "%s/pack/pack-%s.keep", od, cp);
-                       continue;
-               }
-
-               local_name = find_local_name(cp, refs,
-                                            &single_force, &not_for_merge);
-               if (!local_name)
-                       continue;
-               err |= append_fetch_head(fp,
-                                        buffer, remote, cp, remote_nick,
-                                        local_name, not_for_merge,
-                                        verbose, force || single_force);
-       }
-       return err;
-}
-
-static int parse_reflist(const char *reflist)
-{
-       const char *ref;
-
-       printf("refs='");
-       for (ref = reflist; ref; ) {
-               const char *next;
-               while (*ref && isspace(*ref))
-                       ref++;
-               if (!*ref)
-                       break;
-               for (next = ref; *next && !isspace(*next); next++)
-                       ;
-               printf("\n%.*s", (int)(next - ref), ref);
-               ref = next;
-       }
-       printf("'\n");
-
-       printf("rref='");
-       for (ref = reflist; ref; ) {
-               const char *next, *colon;
-               while (*ref && isspace(*ref))
-                       ref++;
-               if (!*ref)
-                       break;
-               for (next = ref; *next && !isspace(*next); next++)
-                       ;
-               if (*ref == '.')
-                       ref++;
-               if (*ref == '+')
-                       ref++;
-               colon = strchr(ref, ':');
-               putchar('\n');
-               printf("%.*s", (int)((colon ? colon : next) - ref), ref);
-               ref = next;
-       }
-       printf("'\n");
-       return 0;
-}
-
-static int expand_refs_wildcard(const char *ls_remote_result, int numrefs,
-                               const char **refs)
-{
-       int i, matchlen, replacelen;
-       int found_one = 0;
-       const char *remote = *refs++;
-       numrefs--;
-
-       if (numrefs == 0) {
-               fprintf(stderr, "Nothing specified for fetching with remote.%s.fetch\n",
-                       remote);
-               printf("empty\n");
-       }
-
-       for (i = 0; i < numrefs; i++) {
-               const char *ref = refs[i];
-               const char *lref = ref;
-               const char *colon;
-               const char *tail;
-               const char *ls;
-               const char *next;
-
-               if (*lref == '+')
-                       lref++;
-               colon = strchr(lref, ':');
-               tail = lref + strlen(lref);
-               if (!(colon &&
-                     2 < colon - lref &&
-                     colon[-1] == '*' &&
-                     colon[-2] == '/' &&
-                     2 < tail - (colon + 1) &&
-                     tail[-1] == '*' &&
-                     tail[-2] == '/')) {
-                       /* not a glob */
-                       if (!found_one++)
-                               printf("explicit\n");
-                       printf("%s\n", ref);
-                       continue;
-               }
-
-               /* glob */
-               if (!found_one++)
-                       printf("glob\n");
-
-               /* lref to colon-2 is remote hierarchy name;
-                * colon+1 to tail-2 is local.
-                */
-               matchlen = (colon-1) - lref;
-               replacelen = (tail-1) - (colon+1);
-               for (ls = ls_remote_result; ls; ls = next) {
-                       const char *eol;
-                       unsigned char sha1[20];
-                       int namelen;
-
-                       while (*ls && isspace(*ls))
-                               ls++;
-                       next = strchr(ls, '\n');
-                       eol = !next ? (ls + strlen(ls)) : next;
-                       if (!memcmp("^{}", eol-3, 3))
-                               continue;
-                       if (eol - ls < 40)
-                               continue;
-                       if (get_sha1_hex(ls, sha1))
-                               continue;
-                       ls += 40;
-                       while (ls < eol && isspace(*ls))
-                               ls++;
-                       /* ls to next (or eol) is the name.
-                        * is it identical to lref to colon-2?
-                        */
-                       if ((eol - ls) <= matchlen ||
-                           strncmp(ls, lref, matchlen))
-                               continue;
-
-                       /* Yes, it is a match */
-                       namelen = eol - ls;
-                       if (lref != ref)
-                               putchar('+');
-                       printf("%.*s:%.*s%.*s\n",
-                              namelen, ls,
-                              replacelen, colon + 1,
-                              namelen - matchlen, ls + matchlen);
-               }
-       }
-       return 0;
-}
-
-static int pick_rref(int sha1_only, const char *rref, const char *ls_remote_result)
-{
-       int err = 0;
-       int lrr_count = lrr_count, i, pass;
-       const char *cp;
-       struct lrr {
-               const char *line;
-               const char *name;
-               int namelen;
-               int shown;
-       } *lrr_list = lrr_list;
-
-       for (pass = 0; pass < 2; pass++) {
-               /* pass 0 counts and allocates, pass 1 fills... */
-               cp = ls_remote_result;
-               i = 0;
-               while (1) {
-                       const char *np;
-                       while (*cp && isspace(*cp))
-                               cp++;
-                       if (!*cp)
-                               break;
-                       np = strchrnul(cp, '\n');
-                       if (pass) {
-                               lrr_list[i].line = cp;
-                               lrr_list[i].name = cp + 41;
-                               lrr_list[i].namelen = np - (cp + 41);
-                       }
-                       i++;
-                       cp = np;
-               }
-               if (!pass) {
-                       lrr_count = i;
-                       lrr_list = xcalloc(lrr_count, sizeof(*lrr_list));
-               }
-       }
-
-       while (1) {
-               const char *next;
-               int rreflen;
-               int i;
-
-               while (*rref && isspace(*rref))
-                       rref++;
-               if (!*rref)
-                       break;
-               next = strchrnul(rref, '\n');
-               rreflen = next - rref;
-
-               for (i = 0; i < lrr_count; i++) {
-                       struct lrr *lrr = &(lrr_list[i]);
-
-                       if (rreflen == lrr->namelen &&
-                           !memcmp(lrr->name, rref, rreflen)) {
-                               if (!lrr->shown)
-                                       printf("%.*s\n",
-                                              sha1_only ? 40 : lrr->namelen + 41,
-                                              lrr->line);
-                               lrr->shown = 1;
-                               break;
-                       }
-               }
-               if (lrr_count <= i) {
-                       error("pick-rref: %.*s not found", rreflen, rref);
-                       err = 1;
-               }
-               rref = next;
-       }
-       free(lrr_list);
-       return err;
-}
-
-int cmd_fetch__tool(int argc, const char **argv, const char *prefix)
-{
-       int verbose = 0;
-       int force = 0;
-       int sopt = 0;
-
-       while (1 < argc) {
-               const char *arg = argv[1];
-               if (!strcmp("-v", arg))
-                       verbose = 1;
-               else if (!strcmp("-f", arg))
-                       force = 1;
-               else if (!strcmp("-s", arg))
-                       sopt = 1;
-               else
-                       break;
-               argc--;
-               argv++;
-       }
-
-       if (argc <= 1)
-               return error("Missing subcommand");
-
-       if (!strcmp("append-fetch-head", argv[1])) {
-               int result;
-               FILE *fp;
-               char *filename;
-
-               if (argc != 8)
-                       return error("append-fetch-head takes 6 args");
-               filename = git_path_fetch_head();
-               fp = fopen(filename, "a");
-               if (!fp)
-                       return error("cannot open %s: %s", filename, strerror(errno));
-               result = append_fetch_head(fp, argv[2], argv[3],
-                                          argv[4], argv[5],
-                                          argv[6], !!argv[7][0],
-                                          verbose, force);
-               fclose(fp);
-               return result;
-       }
-       if (!strcmp("native-store", argv[1])) {
-               int result;
-               FILE *fp;
-               char *filename;
-
-               if (argc != 5)
-                       return error("fetch-native-store takes 3 args");
-               filename = git_path_fetch_head();
-               fp = fopen(filename, "a");
-               if (!fp)
-                       return error("cannot open %s: %s", filename, strerror(errno));
-               result = fetch_native_store(fp, argv[2], argv[3], argv[4],
-                                           verbose, force);
-               fclose(fp);
-               return result;
-       }
-       if (!strcmp("parse-reflist", argv[1])) {
-               const char *reflist;
-               if (argc != 3)
-                       return error("parse-reflist takes 1 arg");
-               reflist = argv[2];
-               if (!strcmp(reflist, "-"))
-                       reflist = get_stdin();
-               return parse_reflist(reflist);
-       }
-       if (!strcmp("pick-rref", argv[1])) {
-               const char *ls_remote_result;
-               if (argc != 4)
-                       return error("pick-rref takes 2 args");
-               ls_remote_result = argv[3];
-               if (!strcmp(ls_remote_result, "-"))
-                       ls_remote_result = get_stdin();
-               return pick_rref(sopt, argv[2], ls_remote_result);
-       }
-       if (!strcmp("expand-refs-wildcard", argv[1])) {
-               const char *reflist;
-               if (argc < 4)
-                       return error("expand-refs-wildcard takes at least 2 args");
-               reflist = argv[2];
-               if (!strcmp(reflist, "-"))
-                       reflist = get_stdin();
-               return expand_refs_wildcard(reflist, argc - 3, argv + 3);
-       }
-
-       return error("Unknown subcommand: %s", argv[1]);
-}
diff --git a/contrib/examples/git-am.sh b/contrib/examples/git-am.sh
deleted file mode 100755 (executable)
index dd539f1..0000000
+++ /dev/null
@@ -1,975 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005, 2006 Junio C Hamano
-
-SUBDIRECTORY_OK=Yes
-OPTIONS_KEEPDASHDASH=
-OPTIONS_STUCKLONG=t
-OPTIONS_SPEC="\
-git am [options] [(<mbox>|<Maildir>)...]
-git am [options] (--continue | --skip | --abort)
---
-i,interactive   run interactively
-b,binary*       (historical option -- no-op)
-3,3way          allow fall back on 3way merging if needed
-q,quiet         be quiet
-s,signoff       add a Signed-off-by line to the commit message
-u,utf8          recode into utf8 (default)
-k,keep          pass -k flag to git-mailinfo
-keep-non-patch  pass -b flag to git-mailinfo
-m,message-id    pass -m flag to git-mailinfo
-keep-cr         pass --keep-cr flag to git-mailsplit for mbox format
-no-keep-cr      do not pass --keep-cr flag to git-mailsplit independent of am.keepcr
-c,scissors      strip everything before a scissors line
-whitespace=     pass it through git-apply
-ignore-space-change pass it through git-apply
-ignore-whitespace pass it through git-apply
-directory=      pass it through git-apply
-exclude=        pass it through git-apply
-include=        pass it through git-apply
-C=              pass it through git-apply
-p=              pass it through git-apply
-patch-format=   format the patch(es) are in
-reject          pass it through git-apply
-resolvemsg=     override error message when patch failure occurs
-continue        continue applying patches after resolving a conflict
-r,resolved      synonyms for --continue
-skip            skip the current patch
-abort           restore the original branch and abort the patching operation.
-committer-date-is-author-date    lie about committer date
-ignore-date     use current timestamp for author date
-rerere-autoupdate update the index with reused conflict resolution if possible
-S,gpg-sign?     GPG-sign commits
-rebasing*       (internal use for git-rebase)"
-
-. git-sh-setup
-. git-sh-i18n
-prefix=$(git rev-parse --show-prefix)
-set_reflog_action am
-require_work_tree
-cd_to_toplevel
-
-git var GIT_COMMITTER_IDENT >/dev/null ||
-       die "$(gettext "You need to set your committer info first")"
-
-if git rev-parse --verify -q HEAD >/dev/null
-then
-       HAS_HEAD=yes
-else
-       HAS_HEAD=
-fi
-
-cmdline="git am"
-if test '' != "$interactive"
-then
-       cmdline="$cmdline -i"
-fi
-if test '' != "$threeway"
-then
-       cmdline="$cmdline -3"
-fi
-
-empty_tree=4b825dc642cb6eb9a060e54bf8d69288fbee4904
-
-sq () {
-       git rev-parse --sq-quote "$@"
-}
-
-stop_here () {
-    echo "$1" >"$dotest/next"
-    git rev-parse --verify -q HEAD >"$dotest/abort-safety"
-    exit 1
-}
-
-safe_to_abort () {
-       if test -f "$dotest/dirtyindex"
-       then
-               return 1
-       fi
-
-       if ! test -f "$dotest/abort-safety"
-       then
-               return 0
-       fi
-
-       abort_safety=$(cat "$dotest/abort-safety")
-       if test "z$(git rev-parse --verify -q HEAD)" = "z$abort_safety"
-       then
-               return 0
-       fi
-       gettextln "You seem to have moved HEAD since the last 'am' failure.
-Not rewinding to ORIG_HEAD" >&2
-       return 1
-}
-
-stop_here_user_resolve () {
-    if [ -n "$resolvemsg" ]; then
-           printf '%s\n' "$resolvemsg"
-           stop_here $1
-    fi
-    eval_gettextln "When you have resolved this problem, run \"\$cmdline --continue\".
-If you prefer to skip this patch, run \"\$cmdline --skip\" instead.
-To restore the original branch and stop patching, run \"\$cmdline --abort\"."
-
-    stop_here $1
-}
-
-go_next () {
-       rm -f "$dotest/$msgnum" "$dotest/msg" "$dotest/msg-clean" \
-               "$dotest/patch" "$dotest/info"
-       echo "$next" >"$dotest/next"
-       this=$next
-}
-
-cannot_fallback () {
-       echo "$1"
-       gettextln "Cannot fall back to three-way merge."
-       exit 1
-}
-
-fall_back_3way () {
-    O_OBJECT=$(cd "$GIT_OBJECT_DIRECTORY" && pwd)
-
-    rm -fr "$dotest"/patch-merge-*
-    mkdir "$dotest/patch-merge-tmp-dir"
-
-    # First see if the patch records the index info that we can use.
-    cmd="git apply $git_apply_opt --build-fake-ancestor" &&
-    cmd="$cmd "'"$dotest/patch-merge-tmp-index" "$dotest/patch"' &&
-    eval "$cmd" &&
-    GIT_INDEX_FILE="$dotest/patch-merge-tmp-index" \
-    git write-tree >"$dotest/patch-merge-base+" ||
-    cannot_fallback "$(gettext "Repository lacks necessary blobs to fall back on 3-way merge.")"
-
-    say "$(gettext "Using index info to reconstruct a base tree...")"
-
-    cmd='GIT_INDEX_FILE="$dotest/patch-merge-tmp-index"'
-
-    if test -z "$GIT_QUIET"
-    then
-       eval "$cmd git diff-index --cached --diff-filter=AM --name-status HEAD"
-    fi
-
-    cmd="$cmd git apply --cached $git_apply_opt"' <"$dotest/patch"'
-    if eval "$cmd"
-    then
-       mv "$dotest/patch-merge-base+" "$dotest/patch-merge-base"
-       mv "$dotest/patch-merge-tmp-index" "$dotest/patch-merge-index"
-    else
-       cannot_fallback "$(gettext "Did you hand edit your patch?
-It does not apply to blobs recorded in its index.")"
-    fi
-
-    test -f "$dotest/patch-merge-index" &&
-    his_tree=$(GIT_INDEX_FILE="$dotest/patch-merge-index" git write-tree) &&
-    orig_tree=$(cat "$dotest/patch-merge-base") &&
-    rm -fr "$dotest"/patch-merge-* || exit 1
-
-    say "$(gettext "Falling back to patching base and 3-way merge...")"
-
-    # This is not so wrong.  Depending on which base we picked,
-    # orig_tree may be wildly different from ours, but his_tree
-    # has the same set of wildly different changes in parts the
-    # patch did not touch, so recursive ends up canceling them,
-    # saying that we reverted all those changes.
-
-    eval GITHEAD_$his_tree='"$FIRSTLINE"'
-    export GITHEAD_$his_tree
-    if test -n "$GIT_QUIET"
-    then
-           GIT_MERGE_VERBOSITY=0 && export GIT_MERGE_VERBOSITY
-    fi
-    our_tree=$(git rev-parse --verify -q HEAD || echo $empty_tree)
-    git-merge-recursive $orig_tree -- $our_tree $his_tree || {
-           git rerere $allow_rerere_autoupdate
-           die "$(gettext "Failed to merge in the changes.")"
-    }
-    unset GITHEAD_$his_tree
-}
-
-clean_abort () {
-       test $# = 0 || echo >&2 "$@"
-       rm -fr "$dotest"
-       exit 1
-}
-
-patch_format=
-
-check_patch_format () {
-       # early return if patch_format was set from the command line
-       if test -n "$patch_format"
-       then
-               return 0
-       fi
-
-       # we default to mbox format if input is from stdin and for
-       # directories
-       if test $# = 0 || test "x$1" = "x-" || test -d "$1"
-       then
-               patch_format=mbox
-               return 0
-       fi
-
-       # otherwise, check the first few non-blank lines of the first
-       # patch to try to detect its format
-       {
-               # Start from first line containing non-whitespace
-               l1=
-               while test -z "$l1"
-               do
-                       read l1 || break
-               done
-               read l2
-               read l3
-               case "$l1" in
-               "From "* | "From: "*)
-                       patch_format=mbox
-                       ;;
-               '# This series applies on GIT commit'*)
-                       patch_format=stgit-series
-                       ;;
-               "# HG changeset patch")
-                       patch_format=hg
-                       ;;
-               *)
-                       # if the second line is empty and the third is
-                       # a From, Author or Date entry, this is very
-                       # likely an StGIT patch
-                       case "$l2,$l3" in
-                       ,"From: "* | ,"Author: "* | ,"Date: "*)
-                               patch_format=stgit
-                               ;;
-                       *)
-                               ;;
-                       esac
-                       ;;
-               esac
-               if test -z "$patch_format" &&
-                       test -n "$l1" &&
-                       test -n "$l2" &&
-                       test -n "$l3"
-               then
-                       # This begins with three non-empty lines.  Is this a
-                       # piece of e-mail a-la RFC2822?  Grab all the headers,
-                       # discarding the indented remainder of folded lines,
-                       # and see if it looks like that they all begin with the
-                       # header field names...
-                       tr -d '\015' <"$1" |
-                       sed -n -e '/^$/q' -e '/^[       ]/d' -e p |
-                       sane_egrep -v '^[!-9;-~]+:' >/dev/null ||
-                       patch_format=mbox
-               fi
-       } < "$1" || clean_abort
-}
-
-split_patches () {
-       case "$patch_format" in
-       mbox)
-               if test t = "$keepcr"
-               then
-                   keep_cr=--keep-cr
-               else
-                   keep_cr=
-               fi
-               git mailsplit -d"$prec" -o"$dotest" -b $keep_cr -- "$@" > "$dotest/last" ||
-               clean_abort
-               ;;
-       stgit-series)
-               if test $# -ne 1
-               then
-                       clean_abort "$(gettext "Only one StGIT patch series can be applied at once")"
-               fi
-               series_dir=$(dirname "$1")
-               series_file="$1"
-               shift
-               {
-                       set x
-                       while read filename
-                       do
-                               set "$@" "$series_dir/$filename"
-                       done
-                       # remove the safety x
-                       shift
-                       # remove the arg coming from the first-line comment
-                       shift
-               } < "$series_file" || clean_abort
-               # set the patch format appropriately
-               patch_format=stgit
-               # now handle the actual StGIT patches
-               split_patches "$@"
-               ;;
-       stgit)
-               this=0
-               test 0 -eq "$#" && set -- -
-               for stgit in "$@"
-               do
-                       this=$(expr "$this" + 1)
-                       msgnum=$(printf "%0${prec}d" $this)
-                       # Perl version of StGIT parse_patch. The first nonempty line
-                       # not starting with Author, From or Date is the
-                       # subject, and the body starts with the next nonempty
-                       # line not starting with Author, From or Date
-                       @@PERL@@ -ne 'BEGIN { $subject = 0 }
-                               if ($subject > 1) { print ; }
-                               elsif (/^\s+$/) { next ; }
-                               elsif (/^Author:/) { s/Author/From/ ; print ;}
-                               elsif (/^(From|Date)/) { print ; }
-                               elsif ($subject) {
-                                       $subject = 2 ;
-                                       print "\n" ;
-                                       print ;
-                               } else {
-                                       print "Subject: ", $_ ;
-                                       $subject = 1;
-                               }
-                       ' -- "$stgit" >"$dotest/$msgnum" || clean_abort
-               done
-               echo "$this" > "$dotest/last"
-               this=
-               msgnum=
-               ;;
-       hg)
-               this=0
-               test 0 -eq "$#" && set -- -
-               for hg in "$@"
-               do
-                       this=$(( $this + 1 ))
-                       msgnum=$(printf "%0${prec}d" $this)
-                       # hg stores changeset metadata in #-commented lines preceding
-                       # the commit message and diff(s). The only metadata we care about
-                       # are the User and Date (Node ID and Parent are hashes which are
-                       # only relevant to the hg repository and thus not useful to us)
-                       # Since we cannot guarantee that the commit message is in
-                       # git-friendly format, we put no Subject: line and just consume
-                       # all of the message as the body
-                       LANG=C LC_ALL=C @@PERL@@ -M'POSIX qw(strftime)' -ne 'BEGIN { $subject = 0 }
-                               if ($subject) { print ; }
-                               elsif (/^\# User /) { s/\# User/From:/ ; print ; }
-                               elsif (/^\# Date /) {
-                                       my ($hashsign, $str, $time, $tz) = split ;
-                                       $tz_str = sprintf "%+05d", (0-$tz)/36;
-                                       print "Date: " .
-                                             strftime("%a, %d %b %Y %H:%M:%S ",
-                                                      gmtime($time-$tz))
-                                             . "$tz_str\n";
-                               } elsif (/^\# /) { next ; }
-                               else {
-                                       print "\n", $_ ;
-                                       $subject = 1;
-                               }
-                       ' -- "$hg" >"$dotest/$msgnum" || clean_abort
-               done
-               echo "$this" >"$dotest/last"
-               this=
-               msgnum=
-               ;;
-       *)
-               if test -n "$patch_format"
-               then
-                       clean_abort "$(eval_gettext "Patch format \$patch_format is not supported.")"
-               else
-                       clean_abort "$(gettext "Patch format detection failed.")"
-               fi
-               ;;
-       esac
-}
-
-prec=4
-dotest="$GIT_DIR/rebase-apply"
-sign= utf8=t keep= keepcr= skip= interactive= resolved= rebasing= abort=
-messageid= resolvemsg= resume= scissors= no_inbody_headers=
-git_apply_opt=
-committer_date_is_author_date=
-ignore_date=
-allow_rerere_autoupdate=
-gpg_sign_opt=
-threeway=
-
-if test "$(git config --bool --get am.messageid)" = true
-then
-    messageid=t
-fi
-
-if test "$(git config --bool --get am.keepcr)" = true
-then
-    keepcr=t
-fi
-
-while test $# != 0
-do
-       case "$1" in
-       -i|--interactive)
-               interactive=t ;;
-       -b|--binary)
-               gettextln >&2 "The -b/--binary option has been a no-op for a long time, and
-it will be removed. Please do not use it anymore."
-               ;;
-       -3|--3way)
-               threeway=t ;;
-       -s|--signoff)
-               sign=t ;;
-       -u|--utf8)
-               utf8=t ;; # this is now default
-       --no-utf8)
-               utf8= ;;
-       -m|--message-id)
-               messageid=t ;;
-       --no-message-id)
-               messageid=f ;;
-       -k|--keep)
-               keep=t ;;
-       --keep-non-patch)
-               keep=b ;;
-       -c|--scissors)
-               scissors=t ;;
-       --no-scissors)
-               scissors=f ;;
-       -r|--resolved|--continue)
-               resolved=t ;;
-       --skip)
-               skip=t ;;
-       --abort)
-               abort=t ;;
-       --rebasing)
-               rebasing=t threeway=t ;;
-       --resolvemsg=*)
-               resolvemsg="${1#--resolvemsg=}" ;;
-       --whitespace=*|--directory=*|--exclude=*|--include=*)
-               git_apply_opt="$git_apply_opt $(sq "$1")" ;;
-       -C*|-p*)
-               git_apply_opt="$git_apply_opt $(sq "$1")" ;;
-       --patch-format=*)
-               patch_format="${1#--patch-format=}" ;;
-       --reject|--ignore-whitespace|--ignore-space-change)
-               git_apply_opt="$git_apply_opt $1" ;;
-       --committer-date-is-author-date)
-               committer_date_is_author_date=t ;;
-       --ignore-date)
-               ignore_date=t ;;
-       --rerere-autoupdate|--no-rerere-autoupdate)
-               allow_rerere_autoupdate="$1" ;;
-       -q|--quiet)
-               GIT_QUIET=t ;;
-       --keep-cr)
-               keepcr=t ;;
-       --no-keep-cr)
-               keepcr=f ;;
-       --gpg-sign)
-               gpg_sign_opt=-S ;;
-       --gpg-sign=*)
-               gpg_sign_opt="-S${1#--gpg-sign=}" ;;
-       --)
-               shift; break ;;
-       *)
-               usage ;;
-       esac
-       shift
-done
-
-# If the dotest directory exists, but we have finished applying all the
-# patches in them, clear it out.
-if test -d "$dotest" &&
-   test -f "$dotest/last" &&
-   test -f "$dotest/next" &&
-   last=$(cat "$dotest/last") &&
-   next=$(cat "$dotest/next") &&
-   test $# != 0 &&
-   test "$next" -gt "$last"
-then
-   rm -fr "$dotest"
-fi
-
-if test -d "$dotest" && test -f "$dotest/last" && test -f "$dotest/next"
-then
-       case "$#,$skip$resolved$abort" in
-       0,*t*)
-               # Explicit resume command and we do not have a file, so
-               # we are happy.
-               : ;;
-       0,)
-               # No file input but without resume parameters; catch
-               # user error to feed us a patch from standard input
-               # when there is already $dotest.  This is somewhat
-               # unreliable -- stdin could be /dev/null for example
-               # and the caller did not intend to feed us a patch but
-               # wanted to continue unattended.
-               test -t 0
-               ;;
-       *)
-               false
-               ;;
-       esac ||
-       die "$(eval_gettext "previous rebase directory \$dotest still exists but mbox given.")"
-       resume=yes
-
-       case "$skip,$abort" in
-       t,t)
-               die "$(gettext "Please make up your mind. --skip or --abort?")"
-               ;;
-       t,)
-               git rerere clear
-               head_tree=$(git rev-parse --verify -q HEAD || echo $empty_tree) &&
-               git read-tree --reset -u $head_tree $head_tree &&
-               index_tree=$(git write-tree) &&
-               git read-tree -m -u $index_tree $head_tree
-               git read-tree -m $head_tree
-               ;;
-       ,t)
-               if test -f "$dotest/rebasing"
-               then
-                       exec git rebase --abort
-               fi
-               git rerere clear
-               if safe_to_abort
-               then
-                       head_tree=$(git rev-parse --verify -q HEAD || echo $empty_tree) &&
-                       git read-tree --reset -u $head_tree $head_tree &&
-                       index_tree=$(git write-tree) &&
-                       orig_head=$(git rev-parse --verify -q ORIG_HEAD || echo $empty_tree) &&
-                       git read-tree -m -u $index_tree $orig_head
-                       if git rev-parse --verify -q ORIG_HEAD >/dev/null 2>&1
-                       then
-                               git reset ORIG_HEAD
-                       else
-                               git read-tree $empty_tree
-                               curr_branch=$(git symbolic-ref HEAD 2>/dev/null) &&
-                               git update-ref -d $curr_branch
-                       fi
-               fi
-               rm -fr "$dotest"
-               exit ;;
-       esac
-       rm -f "$dotest/dirtyindex"
-else
-       # Possible stray $dotest directory in the independent-run
-       # case; in the --rebasing case, it is up to the caller
-       # (git-rebase--am) to take care of stray directories.
-       if test -d "$dotest" && test -z "$rebasing"
-       then
-               case "$skip,$resolved,$abort" in
-               ,,t)
-                       rm -fr "$dotest"
-                       exit 0
-                       ;;
-               *)
-                       die "$(eval_gettext "Stray \$dotest directory found.
-Use \"git am --abort\" to remove it.")"
-                       ;;
-               esac
-       fi
-
-       # Make sure we are not given --skip, --continue, or --abort
-       test "$skip$resolved$abort" = "" ||
-               die "$(gettext "Resolve operation not in progress, we are not resuming.")"
-
-       # Start afresh.
-       mkdir -p "$dotest" || exit
-
-       if test -n "$prefix" && test $# != 0
-       then
-               first=t
-               for arg
-               do
-                       test -n "$first" && {
-                               set x
-                               first=
-                       }
-                       if is_absolute_path "$arg"
-                       then
-                               set "$@" "$arg"
-                       else
-                               set "$@" "$prefix$arg"
-                       fi
-               done
-               shift
-       fi
-
-       check_patch_format "$@"
-
-       split_patches "$@"
-
-       # -i can and must be given when resuming; everything
-       # else is kept
-       echo " $git_apply_opt" >"$dotest/apply-opt"
-       echo "$threeway" >"$dotest/threeway"
-       echo "$sign" >"$dotest/sign"
-       echo "$utf8" >"$dotest/utf8"
-       echo "$keep" >"$dotest/keep"
-       echo "$messageid" >"$dotest/messageid"
-       echo "$scissors" >"$dotest/scissors"
-       echo "$no_inbody_headers" >"$dotest/no_inbody_headers"
-       echo "$GIT_QUIET" >"$dotest/quiet"
-       echo 1 >"$dotest/next"
-       if test -n "$rebasing"
-       then
-               : >"$dotest/rebasing"
-       else
-               : >"$dotest/applying"
-               if test -n "$HAS_HEAD"
-               then
-                       git update-ref ORIG_HEAD HEAD
-               else
-                       git update-ref -d ORIG_HEAD >/dev/null 2>&1
-               fi
-       fi
-fi
-
-git update-index -q --refresh
-
-case "$resolved" in
-'')
-       case "$HAS_HEAD" in
-       '')
-               files=$(git ls-files) ;;
-       ?*)
-               files=$(git diff-index --cached --name-only HEAD --) ;;
-       esac || exit
-       if test "$files"
-       then
-               test -n "$HAS_HEAD" && : >"$dotest/dirtyindex"
-               die "$(eval_gettext "Dirty index: cannot apply patches (dirty: \$files)")"
-       fi
-esac
-
-# Now, decide what command line options we will give to the git
-# commands we invoke, based on the result of parsing command line
-# options and previous invocation state stored in $dotest/ files.
-
-if test "$(cat "$dotest/utf8")" = t
-then
-       utf8=-u
-else
-       utf8=-n
-fi
-keep=$(cat "$dotest/keep")
-case "$keep" in
-t)
-       keep=-k ;;
-b)
-       keep=-b ;;
-*)
-       keep= ;;
-esac
-case "$(cat "$dotest/messageid")" in
-t)
-       messageid=-m ;;
-f)
-       messageid= ;;
-esac
-case "$(cat "$dotest/scissors")" in
-t)
-       scissors=--scissors ;;
-f)
-       scissors=--no-scissors ;;
-esac
-if test "$(cat "$dotest/no_inbody_headers")" = t
-then
-       no_inbody_headers=--no-inbody-headers
-else
-       no_inbody_headers=
-fi
-if test "$(cat "$dotest/quiet")" = t
-then
-       GIT_QUIET=t
-fi
-if test "$(cat "$dotest/threeway")" = t
-then
-       threeway=t
-fi
-git_apply_opt=$(cat "$dotest/apply-opt")
-if test "$(cat "$dotest/sign")" = t
-then
-       SIGNOFF=$(git var GIT_COMMITTER_IDENT | sed -e '
-                       s/>.*/>/
-                       s/^/Signed-off-by: /'
-               )
-else
-       SIGNOFF=
-fi
-
-last=$(cat "$dotest/last")
-this=$(cat "$dotest/next")
-if test "$skip" = t
-then
-       this=$(expr "$this" + 1)
-       resume=
-fi
-
-while test "$this" -le "$last"
-do
-       msgnum=$(printf "%0${prec}d" $this)
-       next=$(expr "$this" + 1)
-       test -f "$dotest/$msgnum" || {
-               resume=
-               go_next
-               continue
-       }
-
-       # If we are not resuming, parse and extract the patch information
-       # into separate files:
-       #  - info records the authorship and title
-       #  - msg is the rest of commit log message
-       #  - patch is the patch body.
-       #
-       # When we are resuming, these files are either already prepared
-       # by the user, or the user can tell us to do so by --continue flag.
-       case "$resume" in
-       '')
-               if test -f "$dotest/rebasing"
-               then
-                       commit=$(sed -e 's/^From \([0-9a-f]*\) .*/\1/' \
-                               -e q "$dotest/$msgnum") &&
-                       test "$(git cat-file -t "$commit")" = commit ||
-                               stop_here $this
-                       git cat-file commit "$commit" |
-                       sed -e '1,/^$/d' >"$dotest/msg-clean"
-                       echo "$commit" >"$dotest/original-commit"
-                       get_author_ident_from_commit "$commit" >"$dotest/author-script"
-                       git diff-tree --root --binary --full-index "$commit" >"$dotest/patch"
-               else
-                       git mailinfo $keep $no_inbody_headers $messageid $scissors $utf8 "$dotest/msg" "$dotest/patch" \
-                               <"$dotest/$msgnum" >"$dotest/info" ||
-                               stop_here $this
-
-                       # skip pine's internal folder data
-                       sane_grep '^Author: Mail System Internal Data$' \
-                               <"$dotest"/info >/dev/null &&
-                               go_next && continue
-
-                       test -s "$dotest/patch" || {
-                               eval_gettextln "Patch is empty.  Was it split wrong?
-If you would prefer to skip this patch, instead run \"\$cmdline --skip\".
-To restore the original branch and stop patching run \"\$cmdline --abort\"."
-                               stop_here $this
-                       }
-                       rm -f "$dotest/original-commit" "$dotest/author-script"
-                       {
-                               sed -n '/^Subject/ s/Subject: //p' "$dotest/info"
-                               echo
-                               cat "$dotest/msg"
-                       } |
-                       git stripspace > "$dotest/msg-clean"
-               fi
-               ;;
-       esac
-
-       if test -f "$dotest/author-script"
-       then
-               eval $(cat "$dotest/author-script")
-       else
-               GIT_AUTHOR_NAME="$(sed -n '/^Author/ s/Author: //p' "$dotest/info")"
-               GIT_AUTHOR_EMAIL="$(sed -n '/^Email/ s/Email: //p' "$dotest/info")"
-               GIT_AUTHOR_DATE="$(sed -n '/^Date/ s/Date: //p' "$dotest/info")"
-       fi
-
-       if test -z "$GIT_AUTHOR_EMAIL"
-       then
-               gettextln "Patch does not have a valid e-mail address."
-               stop_here $this
-       fi
-
-       export GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL GIT_AUTHOR_DATE
-
-       case "$resume" in
-       '')
-           if test '' != "$SIGNOFF"
-           then
-               LAST_SIGNED_OFF_BY=$(
-                   sed -ne '/^Signed-off-by: /p' \
-                   "$dotest/msg-clean" |
-                   sed -ne '$p'
-               )
-               ADD_SIGNOFF=$(
-                   test "$LAST_SIGNED_OFF_BY" = "$SIGNOFF" || {
-                   test '' = "$LAST_SIGNED_OFF_BY" && echo
-                   echo "$SIGNOFF"
-               })
-           else
-               ADD_SIGNOFF=
-           fi
-           {
-               if test -s "$dotest/msg-clean"
-               then
-                       cat "$dotest/msg-clean"
-               fi
-               if test '' != "$ADD_SIGNOFF"
-               then
-                       echo "$ADD_SIGNOFF"
-               fi
-           } >"$dotest/final-commit"
-           ;;
-       *)
-               case "$resolved$interactive" in
-               tt)
-                       # This is used only for interactive view option.
-                       git diff-index -p --cached HEAD -- >"$dotest/patch"
-                       ;;
-               esac
-       esac
-
-       resume=
-       if test "$interactive" = t
-       then
-           test -t 0 ||
-           die "$(gettext "cannot be interactive without stdin connected to a terminal.")"
-           action=again
-           while test "$action" = again
-           do
-               gettextln "Commit Body is:"
-               echo "--------------------------"
-               cat "$dotest/final-commit"
-               echo "--------------------------"
-               # TRANSLATORS: Make sure to include [y], [n], [e], [v] and [a]
-               # in your translation. The program will only accept English
-               # input at this point.
-               gettext "Apply? [y]es/[n]o/[e]dit/[v]iew patch/[a]ccept all "
-               read reply
-               case "$reply" in
-               [yY]*) action=yes ;;
-               [aA]*) action=yes interactive= ;;
-               [nN]*) action=skip ;;
-               [eE]*) git_editor "$dotest/final-commit"
-                      action=again ;;
-               [vV]*) action=again
-                      git_pager "$dotest/patch" ;;
-               *)     action=again ;;
-               esac
-           done
-       else
-           action=yes
-       fi
-
-       if test $action = skip
-       then
-               go_next
-               continue
-       fi
-
-       hook="$(git rev-parse --git-path hooks/applypatch-msg)"
-       if test -x "$hook"
-       then
-               "$hook" "$dotest/final-commit" || stop_here $this
-       fi
-
-       if test -f "$dotest/final-commit"
-       then
-               FIRSTLINE=$(sed 1q "$dotest/final-commit")
-       else
-               FIRSTLINE=""
-       fi
-
-       say "$(eval_gettext "Applying: \$FIRSTLINE")"
-
-       case "$resolved" in
-       '')
-               # When we are allowed to fall back to 3-way later, don't give
-               # false errors during the initial attempt.
-               squelch=
-               if test "$threeway" = t
-               then
-                       squelch='>/dev/null 2>&1 '
-               fi
-               eval "git apply $squelch$git_apply_opt"' --index "$dotest/patch"'
-               apply_status=$?
-               ;;
-       t)
-               # Resolved means the user did all the hard work, and
-               # we do not have to do any patch application.  Just
-               # trust what the user has in the index file and the
-               # working tree.
-               resolved=
-               git diff-index --quiet --cached HEAD -- && {
-                       gettextln "No changes - did you forget to use 'git add'?
-If there is nothing left to stage, chances are that something else
-already introduced the same changes; you might want to skip this patch."
-                       stop_here_user_resolve $this
-               }
-               unmerged=$(git ls-files -u)
-               if test -n "$unmerged"
-               then
-                       gettextln "You still have unmerged paths in your index
-did you forget to use 'git add'?"
-                       stop_here_user_resolve $this
-               fi
-               apply_status=0
-               git rerere
-               ;;
-       esac
-
-       if test $apply_status != 0 && test "$threeway" = t
-       then
-               if (fall_back_3way)
-               then
-                   # Applying the patch to an earlier tree and merging the
-                   # result may have produced the same tree as ours.
-                   git diff-index --quiet --cached HEAD -- && {
-                       say "$(gettext "No changes -- Patch already applied.")"
-                       go_next
-                       continue
-                   }
-                   # clear apply_status -- we have successfully merged.
-                   apply_status=0
-               fi
-       fi
-       if test $apply_status != 0
-       then
-               eval_gettextln 'Patch failed at $msgnum $FIRSTLINE'
-               if test "$(git config --bool advice.amworkdir)" != false
-               then
-                       eval_gettextln 'The copy of the patch that failed is found in:
-   $dotest/patch'
-               fi
-               stop_here_user_resolve $this
-       fi
-
-       hook="$(git rev-parse --git-path hooks/pre-applypatch)"
-       if test -x "$hook"
-       then
-               "$hook" || stop_here $this
-       fi
-
-       tree=$(git write-tree) &&
-       commit=$(
-               if test -n "$ignore_date"
-               then
-                       GIT_AUTHOR_DATE=
-               fi
-               parent=$(git rev-parse --verify -q HEAD) ||
-               say >&2 "$(gettext "applying to an empty history")"
-
-               if test -n "$committer_date_is_author_date"
-               then
-                       GIT_COMMITTER_DATE="$GIT_AUTHOR_DATE"
-                       export GIT_COMMITTER_DATE
-               fi &&
-               git commit-tree ${parent:+-p} $parent ${gpg_sign_opt:+"$gpg_sign_opt"} $tree  \
-                       <"$dotest/final-commit"
-       ) &&
-       git update-ref -m "$GIT_REFLOG_ACTION: $FIRSTLINE" HEAD $commit $parent ||
-       stop_here $this
-
-       if test -f "$dotest/original-commit"; then
-               echo "$(cat "$dotest/original-commit") $commit" >> "$dotest/rewritten"
-       fi
-
-       hook="$(git rev-parse --git-path hooks/post-applypatch)"
-       test -x "$hook" && "$hook"
-
-       go_next
-done
-
-if test -s "$dotest"/rewritten; then
-    git notes copy --for-rewrite=rebase < "$dotest"/rewritten
-    hook="$(git rev-parse --git-path hooks/post-rewrite)"
-    if test -x "$hook"; then
-       "$hook" rebase < "$dotest"/rewritten
-    fi
-fi
-
-# If am was called with --rebasing (from git-rebase--am), it's up to
-# the caller to take care of housekeeping.
-if ! test -f "$dotest/rebasing"
-then
-       rm -fr "$dotest"
-       git gc --auto
-fi
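
The helpers removed above reduce patch-format detection to the signature on the first non-blank line of the input. As a standalone sketch of that heuristic, assuming a readable patch file as the only argument (the name detect_patch_format is illustrative, not something the script defines):

detect_patch_format () {
	# Print mbox, stgit-series, hg, or unknown for the file named in $1,
	# using the same first-line signatures the removed code checks for.
	{
		l1=
		while test -z "$l1"
		do
			read l1 || break
		done
		case "$l1" in
		"From "* | "From: "*)
			echo mbox ;;
		'# This series applies on GIT commit'*)
			echo stgit-series ;;
		"# HG changeset patch")
			echo hg ;;
		*)
			echo unknown ;;
		esac
	} <"$1"
}

Anything reported as unknown would still need the fuller RFC 2822 header probe that the script applies as a fallback.
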
diff --git a/contrib/examples/git-checkout.sh b/contrib/examples/git-checkout.sh
deleted file mode 100755 (executable)
index 683cae7..0000000
+++ /dev/null
@@ -1,302 +0,0 @@
-#!/bin/sh
-
-OPTIONS_KEEPDASHDASH=t
-OPTIONS_SPEC="\
-git-checkout [options] [<branch>] [<paths>...]
---
-b=          create a new branch started at <branch>
-l           create the new branch's reflog
-track       arrange that the new branch tracks the remote branch
-f           proceed even if the index or working tree is not HEAD
-m           merge local modifications into the new branch
-q,quiet     be quiet
-"
-SUBDIRECTORY_OK=Sometimes
-. git-sh-setup
-require_work_tree
-
-old_name=HEAD
-old=$(git rev-parse --verify $old_name 2>/dev/null)
-oldbranch=$(git symbolic-ref $old_name 2>/dev/null)
-new=
-new_name=
-force=
-branch=
-track=
-newbranch=
-newbranch_log=
-merge=
-quiet=
-v=-v
-LF='
-'
-
-while test $# != 0; do
-       case "$1" in
-       -b)
-               shift
-               newbranch="$1"
-               [ -z "$newbranch" ] &&
-                       die "git checkout: -b needs a branch name"
-               git show-ref --verify --quiet -- "refs/heads/$newbranch" &&
-                       die "git checkout: branch $newbranch already exists"
-               git check-ref-format "heads/$newbranch" ||
-                       die "git checkout: we do not like '$newbranch' as a branch name."
-               ;;
-       -l)
-               newbranch_log=-l
-               ;;
-       --track|--no-track)
-               track="$1"
-               ;;
-       -f)
-               force=1
-               ;;
-       -m)
-               merge=1
-               ;;
-       -q|--quiet)
-               quiet=1
-               v=
-               ;;
-       --)
-               shift
-               break
-               ;;
-       *)
-               usage
-               ;;
-       esac
-       shift
-done
-
-arg="$1"
-rev=$(git rev-parse --verify "$arg" 2>/dev/null)
-if rev=$(git rev-parse --verify "$rev^0" 2>/dev/null)
-then
-       [ -z "$rev" ] && die "unknown flag $arg"
-       new_name="$arg"
-       if git show-ref --verify --quiet -- "refs/heads/$arg"
-       then
-               rev=$(git rev-parse --verify "refs/heads/$arg^0")
-               branch="$arg"
-       fi
-       new="$rev"
-       shift
-elif rev=$(git rev-parse --verify "$rev^{tree}" 2>/dev/null)
-then
-       # checking out selected paths from a tree-ish.
-       new="$rev"
-       new_name="$rev^{tree}"
-       shift
-fi
-[ "$1" = "--" ] && shift
-
-case "$newbranch,$track" in
-,--*)
-       die "git checkout: --track and --no-track require -b"
-esac
-
-case "$force$merge" in
-11)
-       die "git checkout: -f and -m are incompatible"
-esac
-
-# The behaviour of the command with and without explicit path
-# parameters is quite different.
-#
-# Without paths, we are checking out everything in the work tree,
-# possibly switching branches.  This is the traditional behaviour.
-#
-# With paths, we are _never_ switching branch, but checking out
-# the named paths from either index (when no rev is given),
-# or the named tree-ish (when rev is given).
-
-if test "$#" -ge 1
-then
-       hint=
-       if test "$#" -eq 1
-       then
-               hint="
-Did you intend to checkout '$@' which can not be resolved as commit?"
-       fi
-       if test '' != "$newbranch$force$merge"
-       then
-               die "git checkout: updating paths is incompatible with switching branches/forcing$hint"
-       fi
-       if test '' != "$new"
-       then
-               # from a specific tree-ish; note that this is for
-               # rescuing paths and is never meant to remove what
-               # is not in the named tree-ish.
-               git ls-tree --full-name -r "$new" "$@" |
-               git update-index --index-info || exit $?
-       fi
-
-       # Make sure the request is about existing paths.
-       git ls-files --full-name --error-unmatch -- "$@" >/dev/null || exit
-       git ls-files --full-name -- "$@" |
-               (cd_to_toplevel && git checkout-index -f -u --stdin)
-
-       # Run a post-checkout hook -- the HEAD does not change so the
-       # current HEAD is passed in for both args
-       if test -x "$GIT_DIR"/hooks/post-checkout; then
-           "$GIT_DIR"/hooks/post-checkout $old $old 0
-       fi
-
-       exit $?
-else
-       # Make sure we did not fall back on $arg^{tree} codepath
-       # since we are not checking out from an arbitrary tree-ish,
-       # but switching branches.
-       if test '' != "$new"
-       then
-               git rev-parse --verify "$new^{commit}" >/dev/null 2>&1 ||
-               die "Cannot switch branch to a non-commit."
-       fi
-fi
-
-# We are switching branches and checking out trees, so
-# we *NEED* to be at the toplevel.
-cd_to_toplevel
-
-[ -z "$new" ] && new=$old && new_name="$old_name"
-
-# If we don't have an existing branch that we're switching to,
-# and we don't have a new branch name for the target we
-# are switching to, then we are detaching our HEAD from any
-# branch.  However, if "git checkout HEAD" detaches the HEAD
-# from the current branch, even though that may be logically
-# correct, it feels somewhat funny.  More importantly, we do not
-# want "git checkout" or "git checkout -f" to detach HEAD.
-
-detached=
-detach_warn=
-
-describe_detached_head () {
-       test -n "$quiet" || {
-               printf >&2 "$1 "
-               GIT_PAGER= git log >&2 -1 --pretty=oneline --abbrev-commit "$2" --
-       }
-}
-
-if test -z "$branch$newbranch" && test "$new_name" != "$old_name"
-then
-       detached="$new"
-       if test -n "$oldbranch" && test -z "$quiet"
-       then
-               detach_warn="Note: moving to \"$new_name\" which isn't a local branch
-If you want to create a new branch from this checkout, you may do so
-(now or later) by using -b with the checkout command again. Example:
-  git checkout -b <new_branch_name>"
-       fi
-elif test -z "$oldbranch" && test "$new" != "$old"
-then
-       describe_detached_head 'Previous HEAD position was' "$old"
-fi
-
-if [ "X$old" = X ]
-then
-       if test -z "$quiet"
-       then
-               echo >&2 "warning: You appear to be on a branch yet to be born."
-               echo >&2 "warning: Forcing checkout of $new_name."
-       fi
-       force=1
-fi
-
-if [ "$force" ]
-then
-    git read-tree $v --reset -u $new
-else
-    git update-index --refresh >/dev/null
-    git read-tree $v -m -u --exclude-per-directory=.gitignore $old $new || (
-       case "$merge,$v" in
-       ,*)
-               exit 1 ;;
-       1,)
-               ;; # quiet
-       *)
-               echo >&2 "Falling back to 3-way merge..." ;;
-       esac
-
-       # Match the index to the working tree, and do a three-way.
-       git diff-files --name-only | git update-index --remove --stdin &&
-       work=$(git write-tree) &&
-       git read-tree $v --reset -u $new || exit
-
-       eval GITHEAD_$new='${new_name:-${branch:-$new}}' &&
-       eval GITHEAD_$work=local &&
-       export GITHEAD_$new GITHEAD_$work &&
-       git merge-recursive $old -- $new $work
-
-       # Do not register the cleanly merged paths in the index yet.
-       # this is not a real merge before committing, but just carrying
-       # the working tree changes along.
-       unmerged=$(git ls-files -u)
-       git read-tree $v --reset $new
-       case "$unmerged" in
-       '')     ;;
-       *)
-               (
-                       z40=0000000000000000000000000000000000000000
-                       echo "$unmerged" |
-                       sed -e 's/^[0-7]* [0-9a-f]* /'"0 $z40 /"
-                       echo "$unmerged"
-               ) | git update-index --index-info
-               ;;
-       esac
-       exit 0
-    )
-    saved_err=$?
-    if test "$saved_err" = 0 && test -z "$quiet"
-    then
-       git diff-index --name-status "$new"
-    fi
-    (exit $saved_err)
-fi
-
-#
-# Switch the HEAD pointer to the new branch if we
-# checked out a branch head, and remove any potential
-# old MERGE_HEAD's (subsequent commits will clearly not
-# be based on them, since we re-set the index)
-#
-if [ "$?" -eq 0 ]; then
-       if [ "$newbranch" ]; then
-               git branch $track $newbranch_log "$newbranch" "$new_name" || exit
-               branch="$newbranch"
-       fi
-       if test -n "$branch"
-       then
-               old_branch_name=$(expr "z$oldbranch" : 'zrefs/heads/\(.*\)')
-               GIT_DIR="$GIT_DIR" git symbolic-ref -m "checkout: moving from ${old_branch_name:-$old} to $branch" HEAD "refs/heads/$branch"
-               if test -n "$quiet"
-               then
-                       true    # nothing
-               elif test "refs/heads/$branch" = "$oldbranch"
-               then
-                       echo >&2 "Already on branch \"$branch\""
-               else
-                       echo >&2 "Switched to${newbranch:+ a new} branch \"$branch\""
-               fi
-       elif test -n "$detached"
-       then
-               old_branch_name=$(expr "z$oldbranch" : 'zrefs/heads/\(.*\)')
-               git update-ref --no-deref -m "checkout: moving from ${old_branch_name:-$old} to $arg" HEAD "$detached" ||
-                       die "Cannot detach HEAD"
-               if test -n "$detach_warn"
-               then
-                       echo >&2 "$detach_warn"
-               fi
-               describe_detached_head 'HEAD is now at' HEAD
-       fi
-       rm -f "$GIT_DIR/MERGE_HEAD"
-else
-       exit 1
-fi
-
-# Run a post-checkout hook
-if test -x "$GIT_DIR"/hooks/post-checkout; then
-       "$GIT_DIR"/hooks/post-checkout $old $new 1
-fi
diff --git a/contrib/examples/git-clean.sh b/contrib/examples/git-clean.sh
deleted file mode 100755 (executable)
index 01c95e9..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005-2006 Pavel Roskin
-#
-
-OPTIONS_KEEPDASHDASH=
-OPTIONS_SPEC="\
-git-clean [options] <paths>...
-
-Clean untracked files from the working directory
-
-When optional <paths>... arguments are given, the paths
-affected are further limited to those that match them.
---
-d remove directories as well
-f override clean.requireForce and clean anyway
-n don't remove anything, just show what would be done
-q be quiet, only report errors
-x remove ignored files as well
-X remove only ignored files"
-
-SUBDIRECTORY_OK=Yes
-. git-sh-setup
-require_work_tree
-
-ignored=
-ignoredonly=
-cleandir=
-rmf="rm -f --"
-rmrf="rm -rf --"
-rm_refuse="echo Not removing"
-echo1="echo"
-
-disabled=$(git config --bool clean.requireForce)
-
-while test $# != 0
-do
-       case "$1" in
-       -d)
-               cleandir=1
-               ;;
-       -f)
-               disabled=false
-               ;;
-       -n)
-               disabled=false
-               rmf="echo Would remove"
-               rmrf="echo Would remove"
-               rm_refuse="echo Would not remove"
-               echo1=":"
-               ;;
-       -q)
-               echo1=":"
-               ;;
-       -x)
-               ignored=1
-               ;;
-       -X)
-               ignoredonly=1
-               ;;
-       --)
-               shift
-               break
-               ;;
-       *)
-               usage # should not happen
-               ;;
-       esac
-       shift
-done
-
-# requireForce used to default to false but now it defaults to true.
-# IOW, lack of explicit "clean.requireForce = false" is taken as
-# "clean.requireForce = true".
-case "$disabled" in
-"")
-       die "clean.requireForce not set and -n or -f not given; refusing to clean"
-       ;;
-"true")
-       die "clean.requireForce set and -n or -f not given; refusing to clean"
-       ;;
-esac
-
-if [ "$ignored,$ignoredonly" = "1,1" ]; then
-       die "-x and -X cannot be set together"
-fi
-
-if [ -z "$ignored" ]; then
-       excl="--exclude-per-directory=.gitignore"
-       excl_info= excludes_file=
-       if [ -f "$GIT_DIR/info/exclude" ]; then
-               excl_info="--exclude-from=$GIT_DIR/info/exclude"
-       fi
-       if cfg_excl=$(git config core.excludesfile) && test -f "$cfg_excl"
-       then
-               excludes_file="--exclude-from=$cfg_excl"
-       fi
-       if [ "$ignoredonly" ]; then
-               excl="$excl --ignored"
-       fi
-fi
-
-git ls-files --others --directory \
-       $excl ${excl_info:+"$excl_info"} ${excludes_file:+"$excludes_file"} \
-       -- "$@" |
-while read -r file; do
-       if [ -d "$file" -a ! -L "$file" ]; then
-               if [ -z "$cleandir" ]; then
-                       $rm_refuse "$file"
-                       continue
-               fi
-               $echo1 "Removing $file"
-               $rmrf "$file"
-       else
-               $echo1 "Removing $file"
-               $rmf "$file"
-       fi
-done
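
The heart of the removed script is the pipeline that lists removal candidates; the rest is option handling. A dry-run-only sketch of that pipeline, run from the top level of a working tree, with the exclude sources spelled out literally instead of being resolved from configuration:

# Sketch: report what a clean of untracked files would remove, deleting nothing.
git ls-files --others --directory \
	--exclude-per-directory=.gitignore \
	--exclude-from=.git/info/exclude \
	-- "$@" |
while read -r file
do
	echo "Would remove $file"
done

The real script additionally refuses to remove directories unless -d was given, and swaps the echo for rm -f / rm -rf once clean.requireForce has been satisfied.
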
diff --git a/contrib/examples/git-clone.sh b/contrib/examples/git-clone.sh
deleted file mode 100755 (executable)
index 08cf246..0000000
+++ /dev/null
@@ -1,525 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005, Linus Torvalds
-# Copyright (c) 2005, Junio C Hamano
-#
-# Clone a repository into a different directory that does not yet exist.
-
-# See git-sh-setup why.
-unset CDPATH
-
-OPTIONS_SPEC="\
-git-clone [options] [--] <repo> [<dir>]
---
-n,no-checkout        don't create a checkout
-bare                 create a bare repository
-naked                create a bare repository
-l,local              to clone from a local repository
-no-hardlinks         don't use local hardlinks, always copy
-s,shared             setup as a shared repository
-template=            path to the template directory
-q,quiet              be quiet
-reference=           reference repository
-o,origin=            use <name> instead of 'origin' to track upstream
-u,upload-pack=       path to git-upload-pack on the remote
-depth=               create a shallow clone of that depth
-
-use-separate-remote  compatibility, do not use
-no-separate-remote   compatibility, do not use"
-
-die() {
-       echo >&2 "$@"
-       exit 1
-}
-
-usage() {
-       exec "$0" -h
-}
-
-eval "$(echo "$OPTIONS_SPEC" | git rev-parse --parseopt -- "$@" || echo exit $?)"
-
-get_repo_base() {
-       (
-               cd "$(/bin/pwd)" &&
-               cd "$1" || cd "$1.git" &&
-               {
-                       cd .git
-                       pwd
-               }
-       ) 2>/dev/null
-}
-
-if [ -n "$GIT_SSL_NO_VERIFY" -o \
-       "$(git config --bool http.sslVerify)" = false ]; then
-    curl_extra_args="-k"
-fi
-
-http_fetch () {
-       # $1 = Remote, $2 = Local
-       curl -nsfL $curl_extra_args "$1" >"$2"
-       curl_exit_status=$?
-       case $curl_exit_status in
-       126|127) exit ;;
-       *)       return $curl_exit_status ;;
-       esac
-}
-
-clone_dumb_http () {
-       # $1 - remote, $2 - local
-       cd "$2" &&
-       clone_tmp="$GIT_DIR/clone-tmp" &&
-       mkdir -p "$clone_tmp" || exit 1
-       if [ -n "$GIT_CURL_FTP_NO_EPSV" -o \
-               "$(git config --bool http.noEPSV)" = true ]; then
-               curl_extra_args="${curl_extra_args} --disable-epsv"
-       fi
-       http_fetch "$1/info/refs" "$clone_tmp/refs" ||
-               die "Cannot get remote repository information.
-Perhaps git-update-server-info needs to be run there?"
-       test "z$quiet" = z && v=-v || v=
-       while read sha1 refname
-       do
-               name=$(expr "z$refname" : 'zrefs/\(.*\)') &&
-               case "$name" in
-               *^*)    continue;;
-               esac
-               case "$bare,$name" in
-               yes,* | ,heads/* | ,tags/*) ;;
-               *)      continue ;;
-               esac
-               if test -n "$use_separate_remote" &&
-                  branch_name=$(expr "z$name" : 'zheads/\(.*\)')
-               then
-                       tname="remotes/$origin/$branch_name"
-               else
-                       tname=$name
-               fi
-               git-http-fetch $v -a -w "$tname" "$sha1" "$1" || exit 1
-       done <"$clone_tmp/refs"
-       rm -fr "$clone_tmp"
-       http_fetch "$1/HEAD" "$GIT_DIR/REMOTE_HEAD" ||
-       rm -f "$GIT_DIR/REMOTE_HEAD"
-       if test -f "$GIT_DIR/REMOTE_HEAD"; then
-               head_sha1=$(cat "$GIT_DIR/REMOTE_HEAD")
-               case "$head_sha1" in
-               'ref: refs/'*)
-                       ;;
-               *)
-                       git-http-fetch $v -a "$head_sha1" "$1" ||
-                       rm -f "$GIT_DIR/REMOTE_HEAD"
-                       ;;
-               esac
-       fi
-}
-
-quiet=
-local=no
-use_local_hardlink=yes
-local_shared=no
-unset template
-no_checkout=
-upload_pack=
-bare=
-reference=
-origin=
-origin_override=
-use_separate_remote=t
-depth=
-no_progress=
-local_explicitly_asked_for=
-test -t 1 || no_progress=--no-progress
-
-while test $# != 0
-do
-       case "$1" in
-       -n|--no-checkout)
-               no_checkout=yes ;;
-       --naked|--bare)
-               bare=yes ;;
-       -l|--local)
-               local_explicitly_asked_for=yes
-               use_local_hardlink=yes
-               ;;
-       --no-hardlinks)
-               use_local_hardlink=no ;;
-       -s|--shared)
-               local_shared=yes ;;
-       --template)
-               shift; template="--template=$1" ;;
-       -q|--quiet)
-               quiet=-q ;;
-       --use-separate-remote|--no-separate-remote)
-               die "clones are always made with separate-remote layout" ;;
-       --reference)
-               shift; reference="$1" ;;
-       -o|--origin)
-               shift;
-               case "$1" in
-               '')
-                   usage ;;
-               */*)
-                   die "'$1' is not suitable for an origin name"
-               esac
-               git check-ref-format "heads/$1" ||
-                   die "'$1' is not suitable for a branch name"
-               test -z "$origin_override" ||
-                   die "Do not give more than one --origin options."
-               origin_override=yes
-               origin="$1"
-               ;;
-       -u|--upload-pack)
-               shift
-               upload_pack="--upload-pack=$1" ;;
-       --depth)
-               shift
-               depth="--depth=$1" ;;
-       --)
-               shift
-               break ;;
-       *)
-               usage ;;
-       esac
-       shift
-done
-
-repo="$1"
-test -n "$repo" ||
-    die 'you must specify a repository to clone.'
-
-# --bare implies --no-checkout and --no-separate-remote
-if test yes = "$bare"
-then
-       if test yes = "$origin_override"
-       then
-               die '--bare and --origin $origin options are incompatible.'
-       fi
-       no_checkout=yes
-       use_separate_remote=
-fi
-
-if test -z "$origin"
-then
-       origin=origin
-fi
-
-# Turn the source into an absolute path if
-# it is local
-if base=$(get_repo_base "$repo"); then
-       repo="$base"
-       if test -z "$depth"
-       then
-               local=yes
-       fi
-elif test -f "$repo"
-then
-       case "$repo" in /*) ;; *) repo="$PWD/$repo" ;; esac
-fi
-
-# Decide the directory name of the new repository
-if test -n "$2"
-then
-       dir="$2"
-       test $# = 2 || die "excess parameter to git-clone"
-else
-       # Derive one from the repository name
-       # Try using "humanish" part of source repo if user didn't specify one
-       if test -f "$repo"
-       then
-               # Cloning from a bundle
-               dir=$(echo "$repo" | sed -e 's|/*\.bundle$||' -e 's|.*/||g')
-       else
-               dir=$(echo "$repo" |
-                       sed -e 's|/$||' -e 's|:*/*\.git$||' -e 's|.*[/:]||g')
-       fi
-fi
-
-[ -e "$dir" ] && die "destination directory '$dir' already exists."
-[ yes = "$bare" ] && unset GIT_WORK_TREE
-[ -n "$GIT_WORK_TREE" ] && [ -e "$GIT_WORK_TREE" ] &&
-die "working tree '$GIT_WORK_TREE' already exists."
-D=
-W=
-cleanup() {
-       test -z "$D" && rm -rf "$dir"
-       test -z "$W" && test -n "$GIT_WORK_TREE" && rm -rf "$GIT_WORK_TREE"
-       cd ..
-       test -n "$D" && rm -rf "$D"
-       test -n "$W" && rm -rf "$W"
-       exit $err
-}
-trap 'err=$?; cleanup' 0
-mkdir -p "$dir" && D=$(cd "$dir" && pwd) || usage
-test -n "$GIT_WORK_TREE" && mkdir -p "$GIT_WORK_TREE" &&
-W=$(cd "$GIT_WORK_TREE" && pwd) && GIT_WORK_TREE="$W" && export GIT_WORK_TREE
-if test yes = "$bare" || test -n "$GIT_WORK_TREE"; then
-       GIT_DIR="$D"
-else
-       GIT_DIR="$D/.git"
-fi &&
-export GIT_DIR &&
-GIT_CONFIG="$GIT_DIR/config" git-init $quiet ${template+"$template"} || usage
-
-if test -n "$bare"
-then
-       GIT_CONFIG="$GIT_DIR/config" git config core.bare true
-fi
-
-if test -n "$reference"
-then
-       ref_git=
-       if test -d "$reference"
-       then
-               if test -d "$reference/.git/objects"
-               then
-                       ref_git="$reference/.git"
-               elif test -d "$reference/objects"
-               then
-                       ref_git="$reference"
-               fi
-       fi
-       if test -n "$ref_git"
-       then
-               ref_git=$(cd "$ref_git" && pwd)
-               echo "$ref_git/objects" >"$GIT_DIR/objects/info/alternates"
-               (
-                       GIT_DIR="$ref_git" git for-each-ref \
-                               --format='%(objectname) %(*objectname)'
-               ) |
-               while read a b
-               do
-                       test -z "$a" ||
-                       git update-ref "refs/reference-tmp/$a" "$a"
-                       test -z "$b" ||
-                       git update-ref "refs/reference-tmp/$b" "$b"
-               done
-       else
-               die "reference repository '$reference' is not a local directory."
-       fi
-fi
-
-rm -f "$GIT_DIR/CLONE_HEAD"
-
-# We do local magic only when the user tells us to.
-case "$local" in
-yes)
-       ( cd "$repo/objects" ) ||
-               die "cannot chdir to local '$repo/objects'."
-
-       if test "$local_shared" = yes
-       then
-               mkdir -p "$GIT_DIR/objects/info"
-               echo "$repo/objects" >>"$GIT_DIR/objects/info/alternates"
-       else
-               cpio_quiet_flag=""
-               cpio --help 2>&1 | grep -- --quiet >/dev/null && \
-                       cpio_quiet_flag=--quiet
-               l= &&
-               if test "$use_local_hardlink" = yes
-               then
-                       # See if we can hardlink and drop "l" if not.
-                       sample_file=$(cd "$repo" && \
-                                     find objects -type f -print | sed -e 1q)
-                       # objects directory should not be empty because
-                       # we are cloning!
-                       test -f "$repo/$sample_file" ||
-                               die "fatal: cannot clone empty repository"
-                       if ln "$repo/$sample_file" "$GIT_DIR/objects/sample" 2>/dev/null
-                       then
-                               rm -f "$GIT_DIR/objects/sample"
-                               l=l
-                       elif test -n "$local_explicitly_asked_for"
-                       then
-                               echo >&2 "Warning: -l asked but cannot hardlink to $repo"
-                       fi
-               fi &&
-               cd "$repo" &&
-               # Create dirs using umask and permissions and destination
-               find objects -type d -print | (cd "$GIT_DIR" && xargs mkdir -p) &&
-               # Copy existing 0444 permissions on content
-               find objects ! -type d -print | cpio $cpio_quiet_flag -pumd$l "$GIT_DIR/" || \
-                       exit 1
-       fi
-       git-ls-remote "$repo" >"$GIT_DIR/CLONE_HEAD" || exit 1
-       ;;
-*)
-       case "$repo" in
-       rsync://*)
-               case "$depth" in
-               "") ;;
-               *) die "shallow over rsync not supported" ;;
-               esac
-               rsync $quiet -av --ignore-existing  \
-                       --exclude info "$repo/objects/" "$GIT_DIR/objects/" ||
-               exit
-               # Look at objects/info/alternates for rsync -- http will
-               # support it natively and git native ones will do it on the
-               # remote end.  Not having that file is not a crime.
-               rsync -q "$repo/objects/info/alternates" \
-                       "$GIT_DIR/TMP_ALT" 2>/dev/null ||
-                       rm -f "$GIT_DIR/TMP_ALT"
-               if test -f "$GIT_DIR/TMP_ALT"
-               then
-                   ( cd "$D" &&
-                     . git-parse-remote &&
-                     resolve_alternates "$repo" <"$GIT_DIR/TMP_ALT" ) |
-                   while read alt
-                   do
-                       case "$alt" in 'bad alternate: '*) die "$alt";; esac
-                       case "$quiet" in
-                       '')     echo >&2 "Getting alternate: $alt" ;;
-                       esac
-                       rsync $quiet -av --ignore-existing  \
-                           --exclude info "$alt" "$GIT_DIR/objects" || exit
-                   done
-                   rm -f "$GIT_DIR/TMP_ALT"
-               fi
-               git-ls-remote "$repo" >"$GIT_DIR/CLONE_HEAD" || exit 1
-               ;;
-       https://*|http://*|ftp://*)
-               case "$depth" in
-               "") ;;
-               *) die "shallow over http or ftp not supported" ;;
-               esac
-               if test -z "@@NO_CURL@@"
-               then
-                       clone_dumb_http "$repo" "$D"
-               else
-                       die "http transport not supported, rebuild Git with curl support"
-               fi
-               ;;
-       *)
-               if [ -f "$repo" ] ; then
-                       git bundle unbundle "$repo" > "$GIT_DIR/CLONE_HEAD" ||
-                       die "unbundle from '$repo' failed."
-               else
-                       case "$upload_pack" in
-                       '') git-fetch-pack --all -k $quiet $depth $no_progress "$repo";;
-                       *) git-fetch-pack --all -k \
-                               $quiet "$upload_pack" $depth $no_progress "$repo" ;;
-                       esac >"$GIT_DIR/CLONE_HEAD" ||
-                       die "fetch-pack from '$repo' failed."
-               fi
-               ;;
-       esac
-       ;;
-esac
-test -d "$GIT_DIR/refs/reference-tmp" && rm -fr "$GIT_DIR/refs/reference-tmp"
-
-if test -f "$GIT_DIR/CLONE_HEAD"
-then
-       # Read git-fetch-pack -k output and store the remote branches.
-       if [ -n "$use_separate_remote" ]
-       then
-               branch_top="remotes/$origin"
-       else
-               branch_top="heads"
-       fi
-       tag_top="tags"
-       while read sha1 name
-       do
-               case "$name" in
-               *'^{}')
-                       continue ;;
-               HEAD)
-                       destname="REMOTE_HEAD" ;;
-               refs/heads/*)
-                       destname="refs/$branch_top/${name#refs/heads/}" ;;
-               refs/tags/*)
-                       destname="refs/$tag_top/${name#refs/tags/}" ;;
-               *)
-                       continue ;;
-               esac
-               git update-ref -m "clone: from $repo" "$destname" "$sha1" ""
-       done < "$GIT_DIR/CLONE_HEAD"
-fi
-
-if test -n "$W"; then
-       cd "$W" || exit
-else
-       cd "$D" || exit
-fi
-
-if test -z "$bare"
-then
-       # a non-bare repository is always in separate-remote layout
-       remote_top="refs/remotes/$origin"
-       head_sha1=
-       test ! -r "$GIT_DIR/REMOTE_HEAD" || head_sha1=$(cat "$GIT_DIR/REMOTE_HEAD")
-       case "$head_sha1" in
-       'ref: refs/'*)
-               # Uh-oh, the remote told us (http transport done against
-               # new style repository with a symref HEAD).
-               # Ideally we should skip the guesswork but for now
-               # opt for minimum change.
-               head_sha1=$(expr "z$head_sha1" : 'zref: refs/heads/\(.*\)')
-               head_sha1=$(cat "$GIT_DIR/$remote_top/$head_sha1")
-               ;;
-       esac
-
-       # The name under $remote_top the remote HEAD seems to point at.
-       head_points_at=$(
-               (
-                       test -f "$GIT_DIR/$remote_top/master" && echo "master"
-                       cd "$GIT_DIR/$remote_top" &&
-                       find . -type f -print | sed -e 's/^\.\///'
-               ) | (
-               done=f
-               while read name
-               do
-                       test t = $done && continue
-                       branch_tip=$(cat "$GIT_DIR/$remote_top/$name")
-                       if test "$head_sha1" = "$branch_tip"
-                       then
-                               echo "$name"
-                               done=t
-                       fi
-               done
-               )
-       )
-
-       # Upstream URL
-       git config remote."$origin".url "$repo" &&
-
-       # Set up the mappings to track the remote branches.
-       git config remote."$origin".fetch \
-               "+refs/heads/*:$remote_top/*" '^$' &&
-
-       # Write out remote.$origin config, and update our "$head_points_at".
-       case "$head_points_at" in
-       ?*)
-               # Local default branch
-               git symbolic-ref HEAD "refs/heads/$head_points_at" &&
-
-               # Tracking branch for the primary branch at the remote.
-               git update-ref HEAD "$head_sha1" &&
-
-               rm -f "refs/remotes/$origin/HEAD"
-               git symbolic-ref "refs/remotes/$origin/HEAD" \
-                       "refs/remotes/$origin/$head_points_at" &&
-
-               git config branch."$head_points_at".remote "$origin" &&
-               git config branch."$head_points_at".merge "refs/heads/$head_points_at"
-               ;;
-       '')
-               if test -z "$head_sha1"
-               then
-                       # Source had nonexistent ref in HEAD
-                       echo >&2 "Warning: Remote HEAD refers to nonexistent ref, unable to checkout."
-                       no_checkout=t
-               else
-                       # Source had detached HEAD pointing nowhere
-                       git update-ref --no-deref HEAD "$head_sha1" &&
-                       rm -f "refs/remotes/$origin/HEAD"
-               fi
-               ;;
-       esac
-
-       case "$no_checkout" in
-       '')
-               test "z$quiet" = z && test "z$no_progress" = z && v=-v || v=
-               git read-tree -m -u $v HEAD HEAD
-       esac
-fi
-rm -f "$GIT_DIR/CLONE_HEAD" "$GIT_DIR/REMOTE_HEAD"
-
-trap - 0
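
Once the objects and refs have arrived, what is left in the removed script is configuration bookkeeping. Condensed to its essentials, and assuming the defaults it falls back to (remote named "origin", remote HEAD pointing at "master", with $repo and $head_sha1 already computed), that amounts to:

# Sketch of the separate-remote setup done after the refs are in place.
git config remote.origin.url "$repo" &&
git config remote.origin.fetch '+refs/heads/*:refs/remotes/origin/*' '^$' &&
git symbolic-ref HEAD refs/heads/master &&
git update-ref HEAD "$head_sha1" &&
git symbolic-ref refs/remotes/origin/HEAD refs/remotes/origin/master &&
git config branch.master.remote origin &&
git config branch.master.merge refs/heads/master &&
git read-tree -m -u HEAD HEAD

The final read-tree is the checkout step the script skips under --no-checkout or --bare.
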
diff --git a/contrib/examples/git-commit.sh b/contrib/examples/git-commit.sh
deleted file mode 100755 (executable)
index 86c9cfa..0000000
+++ /dev/null
@@ -1,639 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Linus Torvalds
-# Copyright (c) 2006 Junio C Hamano
-
-USAGE='[-a | --interactive] [-s] [-v] [--no-verify] [-m <message> | -F <logfile> | (-C|-c) <commit> | --amend] [-u] [-e] [--author <author>] [--template <file>] [[-i | -o] <path>...]'
-SUBDIRECTORY_OK=Yes
-OPTIONS_SPEC=
-. git-sh-setup
-require_work_tree
-
-git rev-parse --verify HEAD >/dev/null 2>&1 || initial_commit=t
-
-case "$0" in
-*status)
-       status_only=t
-       ;;
-*commit)
-       status_only=
-       ;;
-esac
-
-refuse_partial () {
-       echo >&2 "$1"
-       echo >&2 "You might have meant to say 'git commit -i paths...', perhaps?"
-       exit 1
-}
-
-TMP_INDEX=
-THIS_INDEX="${GIT_INDEX_FILE:-$GIT_DIR/index}"
-NEXT_INDEX="$GIT_DIR/next-index$$"
-rm -f "$NEXT_INDEX"
-save_index () {
-       cp -p "$THIS_INDEX" "$NEXT_INDEX"
-}
-
-run_status () {
-       # If TMP_INDEX is defined, that means we are doing
-       # "--only" partial commit, and that index file is used
-       # to build the tree for the commit.  Otherwise, if
-       # NEXT_INDEX exists, that is the index file used to
-       # make the commit.  Otherwise we are using as-is commit
-       # so the regular index file is what we use to compare.
-       if test '' != "$TMP_INDEX"
-       then
-               GIT_INDEX_FILE="$TMP_INDEX"
-               export GIT_INDEX_FILE
-       elif test -f "$NEXT_INDEX"
-       then
-               GIT_INDEX_FILE="$NEXT_INDEX"
-               export GIT_INDEX_FILE
-       fi
-
-       if test "$status_only" = "t" || test "$use_status_color" = "t"; then
-               color=
-       else
-               color=--nocolor
-       fi
-       git runstatus ${color} \
-               ${verbose:+--verbose} \
-               ${amend:+--amend} \
-               ${untracked_files:+--untracked}
-}
-
-trap '
-       test -z "$TMP_INDEX" || {
-               test -f "$TMP_INDEX" && rm -f "$TMP_INDEX"
-       }
-       rm -f "$NEXT_INDEX"
-' 0
-
-################################################################
-# Command line argument parsing and sanity checking
-
-all=
-also=
-allow_empty=f
-interactive=
-only=
-logfile=
-use_commit=
-amend=
-edit_flag=
-no_edit=
-log_given=
-log_message=
-verify=t
-quiet=
-verbose=
-signoff=
-force_author=
-only_include_assumed=
-untracked_files=
-templatefile="$(git config commit.template)"
-while test $# != 0
-do
-       case "$1" in
-       -F|--F|-f|--f|--fi|--fil|--file)
-               case "$#" in 1) usage ;; esac
-               shift
-               no_edit=t
-               log_given=t$log_given
-               logfile="$1"
-               ;;
-       -F*|-f*)
-               no_edit=t
-               log_given=t$log_given
-               logfile="${1#-[Ff]}"
-               ;;
-       --F=*|--f=*|--fi=*|--fil=*|--file=*)
-               no_edit=t
-               log_given=t$log_given
-               logfile="${1#*=}"
-               ;;
-       -a|--a|--al|--all)
-               all=t
-               ;;
-       --allo|--allow|--allow-|--allow-e|--allow-em|--allow-emp|\
-       --allow-empt|--allow-empty)
-               allow_empty=t
-               ;;
-       --au=*|--aut=*|--auth=*|--autho=*|--author=*)
-               force_author="${1#*=}"
-               ;;
-       --au|--aut|--auth|--autho|--author)
-               case "$#" in 1) usage ;; esac
-               shift
-               force_author="$1"
-               ;;
-       -e|--e|--ed|--edi|--edit)
-               edit_flag=t
-               ;;
-       -i|--i|--in|--inc|--incl|--inclu|--includ|--include)
-               also=t
-               ;;
-       --int|--inte|--inter|--intera|--interac|--interact|--interacti|\
-       --interactiv|--interactive)
-               interactive=t
-               ;;
-       -o|--o|--on|--onl|--only)
-               only=t
-               ;;
-       -m|--m|--me|--mes|--mess|--messa|--messag|--message)
-               case "$#" in 1) usage ;; esac
-               shift
-               log_given=m$log_given
-               log_message="${log_message:+${log_message}
-
-}$1"
-               no_edit=t
-               ;;
-       -m*)
-               log_given=m$log_given
-               log_message="${log_message:+${log_message}
-
-}${1#-m}"
-               no_edit=t
-               ;;
-       --m=*|--me=*|--mes=*|--mess=*|--messa=*|--messag=*|--message=*)
-               log_given=m$log_given
-               log_message="${log_message:+${log_message}
-
-}${1#*=}"
-               no_edit=t
-               ;;
-       -n|--n|--no|--no-|--no-v|--no-ve|--no-ver|--no-veri|--no-verif|\
-       --no-verify)
-               verify=
-               ;;
-       --a|--am|--ame|--amen|--amend)
-               amend=t
-               use_commit=HEAD
-               ;;
-       -c)
-               case "$#" in 1) usage ;; esac
-               shift
-               log_given=t$log_given
-               use_commit="$1"
-               no_edit=
-               ;;
-       --ree=*|--reed=*|--reedi=*|--reedit=*|--reedit-=*|--reedit-m=*|\
-       --reedit-me=*|--reedit-mes=*|--reedit-mess=*|--reedit-messa=*|\
-       --reedit-messag=*|--reedit-message=*)
-               log_given=t$log_given
-               use_commit="${1#*=}"
-               no_edit=
-               ;;
-       --ree|--reed|--reedi|--reedit|--reedit-|--reedit-m|--reedit-me|\
-       --reedit-mes|--reedit-mess|--reedit-messa|--reedit-messag|\
-       --reedit-message)
-               case "$#" in 1) usage ;; esac
-               shift
-               log_given=t$log_given
-               use_commit="$1"
-               no_edit=
-               ;;
-       -C)
-               case "$#" in 1) usage ;; esac
-               shift
-               log_given=t$log_given
-               use_commit="$1"
-               no_edit=t
-               ;;
-       --reu=*|--reus=*|--reuse=*|--reuse-=*|--reuse-m=*|--reuse-me=*|\
-       --reuse-mes=*|--reuse-mess=*|--reuse-messa=*|--reuse-messag=*|\
-       --reuse-message=*)
-               log_given=t$log_given
-               use_commit="${1#*=}"
-               no_edit=t
-               ;;
-       --reu|--reus|--reuse|--reuse-|--reuse-m|--reuse-me|--reuse-mes|\
-       --reuse-mess|--reuse-messa|--reuse-messag|--reuse-message)
-               case "$#" in 1) usage ;; esac
-               shift
-               log_given=t$log_given
-               use_commit="$1"
-               no_edit=t
-               ;;
-       -s|--s|--si|--sig|--sign|--signo|--signof|--signoff)
-               signoff=t
-               ;;
-       -t|--t|--te|--tem|--temp|--templ|--templa|--templat|--template)
-               case "$#" in 1) usage ;; esac
-               shift
-               templatefile="$1"
-               no_edit=
-               ;;
-       -q|--q|--qu|--qui|--quie|--quiet)
-               quiet=t
-               ;;
-       -v|--v|--ve|--ver|--verb|--verbo|--verbos|--verbose)
-               verbose=t
-               ;;
-       -u|--u|--un|--unt|--untr|--untra|--untrac|--untrack|--untracke|\
-       --untracked|--untracked-|--untracked-f|--untracked-fi|--untracked-fil|\
-       --untracked-file|--untracked-files)
-               untracked_files=t
-               ;;
-       --)
-               shift
-               break
-               ;;
-       -*)
-               usage
-               ;;
-       *)
-               break
-               ;;
-       esac
-       shift
-done
-case "$edit_flag" in t) no_edit= ;; esac
-
-################################################################
-# Sanity check options
-
-case "$amend,$initial_commit" in
-t,t)
-       die "You do not have anything to amend." ;;
-t,)
-       if [ -f "$GIT_DIR/MERGE_HEAD" ]; then
-               die "You are in the middle of a merge -- cannot amend."
-       fi ;;
-esac
-
-case "$log_given" in
-tt*)
-       die "Only one of -c/-C/-F can be used." ;;
-*tm*|*mt*)
-       die "Option -m cannot be combined with -c/-C/-F." ;;
-esac
-
-case "$#,$also,$only,$amend" in
-*,t,t,*)
-       die "Only one of --include/--only can be used." ;;
-0,t,,* | 0,,t,)
-       die "No paths with --include/--only does not make sense." ;;
-0,,t,t)
-       only_include_assumed="# Clever... amending the last one with dirty index." ;;
-0,,,*)
-       ;;
-*,,,*)
-       only_include_assumed="# Explicit paths specified without -i or -o; assuming --only paths..."
-       also=
-       ;;
-esac
-unset only
-case "$all,$interactive,$also,$#" in
-*t,*t,*)
-       die "Cannot use -a, --interactive or -i at the same time." ;;
-t,,,[1-9]*)
-       die "Paths with -a does not make sense." ;;
-,t,,[1-9]*)
-       die "Paths with --interactive does not make sense." ;;
-,,t,0)
-       die "No paths with -i does not make sense." ;;
-esac
-
-if test ! -z "$templatefile" && test -z "$log_given"
-then
-       if test ! -f "$templatefile"
-       then
-               die "Commit template file does not exist."
-       fi
-fi
-
-################################################################
-# Prepare index to have a tree to be committed
-
-case "$all,$also" in
-t,)
-       if test ! -f "$THIS_INDEX"
-       then
-               die 'nothing to commit (use "git add file1 file2" to include for commit)'
-       fi
-       save_index &&
-       (
-               cd_to_toplevel &&
-               GIT_INDEX_FILE="$NEXT_INDEX" &&
-               export GIT_INDEX_FILE &&
-               git diff-files --name-only -z |
-               git update-index --remove -z --stdin
-       ) || exit
-       ;;
-,t)
-       save_index &&
-       git ls-files --error-unmatch -- "$@" >/dev/null || exit
-
-       git diff-files --name-only -z -- "$@"  |
-       (
-               cd_to_toplevel &&
-               GIT_INDEX_FILE="$NEXT_INDEX" &&
-               export GIT_INDEX_FILE &&
-               git update-index --remove -z --stdin
-       ) || exit
-       ;;
-,)
-       if test "$interactive" = t; then
-               git add --interactive || exit
-       fi
-       case "$#" in
-       0)
-               ;; # commit as-is
-       *)
-               if test -f "$GIT_DIR/MERGE_HEAD"
-               then
-                       refuse_partial "Cannot do a partial commit during a merge."
-               fi
-
-               TMP_INDEX="$GIT_DIR/tmp-index$$"
-               W=
-               test -z "$initial_commit" && W=--with-tree=HEAD
-               commit_only=$(git ls-files --error-unmatch $W -- "$@") || exit
-
-               # Build a temporary index and update the real index
-               # the same way.
-               if test -z "$initial_commit"
-               then
-                       GIT_INDEX_FILE="$THIS_INDEX" \
-                       git read-tree --index-output="$TMP_INDEX" -i -m HEAD
-               else
-                       rm -f "$TMP_INDEX"
-               fi || exit
-
-               printf '%s\n' "$commit_only" |
-               GIT_INDEX_FILE="$TMP_INDEX" \
-               git update-index --add --remove --stdin &&
-
-               save_index &&
-               printf '%s\n' "$commit_only" |
-               (
-                       GIT_INDEX_FILE="$NEXT_INDEX"
-                       export GIT_INDEX_FILE
-                       git update-index --add --remove --stdin
-               ) || exit
-               ;;
-       esac
-       ;;
-esac
-
-################################################################
-# If we do as-is commit, the index file will be THIS_INDEX,
-# otherwise NEXT_INDEX after we make this commit.  We leave
-# the index as is if we abort.
-
-if test -f "$NEXT_INDEX"
-then
-       USE_INDEX="$NEXT_INDEX"
-else
-       USE_INDEX="$THIS_INDEX"
-fi
-
-case "$status_only" in
-t)
-       # This will silently fail in a read-only repository, which is
-       # what we want.
-       GIT_INDEX_FILE="$USE_INDEX" git update-index -q --unmerged --refresh
-       run_status
-       exit $?
-       ;;
-'')
-       GIT_INDEX_FILE="$USE_INDEX" git update-index -q --refresh || exit
-       ;;
-esac
-
-################################################################
-# Grab commit message, write out tree and make commit.
-
-if test t = "$verify" && test -x "$GIT_DIR"/hooks/pre-commit
-then
-    GIT_INDEX_FILE="${TMP_INDEX:-${USE_INDEX}}" "$GIT_DIR"/hooks/pre-commit \
-    || exit
-fi
-
-if test "$log_message" != ''
-then
-       printf '%s\n' "$log_message"
-elif test "$logfile" != ""
-then
-       if test "$logfile" = -
-       then
-               test -t 0 &&
-               echo >&2 "(reading log message from standard input)"
-               cat
-       else
-               cat <"$logfile"
-       fi
-elif test "$use_commit" != ""
-then
-       encoding=$(git config i18n.commitencoding || echo UTF-8)
-       git show -s --pretty=raw --encoding="$encoding" "$use_commit" |
-       sed -e '1,/^$/d' -e 's/^    //'
-elif test -f "$GIT_DIR/MERGE_MSG"
-then
-       cat "$GIT_DIR/MERGE_MSG"
-elif test -f "$GIT_DIR/SQUASH_MSG"
-then
-       cat "$GIT_DIR/SQUASH_MSG"
-elif test "$templatefile" != ""
-then
-       cat "$templatefile"
-fi | git stripspace >"$GIT_DIR"/COMMIT_EDITMSG
-
-case "$signoff" in
-t)
-       sign=$(git var GIT_COMMITTER_IDENT | sed -e '
-               s/>.*/>/
-               s/^/Signed-off-by: /
-               ')
-       blank_before_signoff=
-       tail -n 1 "$GIT_DIR"/COMMIT_EDITMSG |
-       grep 'Signed-off-by:' >/dev/null || blank_before_signoff='
-'
-       tail -n 1 "$GIT_DIR"/COMMIT_EDITMSG |
-       grep "$sign"$ >/dev/null ||
-       printf '%s%s\n' "$blank_before_signoff" "$sign" \
-               >>"$GIT_DIR"/COMMIT_EDITMSG
-       ;;
-esac
-
-if test -f "$GIT_DIR/MERGE_HEAD" && test -z "$no_edit"; then
-       echo "#"
-       echo "# It looks like you may be committing a MERGE."
-       echo "# If this is not correct, please remove the file"
-       printf '%s\n' "#        $GIT_DIR/MERGE_HEAD"
-       echo "# and try again"
-       echo "#"
-fi >>"$GIT_DIR"/COMMIT_EDITMSG
-
-# Author
-if test '' != "$use_commit"
-then
-       eval "$(get_author_ident_from_commit "$use_commit")"
-       export GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL GIT_AUTHOR_DATE
-fi
-if test '' != "$force_author"
-then
-       GIT_AUTHOR_NAME=$(expr "z$force_author" : 'z\(.*[^ ]\) *<.*') &&
-       GIT_AUTHOR_EMAIL=$(expr "z$force_author" : '.*\(<.*\)') &&
-       test '' != "$GIT_AUTHOR_NAME" &&
-       test '' != "$GIT_AUTHOR_EMAIL" ||
-       die "malformed --author parameter"
-       export GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL
-fi
-
-PARENTS="-p HEAD"
-if test -z "$initial_commit"
-then
-       rloga='commit'
-       if [ -f "$GIT_DIR/MERGE_HEAD" ]; then
-               rloga='commit (merge)'
-               PARENTS="-p HEAD "$(sed -e 's/^/-p /' "$GIT_DIR/MERGE_HEAD")
-       elif test -n "$amend"; then
-               rloga='commit (amend)'
-               PARENTS=$(git cat-file commit HEAD |
-                       sed -n -e '/^$/q' -e 's/^parent /-p /p')
-       fi
-       current="$(git rev-parse --verify HEAD)"
-else
-       if [ -z "$(git ls-files)" ]; then
-               echo >&2 'nothing to commit (use "git add file1 file2" to include for commit)'
-               exit 1
-       fi
-       PARENTS=""
-       rloga='commit (initial)'
-       current=''
-fi
-set_reflog_action "$rloga"
-
-if test -z "$no_edit"
-then
-       {
-               echo ""
-               echo "# Please enter the commit message for your changes."
-               echo "# (Comment lines starting with '#' will not be included)"
-               test -z "$only_include_assumed" || echo "$only_include_assumed"
-               run_status
-       } >>"$GIT_DIR"/COMMIT_EDITMSG
-else
-       # we need to check if there is anything to commit
-       run_status >/dev/null
-fi
-case "$allow_empty,$?,$PARENTS" in
-t,* | ?,0,* | ?,*,-p' '?*-p' '?*)
-       # an explicit --allow-empty, or a merge commit can record the
-       # same tree as its parent.  Otherwise having commitable paths
-       # is required.
-       ;;
-*)
-       rm -f "$GIT_DIR/COMMIT_EDITMSG" "$GIT_DIR/SQUASH_MSG"
-       use_status_color=t
-       run_status
-       exit 1
-esac
-
-case "$no_edit" in
-'')
-       git var GIT_AUTHOR_IDENT > /dev/null  || die
-       git var GIT_COMMITTER_IDENT > /dev/null  || die
-       git_editor "$GIT_DIR/COMMIT_EDITMSG"
-       ;;
-esac
-
-case "$verify" in
-t)
-       if test -x "$GIT_DIR"/hooks/commit-msg
-       then
-               "$GIT_DIR"/hooks/commit-msg "$GIT_DIR"/COMMIT_EDITMSG || exit
-       fi
-esac
-
-if test -z "$no_edit"
-then
-    sed -e '
-        /^diff --git a\/.*/{
-           s///
-           q
-       }
-       /^#/d
-    ' "$GIT_DIR"/COMMIT_EDITMSG
-else
-    cat "$GIT_DIR"/COMMIT_EDITMSG
-fi |
-git stripspace >"$GIT_DIR"/COMMIT_MSG
-
-# Test whether the commit message has any content we didn't supply.
-have_commitmsg=
-grep -v -i '^Signed-off-by' "$GIT_DIR"/COMMIT_MSG |
-       git stripspace > "$GIT_DIR"/COMMIT_BAREMSG
-
-# Is the commit message totally empty?
-if test -s "$GIT_DIR"/COMMIT_BAREMSG
-then
-       if test "$templatefile" != ""
-       then
-               # Test whether this is just the unaltered template.
-               if cnt=$(sed -e '/^#/d' < "$templatefile" |
-                       git stripspace |
-                       diff "$GIT_DIR"/COMMIT_BAREMSG - |
-                       wc -l) &&
-                  test 0 -lt $cnt
-               then
-                       have_commitmsg=t
-               fi
-       else
-               # No template, so the content in the commit message must
-               # have come from the user.
-               have_commitmsg=t
-       fi
-fi
-
-rm -f "$GIT_DIR"/COMMIT_BAREMSG
-
-if test "$have_commitmsg" = "t"
-then
-       if test -z "$TMP_INDEX"
-       then
-               tree=$(GIT_INDEX_FILE="$USE_INDEX" git write-tree)
-       else
-               tree=$(GIT_INDEX_FILE="$TMP_INDEX" git write-tree) &&
-               rm -f "$TMP_INDEX"
-       fi &&
-       commit=$(git commit-tree $tree $PARENTS <"$GIT_DIR/COMMIT_MSG") &&
-       rlogm=$(sed -e 1q "$GIT_DIR"/COMMIT_MSG) &&
-       git update-ref -m "$GIT_REFLOG_ACTION: $rlogm" HEAD $commit "$current" &&
-       rm -f -- "$GIT_DIR/MERGE_HEAD" "$GIT_DIR/MERGE_MSG" &&
-       if test -f "$NEXT_INDEX"
-       then
-               mv "$NEXT_INDEX" "$THIS_INDEX"
-       else
-               : ;# happy
-       fi
-else
-       echo >&2 "* no commit message?  aborting commit."
-       false
-fi
-ret="$?"
-rm -f "$GIT_DIR/COMMIT_MSG" "$GIT_DIR/COMMIT_EDITMSG" "$GIT_DIR/SQUASH_MSG"
-
-cd_to_toplevel
-
-git rerere
-
-if test "$ret" = 0
-then
-       git gc --auto
-       if test -x "$GIT_DIR"/hooks/post-commit
-       then
-               "$GIT_DIR"/hooks/post-commit
-       fi
-       if test -z "$quiet"
-       then
-               commit=$(git diff-tree --always --shortstat --pretty="format:%h: %s"\
-                      --abbrev --summary --root HEAD --)
-               echo "Created${initial_commit:+ initial} commit $commit"
-       fi
-fi
-
-exit "$ret"
diff --git a/contrib/examples/git-difftool.perl b/contrib/examples/git-difftool.perl
deleted file mode 100755
index b2ea80f..0000000
--- a/contrib/examples/git-difftool.perl
+++ /dev/null
@@ -1,481 +0,0 @@
-#!/usr/bin/perl
-# Copyright (c) 2009, 2010 David Aguilar
-# Copyright (c) 2012 Tim Henigan
-#
-# This is a wrapper around the GIT_EXTERNAL_DIFF-compatible
-# git-difftool--helper script.
-#
-# This script exports GIT_EXTERNAL_DIFF and GIT_PAGER for use by git.
-# The GIT_DIFF* variables are exported for use by git-difftool--helper.
-#
-# Any arguments that are unknown to this script are forwarded to 'git diff'.
-
-use 5.008;
-use strict;
-use warnings;
-use Git::LoadCPAN::Error qw(:try);
-use File::Basename qw(dirname);
-use File::Copy;
-use File::Find;
-use File::stat;
-use File::Path qw(mkpath rmtree);
-use File::Temp qw(tempdir);
-use Getopt::Long qw(:config pass_through);
-use Git;
-use Git::I18N;
-
-sub usage
-{
-       my $exitcode = shift;
-       print << 'USAGE';
-usage: git difftool [-t|--tool=<tool>] [--tool-help]
-                    [-x|--extcmd=<cmd>]
-                    [-g|--gui] [--no-gui]
-                    [--prompt] [-y|--no-prompt]
-                    [-d|--dir-diff]
-                    ['git diff' options]
-USAGE
-       exit($exitcode);
-}
-
-sub print_tool_help
-{
-       # See the comment at the bottom of file_diff() for the reason behind
-       # using system() followed by exit() instead of exec().
-       my $rc = system(qw(git mergetool --tool-help=diff));
-       exit($rc | ($rc >> 8));
-}
-
-sub exit_cleanup
-{
-       my ($tmpdir, $status) = @_;
-       my $errno = $!;
-       rmtree($tmpdir);
-       if ($status and $errno) {
-               my ($package, $file, $line) = caller();
-               warn "$file line $line: $errno\n";
-       }
-       exit($status | ($status >> 8));
-}
-
-sub use_wt_file
-{
-       my ($file, $sha1) = @_;
-       my $null_sha1 = '0' x 40;
-
-       if (-l $file || ! -e _) {
-               return (0, $null_sha1);
-       }
-
-       my $wt_sha1 = Git::command_oneline('hash-object', $file);
-       my $use = ($sha1 eq $null_sha1) || ($sha1 eq $wt_sha1);
-       return ($use, $wt_sha1);
-}
-
-sub changed_files
-{
-       my ($repo_path, $index, $worktree) = @_;
-       $ENV{GIT_INDEX_FILE} = $index;
-
-       my @gitargs = ('--git-dir', $repo_path, '--work-tree', $worktree);
-       my @refreshargs = (
-               @gitargs, 'update-index',
-               '--really-refresh', '-q', '--unmerged');
-       try {
-               Git::command_oneline(@refreshargs);
-       } catch Git::Error::Command with {};
-
-       my @diffargs = (@gitargs, 'diff-files', '--name-only', '-z');
-       my $line = Git::command_oneline(@diffargs);
-       my @files;
-       if (defined $line) {
-               @files = split('\0', $line);
-       } else {
-               @files = ();
-       }
-
-       delete($ENV{GIT_INDEX_FILE});
-
-       return map { $_ => 1 } @files;
-}
-
-sub setup_dir_diff
-{
-       my ($worktree, $symlinks) = @_;
-       my @gitargs = ('diff', '--raw', '--no-abbrev', '-z', @ARGV);
-       my $diffrtn = Git::command_oneline(@gitargs);
-       exit(0) unless defined($diffrtn);
-
-       # Go to the root of the worktree now that we've captured the list of
-       # changed files.  The paths returned by diff --raw are relative to the
-       # top-level of the repository, but we defer changing directories so
-       # that @ARGV can perform pathspec limiting in the current directory.
-       chdir($worktree);
-
-       # Build index info for left and right sides of the diff
-       my $submodule_mode = '160000';
-       my $symlink_mode = '120000';
-       my $null_mode = '0' x 6;
-       my $null_sha1 = '0' x 40;
-       my $lindex = '';
-       my $rindex = '';
-       my $wtindex = '';
-       my %submodule;
-       my %symlink;
-       my @files = ();
-       my %working_tree_dups = ();
-       my @rawdiff = split('\0', $diffrtn);
-
-       my $i = 0;
-       while ($i < $#rawdiff) {
-               if ($rawdiff[$i] =~ /^::/) {
-                       warn __ <<'EOF';
-Combined diff formats ('-c' and '--cc') are not supported in
-directory diff mode ('-d' and '--dir-diff').
-EOF
-                       exit(1);
-               }
-
-               my ($lmode, $rmode, $lsha1, $rsha1, $status) =
-                       split(' ', substr($rawdiff[$i], 1));
-               my $src_path = $rawdiff[$i + 1];
-               my $dst_path;
-
-               if ($status =~ /^[CR]/) {
-                       $dst_path = $rawdiff[$i + 2];
-                       $i += 3;
-               } else {
-                       $dst_path = $src_path;
-                       $i += 2;
-               }
-
-               if ($lmode eq $submodule_mode or $rmode eq $submodule_mode) {
-                       $submodule{$src_path}{left} = $lsha1;
-                       if ($lsha1 ne $rsha1) {
-                               $submodule{$dst_path}{right} = $rsha1;
-                       } else {
-                               $submodule{$dst_path}{right} = "$rsha1-dirty";
-                       }
-                       next;
-               }
-
-               if ($lmode eq $symlink_mode) {
-                       $symlink{$src_path}{left} =
-                               Git::command_oneline('show', $lsha1);
-               }
-
-               if ($rmode eq $symlink_mode) {
-                       $symlink{$dst_path}{right} =
-                               Git::command_oneline('show', $rsha1);
-               }
-
-               if ($lmode ne $null_mode and $status !~ /^C/) {
-                       $lindex .= "$lmode $lsha1\t$src_path\0";
-               }
-
-               if ($rmode ne $null_mode) {
-                       # Avoid duplicate entries
-                       if ($working_tree_dups{$dst_path}++) {
-                               next;
-                       }
-                       my ($use, $wt_sha1) =
-                               use_wt_file($dst_path, $rsha1);
-                       if ($use) {
-                               push @files, $dst_path;
-                               $wtindex .= "$rmode $wt_sha1\t$dst_path\0";
-                       } else {
-                               $rindex .= "$rmode $rsha1\t$dst_path\0";
-                       }
-               }
-       }
-
-       # Go to the root of the worktree so that the left index files
-       # are properly setup -- the index is toplevel-relative.
-       chdir($worktree);
-
-       # Setup temp directories
-       my $tmpdir = tempdir('git-difftool.XXXXX', CLEANUP => 0, TMPDIR => 1);
-       my $ldir = "$tmpdir/left";
-       my $rdir = "$tmpdir/right";
-       mkpath($ldir) or exit_cleanup($tmpdir, 1);
-       mkpath($rdir) or exit_cleanup($tmpdir, 1);
-
-       # Populate the left and right directories based on each index file
-       my ($inpipe, $ctx);
-       $ENV{GIT_INDEX_FILE} = "$tmpdir/lindex";
-       ($inpipe, $ctx) =
-               Git::command_input_pipe('update-index', '-z', '--index-info');
-       print($inpipe $lindex);
-       Git::command_close_pipe($inpipe, $ctx);
-
-       my $rc = system('git', 'checkout-index', '--all', "--prefix=$ldir/");
-       exit_cleanup($tmpdir, $rc) if $rc != 0;
-
-       $ENV{GIT_INDEX_FILE} = "$tmpdir/rindex";
-       ($inpipe, $ctx) =
-               Git::command_input_pipe('update-index', '-z', '--index-info');
-       print($inpipe $rindex);
-       Git::command_close_pipe($inpipe, $ctx);
-
-       $rc = system('git', 'checkout-index', '--all', "--prefix=$rdir/");
-       exit_cleanup($tmpdir, $rc) if $rc != 0;
-
-       $ENV{GIT_INDEX_FILE} = "$tmpdir/wtindex";
-       ($inpipe, $ctx) =
-               Git::command_input_pipe('update-index', '--info-only', '-z', '--index-info');
-       print($inpipe $wtindex);
-       Git::command_close_pipe($inpipe, $ctx);
-
-       # If $GIT_DIR was explicitly set just for the update/checkout
-       # commands, then it should be unset before continuing.
-       delete($ENV{GIT_INDEX_FILE});
-
-       # Changes in the working tree need special treatment since they are
-       # not part of the index.
-       for my $file (@files) {
-               my $dir = dirname($file);
-               unless (-d "$rdir/$dir") {
-                       mkpath("$rdir/$dir") or
-                       exit_cleanup($tmpdir, 1);
-               }
-               if ($symlinks) {
-                       symlink("$worktree/$file", "$rdir/$file") or
-                       exit_cleanup($tmpdir, 1);
-               } else {
-                       copy($file, "$rdir/$file") or
-                       exit_cleanup($tmpdir, 1);
-
-                       my $mode = stat($file)->mode;
-                       chmod($mode, "$rdir/$file") or
-                       exit_cleanup($tmpdir, 1);
-               }
-       }
-
-       # Changes to submodules require special treatment. This loop writes a
-       # temporary file to both the left and right directories to show the
-       # change in the recorded SHA1 for the submodule.
-       for my $path (keys %submodule) {
-               my $ok = 0;
-               if (defined($submodule{$path}{left})) {
-                       $ok = write_to_file("$ldir/$path",
-                               "Subproject commit $submodule{$path}{left}");
-               }
-               if (defined($submodule{$path}{right})) {
-                       $ok = write_to_file("$rdir/$path",
-                               "Subproject commit $submodule{$path}{right}");
-               }
-               exit_cleanup($tmpdir, 1) if not $ok;
-       }
-
-       # Symbolic links require special treatment. The standard "git diff"
-       # shows only the link itself, not the contents of the link target.
-       # This loop replicates that behavior.
-       for my $path (keys %symlink) {
-               my $ok = 0;
-               if (defined($symlink{$path}{left})) {
-                       $ok = write_to_file("$ldir/$path",
-                                       $symlink{$path}{left});
-               }
-               if (defined($symlink{$path}{right})) {
-                       $ok = write_to_file("$rdir/$path",
-                                       $symlink{$path}{right});
-               }
-               exit_cleanup($tmpdir, 1) if not $ok;
-       }
-
-       return ($ldir, $rdir, $tmpdir, @files);
-}
-
-sub write_to_file
-{
-       my $path = shift;
-       my $value = shift;
-
-       # Make sure the path to the file exists
-       my $dir = dirname($path);
-       unless (-d "$dir") {
-               mkpath("$dir") or return 0;
-       }
-
-       # If the file already exists in that location, delete it.  This
-       # is required in the case of symbolic links.
-       unlink($path);
-
-       open(my $fh, '>', $path) or return 0;
-       print($fh $value);
-       close($fh);
-
-       return 1;
-}
-
-sub main
-{
-       # parse command-line options. all unrecognized options and arguments
-       # are passed through to the 'git diff' command.
-       my %opts = (
-               difftool_cmd => undef,
-               dirdiff => undef,
-               extcmd => undef,
-               gui => undef,
-               help => undef,
-               prompt => undef,
-               symlinks => $^O ne 'cygwin' &&
-                               $^O ne 'MSWin32' && $^O ne 'msys',
-               tool_help => undef,
-               trust_exit_code => undef,
-       );
-       GetOptions('g|gui!' => \$opts{gui},
-               'd|dir-diff' => \$opts{dirdiff},
-               'h' => \$opts{help},
-               'prompt!' => \$opts{prompt},
-               'y' => sub { $opts{prompt} = 0; },
-               'symlinks' => \$opts{symlinks},
-               'no-symlinks' => sub { $opts{symlinks} = 0; },
-               't|tool:s' => \$opts{difftool_cmd},
-               'tool-help' => \$opts{tool_help},
-               'trust-exit-code' => \$opts{trust_exit_code},
-               'no-trust-exit-code' => sub { $opts{trust_exit_code} = 0; },
-               'x|extcmd:s' => \$opts{extcmd});
-
-       if (defined($opts{help})) {
-               usage(0);
-       }
-       if (defined($opts{tool_help})) {
-               print_tool_help();
-       }
-       if (defined($opts{difftool_cmd})) {
-               if (length($opts{difftool_cmd}) > 0) {
-                       $ENV{GIT_DIFF_TOOL} = $opts{difftool_cmd};
-               } else {
-                       print __("No <tool> given for --tool=<tool>\n");
-                       usage(1);
-               }
-       }
-       if (defined($opts{extcmd})) {
-               if (length($opts{extcmd}) > 0) {
-                       $ENV{GIT_DIFFTOOL_EXTCMD} = $opts{extcmd};
-               } else {
-                       print __("No <cmd> given for --extcmd=<cmd>\n");
-                       usage(1);
-               }
-       }
-       if ($opts{gui}) {
-               my $guitool = Git::config('diff.guitool');
-               if (defined($guitool) && length($guitool) > 0) {
-                       $ENV{GIT_DIFF_TOOL} = $guitool;
-               }
-       }
-
-       if (!defined $opts{trust_exit_code}) {
-               $opts{trust_exit_code} = Git::config_bool('difftool.trustExitCode');
-       }
-       if ($opts{trust_exit_code}) {
-               $ENV{GIT_DIFFTOOL_TRUST_EXIT_CODE} = 'true';
-       } else {
-               $ENV{GIT_DIFFTOOL_TRUST_EXIT_CODE} = 'false';
-       }
-
-       # In directory diff mode, 'git-difftool--helper' is called once
-       # to compare the a/b directories.  In file diff mode, 'git diff'
-       # will invoke a separate instance of 'git-difftool--helper' for
-       # each file that changed.
-       if (defined($opts{dirdiff})) {
-               dir_diff($opts{extcmd}, $opts{symlinks});
-       } else {
-               file_diff($opts{prompt});
-       }
-}
-
-sub dir_diff
-{
-       my ($extcmd, $symlinks) = @_;
-       my $rc;
-       my $error = 0;
-       my $repo = Git->repository();
-       my $repo_path = $repo->repo_path();
-       my $worktree = $repo->wc_path();
-       $worktree =~ s|/$||; # Avoid double slashes in symlink targets
-       my ($a, $b, $tmpdir, @files) = setup_dir_diff($worktree, $symlinks);
-
-       if (defined($extcmd)) {
-               $rc = system($extcmd, $a, $b);
-       } else {
-               $ENV{GIT_DIFFTOOL_DIRDIFF} = 'true';
-               $rc = system('git', 'difftool--helper', $a, $b);
-       }
-       # If the diff including working copy files and those
-       # files were modified during the diff, then the changes
-       # should be copied back to the working tree.
-       # Do not copy back files when symlinks are used and the
-       # external tool did not replace the original link with a file.
-       #
-       # These hashes are loaded lazily since they aren't needed
-       # in the common case of --symlinks and the difftool updating
-       # files through the symlink.
-       my %wt_modified;
-       my %tmp_modified;
-       my $indices_loaded = 0;
-
-       for my $file (@files) {
-               next if $symlinks && -l "$b/$file";
-               next if ! -f "$b/$file";
-
-               if (!$indices_loaded) {
-                       %wt_modified = changed_files(
-                               $repo_path, "$tmpdir/wtindex", $worktree);
-                       %tmp_modified = changed_files(
-                               $repo_path, "$tmpdir/wtindex", $b);
-                       $indices_loaded = 1;
-               }
-
-               if (exists $wt_modified{$file} and exists $tmp_modified{$file}) {
-                       warn sprintf(__(
-                               "warning: Both files modified:\n" .
-                               "'%s/%s' and '%s/%s'.\n" .
-                               "warning: Working tree file has been left.\n" .
-                               "warning:\n"), $worktree, $file, $b, $file);
-                       $error = 1;
-               } elsif (exists $tmp_modified{$file}) {
-                       my $mode = stat("$b/$file")->mode;
-                       copy("$b/$file", $file) or
-                       exit_cleanup($tmpdir, 1);
-
-                       chmod($mode, $file) or
-                       exit_cleanup($tmpdir, 1);
-               }
-       }
-       if ($error) {
-               warn sprintf(__(
-                       "warning: Temporary files exist in '%s'.\n" .
-                       "warning: You may want to cleanup or recover these.\n"), $tmpdir);
-               exit(1);
-       } else {
-               exit_cleanup($tmpdir, $rc);
-       }
-}
-
-sub file_diff
-{
-       my ($prompt) = @_;
-
-       if (defined($prompt)) {
-               if ($prompt) {
-                       $ENV{GIT_DIFFTOOL_PROMPT} = 'true';
-               } else {
-                       $ENV{GIT_DIFFTOOL_NO_PROMPT} = 'true';
-               }
-       }
-
-       $ENV{GIT_PAGER} = '';
-       $ENV{GIT_EXTERNAL_DIFF} = 'git-difftool--helper';
-
-       # ActiveState Perl for Win32 does not implement POSIX semantics of
-       # exec* system call. It just spawns the given executable and finishes
-       # the starting program, exiting with code 0.
-       # system will at least catch the errors returned by git diff,
-       # allowing the caller of git difftool better handling of failures.
-       my $rc = system('git', 'diff', @ARGV);
-       exit($rc | ($rc >> 8));
-}
-
-main();
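
The directory-diff mode above works by writing "mode sha1 path" records for each side of the diff into throwaway index files and materializing them with checkout-index. A minimal shell sketch of that technique, with illustrative names (left.idx, /tmp/left/, and README.md assumed to exist at HEAD); the script additionally handles symlinks, submodules, and modified working-tree files.

    path=README.md                           # illustrative path
    sha1=$(git rev-parse "HEAD:$path")       # blob id for the "left" side
    mkdir -p /tmp/left
    printf '100644 %s\t%s\n' "$sha1" "$path" |
    GIT_INDEX_FILE=left.idx git update-index --index-info
    GIT_INDEX_FILE=left.idx git checkout-index --all --prefix=/tmp/left/
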
diff --git a/contrib/examples/git-fetch.sh b/contrib/examples/git-fetch.sh
deleted file mode 100755
index 57d2e56..0000000
--- a/contrib/examples/git-fetch.sh
+++ /dev/null
@@ -1,379 +0,0 @@
-#!/bin/sh
-#
-
-USAGE='<fetch-options> <repository> <refspec>...'
-SUBDIRECTORY_OK=Yes
-. git-sh-setup
-set_reflog_action "fetch $*"
-cd_to_toplevel ;# probably unnecessary...
-
-. git-parse-remote
-_x40='[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]'
-_x40="$_x40$_x40$_x40$_x40$_x40$_x40$_x40$_x40"
-
-LF='
-'
-IFS="$LF"
-
-no_tags=
-tags=
-append=
-force=
-verbose=
-update_head_ok=
-exec=
-keep=
-shallow_depth=
-no_progress=
-test -t 1 || no_progress=--no-progress
-quiet=
-while test $# != 0
-do
-       case "$1" in
-       -a|--a|--ap|--app|--appe|--appen|--append)
-               append=t
-               ;;
-       --upl|--uplo|--uploa|--upload|--upload-|--upload-p|\
-       --upload-pa|--upload-pac|--upload-pack)
-               shift
-               exec="--upload-pack=$1"
-               ;;
-       --upl=*|--uplo=*|--uploa=*|--upload=*|\
-       --upload-=*|--upload-p=*|--upload-pa=*|--upload-pac=*|--upload-pack=*)
-               exec=--upload-pack=$(expr "z$1" : 'z-[^=]*=\(.*\)')
-               shift
-               ;;
-       -f|--f|--fo|--for|--forc|--force)
-               force=t
-               ;;
-       -t|--t|--ta|--tag|--tags)
-               tags=t
-               ;;
-       -n|--n|--no|--no-|--no-t|--no-ta|--no-tag|--no-tags)
-               no_tags=t
-               ;;
-       -u|--u|--up|--upd|--upda|--updat|--update|--update-|--update-h|\
-       --update-he|--update-hea|--update-head|--update-head-|\
-       --update-head-o|--update-head-ok)
-               update_head_ok=t
-               ;;
-       -q|--q|--qu|--qui|--quie|--quiet)
-               quiet=--quiet
-               ;;
-       -v|--verbose)
-               verbose="$verbose"Yes
-               ;;
-       -k|--k|--ke|--kee|--keep)
-               keep='-k -k'
-               ;;
-       --depth=*)
-               shallow_depth="--depth=$(expr "z$1" : 'z-[^=]*=\(.*\)')"
-               ;;
-       --depth)
-               shift
-               shallow_depth="--depth=$1"
-               ;;
-       -*)
-               usage
-               ;;
-       *)
-               break
-               ;;
-       esac
-       shift
-done
-
-case "$#" in
-0)
-       origin=$(get_default_remote)
-       test -n "$(get_remote_url ${origin})" ||
-               die "Where do you want to fetch from today?"
-       set x $origin ; shift ;;
-esac
-
-if test -z "$exec"
-then
-       # No command line override and we have configuration for the remote.
-       exec="--upload-pack=$(get_uploadpack $1)"
-fi
-
-remote_nick="$1"
-remote=$(get_remote_url "$@")
-refs=
-rref=
-rsync_slurped_objects=
-
-if test "" = "$append"
-then
-       : >"$GIT_DIR/FETCH_HEAD"
-fi
-
-# Global that is reused later
-ls_remote_result=$(git ls-remote $exec "$remote") ||
-       die "Cannot get the repository state from $remote"
-
-append_fetch_head () {
-       flags=
-       test -n "$verbose" && flags="$flags$LF-v"
-       test -n "$force$single_force" && flags="$flags$LF-f"
-       GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION" \
-               git fetch--tool $flags append-fetch-head "$@"
-}
-
-# updating the current HEAD with git-fetch in a bare
-# repository is always fine.
-if test -z "$update_head_ok" && test $(is_bare_repository) = false
-then
-       orig_head=$(git rev-parse --verify HEAD 2>/dev/null)
-fi
-
-# Allow --tags/--notags from remote.$1.tagopt
-case "$tags$no_tags" in
-'')
-       case "$(git config --get "remote.$1.tagopt")" in
-       --tags)
-               tags=t ;;
-       --no-tags)
-               no_tags=t ;;
-       esac
-esac
-
-# If --tags (and later --heads or --all) is specified, then we are
-# not talking about defaults stored in Pull: line of remotes or
-# branches file, and just fetch those and refspecs explicitly given.
-# Otherwise we do what we always did.
-
-reflist=$(get_remote_refs_for_fetch "$@")
-if test "$tags"
-then
-       taglist=$(IFS=' ' &&
-                 echo "$ls_remote_result" |
-                 git show-ref --exclude-existing=refs/tags/ |
-                 while read sha1 name
-                 do
-                       echo ".${name}:${name}"
-                 done) || exit
-       if test "$#" -gt 1
-       then
-               # remote URL plus explicit refspecs; we need to merge them.
-               reflist="$reflist$LF$taglist"
-       else
-               # No explicit refspecs; fetch tags only.
-               reflist=$taglist
-       fi
-fi
-
-fetch_all_at_once () {
-
-  eval=$(echo "$1" | git fetch--tool parse-reflist "-")
-  eval "$eval"
-
-    ( : subshell because we muck with IFS
-      IFS="    $LF"
-      (
-       if test "$remote" = . ; then
-           git show-ref $rref || echo failed "$remote"
-       elif test -f "$remote" ; then
-           test -n "$shallow_depth" &&
-               die "shallow clone with bundle is not supported"
-           git bundle unbundle "$remote" $rref ||
-           echo failed "$remote"
-       else
-               if      test -d "$remote" &&
-
-                       # The remote might be our alternate.  With
-                       # this optimization we will bypass fetch-pack
-                       # altogether, which means we cannot be doing
-                       # the shallow stuff at all.
-                       test ! -f "$GIT_DIR/shallow" &&
-                       test -z "$shallow_depth" &&
-
-                       # See if all of what we are going to fetch are
-                       # connected to our repository's tips, in which
-                       # case we do not have to do any fetch.
-                       theirs=$(echo "$ls_remote_result" | \
-                               git fetch--tool -s pick-rref "$rref" "-") &&
-
-                       # This will barf when $theirs reach an object that
-                       # we do not have in our repository.  Otherwise,
-                       # we already have everything the fetch would bring in.
-                       git rev-list --objects $theirs --not --all \
-                               >/dev/null 2>/dev/null
-               then
-                       echo "$ls_remote_result" | \
-                               git fetch--tool pick-rref "$rref" "-"
-               else
-                       flags=
-                       case $verbose in
-                       YesYes*)
-                           flags="-v"
-                           ;;
-                       esac
-                       git-fetch-pack --thin $exec $keep $shallow_depth \
-                               $quiet $no_progress $flags "$remote" $rref ||
-                       echo failed "$remote"
-               fi
-       fi
-      ) |
-      (
-       flags=
-       test -n "$verbose" && flags="$flags -v"
-       test -n "$force" && flags="$flags -f"
-       GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION" \
-               git fetch--tool $flags native-store \
-                       "$remote" "$remote_nick" "$refs"
-      )
-    ) || exit
-
-}
-
-fetch_per_ref () {
-  reflist="$1"
-  refs=
-  rref=
-
-  for ref in $reflist
-  do
-      refs="$refs$LF$ref"
-
-      # These are relative path from $GIT_DIR, typically starting at refs/
-      # but may be HEAD
-      if expr "z$ref" : 'z\.' >/dev/null
-      then
-         not_for_merge=t
-         ref=$(expr "z$ref" : 'z\.\(.*\)')
-      else
-         not_for_merge=
-      fi
-      if expr "z$ref" : 'z+' >/dev/null
-      then
-         single_force=t
-         ref=$(expr "z$ref" : 'z+\(.*\)')
-      else
-         single_force=
-      fi
-      remote_name=$(expr "z$ref" : 'z\([^:]*\):')
-      local_name=$(expr "z$ref" : 'z[^:]*:\(.*\)')
-
-      rref="$rref$LF$remote_name"
-
-      # There are transports that can fetch only one head at a time...
-      case "$remote" in
-      http://* | https://* | ftp://*)
-         test -n "$shallow_depth" &&
-               die "shallow clone with http not supported"
-         proto=$(expr "$remote" : '\([^:]*\):')
-         if [ -n "$GIT_SSL_NO_VERIFY" ]; then
-             curl_extra_args="-k"
-         fi
-         if [ -n "$GIT_CURL_FTP_NO_EPSV" -o \
-               "$(git config --bool http.noEPSV)" = true ]; then
-             noepsv_opt="--disable-epsv"
-         fi
-
-         # Find $remote_name from ls-remote output.
-         head=$(echo "$ls_remote_result" | \
-               git fetch--tool -s pick-rref "$remote_name" "-")
-         expr "z$head" : "z$_x40\$" >/dev/null ||
-               die "No such ref $remote_name at $remote"
-         echo >&2 "Fetching $remote_name from $remote using $proto"
-         case "$quiet" in '') v=-v ;; *) v= ;; esac
-         git-http-fetch $v -a "$head" "$remote" || exit
-         ;;
-      rsync://*)
-         test -n "$shallow_depth" &&
-               die "shallow clone with rsync not supported"
-         TMP_HEAD="$GIT_DIR/TMP_HEAD"
-         rsync -L -q "$remote/$remote_name" "$TMP_HEAD" || exit 1
-         head=$(git rev-parse --verify TMP_HEAD)
-         rm -f "$TMP_HEAD"
-         case "$quiet" in '') v=-v ;; *) v= ;; esac
-         test "$rsync_slurped_objects" || {
-             rsync -a $v --ignore-existing --exclude info \
-                 "$remote/objects/" "$GIT_OBJECT_DIRECTORY/" || exit
-
-             # Look at objects/info/alternates for rsync -- http will
-             # support it natively and git native ones will do it on
-             # the remote end.  Not having that file is not a crime.
-             rsync -q "$remote/objects/info/alternates" \
-                 "$GIT_DIR/TMP_ALT" 2>/dev/null ||
-                 rm -f "$GIT_DIR/TMP_ALT"
-             if test -f "$GIT_DIR/TMP_ALT"
-             then
-                 resolve_alternates "$remote" <"$GIT_DIR/TMP_ALT" |
-                 while read alt
-                 do
-                     case "$alt" in 'bad alternate: '*) die "$alt";; esac
-                     echo >&2 "Getting alternate: $alt"
-                     rsync -av --ignore-existing --exclude info \
-                     "$alt" "$GIT_OBJECT_DIRECTORY/" || exit
-                 done
-                 rm -f "$GIT_DIR/TMP_ALT"
-             fi
-             rsync_slurped_objects=t
-         }
-         ;;
-      esac
-
-      append_fetch_head "$head" "$remote" \
-         "$remote_name" "$remote_nick" "$local_name" "$not_for_merge" || exit
-
-  done
-
-}
-
-fetch_main () {
-       case "$remote" in
-       http://* | https://* | ftp://* | rsync://* )
-               fetch_per_ref "$@"
-               ;;
-       *)
-               fetch_all_at_once "$@"
-               ;;
-       esac
-}
-
-fetch_main "$reflist" || exit
-
-# automated tag following
-case "$no_tags$tags" in
-'')
-       case "$reflist" in
-       *:refs/*)
-               # effective only when we are following remote branch
-               # using local tracking branch.
-               taglist=$(IFS=' ' &&
-               echo "$ls_remote_result" |
-               git show-ref --exclude-existing=refs/tags/ |
-               while read sha1 name
-               do
-                       git cat-file -t "$sha1" >/dev/null 2>&1 || continue
-                       echo >&2 "Auto-following $name"
-                       echo ".${name}:${name}"
-               done)
-       esac
-       case "$taglist" in
-       '') ;;
-       ?*)
-               # do not deepen a shallow tree when following tags
-               shallow_depth=
-               fetch_main "$taglist" || exit ;;
-       esac
-esac
-
-# If the original head was empty (i.e. no "master" yet), or
-# if we were told not to worry, we do not have to check.
-case "$orig_head" in
-'')
-       ;;
-?*)
-       curr_head=$(git rev-parse --verify HEAD 2>/dev/null)
-       if test "$curr_head" != "$orig_head"
-       then
-           git update-ref \
-                       -m "$GIT_REFLOG_ACTION: Undoing incorrectly fetched HEAD." \
-                       HEAD "$orig_head"
-               die "Cannot fetch into the current branch."
-       fi
-       ;;
-esac
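
The fetch script above does by hand what the builtin fetch now does: resolve refspecs, transfer objects, update tracking refs, record FETCH_HEAD, and follow tags. A rough shell equivalent of its main modes, with an assumed remote named origin (tag following differs slightly: --tags fetches all tags rather than only those pointing at already-fetched history).

    git fetch origin '+refs/heads/*:refs/remotes/origin/*'   # explicit, forced refspec
    git fetch --tags origin                                   # also fetch tags
    cat "$(git rev-parse --git-dir)/FETCH_HEAD"               # tips recorded for a later merge
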
diff --git a/contrib/examples/git-gc.sh b/contrib/examples/git-gc.sh
deleted file mode 100755
index 1597e9f..0000000
--- a/contrib/examples/git-gc.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2006, Shawn O. Pearce
-#
-# Cleanup unreachable files and optimize the repository.
-
-USAGE='[--prune]'
-SUBDIRECTORY_OK=Yes
-. git-sh-setup
-
-no_prune=:
-while test $# != 0
-do
-       case "$1" in
-       --prune)
-               no_prune=
-               ;;
-       --)
-               usage
-               ;;
-       esac
-       shift
-done
-
-case "$(git config --get gc.packrefs)" in
-notbare|"")
-       test $(is_bare_repository) = true || pack_refs=true;;
-*)
-       pack_refs=$(git config --bool --get gc.packrefs)
-esac
-
-test "true" != "$pack_refs" ||
-git pack-refs --prune &&
-git reflog expire --all &&
-git-repack -a -d -l &&
-$no_prune git prune &&
-git rerere gc || exit
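
The gc script above is essentially a fixed sequence of housekeeping commands; the builtin git gc runs the same kind of steps with configurable thresholds. The sequence, spelled out:

    git pack-refs --prune &&     # pack loose refs
    git reflog expire --all &&   # drop expired reflog entries
    git repack -a -d -l &&       # repack all objects locally
    git prune &&                 # only when --prune was given to the script
    git rerere gc                # expire old rerere records
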
diff --git a/contrib/examples/git-log.sh b/contrib/examples/git-log.sh
deleted file mode 100755
index c2ea71c..0000000
--- a/contrib/examples/git-log.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Linus Torvalds
-#
-
-USAGE='[--max-count=<n>] [<since>..<limit>] [--pretty=<format>] [git-rev-list options]'
-SUBDIRECTORY_OK='Yes'
-. git-sh-setup
-
-revs=$(git-rev-parse --revs-only --no-flags --default HEAD "$@") || exit
-[ "$revs" ] || {
-       die "No HEAD ref"
-}
-git-rev-list --pretty $(git-rev-parse --default HEAD "$@") |
-LESS=-S ${PAGER:-less}
diff --git a/contrib/examples/git-ls-remote.sh b/contrib/examples/git-ls-remote.sh
deleted file mode 100755
index 2aa89a7..0000000
--- a/contrib/examples/git-ls-remote.sh
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/bin/sh
-#
-
-usage () {
-    echo >&2 "usage: $0 [--heads] [--tags] [-u|--upload-pack <upload-pack>]"
-    echo >&2 "          <repository> <refs>..."
-    exit 1;
-}
-
-die () {
-    echo >&2 "$*"
-    exit 1
-}
-
-exec=
-while test $# != 0
-do
-  case "$1" in
-  -h|--h|--he|--hea|--head|--heads)
-  heads=heads; shift ;;
-  -t|--t|--ta|--tag|--tags)
-  tags=tags; shift ;;
-  -u|--u|--up|--upl|--uploa|--upload|--upload-|--upload-p|--upload-pa|\
-  --upload-pac|--upload-pack)
-       shift
-       exec="--upload-pack=$1"
-       shift;;
-  -u=*|--u=*|--up=*|--upl=*|--uplo=*|--uploa=*|--upload=*|\
-  --upload-=*|--upload-p=*|--upload-pa=*|--upload-pac=*|--upload-pack=*)
-       exec=--upload-pack=$(expr "z$1" : 'z-[^=]*=\(.*\)')
-       shift;;
-  --)
-  shift; break ;;
-  -*)
-  usage ;;
-  *)
-  break ;;
-  esac
-done
-
-case "$#" in 0) usage ;; esac
-
-case ",$heads,$tags," in
-,,,) heads=heads tags=tags other=other ;;
-esac
-
-. git-parse-remote
-peek_repo="$(get_remote_url "$@")"
-shift
-
-tmp=.ls-remote-$$
-trap "rm -fr $tmp-*" 0 1 2 3 15
-tmpdir=$tmp-d
-
-case "$peek_repo" in
-http://* | https://* | ftp://* )
-       if [ -n "$GIT_SSL_NO_VERIFY" -o \
-               "$(git config --bool http.sslVerify)" = false ]; then
-               curl_extra_args="-k"
-       fi
-       if [ -n "$GIT_CURL_FTP_NO_EPSV" -o \
-               "$(git config --bool http.noEPSV)" = true ]; then
-               curl_extra_args="${curl_extra_args} --disable-epsv"
-       fi
-       curl -nsf $curl_extra_args --header "Pragma: no-cache" "$peek_repo/info/refs" ||
-               echo "failed    slurping"
-       ;;
-
-rsync://* )
-       mkdir $tmpdir &&
-       rsync -rlq "$peek_repo/HEAD" $tmpdir &&
-       rsync -rq "$peek_repo/refs" $tmpdir || {
-               echo "failed    slurping"
-               exit
-       }
-       head=$(cat "$tmpdir/HEAD") &&
-       case "$head" in
-       ref:' '*)
-               head=$(expr "z$head" : 'zref: \(.*\)') &&
-               head=$(cat "$tmpdir/$head") || exit
-       esac &&
-       echo "$head     HEAD"
-       (cd $tmpdir && find refs -type f) |
-       while read path
-       do
-               tr -d '\012' <"$tmpdir/$path"
-               echo "  $path"
-       done &&
-       rm -fr $tmpdir
-       ;;
-
-* )
-       if test -f "$peek_repo" ; then
-               git bundle list-heads "$peek_repo" ||
-               echo "failed    slurping"
-       else
-               git-peek-remote $exec "$peek_repo" ||
-               echo "failed    slurping"
-       fi
-       ;;
-esac |
-sort -t '      ' -k 2 |
-while read sha1 path
-do
-       case "$sha1" in
-       failed)
-               exit 1 ;;
-       esac
-       case "$path" in
-       refs/heads/*)
-               group=heads ;;
-       refs/tags/*)
-               group=tags ;;
-       *)
-               group=other ;;
-       esac
-       case ",$heads,$tags,$other," in
-       *,$group,*)
-               ;;
-       *)
-               continue;;
-       esac
-       case "$#" in
-       0)
-               match=yes ;;
-       *)
-               match=no
-               for pat
-               do
-                       case "/$path" in
-                       */$pat )
-                               match=yes
-                               break ;;
-                       esac
-               done
-       esac
-       case "$match" in
-       no)
-               continue ;;
-       esac
-       echo "$sha1     $path"
-done
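
The script above enumerates refs per transport (dumb HTTP via info/refs, rsync, bundle files, git-peek-remote) and then filters by --heads/--tags and patterns. The builtin largely covers this in a single call; an illustrative invocation with an example URL and pattern:

    git ls-remote --heads --tags https://example.com/project.git 'v*'
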
diff --git a/contrib/examples/git-merge-ours.sh b/contrib/examples/git-merge-ours.sh
deleted file mode 100755
index 29dba4b..0000000
--- a/contrib/examples/git-merge-ours.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Junio C Hamano
-#
-# Pretend we resolved the heads, but declare our tree trumps everybody else.
-#
-
-# We need to exit with 2 if the index does not match our HEAD tree,
-# because the current index is what we will be committing as the
-# merge result.
-
-git diff-index --quiet --cached HEAD -- || exit 2
-
-exit 0
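
This strategy is nothing more than the single check above: refuse to run if the index differs from HEAD, otherwise keep our tree unchanged. It is normally invoked through the merge machinery rather than directly; for example, with an assumed branch named topic:

    git merge -s ours topic   # record topic as merged, but keep our tree as-is
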
diff --git a/contrib/examples/git-merge.sh b/contrib/examples/git-merge.sh
deleted file mode 100755
index 932e78d..0000000
--- a/contrib/examples/git-merge.sh
+++ /dev/null
@@ -1,620 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Junio C Hamano
-#
-
-OPTIONS_KEEPDASHDASH=
-OPTIONS_SPEC="\
-git merge [options] <remote>...
-git merge [options] <msg> HEAD <remote>
---
-stat                 show a diffstat at the end of the merge
-n                    don't show a diffstat at the end of the merge
-summary              (synonym to --stat)
-log                  add list of one-line log to merge commit message
-squash               create a single commit instead of doing a merge
-commit               perform a commit if the merge succeeds (default)
-ff                   allow fast-forward (default)
-ff-only              abort if fast-forward is not possible
-rerere-autoupdate    update index with any reused conflict resolution
-s,strategy=          merge strategy to use
-X=                   option for selected merge strategy
-m,message=           message to be used for the merge commit (if any)
-"
-
-SUBDIRECTORY_OK=Yes
-. git-sh-setup
-require_work_tree
-cd_to_toplevel
-
-test -z "$(git ls-files -u)" ||
-       die "Merge is not possible because you have unmerged files."
-
-! test -e "$GIT_DIR/MERGE_HEAD" ||
-       die 'You have not concluded your merge (MERGE_HEAD exists).'
-
-LF='
-'
-
-all_strategies='recur recursive octopus resolve stupid ours subtree'
-all_strategies="$all_strategies recursive-ours recursive-theirs"
-not_strategies='base file index tree'
-default_twohead_strategies='recursive'
-default_octopus_strategies='octopus'
-no_fast_forward_strategies='subtree ours'
-no_trivial_strategies='recursive recur subtree ours recursive-ours recursive-theirs'
-use_strategies=
-xopt=
-
-allow_fast_forward=t
-fast_forward_only=
-allow_trivial_merge=t
-squash= no_commit= log_arg= rr_arg=
-
-dropsave() {
-       rm -f -- "$GIT_DIR/MERGE_HEAD" "$GIT_DIR/MERGE_MSG" \
-                "$GIT_DIR/MERGE_STASH" "$GIT_DIR/MERGE_MODE" || exit 1
-}
-
-savestate() {
-       # Stash away any local modifications.
-       git stash create >"$GIT_DIR/MERGE_STASH"
-}
-
-restorestate() {
-        if test -f "$GIT_DIR/MERGE_STASH"
-       then
-               git reset --hard $head >/dev/null
-               git stash apply $(cat "$GIT_DIR/MERGE_STASH")
-               git update-index --refresh >/dev/null
-       fi
-}
-
-finish_up_to_date () {
-       case "$squash" in
-       t)
-               echo "$1 (nothing to squash)" ;;
-       '')
-               echo "$1" ;;
-       esac
-       dropsave
-}
-
-squash_message () {
-       echo Squashed commit of the following:
-       echo
-       git log --no-merges --pretty=medium ^"$head" $remoteheads
-}
-
-finish () {
-       if test '' = "$2"
-       then
-               rlogm="$GIT_REFLOG_ACTION"
-       else
-               echo "$2"
-               rlogm="$GIT_REFLOG_ACTION: $2"
-       fi
-       case "$squash" in
-       t)
-               echo "Squash commit -- not updating HEAD"
-               squash_message >"$GIT_DIR/SQUASH_MSG"
-               ;;
-       '')
-               case "$merge_msg" in
-               '')
-                       echo "No merge message -- not updating HEAD"
-                       ;;
-               *)
-                       git update-ref -m "$rlogm" HEAD "$1" "$head" || exit 1
-                       git gc --auto
-                       ;;
-               esac
-               ;;
-       esac
-       case "$1" in
-       '')
-               ;;
-       ?*)
-               if test "$show_diffstat" = t
-               then
-                       # We want color (if set), but no pager
-                       GIT_PAGER='' git diff --stat --summary -M "$head" "$1"
-               fi
-               ;;
-       esac
-
-       # Run a post-merge hook
-        if test -x "$GIT_DIR"/hooks/post-merge
-        then
-           case "$squash" in
-           t)
-                "$GIT_DIR"/hooks/post-merge 1
-               ;;
-           '')
-                "$GIT_DIR"/hooks/post-merge 0
-               ;;
-           esac
-        fi
-}
-
-merge_name () {
-       remote="$1"
-       rh=$(git rev-parse --verify "$remote^0" 2>/dev/null) || return
-       if truname=$(expr "$remote" : '\(.*\)~[0-9]*$') &&
-               git show-ref -q --verify "refs/heads/$truname" 2>/dev/null
-       then
-               echo "$rh               branch '$truname' (early part) of ."
-               return
-       fi
-       if found_ref=$(git rev-parse --symbolic-full-name --verify \
-                                                       "$remote" 2>/dev/null)
-       then
-               expanded=$(git check-ref-format --branch "$remote") ||
-                       exit
-               if test "${found_ref#refs/heads/}" != "$found_ref"
-               then
-                       echo "$rh               branch '$expanded' of ."
-                       return
-               elif test "${found_ref#refs/remotes/}" != "$found_ref"
-               then
-                       echo "$rh               remote branch '$expanded' of ."
-                       return
-               fi
-       fi
-       if test "$remote" = "FETCH_HEAD" && test -r "$GIT_DIR/FETCH_HEAD"
-       then
-               sed -e 's/      not-for-merge   /               /' -e 1q \
-                       "$GIT_DIR/FETCH_HEAD"
-               return
-       fi
-       echo "$rh               commit '$remote'"
-}
-
-parse_config () {
-       while test $# != 0; do
-               case "$1" in
-               -n|--no-stat|--no-summary)
-                       show_diffstat=false ;;
-               --stat|--summary)
-                       show_diffstat=t ;;
-               --log|--no-log)
-                       log_arg=$1 ;;
-               --squash)
-                       test "$allow_fast_forward" = t ||
-                               die "You cannot combine --squash with --no-ff."
-                       squash=t no_commit=t ;;
-               --no-squash)
-                       squash= no_commit= ;;
-               --commit)
-                       no_commit= ;;
-               --no-commit)
-                       no_commit=t ;;
-               --ff)
-                       allow_fast_forward=t ;;
-               --no-ff)
-                       test "$squash" != t ||
-                               die "You cannot combine --squash with --no-ff."
-                       test "$fast_forward_only" != t ||
-                               die "You cannot combine --ff-only with --no-ff."
-                       allow_fast_forward=f ;;
-               --ff-only)
-                       test "$allow_fast_forward" != f ||
-                               die "You cannot combine --ff-only with --no-ff."
-                       fast_forward_only=t ;;
-               --rerere-autoupdate|--no-rerere-autoupdate)
-                       rr_arg=$1 ;;
-               -s|--strategy)
-                       shift
-                       case " $all_strategies " in
-                       *" $1 "*)
-                               use_strategies="$use_strategies$1 "
-                               ;;
-                       *)
-                               case " $not_strategies " in
-                               *" $1 "*)
-                                       false
-                               esac &&
-                               type "git-merge-$1" >/dev/null 2>&1 ||
-                                       die "available strategies are: $all_strategies"
-                               use_strategies="$use_strategies$1 "
-                               ;;
-                       esac
-                       ;;
-               -X)
-                       shift
-                       xopt="${xopt:+$xopt }$(git rev-parse --sq-quote "--$1")"
-                       ;;
-               -m|--message)
-                       shift
-                       merge_msg="$1"
-                       have_message=t
-                       ;;
-               --)
-                       shift
-                       break ;;
-               *)      usage ;;
-               esac
-               shift
-       done
-       args_left=$#
-}
-
-test $# != 0 || usage
-
-have_message=
-
-if branch=$(git-symbolic-ref -q HEAD)
-then
-       mergeopts=$(git config "branch.${branch#refs/heads/}.mergeoptions")
-       if test -n "$mergeopts"
-       then
-               parse_config $mergeopts --
-       fi
-fi
-
-parse_config "$@"
-while test $args_left -lt $#; do shift; done
-
-if test -z "$show_diffstat"; then
-    test "$(git config --bool merge.diffstat)" = false && show_diffstat=false
-    test "$(git config --bool merge.stat)" = false && show_diffstat=false
-    test -z "$show_diffstat" && show_diffstat=t
-fi
-
-# This could be the traditional "merge <msg> HEAD <commit>..." form; the
-# way we can tell is to check whether the second token is HEAD, but some
-# people might have misused the interface and used a commit-ish that
-# is the same as HEAD there instead.  The traditional format never
-# takes "-m", so checking for it is an additional safety measure.
-
-if test -z "$have_message" &&
-       second_token=$(git rev-parse --verify "$2^0" 2>/dev/null) &&
-       head_commit=$(git rev-parse --verify "HEAD" 2>/dev/null) &&
-       test "$second_token" = "$head_commit"
-then
-       merge_msg="$1"
-       shift
-       head_arg="$1"
-       shift
-elif ! git rev-parse --verify HEAD >/dev/null 2>&1
-then
-       # If the merged head is a valid one there is no reason to
-       # forbid "git merge" into a branch yet to be born.  We do
-       # the same for "git pull".
-       if test 1 -ne $#
-       then
-               echo >&2 "Can merge only exactly one commit into empty head"
-               exit 1
-       fi
-
-       test "$squash" != t ||
-               die "Squash commit into empty head not supported yet"
-       test "$allow_fast_forward" = t ||
-               die "Non-fast-forward into an empty head does not make sense"
-       rh=$(git rev-parse --verify "$1^0") ||
-               die "$1 - not something we can merge"
-
-       git update-ref -m "initial pull" HEAD "$rh" "" &&
-       git read-tree --reset -u HEAD
-       exit
-
-else
-       # We are invoked directly as the first-class UI.
-       head_arg=HEAD
-
-       # All the rest are the commits being merged; prepare
-       # the standard merge summary message to be appended to
-       # the given message.  If remote is invalid we will die
-       # later in the common codepath so we discard the error
-       # in this loop.
-       merge_msg="$(
-               for remote
-               do
-                       merge_name "$remote"
-               done |
-               if test "$have_message" = t
-               then
-                       git fmt-merge-msg -m "$merge_msg" $log_arg
-               else
-                       git fmt-merge-msg $log_arg
-               fi
-       )"
-fi
-head=$(git rev-parse --verify "$head_arg"^0) || usage
-
-# All the rest are remote heads
-test "$#" = 0 && usage ;# we need at least one remote head.
-set_reflog_action "merge $*"
-
-remoteheads=
-for remote
-do
-       remotehead=$(git rev-parse --verify "$remote"^0 2>/dev/null) ||
-           die "$remote - not something we can merge"
-       remoteheads="${remoteheads}$remotehead "
-       eval GITHEAD_$remotehead='"$remote"'
-       export GITHEAD_$remotehead
-done
-set x $remoteheads ; shift
-
-case "$use_strategies" in
-'')
-       case "$#" in
-       1)
-               var="$(git config --get pull.twohead)"
-               if test -n "$var"
-               then
-                       use_strategies="$var"
-               else
-                       use_strategies="$default_twohead_strategies"
-               fi ;;
-       *)
-               var="$(git config --get pull.octopus)"
-               if test -n "$var"
-               then
-                       use_strategies="$var"
-               else
-                       use_strategies="$default_octopus_strategies"
-               fi ;;
-       esac
-       ;;
-esac
-
-for s in $use_strategies
-do
-       for ss in $no_fast_forward_strategies
-       do
-               case " $s " in
-               *" $ss "*)
-                       allow_fast_forward=f
-                       break
-                       ;;
-               esac
-       done
-       for ss in $no_trivial_strategies
-       do
-               case " $s " in
-               *" $ss "*)
-                       allow_trivial_merge=f
-                       break
-                       ;;
-               esac
-       done
-done
-
-case "$#" in
-1)
-       common=$(git merge-base --all $head "$@")
-       ;;
-*)
-       common=$(git merge-base --all --octopus $head "$@")
-       ;;
-esac
-echo "$head" >"$GIT_DIR/ORIG_HEAD"
-
-case "$allow_fast_forward,$#,$common,$no_commit" in
-?,*,'',*)
-       # No common ancestors found. We need a real merge.
-       ;;
-?,1,"$1",*)
-       # If head can reach all the merge heads then we are up to date,
-       # but first handle the most common case of merging one remote.
-       finish_up_to_date "Already up to date."
-       exit 0
-       ;;
-t,1,"$head",*)
-       # Again the most common case of merging one remote.
-       echo "Updating $(git rev-parse --short $head)..$(git rev-parse --short $1)"
-       git update-index --refresh 2>/dev/null
-       msg="Fast-forward"
-       if test -n "$have_message"
-       then
-               msg="$msg (no commit created; -m option ignored)"
-       fi
-       new_head=$(git rev-parse --verify "$1^0") &&
-       git read-tree -v -m -u --exclude-per-directory=.gitignore $head "$new_head" &&
-       finish "$new_head" "$msg" || exit
-       dropsave
-       exit 0
-       ;;
-?,1,?*"$LF"?*,*)
-       # We are not doing octopus and not fast-forward.  Need a
-       # real merge.
-       ;;
-?,1,*,)
-       # We are not doing octopus, not fast-forward, and have only
-       # one common.
-       git update-index --refresh 2>/dev/null
-       case "$allow_trivial_merge,$fast_forward_only" in
-       t,)
-               # See if it is really trivial.
-               git var GIT_COMMITTER_IDENT >/dev/null || exit
-               echo "Trying really trivial in-index merge..."
-               if git read-tree --trivial -m -u -v $common $head "$1" &&
-                  result_tree=$(git write-tree)
-               then
-                       echo "Wonderful."
-                       result_commit=$(
-                               printf '%s\n' "$merge_msg" |
-                               git commit-tree $result_tree -p HEAD -p "$1"
-                       ) || exit
-                       finish "$result_commit" "In-index merge"
-                       dropsave
-                       exit 0
-               fi
-               echo "Nope."
-       esac
-       ;;
-*)
-       # An octopus.  If we can reach all the remotes we are up to date.
-       up_to_date=t
-       for remote
-       do
-               common_one=$(git merge-base --all $head $remote)
-               if test "$common_one" != "$remote"
-               then
-                       up_to_date=f
-                       break
-               fi
-       done
-       if test "$up_to_date" = t
-       then
-               finish_up_to_date "Already up to date. Yeeah!"
-               exit 0
-       fi
-       ;;
-esac
-
-if test "$fast_forward_only" = t
-then
-       die "Not possible to fast-forward, aborting."
-fi
-
-# We are going to make a new commit.
-git var GIT_COMMITTER_IDENT >/dev/null || exit
-
-# At this point, we need a real merge.  No matter what strategy
-# we use, it would operate on the index, possibly affecting the
-# working tree, and when resolved cleanly, have the desired tree
-# in the index -- this means that the index must be in sync with
-# the $head commit.  The strategies are responsible to ensure this.
-
-case "$use_strategies" in
-?*' '?*)
-    # Stash away the local changes so that we can try more than one.
-    savestate
-    single_strategy=no
-    ;;
-*)
-    rm -f "$GIT_DIR/MERGE_STASH"
-    single_strategy=yes
-    ;;
-esac
-
-result_tree= best_cnt=-1 best_strategy= wt_strategy=
-merge_was_ok=
-for strategy in $use_strategies
-do
-    test "$wt_strategy" = '' || {
-       echo "Rewinding the tree to pristine..."
-       restorestate
-    }
-    case "$single_strategy" in
-    no)
-       echo "Trying merge strategy $strategy..."
-       ;;
-    esac
-
-    # Remember which strategy left the state in the working tree
-    wt_strategy=$strategy
-
-    eval 'git-merge-$strategy '"$xopt"' $common -- "$head_arg" "$@"'
-    exit=$?
-    if test "$no_commit" = t && test "$exit" = 0
-    then
-        merge_was_ok=t
-       exit=1 ;# pretend it left conflicts.
-    fi
-
-    test "$exit" = 0 || {
-
-       # The backend exits with 1 when conflicts are left to be resolved,
-       # with 2 when it does not handle the given merge at all.
-
-       if test "$exit" -eq 1
-       then
-           cnt=$({
-               git diff-files --name-only
-               git ls-files --unmerged
-           } | wc -l)
-           if test $best_cnt -le 0 || test $cnt -le $best_cnt
-           then
-               best_strategy=$strategy
-               best_cnt=$cnt
-           fi
-       fi
-       continue
-    }
-
-    # Automerge succeeded.
-    result_tree=$(git write-tree) && break
-done
-
-# If we have a resulting tree, that means the strategy module
-# auto resolved the merge cleanly.
-if test '' != "$result_tree"
-then
-    if test "$allow_fast_forward" = "t"
-    then
-       parents=$(git merge-base --independent "$head" "$@")
-    else
-       parents=$(git rev-parse "$head" "$@")
-    fi
-    parents=$(echo "$parents" | sed -e 's/^/-p /')
-    result_commit=$(printf '%s\n' "$merge_msg" | git commit-tree $result_tree $parents) || exit
-    finish "$result_commit" "Merge made by $wt_strategy."
-    dropsave
-    exit 0
-fi
-
-# Pick the result from the best strategy and have the user fix it up.
-case "$best_strategy" in
-'')
-       restorestate
-       case "$use_strategies" in
-       ?*' '?*)
-               echo >&2 "No merge strategy handled the merge."
-               ;;
-       *)
-               echo >&2 "Merge with strategy $use_strategies failed."
-               ;;
-       esac
-       exit 2
-       ;;
-"$wt_strategy")
-       # We already have its result in the working tree.
-       ;;
-*)
-       echo "Rewinding the tree to pristine..."
-       restorestate
-       echo "Using the $best_strategy to prepare resolving by hand."
-       git-merge-$best_strategy $common -- "$head_arg" "$@"
-       ;;
-esac
-
-if test "$squash" = t
-then
-       finish
-else
-       for remote
-       do
-               echo $remote
-       done >"$GIT_DIR/MERGE_HEAD"
-       printf '%s\n' "$merge_msg" >"$GIT_DIR/MERGE_MSG" ||
-               die "Could not write to $GIT_DIR/MERGE_MSG"
-       if test "$allow_fast_forward" != t
-       then
-               printf "%s" no-ff
-       else
-               :
-       fi >"$GIT_DIR/MERGE_MODE" ||
-               die "Could not write to $GIT_DIR/MERGE_MODE"
-fi
-
-if test "$merge_was_ok" = t
-then
-       echo >&2 \
-       "Automatic merge went well; stopped before committing as requested"
-       exit 0
-else
-       {
-           echo '
-Conflicts:
-'
-               git ls-files --unmerged |
-               sed -e 's/^[^   ]*      /       /' |
-               uniq
-       } >>"$GIT_DIR/MERGE_MSG"
-       git rerere $rr_arg
-       die "Automatic merge failed; fix conflicts and then commit the result."
-fi
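The fast-forward path in the script above ("Updating ...") relies on one
invariant: the merge base of HEAD and the other head is HEAD itself.  A
minimal standalone sketch of that check, assuming a hypothetical branch
named "topic" (not something the script defines):

    head=$(git rev-parse --verify HEAD) &&
    other=$(git rev-parse --verify topic^0) &&
    common=$(git merge-base --all "$head" "$other")
    if test "$common" = "$head"
    then
            echo "fast-forward: HEAD is an ancestor of topic"
    else
            echo "a real merge (or an --ff-only failure) is needed"
    fi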
diff --git a/contrib/examples/git-notes.sh b/contrib/examples/git-notes.sh
deleted file mode 100755 (executable)
index e642e47..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/bin/sh
-
-USAGE="(edit [-F <file> | -m <msg>] | show) [commit]"
-. git-sh-setup
-
-test -z "$1" && usage
-ACTION="$1"; shift
-
-test -z "$GIT_NOTES_REF" && GIT_NOTES_REF="$(git config core.notesref)"
-test -z "$GIT_NOTES_REF" && GIT_NOTES_REF="refs/notes/commits"
-
-MESSAGE=
-while test $# != 0
-do
-       case "$1" in
-       -m)
-               test "$ACTION" = "edit" || usage
-               shift
-               if test "$#" = "0"; then
-                       die "error: option -m needs an argument"
-               else
-                       if [ -z "$MESSAGE" ]; then
-                               MESSAGE="$1"
-                       else
-                               MESSAGE="$MESSAGE
-
-$1"
-                       fi
-                       shift
-               fi
-               ;;
-       -F)
-               test "$ACTION" = "edit" || usage
-               shift
-               if test "$#" = "0"; then
-                       die "error: option -F needs an argument"
-               else
-                       if [ -z "$MESSAGE" ]; then
-                               MESSAGE="$(cat "$1")"
-                       else
-                               MESSAGE="$MESSAGE
-
-$(cat "$1")"
-                       fi
-                       shift
-               fi
-               ;;
-       -*)
-               usage
-               ;;
-       *)
-               break
-               ;;
-       esac
-done
-
-COMMIT=$(git rev-parse --verify --default HEAD "$@") ||
-die "Invalid commit: $@"
-
-case "$ACTION" in
-edit)
-       if [ "${GIT_NOTES_REF#refs/notes/}" = "$GIT_NOTES_REF" ]; then
-               die "Refusing to edit notes in $GIT_NOTES_REF (outside of refs/notes/)"
-       fi
-
-       MSG_FILE="$GIT_DIR/new-notes-$COMMIT"
-       GIT_INDEX_FILE="$MSG_FILE.idx"
-       export GIT_INDEX_FILE
-
-       trap '
-               test -f "$MSG_FILE" && rm "$MSG_FILE"
-               test -f "$GIT_INDEX_FILE" && rm "$GIT_INDEX_FILE"
-       ' 0
-
-       CURRENT_HEAD=$(git show-ref "$GIT_NOTES_REF" | cut -f 1 -d ' ')
-       if [ -z "$CURRENT_HEAD" ]; then
-               PARENT=
-       else
-               PARENT="-p $CURRENT_HEAD"
-               git read-tree "$GIT_NOTES_REF" || die "Could not read index"
-       fi
-
-       if [ -z "$MESSAGE" ]; then
-               GIT_NOTES_REF= git log -1 $COMMIT | sed "s/^/#/" > "$MSG_FILE"
-               if [ ! -z "$CURRENT_HEAD" ]; then
-                       git cat-file blob :$COMMIT >> "$MSG_FILE" 2> /dev/null
-               fi
-               core_editor="$(git config core.editor)"
-               ${GIT_EDITOR:-${core_editor:-${VISUAL:-${EDITOR:-vi}}}} "$MSG_FILE"
-       else
-               echo "$MESSAGE" > "$MSG_FILE"
-       fi
-
-       grep -v ^# < "$MSG_FILE" | git stripspace > "$MSG_FILE".processed
-       mv "$MSG_FILE".processed "$MSG_FILE"
-       if [ -s "$MSG_FILE" ]; then
-               BLOB=$(git hash-object -w "$MSG_FILE") ||
-                       die "Could not write into object database"
-               git update-index --add --cacheinfo 0644 $BLOB $COMMIT ||
-                       die "Could not write index"
-       else
-               test -z "$CURRENT_HEAD" &&
-                       die "Will not initialise with empty tree"
-               git update-index --force-remove $COMMIT ||
-                       die "Could not update index"
-       fi
-
-       TREE=$(git write-tree) || die "Could not write tree"
-       NEW_HEAD=$(echo Annotate $COMMIT | git commit-tree $TREE $PARENT) ||
-               die "Could not annotate"
-       git update-ref -m "Annotate $COMMIT" \
-               "$GIT_NOTES_REF" $NEW_HEAD $CURRENT_HEAD
-;;
-show)
-       git rev-parse -q --verify "$GIT_NOTES_REF":$COMMIT > /dev/null ||
-               die "No note for commit $COMMIT."
-       git show "$GIT_NOTES_REF":$COMMIT
-;;
-*)
-       usage
-esac
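The "edit" action above stores a note using nothing but plumbing: the note
text becomes a blob, the blob is staged in a throwaway index with the
annotated commit's ID as the path, and the resulting tree is committed on
the notes ref.  A minimal sketch of that sequence, assuming a hypothetical
file note.txt and no pre-existing notes ref:

    commit=$(git rev-parse --verify HEAD) &&
    blob=$(git hash-object -w note.txt) &&
    export GIT_INDEX_FILE=.git/notes-scratch-index &&
    git update-index --add --cacheinfo "100644,$blob,$commit" &&
    tree=$(git write-tree) &&
    new=$(echo "Annotate $commit" | git commit-tree "$tree") &&
    git update-ref refs/notes/commits "$new"
    # clean up the scratch index afterwards
    rm -f .git/notes-scratch-index; unset GIT_INDEX_FILE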
diff --git a/contrib/examples/git-pull.sh b/contrib/examples/git-pull.sh
deleted file mode 100755 (executable)
index 6b3a03f..0000000
+++ /dev/null
@@ -1,381 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Junio C Hamano
-#
-# Fetch one or more remote refs and merge it/them into the current HEAD.
-
-SUBDIRECTORY_OK=Yes
-OPTIONS_KEEPDASHDASH=
-OPTIONS_STUCKLONG=Yes
-OPTIONS_SPEC="\
-git pull [options] [<repository> [<refspec>...]]
-
-Fetch one or more remote refs and integrate it/them with the current HEAD.
---
-v,verbose                  be more verbose
-q,quiet                    be more quiet
-progress                   force progress reporting
-
-  Options related to merging
-r,rebase?false|true|preserve incorporate changes by rebasing rather than merging
-n!                         do not show a diffstat at the end of the merge
-stat                       show a diffstat at the end of the merge
-summary                    (synonym to --stat)
-log?n                      add (at most <n>) entries from shortlog to merge commit message
-squash                     create a single commit instead of doing a merge
-commit                     perform a commit if the merge succeeds (default)
-e,edit                       edit message before committing
-ff                         allow fast-forward
-ff-only!                   abort if fast-forward is not possible
-verify-signatures          verify that the named commit has a valid GPG signature
-s,strategy=strategy        merge strategy to use
-X,strategy-option=option   option for selected merge strategy
-S,gpg-sign?key-id          GPG sign commit
-
-  Options related to fetching
-all                        fetch from all remotes
-a,append                   append to .git/FETCH_HEAD instead of overwriting
-upload-pack=path           path to upload pack on remote end
-f,force                    force overwrite of local branch
-t,tags                     fetch all tags and associated objects
-p,prune                    prune remote-tracking branches no longer on remote
-recurse-submodules?on-demand control recursive fetching of submodules
-dry-run                    dry run
-k,keep                     keep downloaded pack
-depth=depth                deepen history of shallow clone
-unshallow                  convert to a complete repository
-update-shallow             accept refs that update .git/shallow
-refmap=refmap              specify fetch refmap
-"
-test $# -gt 0 && args="$*"
-. git-sh-setup
-. git-sh-i18n
-set_reflog_action "pull${args+ $args}"
-require_work_tree_exists
-cd_to_toplevel
-
-
-die_conflict () {
-    git diff-index --cached --name-status -r --ignore-submodules HEAD --
-    if [ $(git config --bool --get advice.resolveConflict || echo true) = "true" ]; then
-       die "$(gettext "Pull is not possible because you have unmerged files.
-Please, fix them up in the work tree, and then use 'git add/rm <file>'
-as appropriate to mark resolution and make a commit.")"
-    else
-       die "$(gettext "Pull is not possible because you have unmerged files.")"
-    fi
-}
-
-die_merge () {
-    if [ $(git config --bool --get advice.resolveConflict || echo true) = "true" ]; then
-       die "$(gettext "You have not concluded your merge (MERGE_HEAD exists).
-Please, commit your changes before merging.")"
-    else
-       die "$(gettext "You have not concluded your merge (MERGE_HEAD exists).")"
-    fi
-}
-
-test -z "$(git ls-files -u)" || die_conflict
-test -f "$GIT_DIR/MERGE_HEAD" && die_merge
-
-bool_or_string_config () {
-       git config --bool "$1" 2>/dev/null || git config "$1"
-}
-
-strategy_args= diffstat= no_commit= squash= no_ff= ff_only=
-log_arg= verbosity= progress= recurse_submodules= verify_signatures=
-merge_args= edit= rebase_args= all= append= upload_pack= force= tags= prune=
-keep= depth= unshallow= update_shallow= refmap=
-curr_branch=$(git symbolic-ref -q HEAD)
-curr_branch_short="${curr_branch#refs/heads/}"
-rebase=$(bool_or_string_config branch.$curr_branch_short.rebase)
-if test -z "$rebase"
-then
-       rebase=$(bool_or_string_config pull.rebase)
-fi
-
-# Setup default fast-forward options via `pull.ff`
-pull_ff=$(bool_or_string_config pull.ff)
-case "$pull_ff" in
-true)
-       no_ff=--ff
-       ;;
-false)
-       no_ff=--no-ff
-       ;;
-only)
-       ff_only=--ff-only
-       ;;
-esac
-
-
-dry_run=
-while :
-do
-       case "$1" in
-       -q|--quiet)
-               verbosity="$verbosity -q" ;;
-       -v|--verbose)
-               verbosity="$verbosity -v" ;;
-       --progress)
-               progress=--progress ;;
-       --no-progress)
-               progress=--no-progress ;;
-       -n|--no-stat|--no-summary)
-               diffstat=--no-stat ;;
-       --stat|--summary)
-               diffstat=--stat ;;
-       --log|--log=*|--no-log)
-               log_arg="$1" ;;
-       --no-commit)
-               no_commit=--no-commit ;;
-       --commit)
-               no_commit=--commit ;;
-       -e|--edit)
-               edit=--edit ;;
-       --no-edit)
-               edit=--no-edit ;;
-       --squash)
-               squash=--squash ;;
-       --no-squash)
-               squash=--no-squash ;;
-       --ff)
-               no_ff=--ff ;;
-       --no-ff)
-               no_ff=--no-ff ;;
-       --ff-only)
-               ff_only=--ff-only ;;
-       -s*|--strategy=*)
-               strategy_args="$strategy_args $1"
-               ;;
-       -X*|--strategy-option=*)
-               merge_args="$merge_args $(git rev-parse --sq-quote "$1")"
-               ;;
-       -r*|--rebase=*)
-               rebase="${1#*=}"
-               ;;
-       --rebase)
-               rebase=true
-               ;;
-       --no-rebase)
-               rebase=false
-               ;;
-       --recurse-submodules)
-               recurse_submodules=--recurse-submodules
-               ;;
-       --recurse-submodules=*)
-               recurse_submodules="$1"
-               ;;
-       --no-recurse-submodules)
-               recurse_submodules=--no-recurse-submodules
-               ;;
-       --verify-signatures)
-               verify_signatures=--verify-signatures
-               ;;
-       --no-verify-signatures)
-               verify_signatures=--no-verify-signatures
-               ;;
-       --gpg-sign|-S)
-               gpg_sign_args=-S
-               ;;
-       --gpg-sign=*)
-               gpg_sign_args=$(git rev-parse --sq-quote "-S${1#--gpg-sign=}")
-               ;;
-       -S*)
-               gpg_sign_args=$(git rev-parse --sq-quote "$1")
-               ;;
-       --dry-run)
-               dry_run=--dry-run
-               ;;
-       --all|--no-all)
-               all=$1 ;;
-       -a|--append|--no-append)
-               append=$1 ;;
-       --upload-pack=*|--no-upload-pack)
-               upload_pack=$1 ;;
-       -f|--force|--no-force)
-               force="$force $1" ;;
-       -t|--tags|--no-tags)
-               tags=$1 ;;
-       -p|--prune|--no-prune)
-               prune=$1 ;;
-       -k|--keep|--no-keep)
-               keep=$1 ;;
-       --depth=*|--no-depth)
-               depth=$1 ;;
-       --unshallow|--no-unshallow)
-               unshallow=$1 ;;
-       --update-shallow|--no-update-shallow)
-               update_shallow=$1 ;;
-       --refmap=*|--no-refmap)
-               refmap=$1 ;;
-       -h|--help-all)
-               usage
-               ;;
-       --)
-               shift
-               break
-               ;;
-       *)
-               usage
-               ;;
-       esac
-       shift
-done
-
-case "$rebase" in
-preserve)
-       rebase=true
-       rebase_args=--preserve-merges
-       ;;
-true|false|'')
-       ;;
-*)
-       echo "Invalid value for --rebase, should be true, false, or preserve"
-       usage
-       exit 1
-       ;;
-esac
-
-error_on_no_merge_candidates () {
-       exec >&2
-
-       if test true = "$rebase"
-       then
-               op_type=rebase
-               op_prep=against
-       else
-               op_type=merge
-               op_prep=with
-       fi
-
-       upstream=$(git config "branch.$curr_branch_short.merge")
-       remote=$(git config "branch.$curr_branch_short.remote")
-
-       if [ $# -gt 1 ]; then
-               if [ "$rebase" = true ]; then
-                       printf "There is no candidate for rebasing against "
-               else
-                       printf "There are no candidates for merging "
-               fi
-               echo "among the refs that you just fetched."
-               echo "Generally this means that you provided a wildcard refspec which had no"
-               echo "matches on the remote end."
-       elif [ $# -gt 0 ] && [ "$1" != "$remote" ]; then
-               echo "You asked to pull from the remote '$1', but did not specify"
-               echo "a branch. Because this is not the default configured remote"
-               echo "for your current branch, you must specify a branch on the command line."
-       elif [ -z "$curr_branch" -o -z "$upstream" ]; then
-               . git-parse-remote
-               error_on_missing_default_upstream "pull" $op_type $op_prep \
-                       "git pull <remote> <branch>"
-       else
-               echo "Your configuration specifies to $op_type $op_prep the ref '${upstream#refs/heads/}'"
-               echo "from the remote, but no such ref was fetched."
-       fi
-       exit 1
-}
-
-test true = "$rebase" && {
-       if ! git rev-parse -q --verify HEAD >/dev/null
-       then
-               # On an unborn branch
-               if test -f "$(git rev-parse --git-path index)"
-               then
-                       die "$(gettext "updating an unborn branch with changes added to the index")"
-               fi
-       else
-               require_clean_work_tree "pull with rebase" "Please commit or stash them."
-       fi
-       oldremoteref= &&
-       test -n "$curr_branch" &&
-       . git-parse-remote &&
-       remoteref="$(get_remote_merge_branch "$@" 2>/dev/null)" &&
-       oldremoteref=$(git merge-base --fork-point "$remoteref" $curr_branch 2>/dev/null)
-}
-orig_head=$(git rev-parse -q --verify HEAD)
-git fetch $verbosity $progress $dry_run $recurse_submodules $all $append \
-${upload_pack:+"$upload_pack"} $force $tags $prune $keep $depth $unshallow $update_shallow \
-$refmap --update-head-ok "$@" || exit 1
-test -z "$dry_run" || exit 0
-
-curr_head=$(git rev-parse -q --verify HEAD)
-if test -n "$orig_head" && test "$curr_head" != "$orig_head"
-then
-       # The fetch involved updating the current branch.
-
-       # The working tree and the index file are still based on the
-       # $orig_head commit, but we are merging into $curr_head.
-       # First update the working tree to match $curr_head.
-
-       eval_gettextln "Warning: fetch updated the current branch head.
-Warning: fast-forwarding your working tree from
-Warning: commit \$orig_head." >&2
-       git update-index -q --refresh
-       git read-tree -u -m "$orig_head" "$curr_head" ||
-               die "$(eval_gettext "Cannot fast-forward your working tree.
-After making sure that you saved anything precious from
-$ git diff \$orig_head
-output, run
-$ git reset --hard
-to recover.")"
-
-fi
-
-merge_head=$(sed -e '/ not-for-merge   /d' \
-       -e 's/  .*//' "$GIT_DIR"/FETCH_HEAD | \
-       tr '\012' ' ')
-
-case "$merge_head" in
-'')
-       error_on_no_merge_candidates "$@"
-       ;;
-?*' '?*)
-       if test -z "$orig_head"
-       then
-               die "$(gettext "Cannot merge multiple branches into empty head")"
-       fi
-       if test true = "$rebase"
-       then
-               die "$(gettext "Cannot rebase onto multiple branches")"
-       fi
-       ;;
-esac
-
-# Pulling into unborn branch: a shorthand for branching off
-# FETCH_HEAD, for lazy typers.
-if test -z "$orig_head"
-then
-       # Two-way merge: we claim the index is based on an empty tree,
-       # and try to fast-forward to HEAD.  This ensures we will not
-       # lose index/worktree changes that the user already made on
-       # the unborn branch.
-       empty_tree=4b825dc642cb6eb9a060e54bf8d69288fbee4904
-       git read-tree -m -u $empty_tree $merge_head &&
-       git update-ref -m "initial pull" HEAD $merge_head "$curr_head"
-       exit
-fi
-
-if test true = "$rebase"
-then
-       o=$(git show-branch --merge-base $curr_branch $merge_head $oldremoteref)
-       if test "$oldremoteref" = "$o"
-       then
-               unset oldremoteref
-       fi
-fi
-
-case "$rebase" in
-true)
-       eval="git-rebase $diffstat $strategy_args $merge_args $rebase_args $verbosity"
-       eval="$eval $gpg_sign_args"
-       eval="$eval --onto $merge_head ${oldremoteref:-$merge_head}"
-       ;;
-*)
-       eval="git-merge $diffstat $no_commit $verify_signatures $edit $squash $no_ff $ff_only"
-       eval="$eval $log_arg $strategy_args $merge_args $verbosity $progress"
-       eval="$eval $gpg_sign_args"
-       eval="$eval FETCH_HEAD"
-       ;;
-esac
-eval "exec $eval"
diff --git a/contrib/examples/git-remote.perl b/contrib/examples/git-remote.perl
deleted file mode 100755 (executable)
index d42df7b..0000000
+++ /dev/null
@@ -1,474 +0,0 @@
-#!/usr/bin/perl -w
-
-use strict;
-use Git;
-my $git = Git->repository();
-
-sub add_remote_config {
-       my ($hash, $name, $what, $value) = @_;
-       if ($what eq 'url') {
-               # Having more than one is Ok -- it is used for push.
-               if (! exists $hash->{'URL'}) {
-                       $hash->{$name}{'URL'} = $value;
-               }
-       }
-       elsif ($what eq 'fetch') {
-               $hash->{$name}{'FETCH'} ||= [];
-               push @{$hash->{$name}{'FETCH'}}, $value;
-       }
-       elsif ($what eq 'push') {
-               $hash->{$name}{'PUSH'} ||= [];
-               push @{$hash->{$name}{'PUSH'}}, $value;
-       }
-       if (!exists $hash->{$name}{'SOURCE'}) {
-               $hash->{$name}{'SOURCE'} = 'config';
-       }
-}
-
-sub add_remote_remotes {
-       my ($hash, $file, $name) = @_;
-
-       if (exists $hash->{$name}) {
-               $hash->{$name}{'WARNING'} = 'ignored due to config';
-               return;
-       }
-
-       my $fh;
-       if (!open($fh, '<', $file)) {
-               print STDERR "Warning: cannot open $file\n";
-               return;
-       }
-       my $it = { 'SOURCE' => 'remotes' };
-       $hash->{$name} = $it;
-       while (<$fh>) {
-               chomp;
-               if (/^URL:\s*(.*)$/) {
-                       # Having more than one is Ok -- it is used for push.
-                       if (! exists $it->{'URL'}) {
-                               $it->{'URL'} = $1;
-                       }
-               }
-               elsif (/^Push:\s*(.*)$/) {
-                       $it->{'PUSH'} ||= [];
-                       push @{$it->{'PUSH'}}, $1;
-               }
-               elsif (/^Pull:\s*(.*)$/) {
-                       $it->{'FETCH'} ||= [];
-                       push @{$it->{'FETCH'}}, $1;
-               }
-               elsif (/^\#/) {
-                       ; # ignore
-               }
-               else {
-                       print STDERR "Warning: funny line in $file: $_\n";
-               }
-       }
-       close($fh);
-}
-
-sub list_remote {
-       my ($git) = @_;
-       my %seen = ();
-       my @remotes = eval {
-               $git->command(qw(config --get-regexp), '^remote\.');
-       };
-       for (@remotes) {
-               if (/^remote\.(\S+?)\.([^.\s]+)\s+(.*)$/) {
-                       add_remote_config(\%seen, $1, $2, $3);
-               }
-       }
-
-       my $dir = $git->repo_path() . "/remotes";
-       if (opendir(my $dh, $dir)) {
-               local $_;
-               while ($_ = readdir($dh)) {
-                       chomp;
-                       next if (! -f "$dir/$_" || ! -r _);
-                       add_remote_remotes(\%seen, "$dir/$_", $_);
-               }
-       }
-
-       return \%seen;
-}
-
-sub add_branch_config {
-       my ($hash, $name, $what, $value) = @_;
-       if ($what eq 'remote') {
-               if (exists $hash->{$name}{'REMOTE'}) {
-                       print STDERR "Warning: more than one branch.$name.remote\n";
-               }
-               $hash->{$name}{'REMOTE'} = $value;
-       }
-       elsif ($what eq 'merge') {
-               $hash->{$name}{'MERGE'} ||= [];
-               push @{$hash->{$name}{'MERGE'}}, $value;
-       }
-}
-
-sub list_branch {
-       my ($git) = @_;
-       my %seen = ();
-       my @branches = eval {
-               $git->command(qw(config --get-regexp), '^branch\.');
-       };
-       for (@branches) {
-               if (/^branch\.([^.]*)\.(\S*)\s+(.*)$/) {
-                       add_branch_config(\%seen, $1, $2, $3);
-               }
-       }
-
-       return \%seen;
-}
-
-my $remote = list_remote($git);
-my $branch = list_branch($git);
-
-sub update_ls_remote {
-       my ($harder, $info) = @_;
-
-       return if (($harder == 0) ||
-                  (($harder == 1) && exists $info->{'LS_REMOTE'}));
-
-       my @ref = map { s|refs/heads/||; $_; } keys %{$git->remote_refs($info->{'URL'}, [ 'heads' ])};
-       $info->{'LS_REMOTE'} = \@ref;
-}
-
-sub list_wildcard_mapping {
-       my ($forced, $ours, $ls) = @_;
-       my %refs;
-       for (@$ls) {
-               $refs{$_} = 01; # bit #0 to say "they have"
-       }
-       for ($git->command('for-each-ref', "refs/remotes/$ours")) {
-               chomp;
-               next unless (s|^[0-9a-f]{40}\s[a-z]+\srefs/remotes/$ours/||);
-               next if ($_ eq 'HEAD');
-               $refs{$_} ||= 0;
-               $refs{$_} |= 02; # bit #1 to say "we have"
-       }
-       my (@new, @stale, @tracked);
-       for (sort keys %refs) {
-               my $have = $refs{$_};
-               if ($have == 1) {
-                       push @new, $_;
-               }
-               elsif ($have == 2) {
-                       push @stale, $_;
-               }
-               elsif ($have == 3) {
-                       push @tracked, $_;
-               }
-       }
-       return \@new, \@stale, \@tracked;
-}
-
-sub list_mapping {
-       my ($name, $info) = @_;
-       my $fetch = $info->{'FETCH'};
-       my $ls = $info->{'LS_REMOTE'};
-       my (@new, @stale, @tracked);
-
-       for (@$fetch) {
-               next unless (/(\+)?([^:]+):(.*)/);
-               my ($forced, $theirs, $ours) = ($1, $2, $3);
-               if ($theirs eq 'refs/heads/*' &&
-                   $ours =~ /^refs\/remotes\/(.*)\/\*$/) {
-                       # wildcard mapping
-                       my ($w_new, $w_stale, $w_tracked)
-                               = list_wildcard_mapping($forced, $1, $ls);
-                       push @new, @$w_new;
-                       push @stale, @$w_stale;
-                       push @tracked, @$w_tracked;
-               }
-               elsif ($theirs =~ /\*/ || $ours =~ /\*/) {
-                       print STDERR "Warning: unrecognized mapping in remotes.$name.fetch: $_\n";
-               }
-               elsif ($theirs =~ s|^refs/heads/||) {
-                       if (!grep { $_ eq $theirs } @$ls) {
-                               push @stale, $theirs;
-                       }
-                       elsif ($ours ne '') {
-                               push @tracked, $theirs;
-                       }
-               }
-       }
-       return \@new, \@stale, \@tracked;
-}
-
-sub show_mapping {
-       my ($name, $info) = @_;
-       my ($new, $stale, $tracked) = list_mapping($name, $info);
-       if (@$new) {
-               print "  New remote branches (next fetch will store in remotes/$name)\n";
-               print "    @$new\n";
-       }
-       if (@$stale) {
-               print "  Stale tracking branches in remotes/$name (use 'git remote prune')\n";
-               print "    @$stale\n";
-       }
-       if (@$tracked) {
-               print "  Tracked remote branches\n";
-               print "    @$tracked\n";
-       }
-}
-
-sub prune_remote {
-       my ($name, $ls_remote) = @_;
-       if (!exists $remote->{$name}) {
-               print STDERR "No such remote $name\n";
-               return 1;
-       }
-       my $info = $remote->{$name};
-       update_ls_remote($ls_remote, $info);
-
-       my ($new, $stale, $tracked) = list_mapping($name, $info);
-       my $prefix = "refs/remotes/$name";
-       foreach my $to_prune (@$stale) {
-               my @v = $git->command(qw(rev-parse --verify), "$prefix/$to_prune");
-               $git->command(qw(update-ref -d), "$prefix/$to_prune", $v[0]);
-       }
-       return 0;
-}
-
-sub show_remote {
-       my ($name, $ls_remote) = @_;
-       if (!exists $remote->{$name}) {
-               print STDERR "No such remote $name\n";
-               return 1;
-       }
-       my $info = $remote->{$name};
-       update_ls_remote($ls_remote, $info);
-
-       print "* remote $name\n";
-       print "  URL: $info->{'URL'}\n";
-       for my $branchname (sort keys %$branch) {
-               next unless (defined $branch->{$branchname}{'REMOTE'} &&
-                            $branch->{$branchname}{'REMOTE'} eq $name);
-               my @merged = map {
-                       s|^refs/heads/||;
-                       $_;
-               } split(' ',"@{$branch->{$branchname}{'MERGE'}}");
-               next unless (@merged);
-               print "  Remote branch(es) merged with 'git pull' while on branch $branchname\n";
-               print "    @merged\n";
-       }
-       if ($info->{'LS_REMOTE'}) {
-               show_mapping($name, $info);
-       }
-       if ($info->{'PUSH'}) {
-               my @pushed = map {
-                       s|^refs/heads/||;
-                       s|^\+refs/heads/|+|;
-                       s|:refs/heads/|:|;
-                       $_;
-               } @{$info->{'PUSH'}};
-               print "  Local branch(es) pushed with 'git push'\n";
-               print "    @pushed\n";
-       }
-       return 0;
-}
-
-sub add_remote {
-       my ($name, $url, $opts) = @_;
-       if (exists $remote->{$name}) {
-               print STDERR "remote $name already exists.\n";
-               exit(1);
-       }
-       $git->command('config', "remote.$name.url", $url);
-       my $track = $opts->{'track'} || ["*"];
-
-       for (@$track) {
-               $git->command('config', '--add', "remote.$name.fetch",
-                               $opts->{'mirror'} ?
-                               "+refs/$_:refs/$_" :
-                               "+refs/heads/$_:refs/remotes/$name/$_");
-       }
-       if ($opts->{'fetch'}) {
-               $git->command('fetch', $name);
-       }
-       if (exists $opts->{'master'}) {
-               $git->command('symbolic-ref', "refs/remotes/$name/HEAD",
-                             "refs/remotes/$name/$opts->{'master'}");
-       }
-}
-
-sub update_remote {
-       my ($name) = @_;
-       my @remotes;
-
-        my $conf = $git->config("remotes." . $name);
-       if (defined($conf)) {
-               @remotes = split(' ', $conf);
-       } elsif ($name eq 'default') {
-               @remotes = ();
-               for (sort keys %$remote) {
-                       my $do_fetch = $git->config_bool("remote." . $_ .
-                                                   ".skipDefaultUpdate");
-                       unless ($do_fetch) {
-                               push @remotes, $_;
-                       }
-               }
-       } else {
-               print STDERR "Remote group $name does not exist.\n";
-               exit(1);
-       }
-       for (@remotes) {
-               print "Updating $_\n";
-               $git->command('fetch', "$_");
-       }
-}
-
-sub rm_remote {
-       my ($name) = @_;
-       if (!exists $remote->{$name}) {
-               print STDERR "No such remote $name\n";
-               return 1;
-       }
-
-       $git->command('config', '--remove-section', "remote.$name");
-
-       eval {
-           my @trackers = $git->command('config', '--get-regexp',
-                       'branch.*.remote', $name);
-               for (@trackers) {
-                       /^branch\.(.*)?\.remote/;
-                       $git->config('--unset', "branch.$1.remote");
-                       $git->config('--unset', "branch.$1.merge");
-               }
-       };
-
-       my @refs = $git->command('for-each-ref',
-               '--format=%(refname) %(objectname)', "refs/remotes/$name");
-       for (@refs) {
-               my ($ref, $object) = split;
-               $git->command(qw(update-ref -d), $ref, $object);
-       }
-       return 0;
-}
-
-sub add_usage {
-       print STDERR "usage: git remote add [-f] [-t track]* [-m master] <name> <url>\n";
-       exit(1);
-}
-
-my $VERBOSE = 0;
-@ARGV = grep {
-       if ($_ eq '-v' or $_ eq '--verbose') {
-               $VERBOSE=1;
-               0
-       } else {
-               1
-       }
-} @ARGV;
-
-if (!@ARGV) {
-       for (sort keys %$remote) {
-               print "$_";
-               print "\t$remote->{$_}->{URL}" if $VERBOSE;
-               print "\n";
-       }
-}
-elsif ($ARGV[0] eq 'show') {
-       my $ls_remote = 1;
-       my $i;
-       for ($i = 1; $i < @ARGV; $i++) {
-               if ($ARGV[$i] eq '-n') {
-                       $ls_remote = 0;
-               }
-               else {
-                       last;
-               }
-       }
-       if ($i >= @ARGV) {
-               print STDERR "usage: git remote show <remote>\n";
-               exit(1);
-       }
-       my $status = 0;
-       for (; $i < @ARGV; $i++) {
-               $status |= show_remote($ARGV[$i], $ls_remote);
-       }
-       exit($status);
-}
-elsif ($ARGV[0] eq 'update') {
-       if (@ARGV <= 1) {
-               update_remote("default");
-               exit(1);
-       }
-       for (my $i = 1; $i < @ARGV; $i++) {
-               update_remote($ARGV[$i]);
-       }
-}
-elsif ($ARGV[0] eq 'prune') {
-       my $ls_remote = 1;
-       my $i;
-       for ($i = 1; $i < @ARGV; $i++) {
-               if ($ARGV[$i] eq '-n') {
-                       $ls_remote = 0;
-               }
-               else {
-                       last;
-               }
-       }
-       if ($i >= @ARGV) {
-               print STDERR "usage: git remote prune <remote>\n";
-               exit(1);
-       }
-       my $status = 0;
-       for (; $i < @ARGV; $i++) {
-               $status |= prune_remote($ARGV[$i], $ls_remote);
-       }
-        exit($status);
-}
-elsif ($ARGV[0] eq 'add') {
-       my %opts = ();
-       while (1 < @ARGV && $ARGV[1] =~ /^-/) {
-               my $opt = $ARGV[1];
-               shift @ARGV;
-               if ($opt eq '-f' || $opt eq '--fetch') {
-                       $opts{'fetch'} = 1;
-                       next;
-               }
-               if ($opt eq '-t' || $opt eq '--track') {
-                       if (@ARGV < 1) {
-                               add_usage();
-                       }
-                       $opts{'track'} ||= [];
-                       push @{$opts{'track'}}, $ARGV[1];
-                       shift @ARGV;
-                       next;
-               }
-               if ($opt eq '-m' || $opt eq '--master') {
-                       if ((@ARGV < 1) || exists $opts{'master'}) {
-                               add_usage();
-                       }
-                       $opts{'master'} = $ARGV[1];
-                       shift @ARGV;
-                       next;
-               }
-               if ($opt eq '--mirror') {
-                       $opts{'mirror'} = 1;
-                       next;
-               }
-               add_usage();
-       }
-       if (@ARGV != 3) {
-               add_usage();
-       }
-       add_remote($ARGV[1], $ARGV[2], \%opts);
-}
-elsif ($ARGV[0] eq 'rm') {
-       if (@ARGV <= 1) {
-               print STDERR "usage: git remote rm <remote>\n";
-               exit(1);
-       }
-       exit(rm_remote($ARGV[1]));
-}
-else {
-       print STDERR "usage: git remote\n";
-       print STDERR "       git remote add <name> <url>\n";
-       print STDERR "       git remote rm <name>\n";
-       print STDERR "       git remote show <name>\n";
-       print STDERR "       git remote prune <name>\n";
-       print STDERR "       git remote update [group]\n";
-       exit(1);
-}
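The Perl script above builds its picture of the world from two sources:
remote.* and branch.* configuration plus the legacy .git/remotes/ files.
The configuration half of that raw data can be inspected directly with
read-only commands (a sketch; safe to run in any repository):

    # everything add_remote_config() parses
    git config --get-regexp '^remote\.'
    # everything add_branch_config() parses
    git config --get-regexp '^branch\.'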
diff --git a/contrib/examples/git-repack.sh b/contrib/examples/git-repack.sh
deleted file mode 100755 (executable)
index 672af93..0000000
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Linus Torvalds
-#
-
-OPTIONS_KEEPDASHDASH=
-OPTIONS_SPEC="\
-git repack [options]
---
-a               pack everything in a single pack
-A               same as -a, and turn unreachable objects loose
-d               remove redundant packs, and run git-prune-packed
-f               pass --no-reuse-delta to git-pack-objects
-F               pass --no-reuse-object to git-pack-objects
-n               do not run git-update-server-info
-q,quiet         be quiet
-l               pass --local to git-pack-objects
-unpack-unreachable=  with -A, do not loosen objects older than this
- Packing constraints
-window=         size of the window used for delta compression
-window-memory=  same as the above, but limit memory size instead of entry count
-depth=          limits the maximum delta depth
-max-pack-size=  maximum size of each packfile
-"
-SUBDIRECTORY_OK='Yes'
-. git-sh-setup
-
-no_update_info= all_into_one= remove_redundant= unpack_unreachable=
-local= no_reuse= extra=
-while test $# != 0
-do
-       case "$1" in
-       -n)     no_update_info=t ;;
-       -a)     all_into_one=t ;;
-       -A)     all_into_one=t
-               unpack_unreachable=--unpack-unreachable ;;
-       --unpack-unreachable)
-               unpack_unreachable="--unpack-unreachable=$2"; shift ;;
-       -d)     remove_redundant=t ;;
-       -q)     GIT_QUIET=t ;;
-       -f)     no_reuse=--no-reuse-delta ;;
-       -F)     no_reuse=--no-reuse-object ;;
-       -l)     local=--local ;;
-       --max-pack-size|--window|--window-memory|--depth)
-               extra="$extra $1=$2"; shift ;;
-       --) shift; break;;
-       *)      usage ;;
-       esac
-       shift
-done
-
-case "$(git config --bool repack.usedeltabaseoffset || echo true)" in
-true)
-       extra="$extra --delta-base-offset" ;;
-esac
-
-PACKDIR="$GIT_OBJECT_DIRECTORY/pack"
-PACKTMP="$PACKDIR/.tmp-$$-pack"
-rm -f "$PACKTMP"-*
-trap 'rm -f "$PACKTMP"-*' 0 1 2 3 15
-
-# There will be more repacking strategies to come...
-case ",$all_into_one," in
-,,)
-       args='--unpacked --incremental'
-       ;;
-,t,)
-       args= existing=
-       if [ -d "$PACKDIR" ]; then
-               for e in $(cd "$PACKDIR" && find . -type f -name '*.pack' \
-                       | sed -e 's/^\.\///' -e 's/\.pack$//')
-               do
-                       if [ -e "$PACKDIR/$e.keep" ]; then
-                               : keep
-                       else
-                               existing="$existing $e"
-                       fi
-               done
-               if test -n "$existing" && test -n "$unpack_unreachable" && \
-                       test -n "$remove_redundant"
-               then
-                       # This may have arbitrary user arguments, so we
-                       # have to protect it against whitespace splitting
-                       # when it gets run as "pack-objects $args" later.
-                       # Fortunately, we know it's an approxidate, so we
-                       # can just use dots instead.
-                       args="$args $(echo "$unpack_unreachable" | tr ' ' .)"
-               fi
-       fi
-       ;;
-esac
-
-mkdir -p "$PACKDIR" || exit
-
-args="$args $local ${GIT_QUIET:+-q} $no_reuse$extra"
-names=$(git pack-objects --keep-true-parents --honor-pack-keep --non-empty --all --reflog $args </dev/null "$PACKTMP") ||
-       exit 1
-if [ -z "$names" ]; then
-       say Nothing new to pack.
-fi
-
-# Ok we have prepared all new packfiles.
-
-# First see if there are packs of the same name and, if so,
-# whether we can move them out of the way (this can happen if we
-# repacked immediately after packing fully).
-rollback=
-failed=
-for name in $names
-do
-       for sfx in pack idx
-       do
-               file=pack-$name.$sfx
-               test -f "$PACKDIR/$file" || continue
-               rm -f "$PACKDIR/old-$file" &&
-               mv "$PACKDIR/$file" "$PACKDIR/old-$file" || {
-                       failed=t
-                       break
-               }
-               rollback="$rollback $file"
-       done
-       test -z "$failed" || break
-done
-
-# If renaming failed for any of them, roll the ones we have
-# already renamed back to their original names.
-if test -n "$failed"
-then
-       rollback_failure=
-       for file in $rollback
-       do
-               mv "$PACKDIR/old-$file" "$PACKDIR/$file" ||
-               rollback_failure="$rollback_failure $file"
-       done
-       if test -n "$rollback_failure"
-       then
-               echo >&2 "WARNING: Some packs in use have been renamed by"
-               echo >&2 "WARNING: prefixing old- to their name, in order to"
-               echo >&2 "WARNING: replace them with the new version of the"
-               echo >&2 "WARNING: file.  But the operation failed, and"
-               echo >&2 "WARNING: the attempt to rename them back to their"
-               echo >&2 "WARNING: original names also failed."
-               echo >&2 "WARNING: Please rename them in $PACKDIR manually:"
-               for file in $rollback_failure
-               do
-                       echo >&2 "WARNING:   old-$file -> $file"
-               done
-       fi
-       exit 1
-fi
-
-# Now the ones with the same name are out of the way...
-fullbases=
-for name in $names
-do
-       fullbases="$fullbases pack-$name"
-       chmod a-w "$PACKTMP-$name.pack"
-       chmod a-w "$PACKTMP-$name.idx"
-       mv -f "$PACKTMP-$name.pack" "$PACKDIR/pack-$name.pack" &&
-       mv -f "$PACKTMP-$name.idx"  "$PACKDIR/pack-$name.idx" ||
-       exit
-done
-
-# Remove the "old-" files
-for name in $names
-do
-       rm -f "$PACKDIR/old-pack-$name.idx"
-       rm -f "$PACKDIR/old-pack-$name.pack"
-done
-
-# End of pack replacement.
-
-if test "$remove_redundant" = t
-then
-       # We know $existing are all redundant.
-       if [ -n "$existing" ]
-       then
-               ( cd "$PACKDIR" &&
-                 for e in $existing
-                 do
-                       case " $fullbases " in
-                       *" $e "*) ;;
-                       *)      rm -f "$e.pack" "$e.idx" "$e.keep" ;;
-                       esac
-                 done
-               )
-       fi
-       git prune-packed ${GIT_QUIET:+-q}
-fi
-
-case "$no_update_info" in
-t) : ;;
-*) git update-server-info ;;
-esac
diff --git a/contrib/examples/git-rerere.perl b/contrib/examples/git-rerere.perl
deleted file mode 100755 (executable)
index 4f69209..0000000
+++ /dev/null
@@ -1,284 +0,0 @@
-#!/usr/bin/perl
-#
-# REuse REcorded REsolve.  This tool records a conflicted automerge
-# result and its hand resolution, and helps to resolve future
-# automerges that result in the same conflict.
-#
-# To enable this feature, create a directory 'rr-cache' under your
-# .git/ directory.
-
-use Digest;
-use File::Path;
-use File::Copy;
-
-my $git_dir = $::ENV{GIT_DIR} || ".git";
-my $rr_dir = "$git_dir/rr-cache";
-my $merge_rr = "$git_dir/rr-cache/MERGE_RR";
-
-my %merge_rr = ();
-
-sub read_rr {
-       if (!-f $merge_rr) {
-               %merge_rr = ();
-               return;
-       }
-       my $in;
-       local $/ = "\0";
-       open $in, "<$merge_rr" or die "$!: $merge_rr";
-       while (<$in>) {
-               chomp;
-               my ($name, $path) = /^([0-9a-f]{40})\t(.*)$/s;
-               $merge_rr{$path} = $name;
-       }
-       close $in;
-}
-
-sub write_rr {
-       my $out;
-       open $out, ">$merge_rr" or die "$!: $merge_rr";
-       for my $path (sort keys %merge_rr) {
-               my $name = $merge_rr{$path};
-               print $out "$name\t$path\0";
-       }
-       close $out;
-}
-
-sub compute_conflict_name {
-       my ($path) = @_;
-       my @side = ();
-       my $in;
-       open $in, "<$path"  or die "$!: $path";
-
-       my $sha1 = Digest->new("SHA-1");
-       my $hunk = 0;
-       while (<$in>) {
-               if (/^<<<<<<< .*/) {
-                       $hunk++;
-                       @side = ([], undef);
-               }
-               elsif (/^=======$/) {
-                       $side[1] = [];
-               }
-               elsif (/^>>>>>>> .*/) {
-                       my ($one, $two);
-                       $one = join('', @{$side[0]});
-                       $two = join('', @{$side[1]});
-                       if ($two le $one) {
-                               ($one, $two) = ($two, $one);
-                       }
-                       $sha1->add($one);
-                       $sha1->add("\0");
-                       $sha1->add($two);
-                       $sha1->add("\0");
-                       @side = ();
-               }
-               elsif (@side == 0) {
-                       next;
-               }
-               elsif (defined $side[1]) {
-                       push @{$side[1]}, $_;
-               }
-               else {
-                       push @{$side[0]}, $_;
-               }
-       }
-       close $in;
-       return ($sha1->hexdigest, $hunk);
-}
-
-sub record_preimage {
-       my ($path, $name) = @_;
-       my @side = ();
-       my ($in, $out);
-       open $in, "<$path"  or die "$!: $path";
-       open $out, ">$name" or die "$!: $name";
-
-       while (<$in>) {
-               if (/^<<<<<<< .*/) {
-                       @side = ([], undef);
-               }
-               elsif (/^=======$/) {
-                       $side[1] = [];
-               }
-               elsif (/^>>>>>>> .*/) {
-                       my ($one, $two);
-                       $one = join('', @{$side[0]});
-                       $two = join('', @{$side[1]});
-                       if ($two le $one) {
-                               ($one, $two) = ($two, $one);
-                       }
-                       print $out "<<<<<<<\n";
-                       print $out $one;
-                       print $out "=======\n";
-                       print $out $two;
-                       print $out ">>>>>>>\n";
-                       @side = ();
-               }
-               elsif (@side == 0) {
-                       print $out $_;
-               }
-               elsif (defined $side[1]) {
-                       push @{$side[1]}, $_;
-               }
-               else {
-                       push @{$side[0]}, $_;
-               }
-       }
-       close $out;
-       close $in;
-}
-
-sub find_conflict {
-       my $in;
-       local $/ = "\0";
-       my $pid = open($in, '-|');
-       die "$!" unless defined $pid;
-       if (!$pid) {
-               exec(qw(git ls-files -z -u)) or die "$!: ls-files";
-       }
-       my %path = ();
-       my @path = ();
-       while (<$in>) {
-               chomp;
-               my ($mode, $sha1, $stage, $path) =
-                   /^([0-7]+) ([0-9a-f]{40}) ([123])\t(.*)$/s;
-               $path{$path} |= (1 << $stage);
-       }
-       close $in;
-       while (my ($path, $status) = each %path) {
-               if ($status == 14) { push @path, $path; }
-       }
-       return @path;
-}
-
-sub merge {
-       my ($name, $path) = @_;
-       record_preimage($path, "$rr_dir/$name/thisimage");
-       unless (system('git', 'merge-file', map { "$rr_dir/$name/${_}image" }
-                      qw(this pre post))) {
-               my $in;
-               open $in, "<$rr_dir/$name/thisimage" or
-                   die "$!: $name/thisimage";
-               my $out;
-               open $out, ">$path" or die "$!: $path";
-               while (<$in>) { print $out $_; }
-               close $in;
-               close $out;
-               return 1;
-       }
-       return 0;
-}
-
-sub garbage_collect_rerere {
-       # We should allow specifying these from the command line and
-       # that is why the caller gives @ARGV to us, but I am lazy.
-
-       my $cutoff_noresolve = 15; # two weeks
-       my $cutoff_resolve = 60; # two months
-       my @to_remove;
-       while (<$rr_dir/*/preimage>) {
-               my ($dir) = /^(.*)\/preimage$/;
-               my $cutoff = ((-f "$dir/postimage")
-                             ? $cutoff_resolve
-                             : $cutoff_noresolve);
-               my $age = -M "$_";
-               if ($cutoff <= $age) {
-                       push @to_remove, $dir;
-               }
-       }
-       if (@to_remove) {
-               rmtree(\@to_remove);
-       }
-}
-
--d "$rr_dir" || exit(0);
-
-read_rr();
-
-if (@ARGV) {
-       my $arg = shift @ARGV;
-       if ($arg eq 'clear') {
-               for my $path (keys %merge_rr) {
-                       my $name = $merge_rr{$path};
-                       if (-d "$rr_dir/$name" &&
-                           ! -f "$rr_dir/$name/postimage") {
-                               rmtree(["$rr_dir/$name"]);
-                       }
-               }
-               unlink $merge_rr;
-       }
-       elsif ($arg eq 'status') {
-               for my $path (keys %merge_rr) {
-                       print $path, "\n";
-               }
-       }
-       elsif ($arg eq 'diff') {
-               for my $path (keys %merge_rr) {
-                       my $name = $merge_rr{$path};
-                       system('diff', ((@ARGV == 0) ? ('-u') : @ARGV),
-                               '-L', "a/$path", '-L', "b/$path",
-                               "$rr_dir/$name/preimage", $path);
-               }
-       }
-       elsif ($arg eq 'gc') {
-               garbage_collect_rerere(@ARGV);
-       }
-       else {
-               die "$0 unknown command: $arg\n";
-       }
-       exit 0;
-}
-
-my %conflict = map { $_ => 1 } find_conflict();
-
-# MERGE_RR records paths with conflicts immediately after merge
-# failed.  Some of the conflicted paths might have been hand resolved
-# in the working tree since then, but the initial run would catch all
-# and register their preimages.
-
-for my $path (keys %conflict) {
-	# This path has a conflict.  If it is not recorded yet,
-       # record the pre-image.
-       if (!exists $merge_rr{$path}) {
-               my ($name, $hunk) = compute_conflict_name($path);
-               next unless ($hunk);
-               $merge_rr{$path} = $name;
-               if (! -d "$rr_dir/$name") {
-                       mkpath("$rr_dir/$name", 0, 0777);
-                       print STDERR "Recorded preimage for '$path'\n";
-                       record_preimage($path, "$rr_dir/$name/preimage");
-               }
-       }
-}
-
-# Now some of the paths that had conflicts earlier might have been
-# hand resolved.  Others may be similar to a conflict that was
-# already resolved before.
-
-for my $path (keys %merge_rr) {
-       my $name = $merge_rr{$path};
-
-       # We could resolve this automatically if we have images.
-       if (-f "$rr_dir/$name/preimage" &&
-           -f "$rr_dir/$name/postimage") {
-               if (merge($name, $path)) {
-                       print STDERR "Resolved '$path' using previous resolution.\n";
-                       # Then we do not have to worry about this path
-                       # anymore.
-                       delete $merge_rr{$path};
-                       next;
-               }
-       }
-
-       # Let's see if we have resolved it.
-       (undef, my $hunk) = compute_conflict_name($path);
-       next if ($hunk);
-
-       print STDERR "Recorded resolution for '$path'.\n";
-       copy($path, "$rr_dir/$name/postimage");
-       # And we do not have to worry about this path anymore.
-       delete $merge_rr{$path};
-}
-
-# Write out the rest.
-write_rr();
diff --git a/contrib/examples/git-reset.sh b/contrib/examples/git-reset.sh
deleted file mode 100755 (executable)
index cb1bbf3..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005, 2006 Linus Torvalds and Junio C Hamano
-#
-USAGE='[--mixed | --soft | --hard]  [<commit-ish>] [ [--] <paths>...]'
-SUBDIRECTORY_OK=Yes
-. git-sh-setup
-set_reflog_action "reset $*"
-require_work_tree
-
-update= reset_type=--mixed
-unset rev
-
-while test $# != 0
-do
-       case "$1" in
-       --mixed | --soft | --hard)
-               reset_type="$1"
-               ;;
-       --)
-               break
-               ;;
-       -*)
-               usage
-               ;;
-       *)
-               rev=$(git rev-parse --verify "$1") || exit
-               shift
-               break
-               ;;
-       esac
-       shift
-done
-
-: ${rev=HEAD}
-rev=$(git rev-parse --verify $rev^0) || exit
-
-# Skip -- in "git reset HEAD -- foo" and "git reset -- foo".
-case "$1" in --) shift ;; esac
-
-# git reset --mixed tree [--] paths... can be used to
-# load chosen paths from the tree into the index without
-# affecting the working tree or HEAD.
-if test $# != 0
-then
-       test "$reset_type" = "--mixed" ||
-               die "Cannot do partial $reset_type reset."
-
-       git diff-index --cached $rev -- "$@" |
-       sed -e 's/^:\([0-7][0-7]*\) [0-7][0-7]* \([0-9a-f][0-9a-f]*\) [0-9a-f][0-9a-f]* [A-Z]   \(.*\)$/\1 \2   \3/' |
-       git update-index --add --remove --index-info || exit
-       git update-index --refresh
-       exit
-fi
-
-cd_to_toplevel
-
-if test "$reset_type" = "--hard"
-then
-       update=-u
-fi
-
-# Soft reset does not touch the index file or the working tree
-# at all, but requires them to be in a good order.  Other resets reset
-# the index file to the tree object we are switching to.
-if test "$reset_type" = "--soft"
-then
-       if test -f "$GIT_DIR/MERGE_HEAD" ||
-          test "" != "$(git ls-files --unmerged)"
-       then
-               die "Cannot do a soft reset in the middle of a merge."
-       fi
-else
-       git read-tree -v --reset $update "$rev" || exit
-fi
-
-# Any reset updates HEAD to the head being switched to.
-if orig=$(git rev-parse --verify HEAD 2>/dev/null)
-then
-       echo "$orig" >"$GIT_DIR/ORIG_HEAD"
-else
-       rm -f "$GIT_DIR/ORIG_HEAD"
-fi
-git update-ref -m "$GIT_REFLOG_ACTION" HEAD "$rev"
-update_ref_status=$?
-
-case "$reset_type" in
---hard )
-       test $update_ref_status = 0 && {
-               printf "HEAD is now at "
-               GIT_PAGER= git log --max-count=1 --pretty=oneline \
-                       --abbrev-commit HEAD
-       }
-       ;;
---soft )
-       ;; # Nothing else to do
---mixed )
-       # Report what has not been updated.
-       git update-index --refresh
-       ;;
-esac
-
-rm -f "$GIT_DIR/MERGE_HEAD" "$GIT_DIR/rr-cache/MERGE_RR" \
-       "$GIT_DIR/SQUASH_MSG" "$GIT_DIR/MERGE_MSG"
-
-exit $update_ref_status
diff --git a/contrib/examples/git-resolve.sh b/contrib/examples/git-resolve.sh
deleted file mode 100755 (executable)
index 3099dc8..0000000
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Linus Torvalds
-#
-# Resolve two trees.
-#
-
-echo 'WARNING: This command is DEPRECATED and will be removed very soon.' >&2
-echo 'WARNING: Please use git-merge or git-pull instead.' >&2
-sleep 2
-
-USAGE='<head> <remote> <merge-message>'
-. git-sh-setup
-
-dropheads() {
-       rm -f -- "$GIT_DIR/MERGE_HEAD" \
-               "$GIT_DIR/LAST_MERGE" || exit 1
-}
-
-head=$(git rev-parse --verify "$1"^0) &&
-merge=$(git rev-parse --verify "$2"^0) &&
-merge_name="$2" &&
-merge_msg="$3" || usage
-
-#
-# The remote name is just used for the message,
-# but we do want it.
-#
-if [ -z "$head" -o -z "$merge" -o -z "$merge_msg" ]; then
-       usage
-fi
-
-dropheads
-echo $head > "$GIT_DIR"/ORIG_HEAD
-echo $merge > "$GIT_DIR"/LAST_MERGE
-
-common=$(git merge-base $head $merge)
-if [ -z "$common" ]; then
-       die "Unable to find common commit between" $merge $head
-fi
-
-case "$common" in
-"$merge")
-       echo "Already up to date. Yeeah!"
-       dropheads
-       exit 0
-       ;;
-"$head")
-       echo "Updating $(git rev-parse --short $head)..$(git rev-parse --short $merge)"
-       git read-tree -u -m $head $merge || exit 1
-       git update-ref -m "resolve $merge_name: Fast-forward" \
-               HEAD "$merge" "$head"
-       git diff-tree -p $head $merge | git apply --stat
-       dropheads
-       exit 0
-       ;;
-esac
-
-# We are going to make a new commit.
-git var GIT_COMMITTER_IDENT >/dev/null || exit
-
-# Find an optimum merge base if there is more than one candidate.
-LF='
-'
-common=$(git merge-base -a $head $merge)
-case "$common" in
-?*"$LF"?*)
-       echo "Trying to find the optimum merge base."
-       G=.tmp-index$$
-       best=
-       best_cnt=-1
-       for c in $common
-       do
-               rm -f $G
-               GIT_INDEX_FILE=$G git read-tree -m $c $head $merge \
-                       2>/dev/null || continue
-               # Count the paths that are unmerged.
-               cnt=$(GIT_INDEX_FILE=$G git ls-files --unmerged | wc -l)
-               if test $best_cnt -le 0 || test $cnt -le $best_cnt
-               then
-                       best=$c
-                       best_cnt=$cnt
-                       if test "$best_cnt" -eq 0
-                       then
-				# Cannot do any better than an all-trivial merge.
-                               break
-                       fi
-               fi
-       done
-       rm -f $G
-       common="$best"
-esac
-
-echo "Trying to merge $merge into $head using $common."
-git update-index --refresh 2>/dev/null
-git read-tree -u -m $common $head $merge || exit 1
-result_tree=$(git write-tree  2> /dev/null)
-if [ $? -ne 0 ]; then
-       echo "Simple merge failed, trying Automatic merge"
-       git-merge-index -o git-merge-one-file -a
-       if [ $? -ne 0 ]; then
-               echo $merge > "$GIT_DIR"/MERGE_HEAD
-               die "Automatic merge failed, fix up by hand"
-       fi
-       result_tree=$(git write-tree) || exit 1
-fi
-result_commit=$(echo "$merge_msg" | git commit-tree $result_tree -p $head -p $merge)
-echo "Committed merge $result_commit"
-git update-ref -m "resolve $merge_name: In-index merge" \
-       HEAD "$result_commit" "$head"
-git diff-tree -p $head $result_commit | git apply --stat
-dropheads
diff --git a/contrib/examples/git-revert.sh b/contrib/examples/git-revert.sh
deleted file mode 100755 (executable)
index 197838d..0000000
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Linus Torvalds
-# Copyright (c) 2005 Junio C Hamano
-#
-
-case "$0" in
-*-revert* )
-       test -t 0 && edit=-e
-       replay=
-       me=revert
-       USAGE='[--edit | --no-edit] [-n] <commit-ish>' ;;
-*-cherry-pick* )
-       replay=t
-       edit=
-       me=cherry-pick
-       USAGE='[--edit] [-n] [-r] [-x] <commit-ish>'  ;;
-* )
-       echo >&2 "What are you talking about?"
-       exit 1 ;;
-esac
-
-SUBDIRECTORY_OK=Yes ;# we will cd up
-. git-sh-setup
-require_work_tree
-cd_to_toplevel
-
-no_commit=
-xopt=
-while case "$#" in 0) break ;; esac
-do
-       case "$1" in
-       -n|--n|--no|--no-|--no-c|--no-co|--no-com|--no-comm|\
-           --no-commi|--no-commit)
-               no_commit=t
-               ;;
-       -e|--e|--ed|--edi|--edit)
-               edit=-e
-               ;;
-       --n|--no|--no-|--no-e|--no-ed|--no-edi|--no-edit)
-               edit=
-               ;;
-       -r)
-               : no-op ;;
-       -x|--i-really-want-to-expose-my-private-commit-object-name)
-               replay=
-               ;;
-       -X?*)
-               xopt="$xopt$(git rev-parse --sq-quote "--${1#-X}")"
-               ;;
-       --strategy-option=*)
-               xopt="$xopt$(git rev-parse --sq-quote "--${1#--strategy-option=}")"
-               ;;
-       -X|--strategy-option)
-               shift
-               xopt="$xopt$(git rev-parse --sq-quote "--$1")"
-               ;;
-       -*)
-               usage
-               ;;
-       *)
-               break
-               ;;
-       esac
-       shift
-done
-
-set_reflog_action "$me"
-
-test "$me,$replay" = "revert,t" && usage
-
-case "$no_commit" in
-t)
-       # We do not intend to commit immediately.  We just want to
-       # merge the differences in.
-       head=$(git-write-tree) ||
-               die "Your index file is unmerged."
-       ;;
-*)
-       head=$(git-rev-parse --verify HEAD) ||
-               die "You do not have a valid HEAD"
-       files=$(git-diff-index --cached --name-only $head) || exit
-       if [ "$files" ]; then
-               die "Dirty index: cannot $me (dirty: $files)"
-       fi
-       ;;
-esac
-
-rev=$(git-rev-parse --verify "$@") &&
-commit=$(git-rev-parse --verify "$rev^0") ||
-       die "Not a single commit $@"
-prev=$(git-rev-parse --verify "$commit^1" 2>/dev/null) ||
-       die "Cannot run $me a root commit"
-git-rev-parse --verify "$commit^2" >/dev/null 2>&1 &&
-       die "Cannot run $me a multi-parent commit."
-
-encoding=$(git config i18n.commitencoding || echo UTF-8)
-
-# "commit" is an existing commit.  We would want to apply
-# the difference it introduces since its first parent "prev"
-# on top of the current HEAD if we are cherry-picking, or the
-# reverse of it if we are reverting.
-
-case "$me" in
-revert)
-       git show -s --pretty=oneline --encoding="$encoding" $commit |
-       sed -e '
-               s/^[^ ]* /Revert "/
-               s/$/"/
-       '
-       echo
-       echo "This reverts commit $commit."
-       test "$rev" = "$commit" ||
-       echo "(original 'git revert' arguments: $@)"
-       base=$commit next=$prev
-       ;;
-
-cherry-pick)
-       pick_author_script='
-       /^author /{
-               s/'\''/'\''\\'\'\''/g
-               h
-               s/^author \([^<]*\) <[^>]*> .*$/\1/
-               s/'\''/'\''\'\'\''/g
-               s/.*/GIT_AUTHOR_NAME='\''&'\''/p
-
-               g
-               s/^author [^<]* <\([^>]*\)> .*$/\1/
-               s/'\''/'\''\'\'\''/g
-               s/.*/GIT_AUTHOR_EMAIL='\''&'\''/p
-
-               g
-               s/^author [^<]* <[^>]*> \(.*\)$/\1/
-               s/'\''/'\''\'\'\''/g
-               s/.*/GIT_AUTHOR_DATE='\''&'\''/p
-
-               q
-       }'
-
-       logmsg=$(git show -s --pretty=raw --encoding="$encoding" "$commit")
-       set_author_env=$(echo "$logmsg" |
-       LANG=C LC_ALL=C sed -ne "$pick_author_script")
-       eval "$set_author_env"
-       export GIT_AUTHOR_NAME
-       export GIT_AUTHOR_EMAIL
-       export GIT_AUTHOR_DATE
-
-       echo "$logmsg" |
-       sed -e '1,/^$/d' -e 's/^    //'
-       case "$replay" in
-       '')
-               echo "(cherry picked from commit $commit)"
-               test "$rev" = "$commit" ||
-               echo "(original 'git cherry-pick' arguments: $@)"
-               ;;
-       esac
-       base=$prev next=$commit
-       ;;
-
-esac >.msg
-
-eval GITHEAD_$head=HEAD
-eval GITHEAD_$next='$(git show -s \
-       --pretty=oneline --encoding="$encoding" "$commit" |
-       sed -e "s/^[^ ]* //")'
-export GITHEAD_$head GITHEAD_$next
-
-# This three way merge is an interesting one.  We are at
-# $head, and would want to apply the change between $commit
-# and $prev on top of us (when reverting), or the change between
-# $prev and $commit on top of us (when cherry-picking or replaying).
-
-eval "git merge-recursive $xopt $base -- $head $next" &&
-result=$(git-write-tree 2>/dev/null) || {
-       mv -f .msg "$GIT_DIR/MERGE_MSG"
-       {
-           echo '
-Conflicts:
-'
-               git ls-files --unmerged |
-               sed -e 's/^[^   ]*      /       /' |
-               uniq
-       } >>"$GIT_DIR/MERGE_MSG"
-       echo >&2 "Automatic $me failed.  After resolving the conflicts,"
-       echo >&2 "mark the corrected paths with 'git-add <paths>'"
-       echo >&2 "and commit the result."
-       case "$me" in
-       cherry-pick)
-               echo >&2 "You may choose to use the following when making"
-               echo >&2 "the commit:"
-               echo >&2 "$set_author_env"
-       esac
-       exit 1
-}
-
-# If we are cherry-picking, and if the merge did not result in
-# hand-editing, we will hit this commit and inherit the original
-# author date and name.
-# If we are reverting, or if our cherry-pick results in a hand merge,
-# we had better say that the current user is responsible for that.
-
-case "$no_commit" in
-'')
-       git-commit -n -F .msg $edit
-       rm -f .msg
-       ;;
-esac
diff --git a/contrib/examples/git-svnimport.perl b/contrib/examples/git-svnimport.perl
deleted file mode 100755 (executable)
index 75a43e2..0000000
+++ /dev/null
@@ -1,976 +0,0 @@
-#!/usr/bin/perl
-
-# This tool is copyright (c) 2005, Matthias Urlichs.
-# It is released under the Gnu Public License, version 2.
-#
-# The basic idea is to pull and analyze SVN changes.
-#
-# Checking out the files is done by a single long-running SVN connection.
-#
-# The head revision is on branch "origin" by default.
-# You can change that with the '-o' option.
-
-use strict;
-use warnings;
-use Getopt::Std;
-use File::Copy;
-use File::Spec;
-use File::Temp qw(tempfile);
-use File::Path qw(mkpath);
-use File::Basename qw(basename dirname);
-use Time::Local;
-use IO::Pipe;
-use POSIX qw(strftime dup2);
-use IPC::Open2;
-use SVN::Core;
-use SVN::Ra;
-
-die "Need SVN:Core 1.2.1 or better" if $SVN::Core::VERSION lt "1.2.1";
-
-$SIG{'PIPE'}="IGNORE";
-$ENV{'TZ'}="UTC";
-
-our($opt_h,$opt_o,$opt_v,$opt_u,$opt_C,$opt_i,$opt_m,$opt_M,$opt_t,$opt_T,
-    $opt_b,$opt_r,$opt_I,$opt_A,$opt_s,$opt_l,$opt_d,$opt_D,$opt_S,$opt_F,
-    $opt_P,$opt_R);
-
-sub usage() {
-       print STDERR <<END;
-usage: ${\basename $0}     # fetch/update GIT from SVN
-       [-o branch-for-HEAD] [-h] [-v] [-l max_rev] [-R repack_each_revs]
-       [-C GIT_repository] [-t tagname] [-T trunkname] [-b branchname]
-       [-d|-D] [-i] [-u] [-r] [-I ignorefilename] [-s start_chg]
-       [-m] [-M regex] [-A author_file] [-S] [-F] [-P project_name] [SVN_URL]
-END
-       exit(1);
-}
-
-getopts("A:b:C:dDFhiI:l:mM:o:rs:t:T:SP:R:uv") or usage();
-usage if $opt_h;
-
-my $tag_name = $opt_t || "tags";
-my $trunk_name = defined $opt_T ? $opt_T : "trunk";
-my $branch_name = $opt_b || "branches";
-my $project_name = $opt_P || "";
-$project_name = "/" . $project_name if ($project_name);
-my $repack_after = $opt_R || 1000;
-my $root_pool = SVN::Pool->new_default;
-
-@ARGV == 1 or @ARGV == 2 or usage();
-
-$opt_o ||= "origin";
-$opt_s ||= 1;
-my $git_tree = $opt_C;
-$git_tree ||= ".";
-
-my $svn_url = $ARGV[0];
-my $svn_dir = $ARGV[1];
-
-our @mergerx = ();
-if ($opt_m) {
-       my $branch_esc = quotemeta ($branch_name);
-       my $trunk_esc  = quotemeta ($trunk_name);
-       @mergerx =
-       (
-               qr!\b(?:merg(?:ed?|ing))\b.*?\b((?:(?<=$branch_esc/)[\w\.\-]+)|(?:$trunk_esc))\b!i,
-               qr!\b(?:from|of)\W+((?:(?<=$branch_esc/)[\w\.\-]+)|(?:$trunk_esc))\b!i,
-               qr!\b(?:from|of)\W+(?:the )?([\w\.\-]+)[-\s]branch\b!i
-       );
-}
-if ($opt_M) {
-       unshift (@mergerx, qr/$opt_M/);
-}
-
-# Absolutize filename now, since we will have chdir'ed by the time we
-# get around to opening it.
-$opt_A = File::Spec->rel2abs($opt_A) if $opt_A;
-
-our %users = ();
-our $users_file = undef;
-sub read_users($) {
-       $users_file = File::Spec->rel2abs(@_);
-       die "Cannot open $users_file\n" unless -f $users_file;
-       open(my $authors,$users_file);
-       while(<$authors>) {
-               chomp;
-               next unless /^(\S+?)\s*=\s*(.+?)\s*<(.+)>\s*$/;
-               (my $user,my $name,my $email) = ($1,$2,$3);
-               $users{$user} = [$name,$email];
-       }
-       close($authors);
-}
-
-select(STDERR); $|=1; select(STDOUT);
-
-
-package SVNconn;
-# Basic SVN connection.
-# We're only interested in connecting and downloading, so ...
-
-use File::Spec;
-use File::Temp qw(tempfile);
-use POSIX qw(strftime dup2);
-use Fcntl qw(SEEK_SET);
-
-sub new {
-       my($what,$repo) = @_;
-       $what=ref($what) if ref($what);
-
-       my $self = {};
-       $self->{'buffer'} = "";
-       bless($self,$what);
-
-       $repo =~ s#/+$##;
-       $self->{'fullrep'} = $repo;
-       $self->conn();
-
-       return $self;
-}
-
-sub conn {
-       my $self = shift;
-       my $repo = $self->{'fullrep'};
-       my $auth = SVN::Core::auth_open ([SVN::Client::get_simple_provider,
-                         SVN::Client::get_ssl_server_trust_file_provider,
-                         SVN::Client::get_username_provider]);
-       my $s = SVN::Ra->new(url => $repo, auth => $auth, pool => $root_pool);
-       die "SVN connection to $repo: $!\n" unless defined $s;
-       $self->{'svn'} = $s;
-       $self->{'repo'} = $repo;
-       $self->{'maxrev'} = $s->get_latest_revnum();
-}
-
-sub file {
-       my($self,$path,$rev) = @_;
-
-       my ($fh, $name) = tempfile('gitsvn.XXXXXX',
-                   DIR => File::Spec->tmpdir(), UNLINK => 1);
-
-       print "... $rev $path ...\n" if $opt_v;
-       my (undef, $properties);
-       $path =~ s#^/*##;
-       my $subpool = SVN::Pool::new_default_sub;
-       eval { (undef, $properties)
-                  = $self->{'svn'}->get_file($path,$rev,$fh); };
-       if($@) {
-               return undef if $@ =~ /Attempted to get checksum/;
-               die $@;
-       }
-       my $mode;
-       if (exists $properties->{'svn:executable'}) {
-               $mode = '100755';
-       } elsif (exists $properties->{'svn:special'}) {
-               my ($special_content, $filesize);
-               $filesize = tell $fh;
-               seek $fh, 0, SEEK_SET;
-               read $fh, $special_content, $filesize;
-               if ($special_content =~ s/^link //) {
-                       $mode = '120000';
-                       seek $fh, 0, SEEK_SET;
-                       truncate $fh, 0;
-                       print $fh $special_content;
-               } else {
-                       die "unexpected svn:special file encountered";
-               }
-       } else {
-               $mode = '100644';
-       }
-       close ($fh);
-
-       return ($name, $mode);
-}
-
-sub ignore {
-       my($self,$path,$rev) = @_;
-
-       print "... $rev $path ...\n" if $opt_v;
-       $path =~ s#^/*##;
-       my $subpool = SVN::Pool::new_default_sub;
-       my (undef,undef,$properties)
-           = $self->{'svn'}->get_dir($path,$rev,undef);
-       if (exists $properties->{'svn:ignore'}) {
-               my ($fh, $name) = tempfile('gitsvn.XXXXXX',
-                                          DIR => File::Spec->tmpdir(),
-                                          UNLINK => 1);
-               print $fh $properties->{'svn:ignore'};
-               close($fh);
-               return $name;
-       } else {
-               return undef;
-       }
-}
-
-sub dir_list {
-       my($self,$path,$rev) = @_;
-       $path =~ s#^/*##;
-       my $subpool = SVN::Pool::new_default_sub;
-       my ($dirents,undef,$properties)
-           = $self->{'svn'}->get_dir($path,$rev,undef);
-       return $dirents;
-}
-
-package main;
-use URI;
-
-our $svn = $svn_url;
-$svn .= "/$svn_dir" if defined $svn_dir;
-my $svn2 = SVNconn->new($svn);
-$svn = SVNconn->new($svn);
-
-my $lwp_ua;
-if($opt_d or $opt_D) {
-       $svn_url = URI->new($svn_url)->canonical;
-       if($opt_D) {
-               $svn_dir =~ s#/*$#/#;
-       } else {
-               $svn_dir = "";
-       }
-       if ($svn_url->scheme eq "http") {
-               use LWP::UserAgent;
-               $lwp_ua = LWP::UserAgent->new(keep_alive => 1, requests_redirectable => []);
-       } else {
-               print STDERR "Warning: not HTTP; turning off direct file access\n";
-               $opt_d=0;
-       }
-}
-
-sub pdate($) {
-       my($d) = @_;
-       $d =~ m#(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)#
-               or die "Unparseable date: $d\n";
-       my $y=$1; $y+=1900 if $y<1000;
-       return timegm($6||0,$5,$4,$3,$2-1,$y);
-}
-
-sub getwd() {
-       my $pwd = `pwd`;
-       chomp $pwd;
-       return $pwd;
-}
-
-
-sub get_headref($$) {
-    my $name    = shift;
-    my $git_dir = shift;
-    my $sha;
-
-    if (open(C,"$git_dir/refs/heads/$name")) {
-       chomp($sha = <C>);
-       close(C);
-       length($sha) == 40
-           or die "Cannot get head id for $name ($sha): $!\n";
-    }
-    return $sha;
-}
-
-
--d $git_tree
-       or mkdir($git_tree,0777)
-       or die "Could not create $git_tree: $!";
-chdir($git_tree);
-
-my $orig_branch = "";
-my $forward_master = 0;
-my %branches;
-
-my $git_dir = $ENV{"GIT_DIR"} || ".git";
-$git_dir = getwd()."/".$git_dir unless $git_dir =~ m#^/#;
-$ENV{"GIT_DIR"} = $git_dir;
-my $orig_git_index;
-$orig_git_index = $ENV{GIT_INDEX_FILE} if exists $ENV{GIT_INDEX_FILE};
-my ($git_ih, $git_index) = tempfile('gitXXXXXX', SUFFIX => '.idx',
-                                   DIR => File::Spec->tmpdir());
-close ($git_ih);
-$ENV{GIT_INDEX_FILE} = $git_index;
-my $maxnum = 0;
-my $last_rev = "";
-my $last_branch;
-my $current_rev = $opt_s || 1;
-unless(-d $git_dir) {
-       system("git init");
-       die "Cannot init the GIT db at $git_tree: $?\n" if $?;
-       system("git read-tree --empty");
-       die "Cannot init an empty tree: $?\n" if $?;
-
-       $last_branch = $opt_o;
-       $orig_branch = "";
-} else {
-       -f "$git_dir/refs/heads/$opt_o"
-               or die "Branch '$opt_o' does not exist.\n".
-                      "Either use the correct '-o branch' option,\n".
-                      "or import to a new repository.\n";
-
-       -f "$git_dir/svn2git"
-               or die "'$git_dir/svn2git' does not exist.\n".
-                      "You need that file for incremental imports.\n";
-       open(F, "git symbolic-ref HEAD |") or
-               die "Cannot run git-symbolic-ref: $!\n";
-       chomp ($last_branch = <F>);
-       $last_branch = basename($last_branch);
-       close(F);
-       unless($last_branch) {
-               warn "Cannot read the last branch name: $! -- assuming 'master'\n";
-               $last_branch = "master";
-       }
-       $orig_branch = $last_branch;
-       $last_rev = get_headref($orig_branch, $git_dir);
-       if (-f "$git_dir/SVN2GIT_HEAD") {
-               die <<EOM;
-SVN2GIT_HEAD exists.
-Make sure your working directory corresponds to HEAD and remove SVN2GIT_HEAD.
-You may need to run
-
-    git-read-tree -m -u SVN2GIT_HEAD HEAD
-EOM
-       }
-       system('cp', "$git_dir/HEAD", "$git_dir/SVN2GIT_HEAD");
-
-       $forward_master =
-           $opt_o ne 'master' && -f "$git_dir/refs/heads/master" &&
-           system('cmp', '-s', "$git_dir/refs/heads/master",
-                               "$git_dir/refs/heads/$opt_o") == 0;
-
-       # populate index
-       system('git', 'read-tree', $last_rev);
-       die "read-tree failed: $?\n" if $?;
-
-       # Get the last import timestamps
-       open my $B,"<", "$git_dir/svn2git";
-       while(<$B>) {
-               chomp;
-               my($num,$branch,$ref) = split;
-               $branches{$branch}{$num} = $ref;
-               $branches{$branch}{"LAST"} = $ref;
-               $current_rev = $num+1 if $current_rev <= $num;
-       }
-       close($B);
-}
--d $git_dir
-       or die "Could not create git subdir ($git_dir).\n";
-
-my $default_authors = "$git_dir/svn-authors";
-if ($opt_A) {
-       read_users($opt_A);
-       copy($opt_A,$default_authors) or die "Copy failed: $!";
-} else {
-       read_users($default_authors) if -f $default_authors;
-}
-
-open BRANCHES,">>", "$git_dir/svn2git";
-
-sub node_kind($$) {
-       my ($svnpath, $revision) = @_;
-       $svnpath =~ s#^/*##;
-       my $subpool = SVN::Pool::new_default_sub;
-       my $kind = $svn->{'svn'}->check_path($svnpath,$revision);
-       return $kind;
-}
-
-sub get_file($$$) {
-       my($svnpath,$rev,$path) = @_;
-
-       # now get it
-       my ($name,$mode);
-       if($opt_d) {
-               my($req,$res);
-
-               # /svn/!svn/bc/2/django/trunk/django-docs/build.py
-               my $url=$svn_url->clone();
-               $url->path($url->path."/!svn/bc/$rev/$svn_dir$svnpath");
-               print "... $path...\n" if $opt_v;
-               $req = HTTP::Request->new(GET => $url);
-               $res = $lwp_ua->request($req);
-               if ($res->is_success) {
-                       my $fh;
-                       ($fh, $name) = tempfile('gitsvn.XXXXXX',
-                       DIR => File::Spec->tmpdir(), UNLINK => 1);
-                       print $fh $res->content;
-                       close($fh) or die "Could not write $name: $!\n";
-               } else {
-                       return undef if $res->code == 301; # directory?
-                       die $res->status_line." at $url\n";
-               }
-               $mode = '0644'; # can't obtain mode via direct http request?
-       } else {
-               ($name,$mode) = $svn->file("$svnpath",$rev);
-               return undef unless defined $name;
-       }
-
-       my $pid = open(my $F, '-|');
-       die $! unless defined $pid;
-       if (!$pid) {
-           exec("git", "hash-object", "-w", $name)
-               or die "Cannot create object: $!\n";
-       }
-       my $sha = <$F>;
-       chomp $sha;
-       close $F;
-       unlink $name;
-       return [$mode, $sha, $path];
-}
-
-sub get_ignore($$$$$) {
-       my($new,$old,$rev,$path,$svnpath) = @_;
-
-       return unless $opt_I;
-       my $name = $svn->ignore("$svnpath",$rev);
-       if ($path eq '/') {
-               $path = $opt_I;
-       } else {
-               $path = File::Spec->catfile($path,$opt_I);
-       }
-       if (defined $name) {
-               my $pid = open(my $F, '-|');
-               die $! unless defined $pid;
-               if (!$pid) {
-                       exec("git", "hash-object", "-w", $name)
-                           or die "Cannot create object: $!\n";
-               }
-               my $sha = <$F>;
-               chomp $sha;
-               close $F;
-               unlink $name;
-               push(@$new,['0644',$sha,$path]);
-       } elsif (defined $old) {
-               push(@$old,$path);
-       }
-}
-
-sub project_path($$)
-{
-       my ($path, $project) = @_;
-
-       $path = "/".$path unless ($path =~ m#^\/#) ;
-       return $1 if ($path =~ m#^$project\/(.*)$#);
-
-       $path =~ s#\.#\\\.#g;
-       $path =~ s#\+#\\\+#g;
-       return "/" if ($project =~ m#^$path.*$#);
-
-       return undef;
-}
-
-sub split_path($$) {
-       my($rev,$path) = @_;
-       my $branch;
-
-       if($path =~ s#^/\Q$tag_name\E/([^/]+)/?##) {
-               $branch = "/$1";
-       } elsif($path =~ s#^/\Q$trunk_name\E/?##) {
-               $branch = "/";
-       } elsif($path =~ s#^/\Q$branch_name\E/([^/]+)/?##) {
-               $branch = $1;
-       } else {
-               my %no_error = (
-                       "/" => 1,
-                       "/$tag_name" => 1,
-                       "/$branch_name" => 1
-               );
-               print STDERR "$rev: Unrecognized path: $path\n" unless (defined $no_error{$path});
-               return ()
-       }
-       if ($path eq "") {
-               $path = "/";
-       } elsif ($project_name) {
-               $path = project_path($path, $project_name);
-       }
-       return ($branch,$path);
-}
-
-sub branch_rev($$) {
-
-       my ($srcbranch,$uptorev) = @_;
-
-       my $bbranches = $branches{$srcbranch};
-       my @revs = reverse sort { ($a eq 'LAST' ? 0 : $a) <=> ($b eq 'LAST' ? 0 : $b) } keys %$bbranches;
-       my $therev;
-       foreach my $arev(@revs) {
-               next if  ($arev eq 'LAST');
-               if ($arev <= $uptorev) {
-                       $therev = $arev;
-                       last;
-               }
-       }
-       return $therev;
-}
-
-sub expand_svndir($$$);
-
-sub expand_svndir($$$)
-{
-       my ($svnpath, $rev, $path) = @_;
-       my @list;
-       get_ignore(\@list, undef, $rev, $path, $svnpath);
-       my $dirents = $svn->dir_list($svnpath, $rev);
-       foreach my $p(keys %$dirents) {
-               my $kind = node_kind($svnpath.'/'.$p, $rev);
-               if ($kind eq $SVN::Node::file) {
-                       my $f = get_file($svnpath.'/'.$p, $rev, $path.'/'.$p);
-                       push(@list, $f) if $f;
-               } elsif ($kind eq $SVN::Node::dir) {
-                       push(@list,
-                            expand_svndir($svnpath.'/'.$p, $rev, $path.'/'.$p));
-               }
-       }
-       return @list;
-}
-
-sub copy_path($$$$$$$$) {
-       # Somebody copied a whole subdirectory.
-       # We need to find the index entries from the old version which the
-       # SVN log entry points to, and add them to the new place.
-
-       my($newrev,$newbranch,$path,$oldpath,$rev,$node_kind,$new,$parents) = @_;
-
-       my($srcbranch,$srcpath) = split_path($rev,$oldpath);
-       unless(defined $srcbranch && defined $srcpath) {
-               print "Path not found when copying from $oldpath @ $rev.\n".
-                       "Will try to copy from original SVN location...\n"
-                       if $opt_v;
-               push (@$new, expand_svndir($oldpath, $rev, $path));
-               return;
-       }
-       my $therev = branch_rev($srcbranch, $rev);
-       my $gitrev = $branches{$srcbranch}{$therev};
-       unless($gitrev) {
-               print STDERR "$newrev:$newbranch: could not find $oldpath \@ $rev\n";
-               return;
-       }
-       if ($srcbranch ne $newbranch) {
-               push(@$parents, $branches{$srcbranch}{'LAST'});
-       }
-       print "$newrev:$newbranch:$path: copying from $srcbranch:$srcpath @ $rev\n" if $opt_v;
-       if ($node_kind eq $SVN::Node::dir) {
-               $srcpath =~ s#/*$#/#;
-       }
-
-       my $pid = open my $f,'-|';
-       die $! unless defined $pid;
-       if (!$pid) {
-               exec("git","ls-tree","-r","-z",$gitrev,$srcpath)
-                       or die $!;
-       }
-       local $/ = "\0";
-       while(<$f>) {
-               chomp;
-               my($m,$p) = split(/\t/,$_,2);
-               my($mode,$type,$sha1) = split(/ /,$m);
-               next if $type ne "blob";
-               if ($node_kind eq $SVN::Node::dir) {
-                       $p = $path . substr($p,length($srcpath)-1);
-               } else {
-                       $p = $path;
-               }
-               push(@$new,[$mode,$sha1,$p]);
-       }
-       close($f) or
-               print STDERR "$newrev:$newbranch: could not list files in $oldpath \@ $rev\n";
-}
-
-sub commit {
-       my($branch, $changed_paths, $revision, $author, $date, $message) = @_;
-       my($committer_name,$committer_email,$dest);
-       my($author_name,$author_email);
-       my(@old,@new,@parents);
-
-       if (not defined $author or $author eq "") {
-               $committer_name = $committer_email = "unknown";
-       } elsif (defined $users_file) {
-               die "User $author is not listed in $users_file\n"
-                   unless exists $users{$author};
-               ($committer_name,$committer_email) = @{$users{$author}};
-       } elsif ($author =~ /^(.*?)\s+<(.*)>$/) {
-               ($committer_name, $committer_email) = ($1, $2);
-       } else {
-               $author =~ s/^<(.*)>$/$1/;
-               $committer_name = $committer_email = $author;
-       }
-
-       if ($opt_F && $message =~ /From:\s+(.*?)\s+<(.*)>\s*\n/) {
-               ($author_name, $author_email) = ($1, $2);
-		print "Author from From: $1 <$2>\n" if ($opt_v);
-       } elsif ($opt_S && $message =~ /Signed-off-by:\s+(.*?)\s+<(.*)>\s*\n/) {
-               ($author_name, $author_email) = ($1, $2);
-		print "Author from Signed-off-by: $1 <$2>\n" if ($opt_v);
-       } else {
-               $author_name = $committer_name;
-               $author_email = $committer_email;
-       }
-
-       $date = pdate($date);
-
-       my $tag;
-       my $parent;
-       if($branch eq "/") { # trunk
-               $parent = $opt_o;
-       } elsif($branch =~ m#^/(.+)#) { # tag
-               $tag = 1;
-               $parent = $1;
-       } else { # "normal" branch
-               # nothing to do
-               $parent = $branch;
-       }
-       $dest = $parent;
-
-       my $prev = $changed_paths->{"/"};
-       if($prev and $prev->[0] eq "A") {
-               delete $changed_paths->{"/"};
-               my $oldpath = $prev->[1];
-               my $rev;
-               if(defined $oldpath) {
-                       my $p;
-                       ($parent,$p) = split_path($revision,$oldpath);
-                       if(defined $parent) {
-                               if($parent eq "/") {
-                                       $parent = $opt_o;
-                               } else {
-                                       $parent =~ s#^/##; # if it's a tag
-                               }
-                       }
-               } else {
-                       $parent = undef;
-               }
-       }
-
-       my $rev;
-       if($revision > $opt_s and defined $parent) {
-               open(H,'-|',"git","rev-parse","--verify",$parent);
-               $rev = <H>;
-               close(H) or do {
-                       print STDERR "$revision: cannot find commit '$parent'!\n";
-                       return;
-               };
-               chop $rev;
-               if(length($rev) != 40) {
-                       print STDERR "$revision: cannot find commit '$parent'!\n";
-                       return;
-               }
-               $rev = $branches{($parent eq $opt_o) ? "/" : $parent}{"LAST"};
-               if($revision != $opt_s and not $rev) {
-                       print STDERR "$revision: do not know ancestor for '$parent'!\n";
-                       return;
-               }
-       } else {
-               $rev = undef;
-       }
-
-#      if($prev and $prev->[0] eq "A") {
-#              if(not $tag) {
-#                      unless(open(H,"> $git_dir/refs/heads/$branch")) {
-#                              print STDERR "$revision: Could not create branch $branch: $!\n";
-#                              $state=11;
-#                              next;
-#                      }
-#                      print H "$rev\n"
-#                              or die "Could not write branch $branch: $!";
-#                      close(H)
-#                              or die "Could not write branch $branch: $!";
-#              }
-#      }
-       if(not defined $rev) {
-               unlink($git_index);
-       } elsif ($rev ne $last_rev) {
-               print "Switching from $last_rev to $rev ($branch)\n" if $opt_v;
-               system("git", "read-tree", $rev);
-               die "read-tree failed for $rev: $?\n" if $?;
-               $last_rev = $rev;
-       }
-
-       push (@parents, $rev) if defined $rev;
-
-       my $cid;
-       if($tag and not %$changed_paths) {
-               $cid = $rev;
-       } else {
-               my @paths = sort keys %$changed_paths;
-               foreach my $path(@paths) {
-                       my $action = $changed_paths->{$path};
-
-                       if ($action->[0] eq "R") {
-                               # refer to a file/tree in an earlier commit
-                               push(@old,$path); # remove any old stuff
-                       }
-                       if(($action->[0] eq "A") || ($action->[0] eq "R")) {
-                               my $node_kind = node_kind($action->[3], $revision);
-                               if ($node_kind eq $SVN::Node::file) {
-                                       my $f = get_file($action->[3],
-                                                        $revision, $path);
-                                       if ($f) {
-                                               push(@new,$f) if $f;
-                                       } else {
-                                               my $opath = $action->[3];
-                                               print STDERR "$revision: $branch: could not fetch '$opath'\n";
-                                       }
-                               } elsif ($node_kind eq $SVN::Node::dir) {
-                                       if($action->[1]) {
-                                               copy_path($revision, $branch,
-                                                         $path, $action->[1],
-                                                         $action->[2], $node_kind,
-                                                         \@new, \@parents);
-                                       } else {
-                                               get_ignore(\@new, \@old, $revision,
-                                                          $path, $action->[3]);
-                                       }
-                               }
-                       } elsif ($action->[0] eq "D") {
-                               push(@old,$path);
-                       } elsif ($action->[0] eq "M") {
-                               my $node_kind = node_kind($action->[3], $revision);
-                               if ($node_kind eq $SVN::Node::file) {
-                                       my $f = get_file($action->[3],
-                                                        $revision, $path);
-                                       push(@new,$f) if $f;
-                               } elsif ($node_kind eq $SVN::Node::dir) {
-                                       get_ignore(\@new, \@old, $revision,
-                                                  $path, $action->[3]);
-                               }
-                       } else {
-                               die "$revision: unknown action '".$action->[0]."' for $path\n";
-                       }
-               }
-
-               while(@old) {
-                       my @o1;
-                       if(@old > 55) {
-                               @o1 = splice(@old,0,50);
-                       } else {
-                               @o1 = @old;
-                               @old = ();
-                       }
-                       my $pid = open my $F, "-|";
-                       die "$!" unless defined $pid;
-                       if (!$pid) {
-                               exec("git", "ls-files", "-z", @o1) or die $!;
-                       }
-                       @o1 = ();
-                       local $/ = "\0";
-                       while(<$F>) {
-                               chomp;
-                               push(@o1,$_);
-                       }
-                       close($F);
-
-                       while(@o1) {
-                               my @o2;
-                               if(@o1 > 55) {
-                                       @o2 = splice(@o1,0,50);
-                               } else {
-                                       @o2 = @o1;
-                                       @o1 = ();
-                               }
-                               system("git","update-index","--force-remove","--",@o2);
-                               die "Cannot remove files: $?\n" if $?;
-                       }
-               }
-               while(@new) {
-                       my @n2;
-                       if(@new > 12) {
-                               @n2 = splice(@new,0,10);
-                       } else {
-                               @n2 = @new;
-                               @new = ();
-                       }
-                       system("git","update-index","--add",
-                               (map { ('--cacheinfo', @$_) } @n2));
-                       die "Cannot add files: $?\n" if $?;
-               }
-
-               my $pid = open(C,"-|");
-               die "Cannot fork: $!" unless defined $pid;
-               unless($pid) {
-                       exec("git","write-tree");
-                       die "Cannot exec git-write-tree: $!\n";
-               }
-               chomp(my $tree = <C>);
-               length($tree) == 40
-                       or die "Cannot get tree id ($tree): $!\n";
-               close(C)
-                       or die "Error running git-write-tree: $?\n";
-               print "Tree ID $tree\n" if $opt_v;
-
-               my $pr = IO::Pipe->new() or die "Cannot open pipe: $!\n";
-               my $pw = IO::Pipe->new() or die "Cannot open pipe: $!\n";
-               $pid = fork();
-               die "Fork: $!\n" unless defined $pid;
-               unless($pid) {
-                       $pr->writer();
-                       $pw->reader();
-                       open(OUT,">&STDOUT");
-                       dup2($pw->fileno(),0);
-                       dup2($pr->fileno(),1);
-                       $pr->close();
-                       $pw->close();
-
-                       my @par = ();
-
-                       # loose detection of merges
-                       # based on the commit msg
-                       foreach my $rx (@mergerx) {
-                               if ($message =~ $rx) {
-                                       my $mparent = $1;
-                                       if ($mparent eq 'HEAD') { $mparent = $opt_o };
-                                       if ( -e "$git_dir/refs/heads/$mparent") {
-                                               $mparent = get_headref($mparent, $git_dir);
-                                               push (@parents, $mparent);
-                                               print OUT "Merge parent branch: $mparent\n" if $opt_v;
-                                       }
-                               }
-                       }
-                       my %seen_parents = ();
-                       my @unique_parents = grep { ! $seen_parents{$_} ++ } @parents;
-                       foreach my $bparent (@unique_parents) {
-                               push @par, '-p', $bparent;
-                               print OUT "Merge parent branch: $bparent\n" if $opt_v;
-                       }
-
-                       exec("env",
-                               "GIT_AUTHOR_NAME=$author_name",
-                               "GIT_AUTHOR_EMAIL=$author_email",
-                               "GIT_AUTHOR_DATE=".strftime("+0000 %Y-%m-%d %H:%M:%S",gmtime($date)),
-                               "GIT_COMMITTER_NAME=$committer_name",
-                               "GIT_COMMITTER_EMAIL=$committer_email",
-                               "GIT_COMMITTER_DATE=".strftime("+0000 %Y-%m-%d %H:%M:%S",gmtime($date)),
-                               "git", "commit-tree", $tree,@par);
-                       die "Cannot exec git-commit-tree: $!\n";
-               }
-               $pw->writer();
-               $pr->reader();
-
-               $message =~ s/[\s\n]+\z//;
-               $message = "r$revision: $message" if $opt_r;
-
-               print $pw "$message\n"
-                       or die "Error writing to git-commit-tree: $!\n";
-               $pw->close();
-
-		print "Committed change $revision:$branch (".strftime("%Y-%m-%d %H:%M:%S",gmtime($date)).")\n" if $opt_v;
-               chomp($cid = <$pr>);
-               length($cid) == 40
-                       or die "Cannot get commit id ($cid): $!\n";
-               print "Commit ID $cid\n" if $opt_v;
-               $pr->close();
-
-               waitpid($pid,0);
-               die "Error running git-commit-tree: $?\n" if $?;
-       }
-
-       if (not defined $cid) {
-               $cid = $branches{"/"}{"LAST"};
-       }
-
-       if(not defined $dest) {
-               print "... no known parent\n" if $opt_v;
-       } elsif(not $tag) {
-               print "Writing to refs/heads/$dest\n" if $opt_v;
-               open(C,">$git_dir/refs/heads/$dest") and
-               print C ("$cid\n") and
-               close(C)
-                       or die "Cannot write branch $dest for update: $!\n";
-       }
-
-       if ($tag) {
-               $last_rev = "-" if %$changed_paths;
-               # the tag was 'complex', i.e. did not refer to a "real" revision
-
-               $dest =~ tr/_/\./ if $opt_u;
-
-               system('git', 'tag', '-f', $dest, $cid) == 0
-                       or die "Cannot create tag $dest: $!\n";
-
-               print "Created tag '$dest' on '$branch'\n" if $opt_v;
-       }
-       $branches{$branch}{"LAST"} = $cid;
-       $branches{$branch}{$revision} = $cid;
-       $last_rev = $cid;
-       print BRANCHES "$revision $branch $cid\n";
-       print "DONE: $revision $dest $cid\n" if $opt_v;
-}
-
-sub commit_all {
-       # Recursive use of the SVN connection does not work
-       local $svn = $svn2;
-
-       my ($changed_paths, $revision, $author, $date, $message) = @_;
-       my %p;
-       while(my($path,$action) = each %$changed_paths) {
-               $p{$path} = [ $action->action,$action->copyfrom_path, $action->copyfrom_rev, $path ];
-       }
-       $changed_paths = \%p;
-
-       my %done;
-       my @col;
-       my $pref;
-       my $branch;
-
-       while(my($path,$action) = each %$changed_paths) {
-               ($branch,$path) = split_path($revision,$path);
-               next if not defined $branch;
-               next if not defined $path;
-               $done{$branch}{$path} = $action;
-       }
-       while(($branch,$changed_paths) = each %done) {
-               commit($branch, $changed_paths, $revision, $author, $date, $message);
-       }
-}
-
-$opt_l = $svn->{'maxrev'} if not defined $opt_l or $opt_l > $svn->{'maxrev'};
-
-if ($opt_l < $current_rev) {
-    print "Up to date: no new revisions to fetch!\n" if $opt_v;
-    unlink("$git_dir/SVN2GIT_HEAD");
-    exit;
-}
-
-print "Processing from $current_rev to $opt_l ...\n" if $opt_v;
-
-my $from_rev;
-my $to_rev = $current_rev - 1;
-
-my $subpool = SVN::Pool::new_default_sub;
-while ($to_rev < $opt_l) {
-       $subpool->clear;
-       $from_rev = $to_rev + 1;
-       $to_rev = $from_rev + $repack_after;
-       $to_rev = $opt_l if $opt_l < $to_rev;
-       print "Fetching from $from_rev to $to_rev ...\n" if $opt_v;
-       $svn->{'svn'}->get_log("",$from_rev,$to_rev,0,1,1,\&commit_all);
-       my $pid = fork();
-       die "Fork: $!\n" unless defined $pid;
-       unless($pid) {
-               exec("git", "repack", "-d")
-                       or die "Cannot repack: $!\n";
-       }
-       waitpid($pid, 0);
-}
-
-
-unlink($git_index);
-
-if (defined $orig_git_index) {
-       $ENV{GIT_INDEX_FILE} = $orig_git_index;
-} else {
-       delete $ENV{GIT_INDEX_FILE};
-}
-
-# Now switch back to the branch we were in before all of this happened
-if($orig_branch) {
-       print "DONE\n" if $opt_v and (not defined $opt_l or $opt_l > 0);
-       system("cp","$git_dir/refs/heads/$opt_o","$git_dir/refs/heads/master")
-               if $forward_master;
-       unless ($opt_i) {
-               system('git', 'read-tree', '-m', '-u', 'SVN2GIT_HEAD', 'HEAD');
-               die "read-tree failed: $?\n" if $?;
-       }
-} else {
-       $orig_branch = "master";
-       print "DONE; creating $orig_branch branch\n" if $opt_v and (not defined $opt_l or $opt_l > 0);
-       system("cp","$git_dir/refs/heads/$opt_o","$git_dir/refs/heads/master")
-               unless -f "$git_dir/refs/heads/master";
-       system('git', 'update-ref', 'HEAD', "$orig_branch");
-       unless ($opt_i) {
-               system('git checkout');
-               die "checkout failed: $?\n" if $?;
-       }
-}
-unlink("$git_dir/SVN2GIT_HEAD");
-close(BRANCHES);
diff --git a/contrib/examples/git-svnimport.txt b/contrib/examples/git-svnimport.txt
deleted file mode 100644 (file)
index 3f0a9c3..0000000
+++ /dev/null
@@ -1,179 +0,0 @@
-git-svnimport(1)
-================
-v0.1, July 2005
-
-NAME
-----
-git-svnimport - Import a SVN repository into git
-
-
-SYNOPSIS
---------
-[verse]
-'git-svnimport' [ -o <branch-for-HEAD> ] [ -h ] [ -v ] [ -d | -D ]
-               [ -C <GIT_repository> ] [ -i ] [ -u ] [-l limit_rev]
-               [ -b branch_subdir ] [ -T trunk_subdir ] [ -t tag_subdir ]
-               [ -s start_chg ] [ -m ] [ -r ] [ -M regex ]
-               [ -I <ignorefile_name> ] [ -A <author_file> ]
-               [ -R <repack_each_revs>] [ -P <path_from_trunk> ]
-               <SVN_repository_URL> [ <path> ]
-
-
-DESCRIPTION
------------
-Imports a SVN repository into git. It will either create a new
-repository, or incrementally import into an existing one.
-
-SVN access is done by the SVN::Perl module.
-
-git-svnimport assumes that SVN repositories are organized into one
-"trunk" directory where the main development happens, "branches/FOO"
-directories for branches, and "tags/FOO" directories for tags.
-Other subdirectories are ignored.
-
-git-svnimport creates a file ".git/svn2git", which is required for
-incremental SVN imports.
-
-OPTIONS
--------
--C <target-dir>::
-        The GIT repository to import to.  If the directory doesn't
-        exist, it will be created.  Default is the current directory.
-
--s <start_rev>::
-        Start importing at this SVN change number. The default is 1.
-+
-When importing incrementally, you might need to edit the .git/svn2git file.
-
--i::
-       Import-only: don't perform a checkout after importing.  This option
-       ensures the working directory and index remain untouched and will
-       not create them if they do not exist.
-
--T <trunk_subdir>::
-       Name the SVN trunk. Default "trunk".
-
--t <tag_subdir>::
-       Name the SVN subdirectory for tags. Default "tags".
-
--b <branch_subdir>::
-       Name the SVN subdirectory for branches. Default "branches".
-
--o <branch-for-HEAD>::
-       The 'trunk' branch from SVN is imported to the 'origin' branch within
-       the git repository. Use this option if you want to import into a
-       different branch.
-
--r::
-       Prepend 'rX: ' to commit messages, where X is the imported
-       subversion revision.
-
--u::
-       Replace underscores in tag names with periods.
-
--I <ignorefile_name>::
-       Import the svn:ignore directory property to files with this
-       name in each directory. (The Subversion and GIT ignore
-       syntaxes are similar enough that using the Subversion patterns
-       directly with "-I .gitignore" will almost always just work.)
-
--A <author_file>::
-       Read a file with lines on the form
-+
-------
-       username = User's Full Name <email@addr.es>
-
-------
-+
-and use "User's Full Name <email@addr.es>" as the GIT
-author and committer for Subversion commits made by
-"username". If encountering a commit made by a user not in the
-list, abort.
-+
-For convenience, this data is saved to $GIT_DIR/svn-authors
-each time the -A option is provided, and read from that same
-file each time git-svnimport is run with an existing GIT
-repository without -A.
-
--m::
-       Attempt to detect merges based on the commit message. This option
-       will enable default regexes that try to capture the source
-       branch name from the commit message.
-
--M <regex>::
-       Attempt to detect merges based on the commit message with a custom
-       regex. It can be used with -m to also see the default regexes.
-       You must escape forward slashes.
-
--l <max_rev>::
-       Specify a maximum revision number to pull.
-+
-Formerly, this option controlled how many revisions to pull,
-due to SVN memory leaks. (These have been worked around.)
-
--R <repack_each_revs>::
-       Specify how often git repository should be repacked.
-+
-The default value is 1000. git-svnimport will do imports in chunks of 1000
-revisions, after each chunk the git repository will be repacked. To disable
-this behavior specify some large value here which is greater than the number of
-revisions to import.
-
--P <path_from_trunk>::
-       Partial import of the SVN tree.
-+
-By default, the whole tree on the SVN trunk (/trunk) is imported.
-'-P my/proj' will import starting only from '/trunk/my/proj'.
-This option is useful when you want to import one project from a
-svn repo which hosts multiple projects under the same trunk.
-
--v::
-       Verbosity: let 'svnimport' report what it is doing.
-
--d::
-       Use direct HTTP requests if possible. The "<path>" argument is used
-       only for retrieving the SVN logs; the path to the contents is
-       included in the SVN log.
-
--D::
-       Use direct HTTP requests if possible. The "<path>" argument is used
-       for retrieving the logs, as well as for the contents.
-+
-There's no safe way to automatically find out which of these options to
-use, so you need to try both. Usually, the one that's wrong will die
-with a 40x error pretty quickly.
-
-<SVN_repository_URL>::
-       The URL of the SVN module you want to import. For local
-       repositories, use "file:///absolute/path".
-+
-If you're using the "-d" or "-D" option, this is the URL of the SVN
-repository itself; it usually ends in "/svn".
-
-<path>::
-       The path to the module you want to check out.
-
--h::
-       Print a short usage message and exit.
-
-OUTPUT
-------
-If '-v' is specified, the script reports what it is doing.
-
-Otherwise, success is indicated the Unix way, i.e. by simply exiting with
-a zero exit status.
-
-Author
-------
-Written by Matthias Urlichs <smurf@smurf.noris.de>, with help from
-various participants of the git-list <git@vger.kernel.org>.
-
-Based on a cvs2git script by the same author.
-
-Documentation
---------------
-Documentation by Matthias Urlichs <smurf@smurf.noris.de>.
-
-GIT
----
-Part of the linkgit:git[7] suite
diff --git a/contrib/examples/git-tag.sh b/contrib/examples/git-tag.sh
deleted file mode 100755 (executable)
index 1bd8f3c..0000000
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/bin/sh
-# Copyright (c) 2005 Linus Torvalds
-
-USAGE='[-n [<num>]] -l [<pattern>] | [-a | -s | -u <key-id>] [-f | -d | -v] [-m <msg>] <tagname> [<head>]'
-SUBDIRECTORY_OK='Yes'
-. git-sh-setup
-
-message_given=
-annotate=
-signed=
-force=
-message=
-username=
-list=
-verify=
-LINES=0
-while test $# != 0
-do
-    case "$1" in
-    -a)
-       annotate=1
-       shift
-       ;;
-    -s)
-       annotate=1
-       signed=1
-       shift
-       ;;
-    -f)
-       force=1
-       shift
-       ;;
-    -n)
-        case "$#,$2" in
-       1,* | *,-*)
-               LINES=1         # no argument
-               ;;
-       *)      shift
-               LINES=$(expr "$1" : '\([0-9]*\)')
-               [ -z "$LINES" ] && LINES=1 # 1 line is default when -n is used
-               ;;
-       esac
-       shift
-       ;;
-    -l)
-       list=1
-       shift
-       case $# in
-       0)      PATTERN=
-               ;;
-       *)
-               PATTERN="$1"    # select tags by shell pattern, not re
-               shift
-               ;;
-       esac
-       git rev-parse --symbolic --tags | sort |
-           while read TAG
-           do
-               case "$TAG" in
-               *$PATTERN*) ;;
-               *)          continue ;;
-               esac
-               [ "$LINES" -le 0 ] && { echo "$TAG"; continue ;}
-               OBJTYPE=$(git cat-file -t "$TAG")
-               case $OBJTYPE in
-               tag)
-                       ANNOTATION=$(git cat-file tag "$TAG" |
-                               sed -e '1,/^$/d' |
-                               sed -n -e "
-                                       /^-----BEGIN PGP SIGNATURE-----\$/q
-                                       2,\$s/^/    /
-                                       p
-                                       ${LINES}q
-                               ")
-                       printf "%-15s %s\n" "$TAG" "$ANNOTATION"
-                       ;;
-               *)      echo "$TAG"
-                       ;;
-               esac
-           done
-       ;;
-    -m)
-       annotate=1
-       shift
-       message="$1"
-       if test "$#" = "0"; then
-           die "error: option -m needs an argument"
-       else
-           message="$1"
-           message_given=1
-           shift
-       fi
-       ;;
-    -F)
-       annotate=1
-       shift
-       if test "$#" = "0"; then
-           die "error: option -F needs an argument"
-       else
-           message="$(cat "$1")"
-           message_given=1
-           shift
-       fi
-       ;;
-    -u)
-       annotate=1
-       signed=1
-       shift
-       if test "$#" = "0"; then
-           die "error: option -u needs an argument"
-       else
-           username="$1"
-           shift
-       fi
-       ;;
-    -d)
-       shift
-       had_error=0
-       for tag
-       do
-               cur=$(git show-ref --verify --hash -- "refs/tags/$tag") || {
-                       echo >&2 "Seriously, what tag are you talking about?"
-                       had_error=1
-                       continue
-               }
-               git update-ref -m 'tag: delete' -d "refs/tags/$tag" "$cur" || {
-                       had_error=1
-                       continue
-               }
-               echo "Deleted tag $tag."
-       done
-       exit $had_error
-       ;;
-    -v)
-       shift
-       tag_name="$1"
-       tag=$(git show-ref --verify --hash -- "refs/tags/$tag_name") ||
-               die "Seriously, what tag are you talking about?"
-       git-verify-tag -v "$tag"
-       exit $?
-       ;;
-    -*)
-        usage
-       ;;
-    *)
-       break
-       ;;
-    esac
-done
-
-[ -n "$list" ] && exit 0
-
-name="$1"
-[ "$name" ] || usage
-prev=0000000000000000000000000000000000000000
-if git show-ref --verify --quiet -- "refs/tags/$name"
-then
-    test -n "$force" || die "tag '$name' already exists"
-    prev=$(git rev-parse "refs/tags/$name")
-fi
-shift
-git check-ref-format "tags/$name" ||
-       die "we do not like '$name' as a tag name."
-
-object=$(git rev-parse --verify --default HEAD "$@") || exit 1
-type=$(git cat-file -t $object) || exit 1
-tagger=$(git var GIT_COMMITTER_IDENT) || exit 1
-
-test -n "$username" ||
-       username=$(git config user.signingkey) ||
-       username=$(expr "z$tagger" : 'z\(.*>\)')
-
-trap 'rm -f "$GIT_DIR"/TAG_TMP* "$GIT_DIR"/TAG_FINALMSG "$GIT_DIR"/TAG_EDITMSG' 0
-
-if [ "$annotate" ]; then
-    if [ -z "$message_given" ]; then
-        ( echo "#"
-          echo "# Write a tag message"
-          echo "#" ) > "$GIT_DIR"/TAG_EDITMSG
-        git_editor "$GIT_DIR"/TAG_EDITMSG || exit
-    else
-        printf '%s\n' "$message" >"$GIT_DIR"/TAG_EDITMSG
-    fi
-
-    grep -v '^#' <"$GIT_DIR"/TAG_EDITMSG |
-    git stripspace >"$GIT_DIR"/TAG_FINALMSG
-
-    [ -s "$GIT_DIR"/TAG_FINALMSG -o -n "$message_given" ] || {
-       echo >&2 "No tag message?"
-       exit 1
-    }
-
-    ( printf 'object %s\ntype %s\ntag %s\ntagger %s\n\n' \
-       "$object" "$type" "$name" "$tagger";
-      cat "$GIT_DIR"/TAG_FINALMSG ) >"$GIT_DIR"/TAG_TMP
-    rm -f "$GIT_DIR"/TAG_TMP.asc "$GIT_DIR"/TAG_FINALMSG
-    if [ "$signed" ]; then
-       gpg -bsa -u "$username" "$GIT_DIR"/TAG_TMP &&
-       cat "$GIT_DIR"/TAG_TMP.asc >>"$GIT_DIR"/TAG_TMP ||
-       die "failed to sign the tag with GPG."
-    fi
-    object=$(git-mktag < "$GIT_DIR"/TAG_TMP)
-fi
-
-git update-ref "refs/tags/$name" "$object" "$prev"
diff --git a/contrib/examples/git-verify-tag.sh b/contrib/examples/git-verify-tag.sh
deleted file mode 100755 (executable)
index 0902a5c..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/sh
-
-USAGE='<tag>'
-SUBDIRECTORY_OK='Yes'
-. git-sh-setup
-
-verbose=
-while test $# != 0
-do
-       case "$1" in
-       -v|--v|--ve|--ver|--verb|--verbo|--verbos|--verbose)
-               verbose=t ;;
-       *)
-               break ;;
-       esac
-       shift
-done
-
-if [ "$#" != "1" ]
-then
-       usage
-fi
-
-type="$(git cat-file -t "$1" 2>/dev/null)" ||
-       die "$1: no such object."
-
-test "$type" = tag ||
-       die "$1: cannot verify a non-tag object of type $type."
-
-case "$verbose" in
-t)
-       git cat-file -p "$1" |
-       sed -n -e '/^-----BEGIN PGP SIGNATURE-----/q' -e p
-       ;;
-esac
-
-trap 'rm -f "$GIT_DIR/.tmp-vtag"' 0
-
-git cat-file tag "$1" >"$GIT_DIR/.tmp-vtag" || exit 1
-sed -n -e '
-       /^-----BEGIN PGP SIGNATURE-----$/q
-       p
-' <"$GIT_DIR/.tmp-vtag" |
-gpg --verify "$GIT_DIR/.tmp-vtag" - || exit 1
-rm -f "$GIT_DIR/.tmp-vtag"
diff --git a/contrib/examples/git-whatchanged.sh b/contrib/examples/git-whatchanged.sh
deleted file mode 100755 (executable)
index 2edbdc6..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh
-
-USAGE='[-p] [--max-count=<n>] [<since>..<limit>] [--pretty=<format>] [-m] [git-diff-tree options] [git-rev-list options]'
-SUBDIRECTORY_OK='Yes'
-. git-sh-setup
-
-diff_tree_flags=$(git-rev-parse --sq --no-revs --flags "$@") || exit
-case "$0" in
-*whatchanged)
-       count=
-       test -z "$diff_tree_flags" &&
-               diff_tree_flags=$(git config --get whatchanged.difftree)
-       diff_tree_default_flags='-c -M --abbrev' ;;
-*show)
-       count=-n1
-       test -z "$diff_tree_flags" &&
-               diff_tree_flags=$(git config --get show.difftree)
-       diff_tree_default_flags='--cc --always' ;;
-esac
-test -z "$diff_tree_flags" &&
-       diff_tree_flags="$diff_tree_default_flags"
-
-rev_list_args=$(git-rev-parse --sq --default HEAD --revs-only "$@") &&
-diff_tree_args=$(git-rev-parse --sq --no-revs --no-flags "$@") &&
-
-eval "git-rev-list $count $rev_list_args" |
-eval "git-diff-tree --stdin --pretty -r $diff_tree_flags $diff_tree_args" |
-LESS="$LESS -S" ${PAGER:-less}
index a4b6f7a2cd4122adbb74a565b34270578b7b6aed..4e603512a39fe209b537cdc47c344c99f7cc38f1 100644 (file)
@@ -21,8 +21,9 @@ HERE=contrib/mw-to-git/
 INSTALL = install
 
 SCRIPT_PERL_FULL=$(patsubst %,$(HERE)/%,$(SCRIPT_PERL))
-INSTLIBDIR=$(shell $(MAKE) -C $(GIT_ROOT_DIR)/perl \
-                -s --no-print-directory instlibdir)
+INSTLIBDIR=$(shell $(MAKE) -C $(GIT_ROOT_DIR)/ \
+                -s --no-print-directory prefix=$(prefix) \
+                perllibdir=$(perllibdir) perllibdir)
 DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
 INSTLIBDIR_SQ = $(subst ','\'',$(INSTLIBDIR))
 
index b9e2fc8540d5a8c6f7ceb05fc97a7c502732d8f2..151a1970419fd042466932331b3ab216e40d2560 100644 (file)
@@ -1,10 +1,10 @@
 TL;DR: Run update_unicode.sh after the publication of a new Unicode
-standard and commit the resulting unicode_widths.h file.
+standard and commit the resulting unicode-widths.h file.
 
 The long version
 ================
 
-The Git source code ships the file unicode_widths.h which contains
+The Git source code ships the file unicode-widths.h which contains
 tables of zero and double width Unicode code points, respectively.
 These tables are generated using update_unicode.sh in this directory.
 update_unicode.sh itself uses a third-party tool, uniset, to query two
@@ -16,5 +16,5 @@ This requires a current-ish version of autoconf (2.69 works per December
 
 On each run, update_unicode.sh checks whether more recent Unicode data
 files are available from the Unicode consortium, and rebuilds the header
-unicode_widths.h with the new data. The new header can then be
+unicode-widths.h with the new data. The new header can then be
 committed.
index e05db92d3fb255d09a1844ceb6428b6279e8eea9..aa90865befa428e0b0972197bc0e79d008fd7053 100755 (executable)
@@ -6,7 +6,7 @@
 #Cf Format          a format control character
 #
 cd "$(dirname "$0")"
-UNICODEWIDTH_H=$(git rev-parse --show-toplevel)/unicode_width.h
+UNICODEWIDTH_H=$(git rev-parse --show-toplevel)/unicode-width.h
 
 wget -N http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt \
        http://www.unicode.org/Public/UCD/latest/ucd/EastAsianWidth.txt &&
index cc562f65094cd696466f86c3184f61cd025c2117..64d0d30e08de4acd496bf955d9ce64afa0ff5b8b 100644 (file)
--- a/convert.c
+++ b/convert.c
@@ -7,6 +7,7 @@
 #include "sigchain.h"
 #include "pkt-line.h"
 #include "sub-process.h"
+#include "utf8.h"
 
 /*
  * convert.c - convert a file when checking it out and checking it in.
@@ -265,6 +266,241 @@ static int will_convert_lf_to_crlf(size_t len, struct text_stat *stats,
 
 }
 
+static int validate_encoding(const char *path, const char *enc,
+                     const char *data, size_t len, int die_on_error)
+{
+       /* We only check for UTF here as UTF?? can be an alias for UTF-?? */
+       if (istarts_with(enc, "UTF")) {
+               /*
+                * Check for detectable errors in UTF encodings
+                */
+               if (has_prohibited_utf_bom(enc, data, len)) {
+                       const char *error_msg = _(
+                               "BOM is prohibited in '%s' if encoded as %s");
+                       /*
+                        * This advice is shown for UTF-??BE and UTF-??LE encodings.
+                        * We cut off the last two characters of the encoding name
+                        * to generate the encoding name suitable for BOMs.
+                        */
+                       const char *advise_msg = _(
+                               "The file '%s' contains a byte order "
+                               "mark (BOM). Please use UTF-%s as "
+                               "working-tree-encoding.");
+                       const char *stripped = NULL;
+                       char *upper = xstrdup_toupper(enc);
+                       upper[strlen(upper)-2] = '\0';
+                       if (!skip_prefix(upper, "UTF-", &stripped))
+                               skip_prefix(stripped, "UTF", &stripped);
+                       advise(advise_msg, path, stripped);
+                       free(upper);
+                       if (die_on_error)
+                               die(error_msg, path, enc);
+                       else {
+                               return error(error_msg, path, enc);
+                       }
+
+               } else if (is_missing_required_utf_bom(enc, data, len)) {
+                       const char *error_msg = _(
+                               "BOM is required in '%s' if encoded as %s");
+                       const char *advise_msg = _(
+                               "The file '%s' is missing a byte order "
+                               "mark (BOM). Please use UTF-%sBE or UTF-%sLE "
+                               "(depending on the byte order) as "
+                               "working-tree-encoding.");
+                       const char *stripped = NULL;
+                       char *upper = xstrdup_toupper(enc);
+                       if (!skip_prefix(upper, "UTF-", &stripped))
+                               skip_prefix(stripped, "UTF", &stripped);
+                       advise(advise_msg, path, stripped, stripped);
+                       free(upper);
+                       if (die_on_error)
+                               die(error_msg, path, enc);
+                       else {
+                               return error(error_msg, path, enc);
+                       }
+               }
+
+       }
+       return 0;
+}
+
+static void trace_encoding(const char *context, const char *path,
+                          const char *encoding, const char *buf, size_t len)
+{
+       static struct trace_key coe = TRACE_KEY_INIT(WORKING_TREE_ENCODING);
+       struct strbuf trace = STRBUF_INIT;
+       int i;
+
+       strbuf_addf(&trace, "%s (%s, considered %s):\n", context, path, encoding);
+       for (i = 0; i < len && buf; ++i) {
+               strbuf_addf(
+                       &trace,"| \e[2m%2i:\e[0m %2x \e[2m%c\e[0m%c",
+                       i,
+                       (unsigned char) buf[i],
+                       (buf[i] > 32 && buf[i] < 127 ? buf[i] : ' '),
+                       ((i+1) % 8 && (i+1) < len ? ' ' : '\n')
+               );
+       }
+       strbuf_addchars(&trace, '\n', 1);
+
+       trace_strbuf(&coe, &trace);
+       strbuf_release(&trace);
+}
+
+static int check_roundtrip(const char *enc_name)
+{
+       /*
+        * check_roundtrip_encoding contains a string of comma and/or
+        * space separated encodings (eg. "UTF-16, ASCII, CP1125").
+        * Search for the given encoding in that string.
+        */
+       const char *found = strcasestr(check_roundtrip_encoding, enc_name);
+       const char *next;
+       int len;
+       if (!found)
+               return 0;
+       next = found + strlen(enc_name);
+       len = strlen(check_roundtrip_encoding);
+       return (found && (
+                       /*
+                        * check that the found encoding is at the
+                        * beginning of check_roundtrip_encoding or
+                        * that it is prefixed with a space or comma
+                        */
+                       found == check_roundtrip_encoding || (
+                               (isspace(found[-1]) || found[-1] == ',')
+                       )
+               ) && (
+                       /*
+                        * check that the found encoding is at the
+                        * end of check_roundtrip_encoding or
+                        * that it is suffixed with a space or comma
+                        */
+                       next == check_roundtrip_encoding + len || (
+                               next < check_roundtrip_encoding + len &&
+                               (isspace(next[0]) || next[0] == ',')
+                       )
+               ));
+}
+
+static const char *default_encoding = "UTF-8";
+
+static int encode_to_git(const char *path, const char *src, size_t src_len,
+                        struct strbuf *buf, const char *enc, int conv_flags)
+{
+       char *dst;
+       int dst_len;
+       int die_on_error = conv_flags & CONV_WRITE_OBJECT;
+
+       /*
+        * No encoding is specified or there is nothing to encode.
+        * Tell the caller that the content was not modified.
+        */
+       if (!enc || (src && !src_len))
+               return 0;
+
+       /*
+        * Looks like we got called from "would_convert_to_git()".
+        * This means Git wants to know if it would encode (= modify!)
+        * the content. Let's answer with "yes", since an encoding was
+        * specified.
+        */
+       if (!buf && !src)
+               return 1;
+
+       if (validate_encoding(path, enc, src, src_len, die_on_error))
+               return 0;
+
+       trace_encoding("source", path, enc, src, src_len);
+       dst = reencode_string_len(src, src_len, default_encoding, enc,
+                                 &dst_len);
+       if (!dst) {
+               /*
+                * We could add the blob "as-is" to Git. However, on checkout
+                * we would try to reencode to the original encoding. This
+                * would fail and we would leave the user with a messed-up
+                * working tree. Let's try to avoid this by screaming loud.
+                */
+               const char* msg = _("failed to encode '%s' from %s to %s");
+               if (die_on_error)
+                       die(msg, path, enc, default_encoding);
+               else {
+                       error(msg, path, enc, default_encoding);
+                       return 0;
+               }
+       }
+       trace_encoding("destination", path, default_encoding, dst, dst_len);
+
+       /*
+        * UTF supports lossless conversion round tripping [1] and conversions
+        * between UTF and other encodings are mostly round trip safe as
+        * Unicode aims to be a superset of all other character encodings.
+        * However, certain encodings (e.g. SHIFT-JIS) are known to have round
+        * trip issues [2]. Check the round trip conversion for all encodings
+        * listed in core.checkRoundtripEncoding.
+        *
+        * The round trip check is only performed if content is written to Git.
+        * This ensures that no information is lost during conversion to/from
+        * the internal UTF-8 representation.
+        *
+        * Please note, the code below is not tested because I was not able to
+        * generate a faulty round trip without an iconv error. Iconv errors
+        * are already caught above.
+        *
+        * [1] http://unicode.org/faq/utf_bom.html#gen2
+        * [2] https://support.microsoft.com/en-us/help/170559/prb-conversion-problem-between-shift-jis-and-unicode
+        */
+       if (die_on_error && check_roundtrip(enc)) {
+               char *re_src;
+               int re_src_len;
+
+               re_src = reencode_string_len(dst, dst_len,
+                                            enc, default_encoding,
+                                            &re_src_len);
+
+               trace_printf("Checking roundtrip encoding for %s...\n", enc);
+               trace_encoding("reencoded source", path, enc,
+                              re_src, re_src_len);
+
+               if (!re_src || src_len != re_src_len ||
+                   memcmp(src, re_src, src_len)) {
+                       const char* msg = _("encoding '%s' from %s to %s and "
+                                           "back is not the same");
+                       die(msg, path, enc, default_encoding);
+               }
+
+               free(re_src);
+       }
+
+       strbuf_attach(buf, dst, dst_len, dst_len + 1);
+       return 1;
+}
+
+static int encode_to_worktree(const char *path, const char *src, size_t src_len,
+                             struct strbuf *buf, const char *enc)
+{
+       char *dst;
+       int dst_len;
+
+       /*
+        * No encoding is specified or there is nothing to encode.
+        * Tell the caller that the content was not modified.
+        */
+       if (!enc || (src && !src_len))
+               return 0;
+
+       dst = reencode_string_len(src, src_len, enc, default_encoding,
+                                 &dst_len);
+       if (!dst) {
+               error("failed to encode '%s' from %s to %s",
+                       path, default_encoding, enc);
+               return 0;
+       }
+
+       strbuf_attach(buf, dst, dst_len, dst_len + 1);
+       return 1;
+}
+
 static int crlf_to_git(const struct index_state *istate,
                       const char *path, const char *src, size_t len,
                       struct strbuf *buf,
@@ -914,7 +1150,7 @@ static int ident_to_worktree(const char *path, const char *src, size_t len,
                to_free = strbuf_detach(buf, NULL);
        hash_object_file(src, len, "blob", &oid);
 
-       strbuf_grow(buf, len + cnt * 43);
+       strbuf_grow(buf, len + cnt * (the_hash_algo->hexsz + 3));
        for (;;) {
                /* step 1: run to the next '$' */
                dollar = memchr(src, '$', len);
@@ -978,6 +1214,24 @@ static int ident_to_worktree(const char *path, const char *src, size_t len,
        return 1;
 }
 
+static const char *git_path_check_encoding(struct attr_check_item *check)
+{
+       const char *value = check->value;
+
+       if (ATTR_UNSET(value) || !strlen(value))
+               return NULL;
+
+       if (ATTR_TRUE(value) || ATTR_FALSE(value)) {
+               die(_("true/false are not valid working-tree-encodings"));
+       }
+
+       /* Don't encode to the default encoding */
+       if (same_encoding(value, default_encoding))
+               return NULL;
+
+       return value;
+}
+
 static enum crlf_action git_path_check_crlf(struct attr_check_item *check)
 {
        const char *value = check->value;
@@ -1033,6 +1287,7 @@ struct conv_attrs {
        enum crlf_action attr_action; /* What attr says */
        enum crlf_action crlf_action; /* When no attr is set, use core.autocrlf */
        int ident;
+       const char *working_tree_encoding; /* Supported encoding or default encoding if NULL */
 };
 
 static void convert_attrs(struct conv_attrs *ca, const char *path)
@@ -1041,7 +1296,8 @@ static void convert_attrs(struct conv_attrs *ca, const char *path)
 
        if (!check) {
                check = attr_check_initl("crlf", "ident", "filter",
-                                        "eol", "text", NULL);
+                                        "eol", "text", "working-tree-encoding",
+                                        NULL);
                user_convert_tail = &user_convert;
                git_config(read_convert_config, NULL);
        }
@@ -1064,6 +1320,7 @@ static void convert_attrs(struct conv_attrs *ca, const char *path)
                        else if (eol_attr == EOL_CRLF)
                                ca->crlf_action = CRLF_TEXT_CRLF;
                }
+               ca->working_tree_encoding = git_path_check_encoding(ccheck + 5);
        } else {
                ca->drv = NULL;
                ca->crlf_action = CRLF_UNDEFINED;
@@ -1144,6 +1401,13 @@ int convert_to_git(const struct index_state *istate,
                src = dst->buf;
                len = dst->len;
        }
+
+       ret |= encode_to_git(path, src, len, dst, ca.working_tree_encoding, conv_flags);
+       if (ret && dst) {
+               src = dst->buf;
+               len = dst->len;
+       }
+
        if (!(conv_flags & CONV_EOL_KEEP_CRLF)) {
                ret |= crlf_to_git(istate, path, src, len, dst, ca.crlf_action, conv_flags);
                if (ret && dst) {
@@ -1167,6 +1431,7 @@ void convert_to_git_filter_fd(const struct index_state *istate,
        if (!apply_filter(path, NULL, 0, fd, dst, ca.drv, CAP_CLEAN, NULL))
                die("%s: clean filter '%s' failed", path, ca.drv->name);
 
+       encode_to_git(path, dst->buf, dst->len, dst, ca.working_tree_encoding, conv_flags);
        crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, conv_flags);
        ident_to_git(path, dst->buf, dst->len, dst, ca.ident);
 }
@@ -1198,6 +1463,12 @@ static int convert_to_working_tree_internal(const char *path, const char *src,
                }
        }
 
+       ret |= encode_to_worktree(path, src, len, dst, ca.working_tree_encoding);
+       if (ret) {
+               src = dst->buf;
+               len = dst->len;
+       }
+
        ret_filter = apply_filter(
                path, src, len, -1, dst, ca.drv, CAP_SMUDGE, dco);
        if (!ret_filter && ca.drv && ca.drv->required)
@@ -1510,7 +1781,7 @@ struct ident_filter {
        struct stream_filter filter;
        struct strbuf left;
        int state;
-       char ident[45]; /* ": x40 $" */
+       char ident[GIT_MAX_HEXSZ + 5]; /* ": x40 $" */
 };
 
 static int is_foreign_ident(const char *str)
@@ -1635,12 +1906,12 @@ static struct stream_filter_vtbl ident_vtbl = {
        ident_free_fn,
 };
 
-static struct stream_filter *ident_filter(const unsigned char *sha1)
+static struct stream_filter *ident_filter(const struct object_id *oid)
 {
        struct ident_filter *ident = xmalloc(sizeof(*ident));
 
        xsnprintf(ident->ident, sizeof(ident->ident),
-                 ": %s $", sha1_to_hex(sha1));
+                 ": %s $", oid_to_hex(oid));
        strbuf_init(&ident->left, 0);
        ident->filter.vtbl = &ident_vtbl;
        ident->state = 0;
@@ -1655,7 +1926,7 @@ static struct stream_filter *ident_filter(const unsigned char *sha1)
  * Note that you would be crazy to set CRLF, smudge/clean or ident to a
  * large binary blob you would want us not to slurp into the memory!
  */
-struct stream_filter *get_stream_filter(const char *path, const unsigned char *sha1)
+struct stream_filter *get_stream_filter(const char *path, const struct object_id *oid)
 {
        struct conv_attrs ca;
        struct stream_filter *filter = NULL;
@@ -1664,11 +1935,14 @@ struct stream_filter *get_stream_filter(const char *path, const unsigned char *s
        if (ca.drv && (ca.drv->process || ca.drv->smudge || ca.drv->clean))
                return NULL;
 
+       if (ca.working_tree_encoding)
+               return NULL;
+
        if (ca.crlf_action == CRLF_AUTO || ca.crlf_action == CRLF_AUTO_CRLF)
                return NULL;
 
        if (ca.ident)
-               filter = ident_filter(sha1);
+               filter = ident_filter(oid);
 
        if (output_eol(ca.crlf_action) == EOL_CRLF)
                filter = cascade_filter(filter, lf_to_crlf_filter());
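
The hunks above thread a new working-tree-encoding attribute through the
conversion pipeline: encode_to_git() re-encodes content to UTF-8 (after
validate_encoding() rejects bad or missing BOMs) before it is written to the
object database, encode_to_worktree() converts it back on checkout, and
get_stream_filter() refuses to stream such paths. A minimal usage sketch,
assuming a repository that wants files matching an illustrative pattern stored
as UTF-8 internally but checked out as UTF-16:

    # declare the worktree encoding for matching paths (pattern is illustrative)
    cat >.gitattributes <<'EOF'
    *.ps1 text working-tree-encoding=UTF-16
    EOF

    # confirm which encoding the attribute machinery will apply to a path
    git check-attr working-tree-encoding -- installer.ps1
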
index 65ab3e516745775b52f36a370ba7880a34c8b2a5..01385d92886223ab7b1d951d12c5de9b07612401 100644 (file)
--- a/convert.h
+++ b/convert.h
@@ -12,6 +12,7 @@ struct index_state;
 #define CONV_EOL_RNDTRP_WARN  (1<<1) /* Warn if CRLF to LF to CRLF is different */
 #define CONV_EOL_RENORMALIZE  (1<<2) /* Convert CRLF to LF */
 #define CONV_EOL_KEEP_CRLF    (1<<3) /* Keep CRLF line endings as is */
+#define CONV_WRITE_OBJECT     (1<<4) /* Content is written to the index */
 
 extern int global_conv_flags_eol;
 
@@ -55,6 +56,7 @@ struct delayed_checkout {
 };
 
 extern enum eol core_eol;
+extern char *check_roundtrip_encoding;
 extern const char *get_cached_convert_stats_ascii(const struct index_state *istate,
                                                  const char *path);
 extern const char *get_wt_convert_stats_ascii(const char *path);
@@ -93,7 +95,7 @@ extern int would_convert_to_git_filter_fd(const char *path);
 
 struct stream_filter; /* opaque */
 
-extern struct stream_filter *get_stream_filter(const char *path, const unsigned char *);
+extern struct stream_filter *get_stream_filter(const char *path, const struct object_id *);
 extern void free_stream_filter(struct stream_filter *);
 extern int is_null_stream_filter(struct stream_filter *);
 
index 9747f47b18bf2e622f11f41889faaa4b1845ac8d..62be651b03b55ee4d478706e51ea8606b10739f0 100644 (file)
@@ -5,6 +5,7 @@
 #include "run-command.h"
 #include "url.h"
 #include "prompt.h"
+#include "sigchain.h"
 
 void credential_init(struct credential *c)
 {
@@ -227,8 +228,10 @@ static int run_credential_helper(struct credential *c,
                return -1;
 
        fp = xfdopen(helper.in, "w");
+       sigchain_push(SIGPIPE, SIG_IGN);
        credential_write(c, fp);
        fclose(fp);
+       sigchain_pop(SIGPIPE);
 
        if (want_output) {
                int r;
index 5eda7fb6af673ae82989dab871aaac74ec9ff629..53ce37f7ca42996dbfb4cf80e2127ea43496734d 100644 (file)
@@ -53,7 +53,7 @@ void hashflush(struct hashfile *f)
        }
 }
 
-int hashclose(struct hashfile *f, unsigned char *result, unsigned int flags)
+int finalize_hashfile(struct hashfile *f, unsigned char *result, unsigned int flags)
 {
        int fd;
 
@@ -61,11 +61,11 @@ int hashclose(struct hashfile *f, unsigned char *result, unsigned int flags)
        the_hash_algo->final_fn(f->buffer, &f->ctx);
        if (result)
                hashcpy(result, f->buffer);
-       if (flags & (CSUM_CLOSE | CSUM_FSYNC)) {
-               /* write checksum and close fd */
+       if (flags & CSUM_HASH_IN_STREAM)
                flush(f, f->buffer, the_hash_algo->rawsz);
-               if (flags & CSUM_FSYNC)
-                       fsync_or_die(f->fd, f->name);
+       if (flags & CSUM_FSYNC)
+               fsync_or_die(f->fd, f->name);
+       if (flags & CSUM_CLOSE) {
                if (close(f->fd))
                        die_errno("%s: sha1 file error on close", f->name);
                fd = 0;
index 992e5c014122d8fed3ee782d400e61de78e55271..c5a2e335e7e063528da8386cc95fba4f7bb5bfe8 100644 (file)
@@ -26,14 +26,15 @@ struct hashfile_checkpoint {
 extern void hashfile_checkpoint(struct hashfile *, struct hashfile_checkpoint *);
 extern int hashfile_truncate(struct hashfile *, struct hashfile_checkpoint *);
 
-/* hashclose flags */
-#define CSUM_CLOSE     1
-#define CSUM_FSYNC     2
+/* finalize_hashfile flags */
+#define CSUM_CLOSE             1
+#define CSUM_FSYNC             2
+#define CSUM_HASH_IN_STREAM    4
 
 extern struct hashfile *hashfd(int fd, const char *name);
 extern struct hashfile *hashfd_check(const char *name);
 extern struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp);
-extern int hashclose(struct hashfile *, unsigned char *, unsigned int);
+extern int finalize_hashfile(struct hashfile *, unsigned char *, unsigned int);
 extern void hashwrite(struct hashfile *, const void *, unsigned int);
 extern void hashflush(struct hashfile *f);
 extern void crc32_begin(struct hashfile *);
index fe833ea7de7685968915c93950bb54768dd04586..9d2e0d20ef302aaeac02aaeba791509dd43a2712 100644 (file)
--- a/daemon.c
+++ b/daemon.c
@@ -1459,7 +1459,7 @@ int cmd_main(int argc, const char **argv)
                die("base-path '%s' does not exist or is not a directory",
                    base_path);
 
-       if (inetd_mode) {
+       if (log_destination != LOG_DESTINATION_STDERR) {
                if (!freopen("/dev/null", "w", stderr))
                        die_errno("failed to redirect stderr to /dev/null");
        }
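
With this daemon.c change, stderr is redirected to /dev/null whenever the log
destination is anything other than stderr, not only in inetd mode. To keep
diagnostics on stderr one selects that destination explicitly; a hedged
invocation sketch, assuming the --log-destination option that corresponds to
the LOG_DESTINATION_STDERR value tested above (paths are illustrative):

    git daemon --log-destination=stderr --export-all --base-path=/srv/git
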
diff --git a/detect-compiler b/detect-compiler
new file mode 100755 (executable)
index 0000000..70b7544
--- /dev/null
@@ -0,0 +1,53 @@
+#!/bin/sh
+#
+# Probe the compiler for vintage, version, etc. This is used for setting
+# optional make knobs under the DEVELOPER knob.
+
+CC="$*"
+
+# we get something like (this is at least true for gcc and clang)
+#
+# FreeBSD clang version 3.4.1 (tags/RELEASE...)
+get_version_line() {
+       $CC -v 2>&1 | grep ' version '
+}
+
+get_family() {
+       get_version_line | sed 's/^\(.*\) version [0-9][^ ]* .*/\1/'
+}
+
+get_version() {
+       get_version_line | sed 's/^.* version \([0-9][^ ]*\) .*/\1/'
+}
+
+print_flags() {
+       family=$1
+       version=$(get_version | cut -f 1 -d .)
+
+       # Print a feature flag not only for the current version, but also
+       # for any prior versions we encompass. This avoids needing to do
+       # numeric comparisons in make, which are awkward.
+       while test "$version" -gt 0
+       do
+               echo $family$version
+               version=$((version - 1))
+       done
+}
+
+case "$(get_family)" in
+gcc)
+       print_flags gcc
+       ;;
+clang)
+       print_flags clang
+       ;;
+"FreeBSD clang")
+       print_flags clang
+       ;;
+"Apple LLVM")
+       print_flags clang
+       ;;
+*)
+       : unknown compiler family
+       ;;
+esac
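
detect-compiler emits one token per covered major version (a GCC 7 toolchain
yields gcc7, gcc6, ... down to gcc1), so a makefile can test for "version N or
newer" with a plain substring match instead of a numeric comparison. A sketch
of how a DEVELOPER-style knob might consume that output; the variable names
and the warning flag are illustrative, not taken from this diff:

    # enable a warning only when the compiler reports gcc 7 or newer
    COMPILER_FEATURES=$(./detect-compiler "$CC" | tr '\n' ' ')
    case " $COMPILER_FEATURES " in
    *" gcc7 "*)
            EXTRA_CFLAGS="$EXTRA_CFLAGS -Wimplicit-fallthrough"
            ;;
    esac
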
diff --git a/diff.c b/diff.c
index 4c59f5f5d3d32286daefbeeb25ed5291070fe762..4753170fe1296c87579db3376aa5a625b0042f19 100644 (file)
--- a/diff.c
+++ b/diff.c
@@ -3638,7 +3638,8 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags)
        else {
                enum object_type type;
                if (size_only || (flags & CHECK_BINARY)) {
-                       type = sha1_object_info(s->oid.hash, &s->size);
+                       type = oid_object_info(the_repository, &s->oid,
+                                              &s->size);
                        if (type < 0)
                                die("unable to read %s",
                                    oid_to_hex(&s->oid));
@@ -3649,7 +3650,7 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags)
                                return 0;
                        }
                }
-               s->data = read_sha1_file(s->oid.hash, &type, &s->size);
+               s->data = read_object_file(&s->oid, &type, &s->size);
                if (!s->data)
                        die("unable to read %s", oid_to_hex(&s->oid));
                s->should_free = 1;
@@ -3834,7 +3835,7 @@ static int similarity_index(struct diff_filepair *p)
 static const char *diff_abbrev_oid(const struct object_id *oid, int abbrev)
 {
        if (startup_info->have_repository)
-               return find_unique_abbrev(oid->hash, abbrev);
+               return find_unique_abbrev(oid, abbrev);
        else {
                char *hex = oid_to_hex(oid);
                if (abbrev < 0)
diff --git a/dir.c b/dir.c
index dedbf5d476f207e39c1b7853ec8c97553181e5fb..be08d3d296f6d565202fc51586f928b5e274e8a9 100644 (file)
--- a/dir.c
+++ b/dir.c
@@ -19,6 +19,7 @@
 #include "varint.h"
 #include "ewah/ewok.h"
 #include "fsmonitor.h"
+#include "submodule-config.h"
 
 /*
  * Tells read_directory_recursive how a file or directory should be treated.
@@ -243,7 +244,7 @@ static int do_read_blob(const struct object_id *oid, struct oid_stat *oid_stat,
        *size_out = 0;
        *data_out = NULL;
 
-       data = read_sha1_file(oid->hash, &type, &sz);
+       data = read_object_file(oid, &type, &sz);
        if (!data || type != OBJ_BLOB) {
                free(data);
                return -1;
@@ -3010,8 +3011,57 @@ void untracked_cache_add_to_index(struct index_state *istate,
        untracked_cache_invalidate_path(istate, path, 1);
 }
 
-/* Update gitfile and core.worktree setting to connect work tree and git dir */
-void connect_work_tree_and_git_dir(const char *work_tree_, const char *git_dir_)
+static void connect_wt_gitdir_in_nested(const char *sub_worktree,
+                                       const char *sub_gitdir)
+{
+       int i;
+       struct repository subrepo;
+       struct strbuf sub_wt = STRBUF_INIT;
+       struct strbuf sub_gd = STRBUF_INIT;
+
+       const struct submodule *sub;
+
+       /* If the submodule has no working tree, we can ignore it. */
+       if (repo_init(&subrepo, sub_gitdir, sub_worktree))
+               return;
+
+       if (repo_read_index(&subrepo) < 0)
+               die("index file corrupt in repo %s", subrepo.gitdir);
+
+       for (i = 0; i < subrepo.index->cache_nr; i++) {
+               const struct cache_entry *ce = subrepo.index->cache[i];
+
+               if (!S_ISGITLINK(ce->ce_mode))
+                       continue;
+
+               while (i + 1 < subrepo.index->cache_nr &&
+                      !strcmp(ce->name, subrepo.index->cache[i + 1]->name))
+                       /*
+                        * Skip entries with the same name in different stages
+                        * to make sure an entry is returned only once.
+                        */
+                       i++;
+
+               sub = submodule_from_path(&subrepo, &null_oid, ce->name);
+               if (!sub || !is_submodule_active(&subrepo, ce->name))
+                       /* .gitmodules broken or inactive sub */
+                       continue;
+
+               strbuf_reset(&sub_wt);
+               strbuf_reset(&sub_gd);
+               strbuf_addf(&sub_wt, "%s/%s", sub_worktree, sub->path);
+               strbuf_addf(&sub_gd, "%s/modules/%s", sub_gitdir, sub->name);
+
+               connect_work_tree_and_git_dir(sub_wt.buf, sub_gd.buf, 1);
+       }
+       strbuf_release(&sub_wt);
+       strbuf_release(&sub_gd);
+       repo_clear(&subrepo);
+}
+
+void connect_work_tree_and_git_dir(const char *work_tree_,
+                                  const char *git_dir_,
+                                  int recurse_into_nested)
 {
        struct strbuf gitfile_sb = STRBUF_INIT;
        struct strbuf cfg_sb = STRBUF_INIT;
@@ -3041,6 +3091,10 @@ void connect_work_tree_and_git_dir(const char *work_tree_, const char *git_dir_)
        strbuf_release(&gitfile_sb);
        strbuf_release(&cfg_sb);
        strbuf_release(&rel_path);
+
+       if (recurse_into_nested)
+               connect_wt_gitdir_in_nested(work_tree, git_dir);
+
        free(work_tree);
        free(git_dir);
 }
@@ -3054,5 +3108,5 @@ void relocate_gitdir(const char *path, const char *old_git_dir, const char *new_
                die_errno(_("could not migrate git directory from '%s' to '%s'"),
                        old_git_dir, new_git_dir);
 
-       connect_work_tree_and_git_dir(path, new_git_dir);
+       connect_work_tree_and_git_dir(path, new_git_dir, 0);
 }
diff --git a/dir.h b/dir.h
index b0758b82a20017dd3ce29c54454678f026718078..3870193e527b31186d5965888040af6c810e73ee 100644 (file)
--- a/dir.h
+++ b/dir.h
@@ -359,7 +359,17 @@ struct untracked_cache *read_untracked_extension(const void *data, unsigned long
 void write_untracked_extension(struct strbuf *out, struct untracked_cache *untracked);
 void add_untracked_cache(struct index_state *istate);
 void remove_untracked_cache(struct index_state *istate);
-extern void connect_work_tree_and_git_dir(const char *work_tree, const char *git_dir);
+
+/*
+ * Connect a worktree to a git directory by creating (or overwriting) a
+ * '.git' file containing the location of the git directory. In the git
+ * directory set the core.worktree setting to indicate where the worktree is.
+ * When `recurse_into_nested` is set, recurse into any nested submodules,
+ * connecting them as well.
+ */
+extern void connect_work_tree_and_git_dir(const char *work_tree,
+                                         const char *git_dir,
+                                         int recurse_into_nested);
 extern void relocate_gitdir(const char *path,
                            const char *old_git_dir,
                            const char *new_git_dir);
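
The comment added here names the two artifacts that tie a worktree to its git
directory; observed from the shell, the connection looks roughly like this
(paths are illustrative):

    # the worktree side: a .git *file* pointing at the repository
    $ cat /path/to/worktree/.git
    gitdir: /path/to/repo.git

    # the repository side: core.worktree pointing back at the worktree
    $ git --git-dir=/path/to/repo.git config core.worktree
    /path/to/worktree
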
diff --git a/entry.c b/entry.c
index 6c33112aea7dc8e584678bcb5e5ea292d29265a9..2101201a111785f449a65e785e9ed5b57b7aa196 100644 (file)
--- a/entry.c
+++ b/entry.c
@@ -85,7 +85,7 @@ static int create_file(const char *path, unsigned int mode)
 static void *read_blob_entry(const struct cache_entry *ce, unsigned long *size)
 {
        enum object_type type;
-       void *blob_data = read_sha1_file(ce->oid.hash, &type, size);
+       void *blob_data = read_object_file(&ce->oid, &type, size);
 
        if (blob_data) {
                if (type == OBJ_BLOB)
@@ -266,7 +266,7 @@ static int write_entry(struct cache_entry *ce,
 
        if (ce_mode_s_ifmt == S_IFREG) {
                struct stream_filter *filter = get_stream_filter(ce->name,
-                                                                ce->oid.hash);
+                                                                &ce->oid);
                if (filter &&
                    !streaming_write_entry(ce, path, filter,
                                           state, to_tempfile,
index d6dd64662ce4d09296b61708b020a71eef3488f6..2a6de2330bc024d19ab0c1d8cc594f146ca6da11 100644 (file)
@@ -13,6 +13,9 @@
 #include "refs.h"
 #include "fmt-merge-msg.h"
 #include "commit.h"
+#include "argv-array.h"
+#include "object-store.h"
+#include "chdir-notify.h"
 
 int trust_executable_bit = 1;
 int trust_ctime = 1;
@@ -48,10 +51,11 @@ const char *editor_program;
 const char *askpass_program;
 const char *excludes_file;
 enum auto_crlf auto_crlf = AUTO_CRLF_FALSE;
-int check_replace_refs = 1;
+int check_replace_refs = 1; /* NEEDSWORK: rename to read_replace_refs */
 char *git_replace_ref_base;
 enum eol core_eol = EOL_UNSET;
 int global_conv_flags_eol = CONV_EOL_RNDTRP_WARN;
+char *check_roundtrip_encoding = "SHIFT-JIS";
 unsigned whitespace_rule_cfg = WS_DEFAULT_RULE;
 enum branch_track git_branch_track = BRANCH_TRACK_REMOTE;
 enum rebase_setup_type autorebase = AUTOREBASE_NEVER;
@@ -62,6 +66,7 @@ enum push_default_type push_default = PUSH_DEFAULT_UNSPECIFIED;
 enum object_creation_mode object_creation_mode = OBJECT_CREATION_MODE;
 char *notes_ref_name;
 int grafts_replace_parents = 1;
+int core_commit_graph;
 int core_apply_sparse_checkout;
 int merge_log_config = -1;
 int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */
@@ -147,10 +152,35 @@ static char *expand_namespace(const char *raw_namespace)
        return strbuf_detach(&buf, NULL);
 }
 
-void setup_git_env(void)
+/*
+ * Wrapper of getenv() that returns a strdup value. This value is kept
+ * in argv to be freed later.
+ */
+static const char *getenv_safe(struct argv_array *argv, const char *name)
+{
+       const char *value = getenv(name);
+
+       if (!value)
+               return NULL;
+
+       argv_array_push(argv, value);
+       return argv->argv[argv->argc - 1];
+}
+
+void setup_git_env(const char *git_dir)
 {
        const char *shallow_file;
        const char *replace_ref_base;
+       struct set_gitdir_args args = { NULL };
+       struct argv_array to_free = ARGV_ARRAY_INIT;
+
+       args.commondir = getenv_safe(&to_free, GIT_COMMON_DIR_ENVIRONMENT);
+       args.object_dir = getenv_safe(&to_free, DB_ENVIRONMENT);
+       args.graft_file = getenv_safe(&to_free, GRAFT_ENVIRONMENT);
+       args.index_file = getenv_safe(&to_free, INDEX_ENVIRONMENT);
+       args.alternate_db = getenv_safe(&to_free, ALTERNATE_DB_ENVIRONMENT);
+       repo_set_gitdir(the_repository, git_dir, &args);
+       argv_array_clear(&to_free);
 
        if (getenv(NO_REPLACE_OBJECTS_ENVIRONMENT))
                check_replace_refs = 0;
@@ -244,9 +274,9 @@ const char *get_git_work_tree(void)
 
 char *get_object_directory(void)
 {
-       if (!the_repository->objectdir)
+       if (!the_repository->objects->objectdir)
                BUG("git environment hasn't been setup");
-       return the_repository->objectdir;
+       return the_repository->objects->objectdir;
 }
 
 int odb_mkstemp(struct strbuf *temp_filename, const char *pattern)
@@ -296,13 +326,31 @@ char *get_graft_file(void)
        return the_repository->graft_file;
 }
 
-int set_git_dir(const char *path)
+static void set_git_dir_1(const char *path)
 {
        if (setenv(GIT_DIR_ENVIRONMENT, path, 1))
-               return error("Could not set GIT_DIR to '%s'", path);
-       repo_set_gitdir(the_repository, path);
-       setup_git_env();
-       return 0;
+               die("could not set GIT_DIR to '%s'", path);
+       setup_git_env(path);
+}
+
+static void update_relative_gitdir(const char *name,
+                                  const char *old_cwd,
+                                  const char *new_cwd,
+                                  void *data)
+{
+       char *path = reparent_relative_path(old_cwd, new_cwd, get_git_dir());
+       trace_printf_key(&trace_setup_key,
+                        "setup: move $GIT_DIR to '%s'",
+                        path);
+       set_git_dir_1(path);
+       free(path);
+}
+
+void set_git_dir(const char *path)
+{
+       set_git_dir_1(path);
+       if (!is_absolute_path(path))
+               chdir_notify_register(NULL, update_relative_gitdir, NULL);
 }
 
 const char *get_log_output_encoding(void)
diff --git a/exec-cmd.c b/exec-cmd.c
new file mode 100644 (file)
index 0000000..02d31ee
--- /dev/null
@@ -0,0 +1,365 @@
+#include "cache.h"
+#include "exec-cmd.h"
+#include "quote.h"
+#include "argv-array.h"
+
+#if defined(RUNTIME_PREFIX)
+
+#if defined(HAVE_NS_GET_EXECUTABLE_PATH)
+#include <mach-o/dyld.h>
+#endif
+
+#if defined(HAVE_BSD_KERN_PROC_SYSCTL)
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#endif
+
+#endif /* RUNTIME_PREFIX */
+
+#define MAX_ARGS 32
+
+static const char *system_prefix(void);
+
+#ifdef RUNTIME_PREFIX
+
+/**
+ * When using a runtime prefix, Git dynamically resolves paths relative to its
+ * executable.
+ *
+ * The method for determining the path of the executable is highly
+ * platform-specific.
+ */
+
+/**
+ * Path to the current Git executable. Resolved on startup by
+ * 'git_resolve_executable_dir'.
+ */
+static const char *executable_dirname;
+
+static const char *system_prefix(void)
+{
+       static const char *prefix;
+
+       assert(executable_dirname);
+       assert(is_absolute_path(executable_dirname));
+
+       if (!prefix &&
+           !(prefix = strip_path_suffix(executable_dirname, GIT_EXEC_PATH)) &&
+           !(prefix = strip_path_suffix(executable_dirname, BINDIR)) &&
+           !(prefix = strip_path_suffix(executable_dirname, "git"))) {
+               prefix = FALLBACK_RUNTIME_PREFIX;
+               trace_printf("RUNTIME_PREFIX requested, "
+                               "but prefix computation failed.  "
+                               "Using static fallback '%s'.\n", prefix);
+       }
+       return prefix;
+}
+
+/*
+ * Resolves the executable path from argv[0], only if it is absolute.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int git_get_exec_path_from_argv0(struct strbuf *buf, const char *argv0)
+{
+       const char *slash;
+
+       if (!argv0 || !*argv0)
+               return -1;
+
+       slash = find_last_dir_sep(argv0);
+       if (slash) {
+               trace_printf("trace: resolved executable path from argv0: %s\n",
+                            argv0);
+               strbuf_add_absolute_path(buf, argv0);
+               return 0;
+       }
+       return -1;
+}
+
+#ifdef PROCFS_EXECUTABLE_PATH
+/*
+ * Resolves the executable path by examining a procfs symlink.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int git_get_exec_path_procfs(struct strbuf *buf)
+{
+       if (strbuf_realpath(buf, PROCFS_EXECUTABLE_PATH, 0)) {
+               trace_printf(
+                       "trace: resolved executable path from procfs: %s\n",
+                       buf->buf);
+               return 0;
+       }
+       return -1;
+}
+#endif /* PROCFS_EXECUTABLE_PATH */
+
+#ifdef HAVE_BSD_KERN_PROC_SYSCTL
+/*
+ * Resolves the executable path using KERN_PROC_PATHNAME BSD sysctl.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int git_get_exec_path_bsd_sysctl(struct strbuf *buf)
+{
+       int mib[4];
+       char path[MAXPATHLEN];
+       size_t cb = sizeof(path);
+
+       mib[0] = CTL_KERN;
+       mib[1] = KERN_PROC;
+       mib[2] = KERN_PROC_PATHNAME;
+       mib[3] = -1;
+       if (!sysctl(mib, 4, path, &cb, NULL, 0)) {
+               trace_printf(
+                       "trace: resolved executable path from sysctl: %s\n",
+                       path);
+               strbuf_addstr(buf, path);
+               return 0;
+       }
+       return -1;
+}
+#endif /* HAVE_BSD_KERN_PROC_SYSCTL */
+
+#ifdef HAVE_NS_GET_EXECUTABLE_PATH
+/*
+ * Resolves the executable path by querying Darwin application stack.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int git_get_exec_path_darwin(struct strbuf *buf)
+{
+       char path[PATH_MAX];
+       uint32_t size = sizeof(path);
+       if (!_NSGetExecutablePath(path, &size)) {
+               trace_printf(
+                       "trace: resolved executable path from Darwin stack: %s\n",
+                       path);
+               strbuf_addstr(buf, path);
+               return 0;
+       }
+       return -1;
+}
+#endif /* HAVE_NS_GET_EXECUTABLE_PATH */
+
+#ifdef HAVE_WPGMPTR
+/*
+ * Resolves the executable path by using the global variable _wpgmptr.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int git_get_exec_path_wpgmptr(struct strbuf *buf)
+{
+       int len = wcslen(_wpgmptr) * 3 + 1;
+       strbuf_grow(buf, len);
+       len = xwcstoutf(buf->buf, _wpgmptr, len);
+       if (len < 0)
+               return -1;
+       buf->len += len;
+       return 0;
+}
+#endif /* HAVE_WPGMPTR */
+
+/*
+ * Resolves the absolute path of the current executable.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int git_get_exec_path(struct strbuf *buf, const char *argv0)
+{
+       /*
+        * Identifying the executable path is operating system specific.
+        * Selectively employ all available methods in order of preference,
+        * preferring highly-available authoritative methods over
+        * selectively-available or non-authoritative methods.
+        *
+        * All cases fall back on resolving against argv[0] if there isn't a
+        * better functional method. However, note that argv[0] can be
+        * user-supplied on many operating systems, and is not authoritative
+        * in those cases.
+        *
+        * Each of these functions returns 0 on success, so evaluation will stop
+        * after the first successful method.
+        */
+       if (
+#ifdef HAVE_BSD_KERN_PROC_SYSCTL
+               git_get_exec_path_bsd_sysctl(buf) &&
+#endif /* HAVE_BSD_KERN_PROC_SYSCTL */
+
+#ifdef HAVE_NS_GET_EXECUTABLE_PATH
+               git_get_exec_path_darwin(buf) &&
+#endif /* HAVE_NS_GET_EXECUTABLE_PATH */
+
+#ifdef PROCFS_EXECUTABLE_PATH
+               git_get_exec_path_procfs(buf) &&
+#endif /* PROCFS_EXECUTABLE_PATH */
+
+#ifdef HAVE_WPGMPTR
+               git_get_exec_path_wpgmptr(buf) &&
+#endif /* HAVE_WPGMPTR */
+
+               git_get_exec_path_from_argv0(buf, argv0)) {
+               return -1;
+       }
+
+       if (strbuf_normalize_path(buf)) {
+               trace_printf("trace: could not normalize path: %s\n", buf->buf);
+               return -1;
+       }
+
+       return 0;
+}
+
+void git_resolve_executable_dir(const char *argv0)
+{
+       struct strbuf buf = STRBUF_INIT;
+       char *resolved;
+       const char *slash;
+
+       if (git_get_exec_path(&buf, argv0)) {
+               trace_printf(
+                       "trace: could not determine executable path from: %s\n",
+                       argv0);
+               strbuf_release(&buf);
+               return;
+       }
+
+       resolved = strbuf_detach(&buf, NULL);
+       slash = find_last_dir_sep(resolved);
+       if (slash)
+               resolved[slash - resolved] = '\0';
+
+       executable_dirname = resolved;
+       trace_printf("trace: resolved executable dir: %s\n",
+                    executable_dirname);
+}
+
+#else
+
+/*
+ * When not using a runtime prefix, Git uses a hard-coded path.
+ */
+static const char *system_prefix(void)
+{
+       return FALLBACK_RUNTIME_PREFIX;
+}
+
+/*
+ * This is called during initialization, but no work needs to be done here when
+ * runtime prefix is not being used.
+ */
+void git_resolve_executable_dir(const char *argv0)
+{
+}
+
+#endif /* RUNTIME_PREFIX */
+
+char *system_path(const char *path)
+{
+       struct strbuf d = STRBUF_INIT;
+
+       if (is_absolute_path(path))
+               return xstrdup(path);
+
+       strbuf_addf(&d, "%s/%s", system_prefix(), path);
+       return strbuf_detach(&d, NULL);
+}
+
+static const char *exec_path_value;
+
+void git_set_exec_path(const char *exec_path)
+{
+       exec_path_value = exec_path;
+       /*
+        * Propagate this setting to external programs.
+        */
+       setenv(EXEC_PATH_ENVIRONMENT, exec_path, 1);
+}
+
+/* Returns the highest-priority location to look for git programs. */
+const char *git_exec_path(void)
+{
+       if (!exec_path_value) {
+               const char *env = getenv(EXEC_PATH_ENVIRONMENT);
+               if (env && *env)
+                       exec_path_value = xstrdup(env);
+               else
+                       exec_path_value = system_path(GIT_EXEC_PATH);
+       }
+       return exec_path_value;
+}
+
+static void add_path(struct strbuf *out, const char *path)
+{
+       if (path && *path) {
+               strbuf_add_absolute_path(out, path);
+               strbuf_addch(out, PATH_SEP);
+       }
+}
+
+void setup_path(void)
+{
+       const char *exec_path = git_exec_path();
+       const char *old_path = getenv("PATH");
+       struct strbuf new_path = STRBUF_INIT;
+
+       git_set_exec_path(exec_path);
+       add_path(&new_path, exec_path);
+
+       if (old_path)
+               strbuf_addstr(&new_path, old_path);
+       else
+               strbuf_addstr(&new_path, _PATH_DEFPATH);
+
+       setenv("PATH", new_path.buf, 1);
+
+       strbuf_release(&new_path);
+}
+
+const char **prepare_git_cmd(struct argv_array *out, const char **argv)
+{
+       argv_array_push(out, "git");
+       argv_array_pushv(out, argv);
+       return out->argv;
+}
+
+int execv_git_cmd(const char **argv)
+{
+       struct argv_array nargv = ARGV_ARRAY_INIT;
+
+       prepare_git_cmd(&nargv, argv);
+       trace_argv_printf(nargv.argv, "trace: exec:");
+
+       /* execvp() can only ever return if it fails */
+       sane_execvp("git", (char **)nargv.argv);
+
+       trace_printf("trace: exec failed: %s\n", strerror(errno));
+
+       argv_array_clear(&nargv);
+       return -1;
+}
+
+int execl_git_cmd(const char *cmd, ...)
+{
+       int argc;
+       const char *argv[MAX_ARGS + 1];
+       const char *arg;
+       va_list param;
+
+       va_start(param, cmd);
+       argv[0] = cmd;
+       argc = 1;
+       while (argc < MAX_ARGS) {
+               arg = argv[argc++] = va_arg(param, char *);
+               if (!arg)
+                       break;
+       }
+       va_end(param);
+       if (MAX_ARGS <= argc)
+               return error("too many args to run %s", cmd);
+
+       argv[argc] = NULL;
+       return execv_git_cmd(argv);
+}
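
The git_get_exec_path() added above chains the platform-specific probes with "&&": each probe returns 0 on success, so the first method that fills the buffer short-circuits the rest, and the error branch is reached only when every method failed. A minimal stand-alone sketch of that dispatch pattern, using hypothetical probe functions in place of the real sysctl/procfs/argv0 variants:

#include <stdio.h>
#include <string.h>

/* Hypothetical probes: each fills 'buf' and returns 0 on success. */
static int probe_procfs(char *buf, size_t len)
{
	return -1; /* pretend /proc/self/exe is unavailable here */
}

static int probe_argv0(char *buf, size_t len, const char *argv0)
{
	if (!argv0 || strlen(argv0) + 1 > len)
		return -1;
	strcpy(buf, argv0);
	return 0;
}

/* Mirrors the &&-chain above: 0 and a filled 'buf' if any probe worked. */
static int get_exec_path(char *buf, size_t len, const char *argv0)
{
	if (probe_procfs(buf, len) &&
	    probe_argv0(buf, len, argv0))
		return -1; /* every probe failed */
	return 0;
}

int main(int argc, const char **argv)
{
	char path[4096];

	if (!get_exec_path(path, sizeof(path), argv[0]))
		printf("resolved: %s\n", path);
	return 0;
}
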
diff --git a/exec-cmd.h b/exec-cmd.h
new file mode 100644 (file)
index 0000000..2522453
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef GIT_EXEC_CMD_H
+#define GIT_EXEC_CMD_H
+
+struct argv_array;
+
+extern void git_set_exec_path(const char *exec_path);
+extern void git_resolve_executable_dir(const char *path);
+extern const char *git_exec_path(void);
+extern void setup_path(void);
+extern const char **prepare_git_cmd(struct argv_array *out, const char **argv);
+extern int execv_git_cmd(const char **argv); /* NULL terminated */
+LAST_ARG_MUST_BE_NULL
+extern int execl_git_cmd(const char *cmd, ...);
+extern char *system_path(const char *path);
+
+#endif /* GIT_EXEC_CMD_H */
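
The renamed header above is the whole public surface of the module. A hypothetical caller (sketched only to show the intended order, not the actual git.c wiring) would resolve the executable directory first, set up PATH, and then re-exec a subcommand through the exec path:

#include "cache.h"
#include "exec-cmd.h"

int cmd_main(int argc, const char **argv)
{
	/* Resolve the directory holding this binary (a no-op without RUNTIME_PREFIX). */
	git_resolve_executable_dir(argv[0]);

	/* Prepend the exec path to PATH so "git-*" helpers there are found first. */
	setup_path();

	/* Re-exec "git <args...>"; execv_git_cmd() only returns on failure. */
	if (argc > 1)
		return execv_git_cmd(argv + 1);
	return 1;
}
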
diff --git a/exec_cmd.c b/exec_cmd.c
deleted file mode 100644 (file)
index ce192a2..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-#include "cache.h"
-#include "exec_cmd.h"
-#include "quote.h"
-#include "argv-array.h"
-#define MAX_ARGS       32
-
-static const char *argv_exec_path;
-
-#ifdef RUNTIME_PREFIX
-static const char *argv0_path;
-
-static const char *system_prefix(void)
-{
-       static const char *prefix;
-
-       assert(argv0_path);
-       assert(is_absolute_path(argv0_path));
-
-       if (!prefix &&
-           !(prefix = strip_path_suffix(argv0_path, GIT_EXEC_PATH)) &&
-           !(prefix = strip_path_suffix(argv0_path, BINDIR)) &&
-           !(prefix = strip_path_suffix(argv0_path, "git"))) {
-               prefix = PREFIX;
-               trace_printf("RUNTIME_PREFIX requested, "
-                               "but prefix computation failed.  "
-                               "Using static fallback '%s'.\n", prefix);
-       }
-       return prefix;
-}
-
-void git_extract_argv0_path(const char *argv0)
-{
-       const char *slash;
-
-       if (!argv0 || !*argv0)
-               return;
-
-       slash = find_last_dir_sep(argv0);
-
-       if (slash)
-               argv0_path = xstrndup(argv0, slash - argv0);
-}
-
-#else
-
-static const char *system_prefix(void)
-{
-       return PREFIX;
-}
-
-void git_extract_argv0_path(const char *argv0)
-{
-}
-
-#endif /* RUNTIME_PREFIX */
-
-char *system_path(const char *path)
-{
-       struct strbuf d = STRBUF_INIT;
-
-       if (is_absolute_path(path))
-               return xstrdup(path);
-
-       strbuf_addf(&d, "%s/%s", system_prefix(), path);
-       return strbuf_detach(&d, NULL);
-}
-
-void git_set_argv_exec_path(const char *exec_path)
-{
-       argv_exec_path = exec_path;
-       /*
-        * Propagate this setting to external programs.
-        */
-       setenv(EXEC_PATH_ENVIRONMENT, exec_path, 1);
-}
-
-
-/* Returns the highest-priority, location to look for git programs. */
-const char *git_exec_path(void)
-{
-       static char *cached_exec_path;
-
-       if (argv_exec_path)
-               return argv_exec_path;
-
-       if (!cached_exec_path) {
-               const char *env = getenv(EXEC_PATH_ENVIRONMENT);
-               if (env && *env)
-                       cached_exec_path = xstrdup(env);
-               else
-                       cached_exec_path = system_path(GIT_EXEC_PATH);
-       }
-       return cached_exec_path;
-}
-
-static void add_path(struct strbuf *out, const char *path)
-{
-       if (path && *path) {
-               strbuf_add_absolute_path(out, path);
-               strbuf_addch(out, PATH_SEP);
-       }
-}
-
-void setup_path(void)
-{
-       const char *old_path = getenv("PATH");
-       struct strbuf new_path = STRBUF_INIT;
-
-       add_path(&new_path, git_exec_path());
-
-       if (old_path)
-               strbuf_addstr(&new_path, old_path);
-       else
-               strbuf_addstr(&new_path, _PATH_DEFPATH);
-
-       setenv("PATH", new_path.buf, 1);
-
-       strbuf_release(&new_path);
-}
-
-const char **prepare_git_cmd(struct argv_array *out, const char **argv)
-{
-       argv_array_push(out, "git");
-       argv_array_pushv(out, argv);
-       return out->argv;
-}
-
-int execv_git_cmd(const char **argv) {
-       struct argv_array nargv = ARGV_ARRAY_INIT;
-
-       prepare_git_cmd(&nargv, argv);
-       trace_argv_printf(nargv.argv, "trace: exec:");
-
-       /* execvp() can only ever return if it fails */
-       sane_execvp("git", (char **)nargv.argv);
-
-       trace_printf("trace: exec failed: %s\n", strerror(errno));
-
-       argv_array_clear(&nargv);
-       return -1;
-}
-
-
-int execl_git_cmd(const char *cmd,...)
-{
-       int argc;
-       const char *argv[MAX_ARGS + 1];
-       const char *arg;
-       va_list param;
-
-       va_start(param, cmd);
-       argv[0] = cmd;
-       argc = 1;
-       while (argc < MAX_ARGS) {
-               arg = argv[argc++] = va_arg(param, char *);
-               if (!arg)
-                       break;
-       }
-       va_end(param);
-       if (MAX_ARGS <= argc)
-               return error("too many args to run %s", cmd);
-
-       argv[argc] = NULL;
-       return execv_git_cmd(argv);
-}
diff --git a/exec_cmd.h b/exec_cmd.h
deleted file mode 100644 (file)
index ff0b480..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef GIT_EXEC_CMD_H
-#define GIT_EXEC_CMD_H
-
-struct argv_array;
-
-extern void git_set_argv_exec_path(const char *exec_path);
-extern void git_extract_argv0_path(const char *path);
-extern const char *git_exec_path(void);
-extern void setup_path(void);
-extern const char **prepare_git_cmd(struct argv_array *out, const char **argv);
-extern int execv_git_cmd(const char **argv); /* NULL terminated */
-LAST_ARG_MUST_BE_NULL
-extern int execl_git_cmd(const char *cmd, ...);
-extern char *system_path(const char *path);
-
-#endif /* GIT_EXEC_CMD_H */
index b5db5d20b1b20c642cf97f990eb538ee6fbaa299..b2338fa8eb20d5c52d2bf03d73dca050721c292c 100644 (file)
@@ -154,6 +154,7 @@ Format of STDIN stream:
 
 #include "builtin.h"
 #include "cache.h"
+#include "repository.h"
 #include "config.h"
 #include "lockfile.h"
 #include "object.h"
@@ -168,6 +169,8 @@ Format of STDIN stream:
 #include "dir.h"
 #include "run-command.h"
 #include "packfile.h"
+#include "object-store.h"
+#include "mem-pool.h"
 
 #define PACK_ID_BITS 16
 #define MAX_PACK_ID ((1<<PACK_ID_BITS)-1)
@@ -209,13 +212,6 @@ struct last_object {
        unsigned no_swap : 1;
 };
 
-struct mem_pool {
-       struct mem_pool *next_pool;
-       char *next_free;
-       char *end;
-       uintmax_t space[FLEX_ARRAY]; /* more */
-};
-
 struct atom_str {
        struct atom_str *next_atom;
        unsigned short str_len;
@@ -304,9 +300,8 @@ static int global_argc;
 static const char **global_argv;
 
 /* Memory pools */
-static size_t mem_pool_alloc = 2*1024*1024 - sizeof(struct mem_pool);
-static size_t total_allocd;
-static struct mem_pool *mem_pool;
+static struct mem_pool fi_mem_pool =  {NULL, 2*1024*1024 -
+                                      sizeof(struct mp_block), 0 };
 
 /* Atom management */
 static unsigned int atom_table_sz = 4451;
@@ -341,6 +336,7 @@ static unsigned int tree_entry_alloc = 1000;
 static void *avail_tree_entry;
 static unsigned int avail_tree_table_sz = 100;
 static struct avail_tree_content **avail_tree_table;
+static size_t tree_entry_allocd;
 static struct strbuf old_tree = STRBUF_INIT;
 static struct strbuf new_tree = STRBUF_INIT;
 
@@ -634,49 +630,10 @@ static unsigned int hc_str(const char *s, size_t len)
        return r;
 }
 
-static void *pool_alloc(size_t len)
-{
-       struct mem_pool *p;
-       void *r;
-
-       /* round up to a 'uintmax_t' alignment */
-       if (len & (sizeof(uintmax_t) - 1))
-               len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1));
-
-       for (p = mem_pool; p; p = p->next_pool)
-               if ((p->end - p->next_free >= len))
-                       break;
-
-       if (!p) {
-               if (len >= (mem_pool_alloc/2)) {
-                       total_allocd += len;
-                       return xmalloc(len);
-               }
-               total_allocd += sizeof(struct mem_pool) + mem_pool_alloc;
-               p = xmalloc(st_add(sizeof(struct mem_pool), mem_pool_alloc));
-               p->next_pool = mem_pool;
-               p->next_free = (char *) p->space;
-               p->end = p->next_free + mem_pool_alloc;
-               mem_pool = p;
-       }
-
-       r = p->next_free;
-       p->next_free += len;
-       return r;
-}
-
-static void *pool_calloc(size_t count, size_t size)
-{
-       size_t len = count * size;
-       void *r = pool_alloc(len);
-       memset(r, 0, len);
-       return r;
-}
-
 static char *pool_strdup(const char *s)
 {
        size_t len = strlen(s) + 1;
-       char *r = pool_alloc(len);
+       char *r = mem_pool_alloc(&fi_mem_pool, len);
        memcpy(r, s, len);
        return r;
 }
@@ -685,7 +642,7 @@ static void insert_mark(uintmax_t idnum, struct object_entry *oe)
 {
        struct mark_set *s = marks;
        while ((idnum >> s->shift) >= 1024) {
-               s = pool_calloc(1, sizeof(struct mark_set));
+               s = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
                s->shift = marks->shift + 10;
                s->data.sets[0] = marks;
                marks = s;
@@ -694,7 +651,7 @@ static void insert_mark(uintmax_t idnum, struct object_entry *oe)
                uintmax_t i = idnum >> s->shift;
                idnum -= i << s->shift;
                if (!s->data.sets[i]) {
-                       s->data.sets[i] = pool_calloc(1, sizeof(struct mark_set));
+                       s->data.sets[i] = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
                        s->data.sets[i]->shift = s->shift - 10;
                }
                s = s->data.sets[i];
@@ -732,7 +689,7 @@ static struct atom_str *to_atom(const char *s, unsigned short len)
                if (c->str_len == len && !strncmp(s, c->str_dat, len))
                        return c;
 
-       c = pool_alloc(sizeof(struct atom_str) + len + 1);
+       c = mem_pool_alloc(&fi_mem_pool, sizeof(struct atom_str) + len + 1);
        c->str_len = len;
        memcpy(c->str_dat, s, len);
        c->str_dat[len] = 0;
@@ -763,7 +720,7 @@ static struct branch *new_branch(const char *name)
        if (check_refname_format(name, REFNAME_ALLOW_ONELEVEL))
                die("Branch name doesn't conform to GIT standards: %s", name);
 
-       b = pool_calloc(1, sizeof(struct branch));
+       b = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct branch));
        b->name = pool_strdup(name);
        b->table_next_branch = branch_table[hc];
        b->branch_tree.versions[0].mode = S_IFDIR;
@@ -799,7 +756,7 @@ static struct tree_content *new_tree_content(unsigned int cnt)
                        avail_tree_table[hc] = f->next_avail;
        } else {
                cnt = cnt & 7 ? ((cnt / 8) + 1) * 8 : cnt;
-               f = pool_alloc(sizeof(*t) + sizeof(t->entries[0]) * cnt);
+               f = mem_pool_alloc(&fi_mem_pool, sizeof(*t) + sizeof(t->entries[0]) * cnt);
                f->entry_capacity = cnt;
        }
 
@@ -844,7 +801,7 @@ static struct tree_entry *new_tree_entry(void)
 
        if (!avail_tree_entry) {
                unsigned int n = tree_entry_alloc;
-               total_allocd += n * sizeof(struct tree_entry);
+               tree_entry_allocd += n * sizeof(struct tree_entry);
                ALLOC_ARRAY(e, n);
                avail_tree_entry = e;
                while (n-- > 1) {
@@ -1016,7 +973,7 @@ static void end_packfile(void)
                struct tag *t;
 
                close_pack_windows(pack_data);
-               hashclose(pack_file, cur_pack_oid.hash, 0);
+               finalize_hashfile(pack_file, cur_pack_oid.hash, 0);
                fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1,
                                    pack_data->pack_name, object_count,
                                    cur_pack_oid.hash, pack_size);
@@ -1036,7 +993,7 @@ static void end_packfile(void)
                if (!new_p)
                        die("core git rejected index %s", idx_name);
                all_packs[pack_id] = new_p;
-               install_packed_git(new_p);
+               install_packed_git(the_repository, new_p);
                free(idx_name);
 
                /* Print the boundary */
@@ -1110,7 +1067,8 @@ static int store_object(
        if (e->idx.offset) {
                duplicate_count_by_type[type]++;
                return 1;
-       } else if (find_sha1_pack(oid.hash, packed_git)) {
+       } else if (find_sha1_pack(oid.hash,
+                                 get_packed_git(the_repository))) {
                e->type = type;
                e->pack_id = MAX_PACK_ID;
                e->idx.offset = 1; /* just not zero! */
@@ -1307,7 +1265,8 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
                duplicate_count_by_type[OBJ_BLOB]++;
                truncate_pack(&checkpoint);
 
-       } else if (find_sha1_pack(oid.hash, packed_git)) {
+       } else if (find_sha1_pack(oid.hash,
+                                 get_packed_git(the_repository))) {
                e->type = OBJ_BLOB;
                e->pack_id = MAX_PACK_ID;
                e->idx.offset = 1; /* just not zero! */
@@ -1372,7 +1331,7 @@ static void *gfi_unpack_entry(
                 */
                p->pack_size = pack_size + the_hash_algo->rawsz;
        }
-       return unpack_entry(p, oe->idx.offset, &type, sizep);
+       return unpack_entry(the_repository, p, oe->idx.offset, &type, sizep);
 }
 
 static const char *get_mode(const char *str, uint16_t *modep)
@@ -1412,7 +1371,7 @@ static void load_tree(struct tree_entry *root)
                        die("Can't load tree %s", oid_to_hex(oid));
        } else {
                enum object_type type;
-               buf = read_sha1_file(oid->hash, &type, &size);
+               buf = read_object_file(oid, &type, &size);
                if (!buf || type != OBJ_TREE)
                        die("Can't load tree %s", oid_to_hex(oid));
        }
@@ -1913,7 +1872,8 @@ static void read_marks(void)
                        die("corrupt mark line: %s", line);
                e = find_object(&oid);
                if (!e) {
-                       enum object_type type = sha1_object_info(oid.hash, NULL);
+                       enum object_type type = oid_object_info(the_repository,
+                                                               &oid, NULL);
                        if (type < 0)
                                die("object not found: %s", oid_to_hex(&oid));
                        e = insert_object(&oid);
@@ -2443,7 +2403,8 @@ static void file_change_m(const char *p, struct branch *b)
                enum object_type expected = S_ISDIR(mode) ?
                                                OBJ_TREE: OBJ_BLOB;
                enum object_type type = oe ? oe->type :
-                                       sha1_object_info(oid.hash, NULL);
+                                       oid_object_info(the_repository, &oid,
+                                                       NULL);
                if (type < 0)
                        die("%s not found: %s",
                                        S_ISDIR(mode) ?  "Tree" : "Blob",
@@ -2583,8 +2544,9 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
                oidcpy(&commit_oid, &commit_oe->idx.oid);
        } else if (!get_oid(p, &commit_oid)) {
                unsigned long size;
-               char *buf = read_object_with_reference(commit_oid.hash,
-                       commit_type, &size, commit_oid.hash);
+               char *buf = read_object_with_reference(&commit_oid,
+                                                      commit_type, &size,
+                                                      &commit_oid);
                if (!buf || size < 46)
                        die("Not a valid commit: %s", p);
                free(buf);
@@ -2603,7 +2565,8 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
                        die("Not a blob (actually a %s): %s",
                                type_name(oe->type), command_buf.buf);
        } else if (!is_null_oid(&oid)) {
-               enum object_type type = sha1_object_info(oid.hash, NULL);
+               enum object_type type = oid_object_info(the_repository, &oid,
+                                                       NULL);
                if (type < 0)
                        die("Blob not found: %s", command_buf.buf);
                if (type != OBJ_BLOB)
@@ -2653,9 +2616,8 @@ static void parse_from_existing(struct branch *b)
                unsigned long size;
                char *buf;
 
-               buf = read_object_with_reference(b->oid.hash,
-                                                commit_type, &size,
-                                                b->oid.hash);
+               buf = read_object_with_reference(&b->oid, commit_type, &size,
+                                                &b->oid);
                parse_from_commit(b, buf, size);
                free(buf);
        }
@@ -2732,8 +2694,9 @@ static struct hash_list *parse_merge(unsigned int *count)
                        oidcpy(&n->oid, &oe->idx.oid);
                } else if (!get_oid(from, &n->oid)) {
                        unsigned long size;
-                       char *buf = read_object_with_reference(n->oid.hash,
-                               commit_type, &size, n->oid.hash);
+                       char *buf = read_object_with_reference(&n->oid,
+                                                              commit_type,
+                                                              &size, &n->oid);
                        if (!buf || size < 46)
                                die("Not a valid commit: %s", from);
                        free(buf);
@@ -2862,7 +2825,7 @@ static void parse_new_tag(const char *arg)
        enum object_type type;
        const char *v;
 
-       t = pool_alloc(sizeof(struct tag));
+       t = mem_pool_alloc(&fi_mem_pool, sizeof(struct tag));
        memset(t, 0, sizeof(struct tag));
        t->name = pool_strdup(arg);
        if (last_tag)
@@ -2890,7 +2853,7 @@ static void parse_new_tag(const char *arg)
        } else if (!get_oid(from, &oid)) {
                struct object_entry *oe = find_object(&oid);
                if (!oe) {
-                       type = sha1_object_info(oid.hash, NULL);
+                       type = oid_object_info(the_repository, &oid, NULL);
                        if (type < 0)
                                die("Not a valid object: %s", from);
                } else
@@ -2966,7 +2929,7 @@ static void cat_blob(struct object_entry *oe, struct object_id *oid)
        char *buf;
 
        if (!oe || oe->pack_id == MAX_PACK_ID) {
-               buf = read_sha1_file(oid->hash, &type, &size);
+               buf = read_object_file(oid, &type, &size);
        } else {
                type = oe->type;
                buf = gfi_unpack_entry(oe, &size);
@@ -3048,7 +3011,8 @@ static struct object_entry *dereference(struct object_entry *oe,
        unsigned long size;
        char *buf = NULL;
        if (!oe) {
-               enum object_type type = sha1_object_info(oid->hash, NULL);
+               enum object_type type = oid_object_info(the_repository, oid,
+                                                       NULL);
                if (type < 0)
                        die("object not found: %s", oid_to_hex(oid));
                /* cache it! */
@@ -3071,7 +3035,7 @@ static struct object_entry *dereference(struct object_entry *oe,
                buf = gfi_unpack_entry(oe, &size);
        } else {
                enum object_type unused;
-               buf = read_sha1_file(oid->hash, &unused, &size);
+               buf = read_object_file(oid, &unused, &size);
        }
        if (!buf)
                die("Can't load object %s", oid_to_hex(oid));
@@ -3461,17 +3425,16 @@ int cmd_main(int argc, const char **argv)
        atom_table = xcalloc(atom_table_sz, sizeof(struct atom_str*));
        branch_table = xcalloc(branch_table_sz, sizeof(struct branch*));
        avail_tree_table = xcalloc(avail_tree_table_sz, sizeof(struct avail_tree_content*));
-       marks = pool_calloc(1, sizeof(struct mark_set));
+       marks = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
 
        global_argc = argc;
        global_argv = argv;
 
-       rc_free = pool_alloc(cmd_save * sizeof(*rc_free));
+       rc_free = mem_pool_alloc(&fi_mem_pool, cmd_save * sizeof(*rc_free));
        for (i = 0; i < (cmd_save - 1); i++)
                rc_free[i].next = &rc_free[i + 1];
        rc_free[cmd_save - 1].next = NULL;
 
-       prepare_packed_git();
        start_packfile();
        set_die_routine(die_nicely);
        set_checkpoint_signal();
@@ -3541,8 +3504,8 @@ int cmd_main(int argc, const char **argv)
                fprintf(stderr, "Total branches:  %10lu (%10lu loads     )\n", branch_count, branch_load_count);
                fprintf(stderr, "      marks:     %10" PRIuMAX " (%10" PRIuMAX " unique    )\n", (((uintmax_t)1) << marks->shift) * 1024, marks_set_count);
                fprintf(stderr, "      atoms:     %10u\n", atom_cnt);
-               fprintf(stderr, "Memory total:    %10" PRIuMAX " KiB\n", (total_allocd + alloc_count*sizeof(struct object_entry))/1024);
-               fprintf(stderr, "       pools:    %10lu KiB\n", (unsigned long)(total_allocd/1024));
+               fprintf(stderr, "Memory total:    %10" PRIuMAX " KiB\n", (tree_entry_allocd + fi_mem_pool.pool_alloc + alloc_count*sizeof(struct object_entry))/1024);
+               fprintf(stderr, "       pools:    %10lu KiB\n", (unsigned long)((tree_entry_allocd + fi_mem_pool.pool_alloc) /1024));
                fprintf(stderr, "     objects:    %10" PRIuMAX " KiB\n", (alloc_count*sizeof(struct object_entry))/1024);
                fprintf(stderr, "---------------------------------------------------------------------\n");
                pack_report();
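
The fast-import hunks above retire the file-local pool_alloc()/pool_calloc() helpers in favour of the shared mem-pool API, keeping only a statically initialized pool and letting the pool itself track total allocation. A small illustrative sketch, assuming nothing beyond what is visible in the hunks (the initializer shape and the mem_pool_alloc()/mem_pool_calloc() call signatures):

#include "cache.h"
#include "mem-pool.h"

/* One pool for the lifetime of the program; ~2 MiB blocks, nothing allocated yet. */
static struct mem_pool pool = { NULL, 2*1024*1024 - sizeof(struct mp_block), 0 };

struct mark_node {
	struct mark_node *next;
	unsigned long id;
};

/* Zero-initialized node from the pool, matching the mem_pool_calloc() uses above. */
static struct mark_node *new_mark_node(unsigned long id)
{
	struct mark_node *n = mem_pool_calloc(&pool, 1, sizeof(*n));

	n->id = id;
	return n;	/* freed only when the whole pool is discarded */
}
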
index 1d6117565c2067460efc50aa4e6ca2ecb167a976..490c38f833419cde65b97f75f454a7d148fff6a2 100644 (file)
@@ -1,11 +1,12 @@
 #include "cache.h"
+#include "repository.h"
 #include "config.h"
 #include "lockfile.h"
 #include "refs.h"
 #include "pkt-line.h"
 #include "commit.h"
 #include "tag.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "pack.h"
 #include "sideband.h"
 #include "fetch-pack.h"
@@ -304,9 +305,9 @@ static void insert_one_alternate_object(struct object *obj)
 #define PIPESAFE_FLUSH 32
 #define LARGE_FLUSH 16384
 
-static int next_flush(struct fetch_pack_args *args, int count)
+static int next_flush(int stateless_rpc, int count)
 {
-       if (args->stateless_rpc) {
+       if (stateless_rpc) {
                if (count < LARGE_FLUSH)
                        count <<= 1;
                else
@@ -469,7 +470,7 @@ static int find_common(struct fetch_pack_args *args,
                        send_request(args, fd[1], &req_buf);
                        strbuf_setlen(&req_buf, state_len);
                        flushes++;
-                       flush_at = next_flush(args, count);
+                       flush_at = next_flush(args->stateless_rpc, count);
 
                        /*
                         * We keep one window "ahead" of the other side, and
@@ -711,6 +712,28 @@ static void mark_alternate_complete(struct object *obj)
        mark_complete(&obj->oid);
 }
 
+struct loose_object_iter {
+       struct oidset *loose_object_set;
+       struct ref *refs;
+};
+
+/*
+ *  If the number of refs is not larger than the number of loose objects,
+ *  this function stops inserting.
+ */
+static int add_loose_objects_to_set(const struct object_id *oid,
+                                   const char *path,
+                                   void *data)
+{
+       struct loose_object_iter *iter = data;
+       oidset_insert(iter->loose_object_set, oid);
+       if (iter->refs == NULL)
+               return 1;
+
+       iter->refs = iter->refs->next;
+       return 0;
+}
+
 static int everything_local(struct fetch_pack_args *args,
                            struct ref **refs,
                            struct ref **sought, int nr_sought)
@@ -719,16 +742,31 @@ static int everything_local(struct fetch_pack_args *args,
        int retval;
        int old_save_commit_buffer = save_commit_buffer;
        timestamp_t cutoff = 0;
+       struct oidset loose_oid_set = OIDSET_INIT;
+       int use_oidset = 0;
+       struct loose_object_iter iter = {&loose_oid_set, *refs};
+
+       /* Enumerate all loose objects, stopping early if they outnumber the refs. */
+       use_oidset = !for_each_loose_object(add_loose_objects_to_set,
+                                           &iter, 0);
 
        save_commit_buffer = 0;
 
        for (ref = *refs; ref; ref = ref->next) {
                struct object *o;
+               unsigned int flags = OBJECT_INFO_QUICK;
 
-               if (!has_object_file_with_flags(&ref->old_oid,
-                                               OBJECT_INFO_QUICK))
-                       continue;
+               if (use_oidset &&
+                   !oidset_contains(&loose_oid_set, &ref->old_oid)) {
+                       /*
+                        * I know this does not exist in the loose form,
+                        * so check if it exists in a non-loose form.
+                        */
+                       flags |= OBJECT_INFO_IGNORE_LOOSE;
+               }
 
+               if (!has_object_file_with_flags(&ref->old_oid, flags))
+                       continue;
                o = parse_object(&ref->old_oid);
                if (!o)
                        continue;
@@ -744,6 +782,8 @@ static int everything_local(struct fetch_pack_args *args,
                }
        }
 
+       oidset_clear(&loose_oid_set);
+
        if (!args->no_dependents) {
                if (!args->deepen) {
                        for_each_ref(mark_complete_oid, NULL);
@@ -1040,6 +1080,335 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
        return ref;
 }
 
+static void add_shallow_requests(struct strbuf *req_buf,
+                                const struct fetch_pack_args *args)
+{
+       if (is_repository_shallow())
+               write_shallow_commits(req_buf, 1, NULL);
+       if (args->depth > 0)
+               packet_buf_write(req_buf, "deepen %d", args->depth);
+       if (args->deepen_since) {
+               timestamp_t max_age = approxidate(args->deepen_since);
+               packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
+       }
+       if (args->deepen_not) {
+               int i;
+               for (i = 0; i < args->deepen_not->nr; i++) {
+                       struct string_list_item *s = args->deepen_not->items + i;
+                       packet_buf_write(req_buf, "deepen-not %s", s->string);
+               }
+       }
+}
+
+static void add_wants(const struct ref *wants, struct strbuf *req_buf)
+{
+       for ( ; wants ; wants = wants->next) {
+               const struct object_id *remote = &wants->old_oid;
+               const char *remote_hex;
+               struct object *o;
+
+               /*
+                * If that object is complete (i.e. it is an ancestor of a
+                * local ref), we tell them we have it but do not have to
+                * tell them about its ancestors, which they already know
+                * about.
+                *
+                * We use lookup_object here because we are only
+                * interested in the case we *know* the object is
+                * reachable and we have already scanned it.
+                */
+               if (((o = lookup_object(remote->hash)) != NULL) &&
+                   (o->flags & COMPLETE)) {
+                       continue;
+               }
+
+               remote_hex = oid_to_hex(remote);
+               packet_buf_write(req_buf, "want %s\n", remote_hex);
+       }
+}
+
+static void add_common(struct strbuf *req_buf, struct oidset *common)
+{
+       struct oidset_iter iter;
+       const struct object_id *oid;
+       oidset_iter_init(common, &iter);
+
+       while ((oid = oidset_iter_next(&iter))) {
+               packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
+       }
+}
+
+static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
+{
+       int ret = 0;
+       int haves_added = 0;
+       const struct object_id *oid;
+
+       while ((oid = get_rev())) {
+               packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
+               if (++haves_added >= *haves_to_send)
+                       break;
+       }
+
+       *in_vain += haves_added;
+       if (!haves_added || *in_vain >= MAX_IN_VAIN) {
+               /* Send Done */
+               packet_buf_write(req_buf, "done\n");
+               ret = 1;
+       }
+
+       /* Increase haves to send on next round */
+       *haves_to_send = next_flush(1, *haves_to_send);
+
+       return ret;
+}
+
+static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
+                             const struct ref *wants, struct oidset *common,
+                             int *haves_to_send, int *in_vain)
+{
+       int ret = 0;
+       struct strbuf req_buf = STRBUF_INIT;
+
+       if (server_supports_v2("fetch", 1))
+               packet_buf_write(&req_buf, "command=fetch");
+       if (server_supports_v2("agent", 0))
+               packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
+       if (args->server_options && args->server_options->nr &&
+           server_supports_v2("server-option", 1)) {
+               int i;
+               for (i = 0; i < args->server_options->nr; i++)
+                       packet_write_fmt(fd_out, "server-option=%s",
+                                        args->server_options->items[i].string);
+       }
+
+       packet_buf_delim(&req_buf);
+       if (args->use_thin_pack)
+               packet_buf_write(&req_buf, "thin-pack");
+       if (args->no_progress)
+               packet_buf_write(&req_buf, "no-progress");
+       if (args->include_tag)
+               packet_buf_write(&req_buf, "include-tag");
+       if (prefer_ofs_delta)
+               packet_buf_write(&req_buf, "ofs-delta");
+
+       /* Add shallow-info and deepen request */
+       if (server_supports_feature("fetch", "shallow", 0))
+               add_shallow_requests(&req_buf, args);
+       else if (is_repository_shallow() || args->deepen)
+               die(_("Server does not support shallow requests"));
+
+       /* add wants */
+       add_wants(wants, &req_buf);
+
+       /* Add all of the common commits we've found in previous rounds */
+       add_common(&req_buf, common);
+
+       /* Add initial haves */
+       ret = add_haves(&req_buf, haves_to_send, in_vain);
+
+       /* Send request */
+       packet_buf_flush(&req_buf);
+       write_or_die(fd_out, req_buf.buf, req_buf.len);
+
+       strbuf_release(&req_buf);
+       return ret;
+}
+
+/*
+ * Processes a section header in a server's response and checks if it matches
+ * `section`.  If the value of `peek` is 1, the header line will be peeked (and
+ * not consumed); if 0, the line will be consumed and the function will die if
+ * the section header doesn't match what was expected.
+ */
+static int process_section_header(struct packet_reader *reader,
+                                 const char *section, int peek)
+{
+       int ret;
+
+       if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
+               die("error reading section header '%s'", section);
+
+       ret = !strcmp(reader->line, section);
+
+       if (!peek) {
+               if (!ret)
+                       die("expected '%s', received '%s'",
+                           section, reader->line);
+               packet_reader_read(reader);
+       }
+
+       return ret;
+}
+
+static int process_acks(struct packet_reader *reader, struct oidset *common)
+{
+       /* received */
+       int received_ready = 0;
+       int received_ack = 0;
+
+       process_section_header(reader, "acknowledgments", 0);
+       while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+               const char *arg;
+
+               if (!strcmp(reader->line, "NAK"))
+                       continue;
+
+               if (skip_prefix(reader->line, "ACK ", &arg)) {
+                       struct object_id oid;
+                       if (!get_oid_hex(arg, &oid)) {
+                               struct commit *commit;
+                               oidset_insert(common, &oid);
+                               commit = lookup_commit(&oid);
+                               mark_common(commit, 0, 1);
+                       }
+                       continue;
+               }
+
+               if (!strcmp(reader->line, "ready")) {
+                       clear_prio_queue(&rev_list);
+                       received_ready = 1;
+                       continue;
+               }
+
+               die("unexpected acknowledgment line: '%s'", reader->line);
+       }
+
+       if (reader->status != PACKET_READ_FLUSH &&
+           reader->status != PACKET_READ_DELIM)
+               die("error processing acks: %d", reader->status);
+
+       /* return 0 if no common, 1 if there are common, or 2 if ready */
+       return received_ready ? 2 : (received_ack ? 1 : 0);
+}
+
+static void receive_shallow_info(struct fetch_pack_args *args,
+                                struct packet_reader *reader)
+{
+       process_section_header(reader, "shallow-info", 0);
+       while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+               const char *arg;
+               struct object_id oid;
+
+               if (skip_prefix(reader->line, "shallow ", &arg)) {
+                       if (get_oid_hex(arg, &oid))
+                               die(_("invalid shallow line: %s"), reader->line);
+                       register_shallow(&oid);
+                       continue;
+               }
+               if (skip_prefix(reader->line, "unshallow ", &arg)) {
+                       if (get_oid_hex(arg, &oid))
+                               die(_("invalid unshallow line: %s"), reader->line);
+                       if (!lookup_object(oid.hash))
+                               die(_("object not found: %s"), reader->line);
+                       /* make sure that it is parsed as shallow */
+                       if (!parse_object(&oid))
+                               die(_("error in object: %s"), reader->line);
+                       if (unregister_shallow(&oid))
+                               die(_("no shallow found: %s"), reader->line);
+                       continue;
+               }
+               die(_("expected shallow/unshallow, got %s"), reader->line);
+       }
+
+       if (reader->status != PACKET_READ_FLUSH &&
+           reader->status != PACKET_READ_DELIM)
+               die("error processing shallow info: %d", reader->status);
+
+       setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
+       args->deepen = 1;
+}
+
+enum fetch_state {
+       FETCH_CHECK_LOCAL = 0,
+       FETCH_SEND_REQUEST,
+       FETCH_PROCESS_ACKS,
+       FETCH_GET_PACK,
+       FETCH_DONE,
+};
+
+static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
+                                   int fd[2],
+                                   const struct ref *orig_ref,
+                                   struct ref **sought, int nr_sought,
+                                   char **pack_lockfile)
+{
+       struct ref *ref = copy_ref_list(orig_ref);
+       enum fetch_state state = FETCH_CHECK_LOCAL;
+       struct oidset common = OIDSET_INIT;
+       struct packet_reader reader;
+       int in_vain = 0;
+       int haves_to_send = INITIAL_FLUSH;
+       packet_reader_init(&reader, fd[0], NULL, 0,
+                          PACKET_READ_CHOMP_NEWLINE);
+
+       while (state != FETCH_DONE) {
+               switch (state) {
+               case FETCH_CHECK_LOCAL:
+                       sort_ref_list(&ref, ref_compare_name);
+                       QSORT(sought, nr_sought, cmp_ref_by_name);
+
+                       /* v2 supports these by default */
+                       allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
+                       use_sideband = 2;
+                       if (args->depth > 0 || args->deepen_since || args->deepen_not)
+                               args->deepen = 1;
+
+                       if (marked)
+                               for_each_ref(clear_marks, NULL);
+                       marked = 1;
+
+                       for_each_ref(rev_list_insert_ref_oid, NULL);
+                       for_each_cached_alternate(insert_one_alternate_object);
+
+                       /* Filter 'ref' by 'sought' and those that aren't local */
+                       if (everything_local(args, &ref, sought, nr_sought))
+                               state = FETCH_DONE;
+                       else
+                               state = FETCH_SEND_REQUEST;
+                       break;
+               case FETCH_SEND_REQUEST:
+                       if (send_fetch_request(fd[1], args, ref, &common,
+                                              &haves_to_send, &in_vain))
+                               state = FETCH_GET_PACK;
+                       else
+                               state = FETCH_PROCESS_ACKS;
+                       break;
+               case FETCH_PROCESS_ACKS:
+                       /* Process ACKs/NAKs */
+                       switch (process_acks(&reader, &common)) {
+                       case 2:
+                               state = FETCH_GET_PACK;
+                               break;
+                       case 1:
+                               in_vain = 0;
+                               /* fallthrough */
+                       default:
+                               state = FETCH_SEND_REQUEST;
+                               break;
+                       }
+                       break;
+               case FETCH_GET_PACK:
+                       /* Check for shallow-info section */
+                       if (process_section_header(&reader, "shallow-info", 1))
+                               receive_shallow_info(args, &reader);
+
+                       /* get the pack */
+                       process_section_header(&reader, "packfile", 0);
+                       if (get_pack(args, fd, pack_lockfile))
+                               die(_("git fetch-pack: fetch failed."));
+
+                       state = FETCH_DONE;
+                       break;
+               case FETCH_DONE:
+                       continue;
+               }
+       }
+
+       oidset_clear(&common);
+       return ref;
+}
+
 static void fetch_pack_config(void)
 {
        git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
@@ -1185,7 +1554,8 @@ struct ref *fetch_pack(struct fetch_pack_args *args,
                       const char *dest,
                       struct ref **sought, int nr_sought,
                       struct oid_array *shallow,
-                      char **pack_lockfile)
+                      char **pack_lockfile,
+                      enum protocol_version version)
 {
        struct ref *ref_cpy;
        struct shallow_info si;
@@ -1199,9 +1569,13 @@ struct ref *fetch_pack(struct fetch_pack_args *args,
                die(_("no matching remote head"));
        }
        prepare_shallow_info(&si, shallow);
-       ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
-                               &si, pack_lockfile);
-       reprepare_packed_git();
+       if (version == protocol_v2)
+               ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
+                                          pack_lockfile);
+       else
+               ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
+                                       &si, pack_lockfile);
+       reprepare_packed_git(the_repository);
        update_shallow(args, sought, nr_sought, &si);
        clear_shallow_info(&si);
        return ref_cpy;
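
The protocol v2 path added above is a small state machine (check local refs, send a request, process acks, fetch the pack), and every request it writes is a pkt-line stream: a command plus capabilities, a delimiter packet, the want/have arguments, then a flush. A stripped-down sketch of that framing, reusing only the pkt-line helpers already called in the hunks above (the object ids are placeholders):

#include "cache.h"
#include "pkt-line.h"

/* Build a minimal protocol v2 fetch request into 'req'. */
static void build_minimal_fetch_request(struct strbuf *req,
					const char *want_hex,
					const char *have_hex)
{
	packet_buf_write(req, "command=fetch");
	packet_buf_delim(req);			/* separates capabilities from arguments */
	packet_buf_write(req, "thin-pack");
	packet_buf_write(req, "ofs-delta");
	packet_buf_write(req, "want %s\n", want_hex);
	if (have_hex)
		packet_buf_write(req, "have %s\n", have_hex);
	packet_buf_write(req, "done\n");	/* no further negotiation rounds */
	packet_buf_flush(req);			/* terminating 0000 flush packet */
}
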
index 3e224a18226ec6219b09387704baf178821c4c23..bb45a366a82a4a7dae2524e0845ac33128076c57 100644 (file)
@@ -3,6 +3,7 @@
 
 #include "string-list.h"
 #include "run-command.h"
+#include "protocol.h"
 #include "list-objects-filter-options.h"
 
 struct oid_array;
@@ -14,6 +15,7 @@ struct fetch_pack_args {
        const char *deepen_since;
        const struct string_list *deepen_not;
        struct list_objects_filter_options filter_options;
+       const struct string_list *server_options;
        unsigned deepen_relative:1;
        unsigned quiet:1;
        unsigned keep_pack:1;
@@ -53,7 +55,8 @@ struct ref *fetch_pack(struct fetch_pack_args *args,
                       struct ref **sought,
                       int nr_sought,
                       struct oid_array *shallow,
-                      char **pack_lockfile);
+                      char **pack_lockfile,
+                      enum protocol_version version);
 
 /*
  * Print an appropriate error message for each sought ref that wasn't
diff --git a/fsck.c b/fsck.c
index 5c8c12dde381912eccf59bbf3ed8c774344809b8..640422a6c6b8b8f7439670e27c4eb110ef574525 100644 (file)
--- a/fsck.c
+++ b/fsck.c
@@ -396,9 +396,11 @@ static int fsck_walk_commit(struct commit *commit, void *data, struct fsck_optio
 
        name = get_object_name(options, &commit->object);
        if (name)
-               put_object_name(options, &commit->tree->object, "%s:", name);
+               put_object_name(options, &get_commit_tree(commit)->object,
+                               "%s:", name);
 
-       result = options->walk((struct object *)commit->tree, OBJ_TREE, data, options);
+       result = options->walk((struct object *)get_commit_tree(commit),
+                              OBJ_TREE, data, options);
        if (result < 0)
                return result;
        res = result;
@@ -772,7 +774,7 @@ static int fsck_commit_buffer(struct commit *commit, const char *buffer,
        err = fsck_ident(&buffer, &commit->object, options);
        if (err)
                return err;
-       if (!commit->tree) {
+       if (!get_commit_tree(commit)) {
                err = report(options, &commit->object, FSCK_MSG_BAD_TREE, "could not load commit's tree %s", sha1_to_hex(tree_sha1));
                if (err)
                        return err;
@@ -811,7 +813,7 @@ static int fsck_tag_buffer(struct tag *tag, const char *data,
                enum object_type type;
 
                buffer = to_free =
-                       read_sha1_file(tag->object.oid.hash, &type, &size);
+                       read_object_file(&tag->object.oid, &type, &size);
                if (!buffer)
                        return report(options, &tag->object,
                                FSCK_MSG_MISSING_TAG_OBJECT,
index 6d7bcd5d0ed8f2d3f5abdea2f26c6be72909b657..ed3d1a074d60309063d16044bfd83beb78ef0e9d 100644 (file)
@@ -104,7 +104,7 @@ static int query_fsmonitor(int version, uint64_t last_update, struct strbuf *que
        if (!(argv[0] = core_fsmonitor))
                return -1;
 
-       snprintf(ver, sizeof(version), "%d", version);
+       snprintf(ver, sizeof(ver), "%d", version);
        snprintf(date, sizeof(date), "%" PRIuMAX, (uintmax_t)last_update);
        argv[1] = ver;
        argv[2] = date;
@@ -185,6 +185,9 @@ void refresh_fsmonitor(struct index_state *istate)
                for (i = 0; i < istate->cache_nr; i++)
                        istate->cache[i]->ce_flags &= ~CE_FSMONITOR_VALID;
 
+               /* If we're going to check every file, ensure we save the results */
+               istate->cache_changed |= FSMONITOR_CHANGED;
+
                if (istate->untracked)
                        istate->untracked->use_fsmonitor = 0;
        }
index db727ea0204aa13acea0f189ea03e59b5cdb918f..7272771c8e445da194ea608443d8bc9c891b6b33 100644 (file)
--- a/gettext.c
+++ b/gettext.c
@@ -2,7 +2,8 @@
  * Copyright (c) 2010 Ævar Arnfjörð Bjarmason
  */
 
-#include "git-compat-util.h"
+#include "cache.h"
+#include "exec-cmd.h"
 #include "gettext.h"
 #include "strbuf.h"
 #include "utf8.h"
@@ -157,15 +158,24 @@ static void init_gettext_charset(const char *domain)
 
 void git_setup_gettext(void)
 {
-       const char *podir = getenv("GIT_TEXTDOMAINDIR");
+       const char *podir = getenv(GIT_TEXT_DOMAIN_DIR_ENVIRONMENT);
+       char *p = NULL;
 
        if (!podir)
-               podir = GIT_LOCALE_PATH;
+               podir = p = system_path(GIT_LOCALE_PATH);
+
+       if (!is_directory(podir)) {
+               free(p);
+               return;
+       }
+
        bindtextdomain("git", podir);
        setlocale(LC_MESSAGES, "");
        setlocale(LC_TIME, "");
        init_gettext_charset("git");
        textdomain("git");
+
+       free(p);
 }
 
 /* return the number of columns of string 's' in current locale */
index 07e383257b4985f7400f167d683a5fb692237d93..f9e4c5f9bc24404053ae3905251cc108eefb365e 100644 (file)
@@ -284,6 +284,10 @@ extern char *gitdirname(char *);
 #include <openssl/err.h>
 #endif
 
+#ifdef HAVE_SYSINFO
+# include <sys/sysinfo.h>
+#endif
+
 /* On most systems <netdb.h> would have given us this, but
  * not on some systems (e.g. z/OS).
  */
@@ -455,6 +459,7 @@ extern void (*get_warn_routine(void))(const char *warn, va_list params);
 extern void set_die_is_recursing_routine(int (*routine)(void));
 
 extern int starts_with(const char *str, const char *prefix);
+extern int istarts_with(const char *str, const char *prefix);
 
 /*
  * If the string "str" begins with the string found in "prefix", return 1.
index 98c76ec589b053c112ddc86b6821f2acf17b7ab0..64f21547c1ab99ef3ad98d57fe0fbadff9621838 100755 (executable)
@@ -251,8 +251,18 @@ done < "$tempdir"/backup-refs
 
 # The refs should be updated if their heads were rewritten
 git rev-parse --no-flags --revs-only --symbolic-full-name \
-       --default HEAD "$@" > "$tempdir"/raw-heads || exit
-sed -e '/^^/d' "$tempdir"/raw-heads >"$tempdir"/heads
+       --default HEAD "$@" > "$tempdir"/raw-refs || exit
+while read ref
+do
+       case "$ref" in ^?*) continue ;; esac
+
+       if git rev-parse --verify "$ref"^0 >/dev/null 2>&1
+       then
+               echo "$ref"
+       else
+               warn "WARNING: not rewriting '$ref' (not a committish)"
+       fi
+done >"$tempdir"/heads <"$tempdir"/raw-refs
 
 test -s "$tempdir"/heads ||
        die "You must specify a ref to rewrite."
@@ -310,7 +320,7 @@ git rev-list --reverse --topo-order --default HEAD \
        die "Could not get the commits"
 commits=$(wc -l <../revs | tr -d " ")
 
-test $commits -eq 0 && die "Found nothing to rewrite"
+test $commits -eq 0 && die_with_status 2 "Found nothing to rewrite"
 
 # Rewrite the commits
 report_progress ()
index 91c00e6489305a56faed6d4e89150709ba7f2aeb..6de74ce639cec90fcf7ba1797825526df675547a 100755 (executable)
@@ -3867,6 +3867,7 @@ bind .   <$M1B-Key-equal> {show_more_context;break}
 bind .   <$M1B-Key-plus> {show_more_context;break}
 bind .   <$M1B-Key-KP_Add> {show_more_context;break}
 bind .   <$M1B-Key-Return> do_commit
+bind .   <$M1B-Key-KP_Enter> do_commit
 foreach i [list $ui_index $ui_workdir] {
        bind $i <Button-1>       { toggle_or_diff click %W %x %y; break }
        bind $i <$M1B-Button-1>  { add_one_to_selection %W %x %y; break }
index aa6457bbb5f1b0d64d6e04f27394912483250117..589ff8f78aba8273651b33005c6f6abd1db2fa27 100644 (file)
@@ -2,7 +2,10 @@
 # Copyright (C) 2006, 2007 Shawn Pearce
 
 proc find_ssh_key {} {
-       foreach name {~/.ssh/id_dsa.pub ~/.ssh/id_rsa.pub ~/.ssh/identity.pub} {
+       foreach name {
+               ~/.ssh/id_dsa.pub ~/.ssh/id_ecdsa.pub ~/.ssh/id_ed25519.pub
+               ~/.ssh/id_rsa.pub ~/.ssh/identity.pub
+       } {
                if {[file exists $name]} {
                        set fh    [open $name r]
                        set cont  [read $fh]
index 351a712c8c503eb5a04466afb8aa5acd7f0259f5..88b3119a75068763cfee6d79ad4d53a83ad20a67 100644 (file)
@@ -1,6 +1,14 @@
 # Functions for supporting the use of themed Tk widgets in git-gui.
 # Copyright (C) 2009 Pat Thoyts <patthoyts@users.sourceforge.net>
 
+proc ttk_get_current_theme {} {
+       # Handle either current Tk or older versions of 8.5
+       if {[catch {set theme [ttk::style theme use]}]} {
+               set theme  $::ttk::currentTheme
+       }
+       return $theme
+}
+
 proc InitTheme {} {
        # Create a color label style (bg can be overridden by widget option)
        ttk::style layout Color.TLabel {
@@ -28,10 +36,7 @@ proc InitTheme {} {
                }
        }
 
-       # Handle either current Tk or older versions of 8.5
-       if {[catch {set theme [ttk::style theme use]}]} {
-               set theme  $::ttk::currentTheme
-       }
+       set theme [ttk_get_current_theme]
 
        if {[lsearch -exact {default alt classic clam} $theme] != -1} {
                # Simple override of standard ttk::entry to change the field
@@ -248,7 +253,7 @@ proc tspinbox {w args} {
 proc ttext {w args} {
        global use_ttk
        if {$use_ttk} {
-               switch -- [ttk::style theme use] {
+               switch -- [ttk_get_current_theme] {
                        "vista" - "xpnative" {
                                lappend args -highlightthickness 0 -borderwidth 0
                        }
index be3f068922c5a3ba0d1112f3b8b1cf1228567d2e..99b8c177875a7f26ae6c7f70be42c9d97231f7b9 100644 (file)
@@ -4,15 +4,6 @@
 # Copyright (c) 2010 Junio C Hamano.
 #
 
-# The whole contents of this file is run by dot-sourcing it from
-# inside a shell function.  It used to be that "return"s we see
-# below were not inside any function, and expected to return
-# to the function that dot-sourced us.
-#
-# However, older (9.x) versions of FreeBSD /bin/sh misbehave on such a
-# construct and continue to run the statements that follow such a "return".
-# As a work-around, we introduce an extra layer of a function
-# here, and immediately call it after defining it.
 git_rebase__am () {
 
 case "$action" in
@@ -41,60 +32,47 @@ else
 fi
 
 ret=0
-if test -n "$keep_empty"
-then
-       # we have to do this the hard way.  git format-patch completely squashes
-       # empty commits and even if it didn't the format doesn't really lend
-       # itself well to recording empty patches.  fortunately, cherry-pick
-       # makes this easy
-       git cherry-pick ${gpg_sign_opt:+"$gpg_sign_opt"} --allow-empty \
-               $allow_rerere_autoupdate --right-only "$revisions" \
-               $allow_empty_message \
-               ${restrict_revision+^$restrict_revision}
-       ret=$?
-else
-       rm -f "$GIT_DIR/rebased-patches"
+rm -f "$GIT_DIR/rebased-patches"
 
-       git format-patch -k --stdout --full-index --cherry-pick --right-only \
-               --src-prefix=a/ --dst-prefix=b/ --no-renames --no-cover-letter \
-               --pretty=mboxrd \
-               $git_format_patch_opt \
-               "$revisions" ${restrict_revision+^$restrict_revision} \
-               >"$GIT_DIR/rebased-patches"
-       ret=$?
+git format-patch -k --stdout --full-index --cherry-pick --right-only \
+       --src-prefix=a/ --dst-prefix=b/ --no-renames --no-cover-letter \
+       --pretty=mboxrd \
+       $git_format_patch_opt \
+       "$revisions" ${restrict_revision+^$restrict_revision} \
+       >"$GIT_DIR/rebased-patches"
+ret=$?
 
-       if test 0 != $ret
-       then
-               rm -f "$GIT_DIR/rebased-patches"
-               case "$head_name" in
-               refs/heads/*)
-                       git checkout -q "$head_name"
-                       ;;
-               *)
-                       git checkout -q "$orig_head"
-                       ;;
-               esac
+if test 0 != $ret
+then
+       rm -f "$GIT_DIR/rebased-patches"
+       case "$head_name" in
+       refs/heads/*)
+               git checkout -q "$head_name"
+               ;;
+       *)
+               git checkout -q "$orig_head"
+               ;;
+       esac
 
-               cat >&2 <<-EOF
+       cat >&2 <<-EOF
 
-               git encountered an error while preparing the patches to replay
-               these revisions:
+       git encountered an error while preparing the patches to replay
+       these revisions:
 
-                   $revisions
+           $revisions
 
-               As a result, git cannot rebase them.
-               EOF
-               return $ret
-       fi
+       As a result, git cannot rebase them.
+       EOF
+       return $ret
+fi
 
-       git am $git_am_opt --rebasing --resolvemsg="$resolvemsg" \
-               --patch-format=mboxrd \
-               $allow_rerere_autoupdate \
-               ${gpg_sign_opt:+"$gpg_sign_opt"} <"$GIT_DIR/rebased-patches"
-       ret=$?
+git am $git_am_opt --rebasing --resolvemsg="$resolvemsg" \
+       --patch-format=mboxrd \
+       $allow_rerere_autoupdate \
+       ${gpg_sign_opt:+"$gpg_sign_opt"} <"$GIT_DIR/rebased-patches"
+ret=$?
 
-       rm -f "$GIT_DIR/rebased-patches"
-fi
+rm -f "$GIT_DIR/rebased-patches"
 
 if test 0 != $ret
 then
@@ -105,5 +83,3 @@ fi
 move_to_original_branch
 
 }
-# ... and then we call the whole thing.
-git_rebase__am
index 331c8dfeac3cac2fd2d9d9287d5c487669fe80a0..cbf44f864828cca964693bea6a701ce882afd6eb 100644 (file)
@@ -155,13 +155,19 @@ reschedule_last_action () {
 append_todo_help () {
        gettext "
 Commands:
-p, pick = use commit
-r, reword = use commit, but edit the commit message
-e, edit = use commit, but stop for amending
-s, squash = use commit, but meld into previous commit
-f, fixup = like \"squash\", but discard this commit's log message
-x, exec = run command (the rest of the line) using shell
-d, drop = remove commit
+p, pick <commit> = use commit
+r, reword <commit> = use commit, but edit the commit message
+e, edit <commit> = use commit, but stop for amending
+s, squash <commit> = use commit, but meld into previous commit
+f, fixup <commit> = like \"squash\", but discard this commit's log message
+x, exec <commit> = run command (the rest of the line) using shell
+d, drop <commit> = remove commit
+l, label <label> = label current HEAD with a name
+t, reset <label> = reset HEAD to a label
+m, merge [-C <commit> | -c <commit>] <label> [# <oneline>]
+.       create a merge commit using the original merge commit's
+.       message (or the oneline, if no original merge commit was
+.       specified). Use -c <commit> to reword the commit message.
 
 These lines can be re-ordered; they are executed from top to bottom.
 " | git stripspace --comment-lines >>"$todo"
@@ -285,7 +291,7 @@ pick_one () {
                pick_one_preserving_merges "$@" && return
        output eval git cherry-pick $allow_rerere_autoupdate $allow_empty_message \
                        ${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} \
-                       "$strategy_args" $empty_args $ff "$@"
+                       $signoff "$strategy_args" $empty_args $ff "$@"
 
        # If cherry-pick dies it leaves the to-be-picked commit unrecorded. Reschedule
        # previous task so this commit is not lost.
@@ -307,17 +313,14 @@ pick_one_preserving_merges () {
        esac
        sha1=$(git rev-parse $sha1)
 
-       if test -f "$state_dir"/current-commit
+       if test -f "$state_dir"/current-commit && test "$fast_forward" = t
        then
-               if test "$fast_forward" = t
-               then
-                       while read current_commit
-                       do
-                               git rev-parse HEAD > "$rewritten"/$current_commit
-                       done <"$state_dir"/current-commit
-                       rm "$state_dir"/current-commit ||
-                               die "$(gettext "Cannot write current commit's replacement sha1")"
-               fi
+               while read current_commit
+               do
+                       git rev-parse HEAD > "$rewritten"/$current_commit
+               done <"$state_dir"/current-commit
+               rm "$state_dir"/current-commit ||
+                       die "$(gettext "Cannot write current commit's replacement sha1")"
        fi
 
        echo $sha1 >> "$state_dir"/current-commit
@@ -527,10 +530,10 @@ do_pick () {
                # resolve before manually running git commit --amend then git
                # rebase --continue.
                git commit --allow-empty --allow-empty-message --amend \
-                          --no-post-rewrite -n -q -C $sha1 &&
+                          --no-post-rewrite -n -q -C $sha1 $signoff &&
                        pick_one -n $sha1 &&
                        git commit --allow-empty --allow-empty-message \
-                                  --amend --no-post-rewrite -n -q -C $sha1 \
+                                  --amend --no-post-rewrite -n -q -C $sha1 $signoff \
                                   ${gpg_sign_opt:+"$gpg_sign_opt"} ||
                                   die_with_patch $sha1 "$(eval_gettext "Could not apply \$sha1... \$rest")"
        else
@@ -743,37 +746,39 @@ get_missing_commit_check_level () {
        printf '%s' "$check_level" | tr 'A-Z' 'a-z'
 }
 
-# The whole contents of this file is run by dot-sourcing it from
-# inside a shell function.  It used to be that "return"s we see
-# below were not inside any function, and expected to return
-# to the function that dot-sourced us.
+# Initiate an action. If there cannot be any
+# further action it may exec a command
+# or exit and not return.
 #
-# However, older (9.x) versions of FreeBSD /bin/sh misbehave on such a
-# construct and continue to run the statements that follow such a "return".
-# As a work-around, we introduce an extra layer of a function
-# here, and immediately call it after defining it.
-git_rebase__interactive () {
-
-case "$action" in
-continue)
-       if test ! -d "$rewritten"
-       then
-               exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
-                       --continue
-       fi
-       # do we have anything to commit?
-       if git diff-index --cached --quiet HEAD --
-       then
-               # Nothing to commit -- skip this commit
-
-               test ! -f "$GIT_DIR"/CHERRY_PICK_HEAD ||
-               rm "$GIT_DIR"/CHERRY_PICK_HEAD ||
-               die "$(gettext "Could not remove CHERRY_PICK_HEAD")"
-       else
-               if ! test -f "$author_script"
+# TODO: Consider a cleaner return model so it
+# never exits and always returns 0 if processing
+# is complete.
+#
+# Parameter 1 is the action to initiate.
+#
+# Returns 0 if the action was able to complete
+# and 1 if further processing is required.
+initiate_action () {
+       case "$1" in
+       continue)
+               if test ! -d "$rewritten"
+               then
+                       exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
+                               --continue
+               fi
+               # do we have anything to commit?
+               if git diff-index --cached --quiet HEAD --
                then
-                       gpg_sign_opt_quoted=${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")}
-                       die "$(eval_gettext "\
+                       # Nothing to commit -- skip this commit
+
+                       test ! -f "$GIT_DIR"/CHERRY_PICK_HEAD ||
+                       rm "$GIT_DIR"/CHERRY_PICK_HEAD ||
+                       die "$(gettext "Could not remove CHERRY_PICK_HEAD")"
+               else
+                       if ! test -f "$author_script"
+                       then
+                               gpg_sign_opt_quoted=${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")}
+                               die "$(eval_gettext "\
 You have staged changes in your working tree.
 If these changes are meant to be
 squashed into the previous commit, run:
@@ -788,88 +793,201 @@ In both cases, once you're done, continue with:
 
   git rebase --continue
 ")"
-               fi
-               . "$author_script" ||
-                       die "$(gettext "Error trying to find the author identity to amend commit")"
-               if test -f "$amend"
-               then
-                       current_head=$(git rev-parse --verify HEAD)
-                       test "$current_head" = $(cat "$amend") ||
-                       die "$(gettext "\
+                       fi
+                       . "$author_script" ||
+                               die "$(gettext "Error trying to find the author identity to amend commit")"
+                       if test -f "$amend"
+                       then
+                               current_head=$(git rev-parse --verify HEAD)
+                               test "$current_head" = $(cat "$amend") ||
+                               die "$(gettext "\
 You have uncommitted changes in your working tree. Please commit them
 first and then run 'git rebase --continue' again.")"
-                       do_with_author git commit --amend --no-verify -F "$msg" -e \
-                               ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
-                               die "$(gettext "Could not commit staged changes.")"
-               else
-                       do_with_author git commit --no-verify -F "$msg" -e \
-                               ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
-                               die "$(gettext "Could not commit staged changes.")"
+                               do_with_author git commit --amend --no-verify -F "$msg" -e \
+                                       ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
+                                       die "$(gettext "Could not commit staged changes.")"
+                       else
+                               do_with_author git commit --no-verify -F "$msg" -e \
+                                       ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
+                                       die "$(gettext "Could not commit staged changes.")"
+                       fi
                fi
-       fi
 
-       if test -r "$state_dir"/stopped-sha
+               if test -r "$state_dir"/stopped-sha
+               then
+                       record_in_rewritten "$(cat "$state_dir"/stopped-sha)"
+               fi
+
+               require_clean_work_tree "rebase"
+               do_rest
+               return 0
+               ;;
+       skip)
+               git rerere clear
+
+               if test ! -d "$rewritten"
+               then
+                       exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
+                               --continue
+               fi
+               do_rest
+               return 0
+               ;;
+       edit-todo)
+               git stripspace --strip-comments <"$todo" >"$todo".new
+               mv -f "$todo".new "$todo"
+               collapse_todo_ids
+               append_todo_help
+               gettext "
+You are editing the todo file of an ongoing interactive rebase.
+To continue rebase after editing, run:
+    git rebase --continue
+
+" | git stripspace --comment-lines >>"$todo"
+
+               git_sequence_editor "$todo" ||
+                       die "$(gettext "Could not execute editor")"
+               expand_todo_ids
+
+               exit
+               ;;
+       show-current-patch)
+               exec git show REBASE_HEAD --
+               ;;
+       *)
+               return 1 # continue
+               ;;
+       esac
+}
+
+setup_reflog_action () {
+       comment_for_reflog start
+
+       if test ! -z "$switch_to"
        then
-               record_in_rewritten "$(cat "$state_dir"/stopped-sha)"
+               GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $switch_to"
+               output git checkout "$switch_to" -- ||
+                       die "$(eval_gettext "Could not checkout \$switch_to")"
+
+               comment_for_reflog start
        fi
+}
 
-       require_clean_work_tree "rebase"
-       do_rest
-       return 0
-       ;;
-skip)
-       git rerere clear
+init_basic_state () {
+       orig_head=$(git rev-parse --verify HEAD) || die "$(gettext "No HEAD?")"
+       mkdir -p "$state_dir" || die "$(eval_gettext "Could not create temporary \$state_dir")"
+       rm -f "$(git rev-parse --git-path REBASE_HEAD)"
 
-       if test ! -d "$rewritten"
+       : > "$state_dir"/interactive || die "$(gettext "Could not mark as interactive")"
+       write_basic_state
+}
+
+init_revisions_and_shortrevisions () {
+       shorthead=$(git rev-parse --short $orig_head)
+       shortonto=$(git rev-parse --short $onto)
+       if test -z "$rebase_root"
+               # this is now equivalent to ! -z "$upstream"
        then
-               exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
-                       --continue
+               shortupstream=$(git rev-parse --short $upstream)
+               revisions=$upstream...$orig_head
+               shortrevisions=$shortupstream..$shorthead
+       else
+               revisions=$onto...$orig_head
+               shortrevisions=$shorthead
        fi
-       do_rest
-       return 0
-       ;;
-edit-todo)
-       git stripspace --strip-comments <"$todo" >"$todo".new
-       mv -f "$todo".new "$todo"
-       collapse_todo_ids
+}
+
+complete_action() {
+       test -s "$todo" || echo noop >> "$todo"
+       test -z "$autosquash" || git rebase--helper --rearrange-squash || exit
+       test -n "$cmd" && git rebase--helper --add-exec-commands "$cmd"
+
+       todocount=$(git stripspace --strip-comments <"$todo" | wc -l)
+       todocount=${todocount##* }
+
+cat >>"$todo" <<EOF
+
+$comment_char $(eval_ngettext \
+       "Rebase \$shortrevisions onto \$shortonto (\$todocount command)" \
+       "Rebase \$shortrevisions onto \$shortonto (\$todocount commands)" \
+       "$todocount")
+EOF
        append_todo_help
        gettext "
-You are editing the todo file of an ongoing interactive rebase.
-To continue rebase after editing, run:
-    git rebase --continue
+       However, if you remove everything, the rebase will be aborted.
+
+       " | git stripspace --comment-lines >>"$todo"
+
+       if test -z "$keep_empty"
+       then
+               printf '%s\n' "$comment_char $(gettext "Note that empty commits are commented out")" >>"$todo"
+       fi
 
-" | git stripspace --comment-lines >>"$todo"
 
+       has_action "$todo" ||
+               return 2
+
+       cp "$todo" "$todo".backup
+       collapse_todo_ids
        git_sequence_editor "$todo" ||
-               die "$(gettext "Could not execute editor")"
+               die_abort "$(gettext "Could not execute editor")"
+
+       has_action "$todo" ||
+               return 2
+
+       git rebase--helper --check-todo-list || {
+               ret=$?
+               checkout_onto
+               exit $ret
+       }
+
        expand_todo_ids
 
-       exit
-       ;;
-show-current-patch)
-       exec git show REBASE_HEAD --
-       ;;
-esac
+       test -d "$rewritten" || test -n "$force_rebase" ||
+       onto="$(git rebase--helper --skip-unnecessary-picks)" ||
+       die "Could not skip unnecessary pick commands"
 
-comment_for_reflog start
+       checkout_onto
+       if test -z "$rebase_root" && test ! -d "$rewritten"
+       then
+               require_clean_work_tree "rebase"
+               exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
+                       --continue
+       fi
+       do_rest
+}
 
-if test ! -z "$switch_to"
-then
-       GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $switch_to"
-       output git checkout "$switch_to" -- ||
-               die "$(eval_gettext "Could not checkout \$switch_to")"
+git_rebase__interactive () {
+       initiate_action "$action"
+       ret=$?
+       if test $ret = 0; then
+               return 0
+       fi
 
-       comment_for_reflog start
-fi
+       setup_reflog_action
+       init_basic_state
 
-orig_head=$(git rev-parse --verify HEAD) || die "$(gettext "No HEAD?")"
-mkdir -p "$state_dir" || die "$(eval_gettext "Could not create temporary \$state_dir")"
-rm -f "$(git rev-parse --git-path REBASE_HEAD)"
+       init_revisions_and_shortrevisions
+
+       git rebase--helper --make-script ${keep_empty:+--keep-empty} \
+               ${rebase_merges:+--rebase-merges} \
+               ${rebase_cousins:+--rebase-cousins} \
+               $revisions ${restrict_revision+^$restrict_revision} >"$todo" ||
+       die "$(gettext "Could not generate todo list")"
+
+       complete_action
+}
+
+git_rebase__interactive__preserve_merges () {
+       initiate_action "$action"
+       ret=$?
+       if test $ret = 0; then
+               return 0
+       fi
+
+       setup_reflog_action
+       init_basic_state
 
-: > "$state_dir"/interactive || die "$(gettext "Could not mark as interactive")"
-write_basic_state
-if test t = "$preserve_merges"
-then
        if test -z "$rebase_root"
        then
                mkdir "$rewritten" &&
@@ -883,41 +1001,17 @@ then
                echo $onto > "$rewritten"/root ||
                        die "$(gettext "Could not init rewritten commits")"
        fi
-       # No cherry-pick because our first pass is to determine
-       # parents to rewrite and skipping dropped commits would
-       # prematurely end our probe
-       merges_option=
-else
-       merges_option="--no-merges --cherry-pick"
-fi
-
-shorthead=$(git rev-parse --short $orig_head)
-shortonto=$(git rev-parse --short $onto)
-if test -z "$rebase_root"
-       # this is now equivalent to ! -z "$upstream"
-then
-       shortupstream=$(git rev-parse --short $upstream)
-       revisions=$upstream...$orig_head
-       shortrevisions=$shortupstream..$shorthead
-else
-       revisions=$onto...$orig_head
-       shortrevisions=$shorthead
-fi
-if test t != "$preserve_merges"
-then
-       git rebase--helper --make-script ${keep_empty:+--keep-empty} \
-               $revisions ${restrict_revision+^$restrict_revision} >"$todo" ||
-       die "$(gettext "Could not generate todo list")"
-else
+
+       init_revisions_and_shortrevisions
+
        format=$(git config --get rebase.instructionFormat)
        # the 'rev-list .. | sed' requires %m to parse; the instruction requires %H to parse
-       git rev-list $merges_option --format="%m%H ${format:-%s}" \
+       git rev-list --format="%m%H ${format:-%s}" \
                --reverse --left-right --topo-order \
                $revisions ${restrict_revision+^$restrict_revision} | \
                sed -n "s/^>//p" |
        while read -r sha1 rest
        do
-
                if test -z "$keep_empty" && is_empty_commit $sha1 && ! is_merge_commit $sha1
                then
                        comment_out="$comment_char "
@@ -944,11 +1038,8 @@ else
                        printf '%s\n' "${comment_out}pick $sha1 $rest" >>"$todo"
                fi
        done
-fi
 
-# Watch for commits that been dropped by --cherry-pick
-if test t = "$preserve_merges"
-then
+	# Watch for commits that have been dropped by --cherry-pick
        mkdir "$dropped"
        # Save all non-cherry-picked changes
        git rev-list $revisions --left-right --cherry-pick | \
@@ -971,66 +1062,6 @@ then
                        rm "$rewritten"/$rev
                fi
        done
-fi
-
-test -s "$todo" || echo noop >> "$todo"
-test -z "$autosquash" || git rebase--helper --rearrange-squash || exit
-test -n "$cmd" && git rebase--helper --add-exec-commands "$cmd"
-
-todocount=$(git stripspace --strip-comments <"$todo" | wc -l)
-todocount=${todocount##* }
-
-cat >>"$todo" <<EOF
-
-$comment_char $(eval_ngettext \
-       "Rebase \$shortrevisions onto \$shortonto (\$todocount command)" \
-       "Rebase \$shortrevisions onto \$shortonto (\$todocount commands)" \
-       "$todocount")
-EOF
-append_todo_help
-gettext "
-However, if you remove everything, the rebase will be aborted.
-
-" | git stripspace --comment-lines >>"$todo"
-
-if test -z "$keep_empty"
-then
-       printf '%s\n' "$comment_char $(gettext "Note that empty commits are commented out")" >>"$todo"
-fi
-
-
-has_action "$todo" ||
-       return 2
-
-cp "$todo" "$todo".backup
-collapse_todo_ids
-git_sequence_editor "$todo" ||
-       die_abort "$(gettext "Could not execute editor")"
-
-has_action "$todo" ||
-       return 2
-
-git rebase--helper --check-todo-list || {
-       ret=$?
-       checkout_onto
-       exit $ret
-}
-
-expand_todo_ids
-
-test -d "$rewritten" || test -n "$force_rebase" ||
-onto="$(git rebase--helper --skip-unnecessary-picks)" ||
-die "Could not skip unnecessary pick commands"
-
-checkout_onto
-if test -z "$rebase_root" && test ! -d "$rewritten"
-then
-       require_clean_work_tree "rebase"
-       exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
-               --continue
-fi
-do_rest
 
+       complete_action
 }
-# ... and then we call the whole thing.
-git_rebase__interactive
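Since the scriptlet now ends with two entry points instead of one self-running body, the choice between the regular interactive backend and the --preserve-merges backend is made entirely by the command line; for example (the upstream name is a placeholder):

    $ git rebase -i --rebase-merges origin/master      # runs git_rebase__interactive
    $ git rebase -i --preserve-merges origin/master    # runs git_rebase__interactive__preserve_merges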
index ceb715453cc9eba0b6e91abfd2ea3863e74f3e05..cf4c0422148935906ad939c5351652a1531e5f0d 100644 (file)
@@ -27,7 +27,7 @@ continue_merge () {
        cmt=$(cat "$state_dir/current")
        if ! git diff-index --quiet --ignore-submodules HEAD --
        then
-               if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message \
+               if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} $signoff $allow_empty_message \
                        --no-verify -C "$cmt"
                then
                        echo "Commit failed, please do not call \"git commit\""
@@ -104,15 +104,6 @@ finish_rb_merge () {
        say All done.
 }
 
-# The whole contents of this file is run by dot-sourcing it from
-# inside a shell function.  It used to be that "return"s we see
-# below were not inside any function, and expected to return
-# to the function that dot-sourced us.
-#
-# However, older (9.x) versions of FreeBSD /bin/sh misbehave on such a
-# construct and continue to run the statements that follow such a "return".
-# As a work-around, we introduce an extra layer of a function
-# here, and immediately call it after defining it.
 git_rebase__merge () {
 
 case "$action" in
@@ -171,5 +162,3 @@ done
 finish_rb_merge
 
 }
-# ... and then we call the whole thing.
-git_rebase__merge
index a1f6e5de6a3ed1fe9a6217a136611682f3db6582..40be59ecc4704da05a0e153f1709d4af2c62cd18 100755 (executable)
@@ -17,6 +17,7 @@ q,quiet!           be quiet. implies --no-stat
 autostash          automatically stash/stash pop before and after
 fork-point         use 'merge-base --fork-point' to refine upstream
 onto=!             rebase onto given branch instead of upstream
+r,rebase-merges?   try to rebase merges instead of skipping them
 p,preserve-merges! try to recreate merges instead of ignoring them
 s,strategy=!       use the given merge strategy
 no-ff!             cherry-pick all commits, even if unchanged
@@ -62,6 +63,7 @@ $(gettext 'Resolve all conflicts manually, mark them as resolved with
 You can instead skip this commit: run "git rebase --skip".
 To abort and get back to the state before "git rebase", run "git rebase --abort".')
 "
+squash_onto=
 unset onto
 unset restrict_revision
 cmd=
@@ -88,10 +90,13 @@ type=
 state_dir=
 # One of {'', continue, skip, abort}, as parsed from command line
 action=
+rebase_merges=
+rebase_cousins=
 preserve_merges=
 autosquash=
 keep_empty=
 allow_empty_message=
+signoff=
 test "$(git config --bool rebase.autosquash)" = "true" && autosquash=t
 case "$(git config --bool commit.gpgsign)" in
 true)  gpg_sign_opt=-S ;;
@@ -121,6 +126,10 @@ read_basic_state () {
                allow_rerere_autoupdate="$(cat "$state_dir"/allow_rerere_autoupdate)"
        test -f "$state_dir"/gpg_sign_opt &&
                gpg_sign_opt="$(cat "$state_dir"/gpg_sign_opt)"
+       test -f "$state_dir"/signoff && {
+               signoff="$(cat "$state_dir"/signoff)"
+               force_rebase=t
+       }
 }
 
 write_basic_state () {
@@ -135,6 +144,7 @@ write_basic_state () {
        test -n "$allow_rerere_autoupdate" && echo "$allow_rerere_autoupdate" > \
                "$state_dir"/allow_rerere_autoupdate
        test -n "$gpg_sign_opt" && echo "$gpg_sign_opt" > "$state_dir"/gpg_sign_opt
+       test -n "$signoff" && echo "$signoff" >"$state_dir"/signoff
 }
 
 output () {
@@ -197,6 +207,7 @@ run_specific_rebase () {
                autosquash=
        fi
        . git-rebase--$type
+       git_rebase__$type${preserve_merges:+__preserve_merges}
        ret=$?
        if test $ret -eq 0
        then
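The dispatch line above leans on a ${var:+word} expansion; the following snippet, runnable in any POSIX shell, shows how the function name is assembled:

    type=interactive
    preserve_merges=t
    echo "git_rebase__$type${preserve_merges:+__preserve_merges}"
    # -> git_rebase__interactive__preserve_merges

    preserve_merges=
    echo "git_rebase__$type${preserve_merges:+__preserve_merges}"
    # -> git_rebase__interactive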
@@ -269,6 +280,22 @@ do
        --allow-empty-message)
                allow_empty_message=--allow-empty-message
                ;;
+       --no-keep-empty)
+               keep_empty=
+               ;;
+       --rebase-merges)
+               rebase_merges=t
+               test -z "$interactive_rebase" && interactive_rebase=implied
+               ;;
+       --rebase-merges=*)
+               rebase_merges=t
+               case "${1#*=}" in
+               rebase-cousins) rebase_cousins=t;;
+               no-rebase-cousins) rebase_cousins=;;
+               *) die "Unknown mode: $1";;
+               esac
+               test -z "$interactive_rebase" && interactive_rebase=implied
+               ;;
        --preserve-merges)
                preserve_merges=t
                test -z "$interactive_rebase" && interactive_rebase=implied
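Taking the option parsing above at face value, the new flag can be used in these forms (the upstream name is a placeholder); either spelling implies the interactive machinery:

    $ git rebase --rebase-merges origin/master                    # default mode: no-rebase-cousins
    $ git rebase --rebase-merges=rebase-cousins origin/master     # also move commits that do not build on <upstream>
    $ git rebase --rebase-merges=no-rebase-cousins origin/master  # explicit default
    $ git rebase --rebase-merges=bogus origin/master              # dies with "Unknown mode: --rebase-merges=bogus"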
@@ -331,7 +358,13 @@ do
        --ignore-whitespace)
                git_am_opt="$git_am_opt $1"
                ;;
-       --committer-date-is-author-date|--ignore-date|--signoff|--no-signoff)
+       --signoff)
+               signoff=--signoff
+               ;;
+       --no-signoff)
+               signoff=
+               ;;
+       --committer-date-is-author-date|--ignore-date)
                git_am_opt="$git_am_opt $1"
                force_rebase=t
                ;;
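With --signoff now handled as its own piece of state rather than passed straight through to git am, a run might look like this; the exact trailer text depends on user.name and user.email, and force_rebase=t means the commits are rewritten even when a fast-forward would otherwise be possible:

    $ git rebase --signoff origin/master
    # each replayed commit gains a trailer of the form:
    #     Signed-off-by: A U Thor <author@example.com>
    $ git rebase --no-signoff origin/master    # switch it back off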
@@ -447,6 +480,11 @@ then
        test -z "$interactive_rebase" && interactive_rebase=implied
 fi
 
+if test -n "$keep_empty"
+then
+       test -z "$interactive_rebase" && interactive_rebase=implied
+fi
+
 if test -n "$interactive_rebase"
 then
        type=interactive
@@ -465,6 +503,14 @@ then
        git_format_patch_opt="$git_format_patch_opt --progress"
 fi
 
+if test -n "$signoff"
+then
+       test -n "$preserve_merges" &&
+               die "$(gettext "error: cannot combine '--signoff' with '--preserve-merges'")"
+       git_am_opt="$git_am_opt $signoff"
+       force_rebase=t
+fi
+
 if test -z "$rebase_root"
 then
        case "$#" in
index 2fa7818ca9a8ac7363d17aef17b864f7361b3eb8..7157397fd03a0791b7722ab49aa696181977381c 100755 (executable)
@@ -1642,10 +1642,15 @@ sub send_message {
                        elsif (/^Content-Transfer-Encoding: (.*)/i) {
                                $xfer_encoding = $1 if not defined $xfer_encoding;
                        }
+                       elsif (/^In-Reply-To: (.*)/i) {
+                               $in_reply_to = $1;
+                       }
+                       elsif (/^References: (.*)/i) {
+                               $references = $1;
+                       }
                        elsif (!/^Date:\s/i && /^[-A-Za-z]+:\s+\S/) {
                                push @xh, $_;
                        }
-
                } else {
                        # In the traditional
                        # "send lots of email" format,
index fc8f8ae6401dddcceaa82f9e9c748f5c185536d6..94793c1a913abf569ff9101d935c355b9eb27648 100755 (executable)
@@ -39,7 +39,7 @@ fi
 no_changes () {
        git diff-index --quiet --cached HEAD --ignore-submodules -- "$@" &&
        git diff-files --quiet --ignore-submodules -- "$@" &&
-       (test -z "$untracked" || test -z "$(untracked_files)")
+       (test -z "$untracked" || test -z "$(untracked_files "$@")")
 }
 
 untracked_files () {
@@ -315,16 +315,18 @@ push_stash () {
        if test -z "$patch_mode"
        then
                test "$untracked" = "all" && CLEAN_X_OPTION=-x || CLEAN_X_OPTION=
-               if test -n "$untracked"
+               if test -n "$untracked" && test $# = 0
                then
-                       git clean --force --quiet -d $CLEAN_X_OPTION -- "$@"
+                       git clean --force --quiet -d $CLEAN_X_OPTION
                fi
 
                if test $# != 0
                then
-                       git add -u -- "$@" |
-                       git checkout-index -z --force --stdin
-                       git diff-index -p --cached --binary HEAD -- "$@" | git apply --index -R
+                       test -z "$untracked" && UPDATE_OPTION="-u" || UPDATE_OPTION=
+                       test "$untracked" = "all" && FORCE_OPTION="--force" || FORCE_OPTION=
+                       git add $UPDATE_OPTION $FORCE_OPTION -- "$@"
+                       git diff-index -p --cached --binary HEAD -- "$@" |
+                       git apply --index -R
                else
                        git reset --hard -q
                fi
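The pathspec fixes above can be exercised with invocations like the following (paths are examples). The blanket git clean is now only run when no pathspec is given; with a pathspec, matching untracked files are picked up via git add --force instead, and tracked files outside the pathspec are left alone:

    $ git stash push -u -- src/     # stash tracked and untracked changes under src/ only
    $ git stash push -- '*.c'       # stash only tracked changes to files matching *.c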
index a6b6c3e40c180e58186f9ef8af5c6fd6fc654080..050f2a36f414f147f3bde3cc9f69639d5a36a975 100755 (executable)
@@ -374,7 +374,8 @@ sub term_init {
 usage(1) unless defined $cmd;
 load_authors() if $_authors;
 if (defined $_authors_prog) {
-       $_authors_prog = "'" . File::Spec->rel2abs($_authors_prog) . "'";
+       my $abs_file = File::Spec->rel2abs($_authors_prog);
+       $_authors_prog = "'" . $abs_file . "'" if -x $abs_file;
 }
 
 unless ($cmd =~ /^(?:clone|init|multi-init|commit-diff)$/) {
diff --git a/git.c b/git.c
index ceaa58ef40e536f1290cce3ad1223004063e41a6..5771d62a328d69692291674c7ed0f9050efac3ca 100644 (file)
--- a/git.c
+++ b/git.c
@@ -1,13 +1,31 @@
 #include "builtin.h"
 #include "config.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "help.h"
 #include "run-command.h"
 
+#define RUN_SETUP              (1<<0)
+#define RUN_SETUP_GENTLY       (1<<1)
+#define USE_PAGER              (1<<2)
+/*
+ * require working tree to be present -- anything using this needs
+ * RUN_SETUP for reading from the configuration file.
+ */
+#define NEED_WORK_TREE         (1<<3)
+#define SUPPORT_SUPER_PREFIX   (1<<4)
+#define DELAY_PAGER_CONFIG     (1<<5)
+#define NO_PARSEOPT            (1<<6) /* parse-options is not used */
+
+struct cmd_struct {
+       const char *cmd;
+       int (*fn)(int, const char **, const char *);
+       unsigned int option;
+};
+
 const char git_usage_string[] =
        N_("git [--version] [--help] [-C <path>] [-c <name>=<value>]\n"
           "           [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]\n"
-          "           [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\n"
+          "           [-p | --paginate | -P | --no-pager] [--no-replace-objects] [--bare]\n"
           "           [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]\n"
           "           <command> [<args>]");
 
@@ -18,7 +36,7 @@ const char git_more_info_string[] =
 
 static int use_pager = -1;
 
-static void list_builtins(void);
+static void list_builtins(unsigned int exclude_option, char sep);
 
 static void commit_pager_choice(void) {
        switch (use_pager) {
@@ -65,7 +83,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                 */
                if (skip_prefix(cmd, "--exec-path", &cmd)) {
                        if (*cmd == '=')
-                               git_set_argv_exec_path(cmd + 1);
+                               git_set_exec_path(cmd + 1);
                        else {
                                puts(git_exec_path());
                                exit(0);
@@ -81,7 +99,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                        exit(0);
                } else if (!strcmp(cmd, "-p") || !strcmp(cmd, "--paginate")) {
                        use_pager = 1;
-               } else if (!strcmp(cmd, "--no-pager")) {
+               } else if (!strcmp(cmd, "-P") || !strcmp(cmd, "--no-pager")) {
                        use_pager = 0;
                        if (envchanged)
                                *envchanged = 1;
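This makes -P a short form of --no-pager, mirroring -p for --paginate; for example:

    $ git -P log --oneline -5     # same as: git --no-pager log --oneline -5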
@@ -206,7 +224,10 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                        (*argv)++;
                        (*argc)--;
                } else if (!strcmp(cmd, "--list-builtins")) {
-                       list_builtins();
+                       list_builtins(0, '\n');
+                       exit(0);
+               } else if (!strcmp(cmd, "--list-parseopt-builtins")) {
+                       list_builtins(NO_PARSEOPT, ' ');
                        exit(0);
                } else {
                        fprintf(stderr, _("unknown option: %s\n"), cmd);
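The new option prints, space-separated on one line, the builtins that are not marked NO_PARSEOPT, i.e. the ones whose parse-options based option list can be introspected; it looks intended for scripted consumers (such as the completion machinery) rather than end users. Illustrative, not verbatim, output:

    $ git --list-builtins | head -3              # existing option: one name per line
    add
    am
    annotate
    $ git --list-parseopt-builtins | tr ' ' '\n' | grep -x annotate || echo "annotate is excluded"
    annotate is excluded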
@@ -288,23 +309,6 @@ static int handle_alias(int *argcp, const char ***argv)
        return ret;
 }
 
-#define RUN_SETUP              (1<<0)
-#define RUN_SETUP_GENTLY       (1<<1)
-#define USE_PAGER              (1<<2)
-/*
- * require working tree to be present -- anything uses this needs
- * RUN_SETUP for reading from the configuration file.
- */
-#define NEED_WORK_TREE         (1<<3)
-#define SUPPORT_SUPER_PREFIX   (1<<4)
-#define DELAY_PAGER_CONFIG     (1<<5)
-
-struct cmd_struct {
-       const char *cmd;
-       int (*fn)(int, const char **, const char *);
-       int option;
-};
-
 static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
 {
        int status, help;
@@ -367,18 +371,18 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
 static struct cmd_struct commands[] = {
        { "add", cmd_add, RUN_SETUP | NEED_WORK_TREE },
        { "am", cmd_am, RUN_SETUP | NEED_WORK_TREE },
-       { "annotate", cmd_annotate, RUN_SETUP },
+       { "annotate", cmd_annotate, RUN_SETUP | NO_PARSEOPT },
        { "apply", cmd_apply, RUN_SETUP_GENTLY },
        { "archive", cmd_archive, RUN_SETUP_GENTLY },
        { "bisect--helper", cmd_bisect__helper, RUN_SETUP },
        { "blame", cmd_blame, RUN_SETUP },
        { "branch", cmd_branch, RUN_SETUP | DELAY_PAGER_CONFIG },
-       { "bundle", cmd_bundle, RUN_SETUP_GENTLY },
+       { "bundle", cmd_bundle, RUN_SETUP_GENTLY | NO_PARSEOPT },
        { "cat-file", cmd_cat_file, RUN_SETUP },
        { "check-attr", cmd_check_attr, RUN_SETUP },
        { "check-ignore", cmd_check_ignore, RUN_SETUP | NEED_WORK_TREE },
        { "check-mailmap", cmd_check_mailmap, RUN_SETUP },
-       { "check-ref-format", cmd_check_ref_format },
+       { "check-ref-format", cmd_check_ref_format, NO_PARSEOPT  },
        { "checkout", cmd_checkout, RUN_SETUP | NEED_WORK_TREE },
        { "checkout-index", cmd_checkout_index,
                RUN_SETUP | NEED_WORK_TREE},
@@ -388,30 +392,31 @@ static struct cmd_struct commands[] = {
        { "clone", cmd_clone },
        { "column", cmd_column, RUN_SETUP_GENTLY },
        { "commit", cmd_commit, RUN_SETUP | NEED_WORK_TREE },
-       { "commit-tree", cmd_commit_tree, RUN_SETUP },
+       { "commit-graph", cmd_commit_graph, RUN_SETUP },
+       { "commit-tree", cmd_commit_tree, RUN_SETUP | NO_PARSEOPT },
        { "config", cmd_config, RUN_SETUP_GENTLY | DELAY_PAGER_CONFIG },
        { "count-objects", cmd_count_objects, RUN_SETUP },
-       { "credential", cmd_credential, RUN_SETUP_GENTLY },
+       { "credential", cmd_credential, RUN_SETUP_GENTLY | NO_PARSEOPT },
        { "describe", cmd_describe, RUN_SETUP },
-       { "diff", cmd_diff },
-       { "diff-files", cmd_diff_files, RUN_SETUP | NEED_WORK_TREE },
-       { "diff-index", cmd_diff_index, RUN_SETUP },
-       { "diff-tree", cmd_diff_tree, RUN_SETUP },
+       { "diff", cmd_diff, NO_PARSEOPT },
+       { "diff-files", cmd_diff_files, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
+       { "diff-index", cmd_diff_index, RUN_SETUP | NO_PARSEOPT },
+       { "diff-tree", cmd_diff_tree, RUN_SETUP | NO_PARSEOPT },
        { "difftool", cmd_difftool, RUN_SETUP | NEED_WORK_TREE },
        { "fast-export", cmd_fast_export, RUN_SETUP },
        { "fetch", cmd_fetch, RUN_SETUP },
-       { "fetch-pack", cmd_fetch_pack, RUN_SETUP },
+       { "fetch-pack", cmd_fetch_pack, RUN_SETUP | NO_PARSEOPT },
        { "fmt-merge-msg", cmd_fmt_merge_msg, RUN_SETUP },
        { "for-each-ref", cmd_for_each_ref, RUN_SETUP },
        { "format-patch", cmd_format_patch, RUN_SETUP },
        { "fsck", cmd_fsck, RUN_SETUP },
        { "fsck-objects", cmd_fsck, RUN_SETUP },
        { "gc", cmd_gc, RUN_SETUP },
-       { "get-tar-commit-id", cmd_get_tar_commit_id },
+       { "get-tar-commit-id", cmd_get_tar_commit_id, NO_PARSEOPT },
        { "grep", cmd_grep, RUN_SETUP_GENTLY },
        { "hash-object", cmd_hash_object },
        { "help", cmd_help },
-       { "index-pack", cmd_index_pack, RUN_SETUP_GENTLY },
+       { "index-pack", cmd_index_pack, RUN_SETUP_GENTLY | NO_PARSEOPT },
        { "init", cmd_init_db },
        { "init-db", cmd_init_db },
        { "interpret-trailers", cmd_interpret_trailers, RUN_SETUP_GENTLY },
@@ -419,27 +424,27 @@ static struct cmd_struct commands[] = {
        { "ls-files", cmd_ls_files, RUN_SETUP },
        { "ls-remote", cmd_ls_remote, RUN_SETUP_GENTLY },
        { "ls-tree", cmd_ls_tree, RUN_SETUP },
-       { "mailinfo", cmd_mailinfo, RUN_SETUP_GENTLY },
-       { "mailsplit", cmd_mailsplit },
+       { "mailinfo", cmd_mailinfo, RUN_SETUP_GENTLY | NO_PARSEOPT },
+       { "mailsplit", cmd_mailsplit, NO_PARSEOPT },
        { "merge", cmd_merge, RUN_SETUP | NEED_WORK_TREE },
        { "merge-base", cmd_merge_base, RUN_SETUP },
        { "merge-file", cmd_merge_file, RUN_SETUP_GENTLY },
-       { "merge-index", cmd_merge_index, RUN_SETUP },
-       { "merge-ours", cmd_merge_ours, RUN_SETUP },
-       { "merge-recursive", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE },
-       { "merge-recursive-ours", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE },
-       { "merge-recursive-theirs", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE },
-       { "merge-subtree", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE },
-       { "merge-tree", cmd_merge_tree, RUN_SETUP },
-       { "mktag", cmd_mktag, RUN_SETUP },
+       { "merge-index", cmd_merge_index, RUN_SETUP | NO_PARSEOPT },
+       { "merge-ours", cmd_merge_ours, RUN_SETUP | NO_PARSEOPT },
+       { "merge-recursive", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
+       { "merge-recursive-ours", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
+       { "merge-recursive-theirs", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
+       { "merge-subtree", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
+       { "merge-tree", cmd_merge_tree, RUN_SETUP | NO_PARSEOPT },
+       { "mktag", cmd_mktag, RUN_SETUP | NO_PARSEOPT },
        { "mktree", cmd_mktree, RUN_SETUP },
        { "mv", cmd_mv, RUN_SETUP | NEED_WORK_TREE },
        { "name-rev", cmd_name_rev, RUN_SETUP },
        { "notes", cmd_notes, RUN_SETUP },
        { "pack-objects", cmd_pack_objects, RUN_SETUP },
-       { "pack-redundant", cmd_pack_redundant, RUN_SETUP },
+       { "pack-redundant", cmd_pack_redundant, RUN_SETUP | NO_PARSEOPT },
        { "pack-refs", cmd_pack_refs, RUN_SETUP },
-       { "patch-id", cmd_patch_id, RUN_SETUP_GENTLY },
+       { "patch-id", cmd_patch_id, RUN_SETUP_GENTLY | NO_PARSEOPT },
        { "pickaxe", cmd_blame, RUN_SETUP },
        { "prune", cmd_prune, RUN_SETUP },
        { "prune-packed", cmd_prune_packed, RUN_SETUP },
@@ -450,17 +455,18 @@ static struct cmd_struct commands[] = {
        { "receive-pack", cmd_receive_pack },
        { "reflog", cmd_reflog, RUN_SETUP },
        { "remote", cmd_remote, RUN_SETUP },
-       { "remote-ext", cmd_remote_ext },
-       { "remote-fd", cmd_remote_fd },
+       { "remote-ext", cmd_remote_ext, NO_PARSEOPT },
+       { "remote-fd", cmd_remote_fd, NO_PARSEOPT },
        { "repack", cmd_repack, RUN_SETUP },
        { "replace", cmd_replace, RUN_SETUP },
        { "rerere", cmd_rerere, RUN_SETUP },
        { "reset", cmd_reset, RUN_SETUP },
-       { "rev-list", cmd_rev_list, RUN_SETUP },
-       { "rev-parse", cmd_rev_parse },
+       { "rev-list", cmd_rev_list, RUN_SETUP | NO_PARSEOPT },
+       { "rev-parse", cmd_rev_parse, NO_PARSEOPT },
        { "revert", cmd_revert, RUN_SETUP | NEED_WORK_TREE },
        { "rm", cmd_rm, RUN_SETUP },
        { "send-pack", cmd_send_pack, RUN_SETUP },
+       { "serve", cmd_serve, RUN_SETUP },
        { "shortlog", cmd_shortlog, RUN_SETUP_GENTLY | USE_PAGER },
        { "show", cmd_show, RUN_SETUP },
        { "show-branch", cmd_show_branch, RUN_SETUP },
@@ -468,23 +474,24 @@ static struct cmd_struct commands[] = {
        { "stage", cmd_add, RUN_SETUP | NEED_WORK_TREE },
        { "status", cmd_status, RUN_SETUP | NEED_WORK_TREE },
        { "stripspace", cmd_stripspace },
-       { "submodule--helper", cmd_submodule__helper, RUN_SETUP | SUPPORT_SUPER_PREFIX},
+       { "submodule--helper", cmd_submodule__helper, RUN_SETUP | SUPPORT_SUPER_PREFIX | NO_PARSEOPT },
        { "symbolic-ref", cmd_symbolic_ref, RUN_SETUP },
        { "tag", cmd_tag, RUN_SETUP | DELAY_PAGER_CONFIG },
-       { "unpack-file", cmd_unpack_file, RUN_SETUP },
-       { "unpack-objects", cmd_unpack_objects, RUN_SETUP },
+       { "unpack-file", cmd_unpack_file, RUN_SETUP | NO_PARSEOPT },
+       { "unpack-objects", cmd_unpack_objects, RUN_SETUP | NO_PARSEOPT },
        { "update-index", cmd_update_index, RUN_SETUP },
        { "update-ref", cmd_update_ref, RUN_SETUP },
        { "update-server-info", cmd_update_server_info, RUN_SETUP },
-       { "upload-archive", cmd_upload_archive },
-       { "upload-archive--writer", cmd_upload_archive_writer },
-       { "var", cmd_var, RUN_SETUP_GENTLY },
+       { "upload-archive", cmd_upload_archive, NO_PARSEOPT },
+       { "upload-archive--writer", cmd_upload_archive_writer, NO_PARSEOPT },
+       { "upload-pack", cmd_upload_pack },
+       { "var", cmd_var, RUN_SETUP_GENTLY | NO_PARSEOPT },
        { "verify-commit", cmd_verify_commit, RUN_SETUP },
        { "verify-pack", cmd_verify_pack },
        { "verify-tag", cmd_verify_tag, RUN_SETUP },
        { "version", cmd_version },
        { "whatchanged", cmd_whatchanged, RUN_SETUP },
-       { "worktree", cmd_worktree, RUN_SETUP },
+       { "worktree", cmd_worktree, RUN_SETUP | NO_PARSEOPT },
        { "write-tree", cmd_write_tree, RUN_SETUP },
 };
 
@@ -504,11 +511,15 @@ int is_builtin(const char *s)
        return !!get_builtin(s);
 }
 
-static void list_builtins(void)
+static void list_builtins(unsigned int exclude_option, char sep)
 {
        int i;
-       for (i = 0; i < ARRAY_SIZE(commands); i++)
-               printf("%s\n", commands[i].cmd);
+       for (i = 0; i < ARRAY_SIZE(commands); i++) {
+               if (exclude_option &&
+                   (commands[i].option & exclude_option))
+                       continue;
+               printf("%s%c", commands[i].cmd, sep);
+       }
 }
 
 #ifdef STRIP_EXTENSION
index 4feacf16e5bcd93dcde4a2ea769f6b61ca369593..0647bd6348cdedd0e32f30ed23934d390eae4122 100644 (file)
@@ -101,22 +101,26 @@ void print_signature_buffer(const struct signature_check *sigc, unsigned flags)
                fputs(output, stderr);
 }
 
-/*
- * Look at GPG signed content (e.g. a signed tag object), whose
- * payload is followed by a detached signature on it.  Return the
- * offset where the embedded detached signature begins, or the end of
- * the data when there is no such signature.
- */
-size_t parse_signature(const char *buf, unsigned long size)
+static int is_gpg_start(const char *line)
+{
+       return starts_with(line, PGP_SIGNATURE) ||
+               starts_with(line, PGP_MESSAGE);
+}
+
+size_t parse_signature(const char *buf, size_t size)
 {
-       char *eol;
        size_t len = 0;
-       while (len < size && !starts_with(buf + len, PGP_SIGNATURE) &&
-                       !starts_with(buf + len, PGP_MESSAGE)) {
+       size_t match = size;
+       while (len < size) {
+               const char *eol;
+
+               if (is_gpg_start(buf + len))
+                       match = len;
+
                eol = memchr(buf + len, '\n', size - len);
                len += eol ? eol - (buf + len) + 1 : size - len;
        }
-       return len;
+       return match;
 }
 
 void set_signing_key(const char *key)
@@ -128,13 +132,19 @@ void set_signing_key(const char *key)
 int git_gpg_config(const char *var, const char *value, void *cb)
 {
        if (!strcmp(var, "user.signingkey")) {
+               if (!value)
+                       return config_error_nonbool(var);
                set_signing_key(value);
+               return 0;
        }
+
        if (!strcmp(var, "gpg.program")) {
                if (!value)
                        return config_error_nonbool(var);
                gpg_program = xstrdup(value);
+               return 0;
        }
+
        return 0;
 }
 
@@ -145,12 +155,6 @@ const char *get_signing_key(void)
        return git_committer_info(IDENT_STRICT|IDENT_NO_DATE);
 }
 
-/*
- * Create a detached signature for the contents of "buffer" and append
- * it after "signature"; "buffer" and "signature" can be the same
- * strbuf instance, which would cause the detached signature appended
- * at the end.
- */
 int sign_buffer(struct strbuf *buffer, struct strbuf *signature, const char *signing_key)
 {
        struct child_process gpg = CHILD_PROCESS_INIT;
@@ -192,11 +196,6 @@ int sign_buffer(struct strbuf *buffer, struct strbuf *signature, const char *sig
        return 0;
 }
 
-/*
- * Run "gpg" to see if the payload matches the detached signature.
- * gpg_output, when set, receives the diagnostic output from GPG.
- * gpg_status, when set, receives the status output from GPG.
- */
 int verify_signed_buffer(const char *payload, size_t payload_size,
                         const char *signature, size_t signature_size,
                         struct strbuf *gpg_output, struct strbuf *gpg_status)
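On the configuration side, user.signingkey now gets the same guard that gpg.program already had: a valueless entry (a bare "signingkey" with no "=") is rejected via config_error_nonbool instead of being passed on as NULL. Typical usage is unchanged; the key id and program name below are placeholders:

    $ git config user.signingkey 5ED0B1A7749B8B53
    $ git config gpg.program gpg2
    $ git tag -s v1.0 -m "signed tag"    # signing uses the configured key and program
    $ git verify-tag v1.0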
index d2d4fd3a656e3f4953aea5eda3eea435f37f237e..a5e6517ae67ea5fa3f79265517381d58d99fba18 100644 (file)
@@ -23,16 +23,43 @@ struct signature_check {
        char *key;
 };
 
-extern void signature_check_clear(struct signature_check *sigc);
-extern size_t parse_signature(const char *buf, unsigned long size);
-extern void parse_gpg_output(struct signature_check *);
-extern int sign_buffer(struct strbuf *buffer, struct strbuf *signature, const char *signing_key);
-extern int verify_signed_buffer(const char *payload, size_t payload_size, const char *signature, size_t signature_size, struct strbuf *gpg_output, struct strbuf *gpg_status);
-extern int git_gpg_config(const char *, const char *, void *);
-extern void set_signing_key(const char *);
-extern const char *get_signing_key(void);
-extern int check_signature(const char *payload, size_t plen,
-       const char *signature, size_t slen, struct signature_check *sigc);
-void print_signature_buffer(const struct signature_check *sigc, unsigned flags);
+void signature_check_clear(struct signature_check *sigc);
+
+/*
+ * Look at GPG signed content (e.g. a signed tag object), whose
+ * payload is followed by a detached signature on it.  Return the
+ * offset where the embedded detached signature begins, or the end of
+ * the data when there is no such signature.
+ */
+size_t parse_signature(const char *buf, size_t size);
+
+void parse_gpg_output(struct signature_check *);
+
+/*
+ * Create a detached signature for the contents of "buffer" and append
+ * it after "signature"; "buffer" and "signature" can be the same
+ * strbuf instance, which would cause the detached signature appended
+ * at the end.
+ */
+int sign_buffer(struct strbuf *buffer, struct strbuf *signature,
+               const char *signing_key);
+
+/*
+ * Run "gpg" to see if the payload matches the detached signature.
+ * gpg_output, when set, receives the diagnostic output from GPG.
+ * gpg_status, when set, receives the status output from GPG.
+ */
+int verify_signed_buffer(const char *payload, size_t payload_size,
+                        const char *signature, size_t signature_size,
+                        struct strbuf *gpg_output, struct strbuf *gpg_status);
+
+int git_gpg_config(const char *, const char *, void *);
+void set_signing_key(const char *);
+const char *get_signing_key(void);
+int check_signature(const char *payload, size_t plen,
+                   const char *signature, size_t slen,
+                   struct signature_check *sigc);
+void print_signature_buffer(const struct signature_check *sigc,
+                           unsigned flags);
 
 #endif
diff --git a/grep.c b/grep.c
index 834b8eb439297ff5af4a3da8b946a40ca68057f1..65b90c10a38c136d2ce9a42fee5c543cd7d4717d 100644 (file)
--- a/grep.c
+++ b/grep.c
@@ -2015,7 +2015,7 @@ static int grep_source_load_oid(struct grep_source *gs)
        enum object_type type;
 
        grep_read_lock();
-       gs->buf = read_sha1_file(gs->identifier, &type, &gs->size);
+       gs->buf = read_object_file(gs->identifier, &type, &gs->size);
        grep_read_unlock();
 
        if (!gs->buf)
diff --git a/help.c b/help.c
index 60071a9beaaedf45730385b7fc1443b402c50156..a4feef2ffe90ae853c220b9368642c4edb503e5e 100644 (file)
--- a/help.c
+++ b/help.c
@@ -1,7 +1,7 @@
 #include "cache.h"
 #include "config.h"
 #include "builtin.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "run-command.h"
 #include "levenshtein.h"
 #include "help.h"
index f3dc218b2a3d2662efda56fb6c8878a37a4e67a4..adaef16fadfd03f34b8ac5bb496bd51aab292b20 100644 (file)
@@ -1,15 +1,18 @@
 #include "cache.h"
 #include "config.h"
+#include "repository.h"
 #include "refs.h"
 #include "pkt-line.h"
 #include "object.h"
 #include "tag.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "run-command.h"
 #include "string-list.h"
 #include "url.h"
 #include "argv-array.h"
 #include "packfile.h"
+#include "object-store.h"
+#include "protocol.h"
 
 static const char content_type[] = "Content-Type";
 static const char content_length[] = "Content-Length";
@@ -466,8 +469,11 @@ static void get_info_refs(struct strbuf *hdr, char *arg)
                hdr_str(hdr, content_type, buf.buf);
                end_headers(hdr);
 
-               packet_write_fmt(1, "# service=git-%s\n", svc->name);
-               packet_flush(1);
+
+               if (determine_protocol_version_server() != protocol_v2) {
+                       packet_write_fmt(1, "# service=git-%s\n", svc->name);
+                       packet_flush(1);
+               }
 
                argv[0] = svc->name;
                run_service(argv, 0);
@@ -517,14 +523,13 @@ static void get_info_packs(struct strbuf *hdr, char *arg)
        size_t cnt = 0;
 
        select_getanyfile(hdr);
-       prepare_packed_git();
-       for (p = packed_git; p; p = p->next) {
+       for (p = get_packed_git(the_repository); p; p = p->next) {
                if (p->pack_local)
                        cnt++;
        }
 
        strbuf_grow(&buf, cnt * 53 + 2);
-       for (p = packed_git; p; p = p->next) {
+       for (p = get_packed_git(the_repository); p; p = p->next) {
                if (p->pack_local)
                        strbuf_addf(&buf, "P %s\n", p->pack_name + objdirlen + 6);
        }
index 8af380050ce6a43a6764ad871f0de141c316421a..a32ac118d90ca6141a72d31f47497116870f6803 100644 (file)
@@ -1,6 +1,6 @@
 #include "cache.h"
 #include "config.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "http.h"
 #include "walker.h"
 
@@ -17,21 +17,13 @@ int cmd_main(int argc, const char **argv)
        char *url = NULL;
        int arg = 1;
        int rc = 0;
-       int get_tree = 0;
-       int get_history = 0;
-       int get_all = 0;
        int get_verbosely = 0;
        int get_recover = 0;
 
        while (arg < argc && argv[arg][0] == '-') {
                if (argv[arg][1] == 't') {
-                       get_tree = 1;
                } else if (argv[arg][1] == 'c') {
-                       get_history = 1;
                } else if (argv[arg][1] == 'a') {
-                       get_all = 1;
-                       get_tree = 1;
-                       get_history = 1;
                } else if (argv[arg][1] == 'v') {
                        get_verbosely = 1;
                } else if (argv[arg][1] == 'w') {
@@ -55,10 +47,6 @@ int cmd_main(int argc, const char **argv)
                commits = 1;
        }
 
-       if (get_all == 0)
-               warning("http-fetch: use without -a is deprecated.\n"
-                       "In a future release, -a will become the default.");
-
        if (argv[arg])
                str_end_url_with_slash(argv[arg], &url);
 
@@ -68,9 +56,6 @@ int cmd_main(int argc, const char **argv)
 
        http_init(NULL, url, 0);
        walker = get_http_walker(url);
-       walker->get_tree = get_tree;
-       walker->get_history = get_history;
-       walker->get_all = get_all;
        walker->get_verbosely = get_verbosely;
        walker->get_recover = get_recover;
 
index 7dcd9daf62cf07e1cc43022fde17607cef6dc147..2669f4bfa1e2e47226f6e58084f35f4a64b21226 100644 (file)
@@ -6,12 +6,13 @@
 #include "refs.h"
 #include "diff.h"
 #include "revision.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "remote.h"
 #include "list-objects.h"
 #include "sigchain.h"
 #include "argv-array.h"
 #include "packfile.h"
+#include "object-store.h"
 
 #ifdef EXPAT_NEEDS_XMLPARSE_H
 #include <xmlparse.h>
@@ -361,7 +362,7 @@ static void start_put(struct transfer_request *request)
        ssize_t size;
        git_zstream stream;
 
-       unpacked = read_sha1_file(request->obj->oid.hash, &type, &len);
+       unpacked = read_object_file(&request->obj->oid, &type, &len);
        hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
 
        /* Set it up */
@@ -1330,7 +1331,7 @@ static int get_delta(struct rev_info *revs, struct remote_lock *lock)
        int count = 0;
 
        while ((commit = get_revision(revs)) != NULL) {
-               p = process_tree(commit->tree, p);
+               p = process_tree(get_commit_tree(commit), p);
                commit->object.flags |= LOCAL;
                if (!(commit->object.flags & UNINTERESTING))
                        count += add_send_request(&commit->object, lock);
index 07c2b1af826d3e24e2568e5bdef17806f355048f..7cdfb2f24c76d2f09b39ae2fa97685ecec2d1630 100644 (file)
@@ -1,10 +1,12 @@
 #include "cache.h"
+#include "repository.h"
 #include "commit.h"
 #include "walker.h"
 #include "http.h"
 #include "list.h"
 #include "transport.h"
 #include "packfile.h"
+#include "object-store.h"
 
 struct alt_base {
        char *base;
@@ -22,7 +24,7 @@ enum object_request_state {
 
 struct object_request {
        struct walker *walker;
-       unsigned char sha1[20];
+       struct object_id oid;
        struct alt_base *repo;
        enum object_request_state state;
        struct http_object_request *req;
@@ -56,7 +58,7 @@ static void start_object_request(struct walker *walker,
        struct active_request_slot *slot;
        struct http_object_request *req;
 
-       req = new_http_object_request(obj_req->repo->base, obj_req->sha1);
+       req = new_http_object_request(obj_req->repo->base, obj_req->oid.hash);
        if (req == NULL) {
                obj_req->state = ABORTED;
                return;
@@ -82,7 +84,7 @@ static void finish_object_request(struct object_request *obj_req)
                return;
 
        if (obj_req->req->rename == 0)
-               walker_say(obj_req->walker, "got %s\n", sha1_to_hex(obj_req->sha1));
+               walker_say(obj_req->walker, "got %s\n", oid_to_hex(&obj_req->oid));
 }
 
 static void process_object_response(void *callback_data)
@@ -129,7 +131,7 @@ static int fill_active_slot(struct walker *walker)
        list_for_each_safe(pos, tmp, head) {
                obj_req = list_entry(pos, struct object_request, node);
                if (obj_req->state == WAITING) {
-                       if (has_sha1_file(obj_req->sha1))
+                       if (has_sha1_file(obj_req->oid.hash))
                                obj_req->state = COMPLETE;
                        else {
                                start_object_request(walker, obj_req);
@@ -148,7 +150,7 @@ static void prefetch(struct walker *walker, unsigned char *sha1)
 
        newreq = xmalloc(sizeof(*newreq));
        newreq->walker = walker;
-       hashcpy(newreq->sha1, sha1);
+       hashcpy(newreq->oid.hash, sha1);
        newreq->repo = data->alt;
        newreq->state = WAITING;
        newreq->req = NULL;
@@ -481,13 +483,13 @@ static int fetch_object(struct walker *walker, unsigned char *sha1)
 
        list_for_each(pos, head) {
                obj_req = list_entry(pos, struct object_request, node);
-               if (!hashcmp(obj_req->sha1, sha1))
+               if (!hashcmp(obj_req->oid.hash, sha1))
                        break;
        }
        if (obj_req == NULL)
                return error("Couldn't find request for %s in the queue", hex);
 
-       if (has_sha1_file(obj_req->sha1)) {
+       if (has_sha1_file(obj_req->oid.hash)) {
                if (obj_req->req != NULL)
                        abort_http_object_request(obj_req->req);
                abort_object_request(obj_req);
@@ -541,11 +543,11 @@ static int fetch_object(struct walker *walker, unsigned char *sha1)
        } else if (req->zret != Z_STREAM_END) {
                walker->corrupt_object_found++;
                ret = error("File %s (%s) corrupt", hex, req->url);
-       } else if (hashcmp(obj_req->sha1, req->real_sha1)) {
+       } else if (hashcmp(obj_req->oid.hash, req->real_sha1)) {
                ret = error("File %s has bad hash", hex);
        } else if (req->rename < 0) {
                struct strbuf buf = STRBUF_INIT;
-               sha1_file_name(&buf, req->sha1);
+               sha1_file_name(the_repository, &buf, req->sha1);
                ret = error("unable to write sha1 filename %s", buf.buf);
                strbuf_release(&buf);
        }
diff --git a/http.c b/http.c
index a5bd5d62c22c054f82b9971fc1f320c643f1d6fb..fed13b2169a49aa602fc2ff21d45e368e1734d79 100644 (file)
--- a/http.c
+++ b/http.c
@@ -14,6 +14,7 @@
 #include "packfile.h"
 #include "protocol.h"
 #include "string-list.h"
+#include "object-store.h"
 
 static struct trace_key trace_curl = TRACE_KEY_INIT(CURL);
 static int trace_curl_data = 1;
@@ -62,6 +63,9 @@ static struct {
        { "tlsv1.1", CURL_SSLVERSION_TLSv1_1 },
        { "tlsv1.2", CURL_SSLVERSION_TLSv1_2 },
 #endif
+#if LIBCURL_VERSION_NUM >= 0x073400
+       { "tlsv1.3", CURL_SSLVERSION_TLSv1_3 },
+#endif
 };
 #if LIBCURL_VERSION_NUM >= 0x070903
 static const char *ssl_key;
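With a libcurl that defines CURL_SSLVERSION_TLSv1_3 (7.52.0 or newer, per the 0x073400 guard), the new value becomes selectable through the usual knobs, for example:

    $ git config http.sslVersion tlsv1.3
    $ GIT_SSL_VERSION=tlsv1.3 git fetch origin    # environment variable overrides the config for one run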
@@ -972,21 +976,6 @@ static void set_from_env(const char **var, const char *envname)
                *var = val;
 }
 
-static void protocol_http_header(void)
-{
-       if (get_protocol_version_config() > 0) {
-               struct strbuf protocol_header = STRBUF_INIT;
-
-               strbuf_addf(&protocol_header, GIT_PROTOCOL_HEADER ": version=%d",
-                           get_protocol_version_config());
-
-
-               extra_http_headers = curl_slist_append(extra_http_headers,
-                                                      protocol_header.buf);
-               strbuf_release(&protocol_header);
-       }
-}
-
 void http_init(struct remote *remote, const char *url, int proactive_auth)
 {
        char *low_speed_limit;
@@ -1017,8 +1006,6 @@ void http_init(struct remote *remote, const char *url, int proactive_auth)
        if (remote)
                var_override(&http_proxy_authmethod, remote->http_proxy_authmethod);
 
-       protocol_http_header();
-
        pragma_header = curl_slist_append(http_copy_default_headers(),
                "Pragma: no-cache");
        no_pragma_header = curl_slist_append(http_copy_default_headers(),
@@ -1791,6 +1778,14 @@ static int http_request(const char *url,
 
        headers = curl_slist_append(headers, buf.buf);
 
+       /* Add additional headers here */
+       if (options && options->extra_headers) {
+               const struct string_list_item *item;
+               for_each_string_list_item(item, options->extra_headers) {
+                       headers = curl_slist_append(headers, item->string);
+               }
+       }
+
        curl_easy_setopt(slot->curl, CURLOPT_URL, url);
        curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, headers);
        curl_easy_setopt(slot->curl, CURLOPT_ENCODING, "gzip");
@@ -2135,7 +2130,7 @@ int finish_http_pack_request(struct http_pack_request *preq)
                return -1;
        }
 
-       install_packed_git(p);
+       install_packed_git(the_repository, p);
        free(tmp_idx);
        return 0;
 }
@@ -2248,7 +2243,7 @@ struct http_object_request *new_http_object_request(const char *base_url,
        hashcpy(freq->sha1, sha1);
        freq->localfile = -1;
 
-       sha1_file_name(&filename, sha1);
+       sha1_file_name(the_repository, &filename, sha1);
        snprintf(freq->tmpfile, sizeof(freq->tmpfile),
                 "%s.temp", filename.buf);
 
@@ -2397,8 +2392,7 @@ int finish_http_object_request(struct http_object_request *freq)
                unlink_or_warn(freq->tmpfile);
                return -1;
        }
-
-       sha1_file_name(&filename, freq->sha1);
+       sha1_file_name(the_repository, &filename, freq->sha1);
        freq->rename = finalize_object_file(freq->tmpfile, filename.buf);
        strbuf_release(&filename);
 
diff --git a/http.h b/http.h
index f7bd3b26b0da70e44402579e4912a1b20dcef16e..4df4a25e1abc232c27f1b00ba0a97b05a689db2e 100644 (file)
--- a/http.h
+++ b/http.h
@@ -172,6 +172,13 @@ struct http_get_options {
         * for details.
         */
        struct strbuf *base_url;
+
+       /*
+        * If not NULL, contains additional HTTP headers to be sent with the
+        * request. The strings in the list must not be freed until after the
+        * request has completed.
+        */
+       struct string_list *extra_headers;
 };
 
 /* Return values for http_get_*() */
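
The new extra_headers field above lets a single request carry additional HTTP headers. A minimal sketch of a caller, assuming the existing http_get_strbuf() entry point; the header value shown is illustrative only:

    #include "cache.h"
    #include "http.h"
    #include "string-list.h"

    /*
     * Sketch only: attach one extra header to a single request.  The
     * strings must stay alive until the request completes, per the
     * comment in http.h; the header value here is a made-up example.
     */
    static int fetch_with_extra_header(const char *url, struct strbuf *out)
    {
            struct string_list headers = STRING_LIST_INIT_DUP;
            struct http_get_options options = { 0 };
            int ret;

            string_list_append(&headers, "Git-Protocol: version=2");
            options.extra_headers = &headers;

            ret = http_get_strbuf(url, out, &options);
            string_list_clear(&headers, 0);
            return ret;
    }

This is presumably also why protocol_http_header() is dropped from http_init() in the http.c hunk above: the protocol header can now be attached per request by the caller that needs it.
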
diff --git a/imap-send.c b/imap-send.c
index ffb0a6eca8ce632dc448883a47987bf48adea31b..3573cbfb0fca7d524f3d3531f9f97ebeabdd15e6 100644 (file)
--- a/imap-send.c
+++ b/imap-send.c
@@ -24,7 +24,7 @@
 #include "cache.h"
 #include "config.h"
 #include "credential.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "run-command.h"
 #include "parse-options.h"
 #ifdef NO_OPENSSL
diff --git a/line-log.c b/line-log.c
index cdc2257db56f7d68412bb456e91854a9e9aa5cd5..fa9cfd5bdbb5dfe4a1ca5c7f7711435caf091805 100644 (file)
--- a/line-log.c
+++ b/line-log.c
@@ -501,8 +501,7 @@ static void fill_blob_sha1(struct commit *commit, struct diff_filespec *spec)
        unsigned mode;
        struct object_id oid;
 
-       if (get_tree_entry(commit->object.oid.hash, spec->path,
-                          oid.hash, &mode))
+       if (get_tree_entry(&commit->object.oid, spec->path, &oid, &mode))
                die("There is no path %s in the commit", spec->path);
        fill_filespec(spec, &oid, 1, mode);
 
@@ -817,8 +816,8 @@ static void queue_diffs(struct line_log_data *range,
        assert(commit);
 
        DIFF_QUEUE_CLEAR(&diff_queued_diff);
-       diff_tree_oid(parent ? &parent->tree->object.oid : NULL,
-                     &commit->tree->object.oid, "", opt);
+       diff_tree_oid(parent ? get_commit_tree_oid(parent) : NULL,
+                     get_commit_tree_oid(commit), "", opt);
        if (opt->detect_rename) {
                filter_diffs_for_paths(range, 1);
                if (diff_might_be_rename())
diff --git a/list-objects-filter.c b/list-objects-filter.c
index 4356c45368e10bd7f41e15f91130dbfd3e4e9281..5b14d2711ad3791169f5d8cbc03836b4254bd55c 100644 (file)
--- a/list-objects-filter.c
+++ b/list-objects-filter.c
@@ -19,7 +19,7 @@
  * in the traversal (until we mark it SEEN).  This is a way to
  * let us silently de-dup calls to show() in the caller.  This
  * is subtly different from the "revision.h:SHOWN" and the
- * "sha1_name.c:ONELINE_SEEN" bits.  And also different from
+ * "sha1-name.c:ONELINE_SEEN" bits.  And also different from
  * the non-de-dup usage in pack-bitmap.c
  */
 #define FILTER_SHOWN_BUT_REVISIT (1<<21)
@@ -117,7 +117,7 @@ static enum list_objects_filter_result filter_blobs_limit(
                assert(obj->type == OBJ_BLOB);
                assert((obj->flags & SEEN) == 0);
 
-               t = sha1_object_info(obj->oid.hash, &object_length);
+               t = oid_object_info(the_repository, &obj->oid, &object_length);
                if (t != OBJ_BLOB) { /* probably OBJ_NONE */
                        /*
                         * We DO NOT have the blob locally, so we cannot
diff --git a/list-objects.c b/list-objects.c
index 168bef688a89489a9d88d3e1f773483dbc1c8860..3eec510357337f5e33eea08a05faa22373d98c07 100644 (file)
--- a/list-objects.c
+++ b/list-objects.c
@@ -195,7 +195,7 @@ static void mark_edge_parents_uninteresting(struct commit *commit,
                struct commit *parent = parents->item;
                if (!(parent->object.flags & UNINTERESTING))
                        continue;
-               mark_tree_uninteresting(parent->tree);
+               mark_tree_uninteresting(get_commit_tree(parent));
                if (revs->edge_hint && !(parent->object.flags & SHOWN)) {
                        parent->object.flags |= SHOWN;
                        show_edge(parent);
@@ -212,7 +212,7 @@ void mark_edges_uninteresting(struct rev_info *revs, show_edge_fn show_edge)
                struct commit *commit = list->item;
 
                if (commit->object.flags & UNINTERESTING) {
-                       mark_tree_uninteresting(commit->tree);
+                       mark_tree_uninteresting(get_commit_tree(commit));
                        if (revs->edge_hint_aggressive && !(commit->object.flags & SHOWN)) {
                                commit->object.flags |= SHOWN;
                                show_edge(commit);
@@ -227,7 +227,7 @@ void mark_edges_uninteresting(struct rev_info *revs, show_edge_fn show_edge)
                        struct commit *commit = (struct commit *)obj;
                        if (obj->type != OBJ_COMMIT || !(obj->flags & UNINTERESTING))
                                continue;
-                       mark_tree_uninteresting(commit->tree);
+                       mark_tree_uninteresting(get_commit_tree(commit));
                        if (!(obj->flags & SHOWN)) {
                                obj->flags |= SHOWN;
                                show_edge(commit);
@@ -300,8 +300,8 @@ static void do_traverse(struct rev_info *revs,
                 * an uninteresting boundary commit may not have its tree
                 * parsed yet, but we are not going to show them anyway
                 */
-               if (commit->tree)
-                       add_pending_tree(revs, commit->tree);
+               if (get_commit_tree(commit))
+                       add_pending_tree(revs, get_commit_tree(commit));
                show_commit(commit, show_data);
 
                if (revs->tree_blobs_in_commit_order)
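
The direct commit->tree dereferences are replaced by accessor calls throughout this series. A small illustrative sketch of the intended usage, assuming the get_commit_tree()/get_commit_tree_oid() helpers that these hunks call are declared in commit.h:

    #include "cache.h"
    #include "commit.h"

    /*
     * Sketch only: reach a commit's root tree through the accessors rather
     * than touching the (now renamed) maybe_tree field directly.
     */
    static void print_root_tree(struct commit *commit)
    {
            struct tree *tree = get_commit_tree(commit);
            const struct object_id *tree_oid = get_commit_tree_oid(commit);

            if (!tree)
                    return; /* e.g. an unparsed boundary commit */
            printf("%s^{tree} = %s\n",
                   oid_to_hex(&commit->object.oid), oid_to_hex(tree_oid));
    }
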
diff --git a/log-tree.c b/log-tree.c
index bdf23c5f7b89eda2997057285d1fff64acb0d30c..724bae0de25b5b6e22dfecee233ff999be880dd5 100644 (file)
--- a/log-tree.c
+++ b/log-tree.c
@@ -177,7 +177,7 @@ static void show_parents(struct commit *commit, int abbrev, FILE *file)
        struct commit_list *p;
        for (p = commit->parents; p ; p = p->next) {
                struct commit *parent = p->item;
-               fprintf(file, " %s", find_unique_abbrev(parent->object.oid.hash, abbrev));
+               fprintf(file, " %s", find_unique_abbrev(&parent->object.oid, abbrev));
        }
 }
 
@@ -185,7 +185,7 @@ static void show_children(struct rev_info *opt, struct commit *commit, int abbre
 {
        struct commit_list *p = lookup_decoration(&opt->children, &commit->object);
        for ( ; p; p = p->next) {
-               fprintf(opt->diffopt.file, " %s", find_unique_abbrev(p->item->object.oid.hash, abbrev));
+               fprintf(opt->diffopt.file, " %s", find_unique_abbrev(&p->item->object.oid, abbrev));
        }
 }
 
@@ -362,7 +362,8 @@ void fmt_output_email_subject(struct strbuf *sb, struct rev_info *opt)
 
 void log_write_email_headers(struct rev_info *opt, struct commit *commit,
                             const char **extra_headers_p,
-                            int *need_8bit_cte_p)
+                            int *need_8bit_cte_p,
+                            int maybe_multipart)
 {
        const char *extra_headers = opt->extra_headers;
        const char *name = oid_to_hex(opt->zero_commit ?
@@ -385,7 +386,7 @@ void log_write_email_headers(struct rev_info *opt, struct commit *commit,
                               opt->ref_message_ids->items[i].string);
                graph_show_oneline(opt->graph);
        }
-       if (opt->mime_boundary) {
+       if (opt->mime_boundary && maybe_multipart) {
                static char subject_buffer[1024];
                static char buffer[1024];
                struct strbuf filename =  STRBUF_INIT;
@@ -488,9 +489,9 @@ static int is_common_merge(const struct commit *commit)
                && !commit->parents->next->next);
 }
 
-static void show_one_mergetag(struct commit *commit,
-                             struct commit_extra_header *extra,
-                             void *data)
+static int show_one_mergetag(struct commit *commit,
+                            struct commit_extra_header *extra,
+                            void *data)
 {
        struct rev_info *opt = (struct rev_info *)data;
        struct object_id oid;
@@ -502,7 +503,7 @@ static void show_one_mergetag(struct commit *commit,
        hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &oid);
        tag = lookup_tag(&oid);
        if (!tag)
-               return; /* error message already given */
+               return -1; /* error message already given */
 
        strbuf_init(&verify_message, 256);
        if (parse_tag_buffer(tag, extra->value, extra->len))
@@ -536,11 +537,12 @@ static void show_one_mergetag(struct commit *commit,
 
        show_sig_lines(opt, status, verify_message.buf);
        strbuf_release(&verify_message);
+       return 0;
 }
 
-static void show_mergetag(struct rev_info *opt, struct commit *commit)
+static int show_mergetag(struct rev_info *opt, struct commit *commit)
 {
-       for_each_mergetag(show_one_mergetag, commit, opt);
+       return for_each_mergetag(show_one_mergetag, commit, opt);
 }
 
 void show_log(struct rev_info *opt)
@@ -558,7 +560,7 @@ void show_log(struct rev_info *opt)
 
                if (!opt->graph)
                        put_revision_mark(opt, commit);
-               fputs(find_unique_abbrev(commit->object.oid.hash, abbrev_commit), opt->diffopt.file);
+               fputs(find_unique_abbrev(&commit->object.oid, abbrev_commit), opt->diffopt.file);
                if (opt->print_parents)
                        show_parents(commit, abbrev_commit, opt->diffopt.file);
                if (opt->children.name)
@@ -610,7 +612,7 @@ void show_log(struct rev_info *opt)
 
        if (cmit_fmt_is_mail(opt->commit_format)) {
                log_write_email_headers(opt, commit, &extra_headers,
-                                       &ctx.need_8bit_cte);
+                                       &ctx.need_8bit_cte, 1);
                ctx.rev = opt;
                ctx.print_email_subject = 1;
        } else if (opt->commit_format != CMIT_FMT_USERFORMAT) {
@@ -620,7 +622,8 @@ void show_log(struct rev_info *opt)
 
                if (!opt->graph)
                        put_revision_mark(opt, commit);
-               fputs(find_unique_abbrev(commit->object.oid.hash, abbrev_commit),
+               fputs(find_unique_abbrev(&commit->object.oid,
+                                        abbrev_commit),
                      opt->diffopt.file);
                if (opt->print_parents)
                        show_parents(commit, abbrev_commit, opt->diffopt.file);
@@ -628,8 +631,7 @@ void show_log(struct rev_info *opt)
                        show_children(opt, commit, abbrev_commit);
                if (parent)
                        fprintf(opt->diffopt.file, " (from %s)",
-                              find_unique_abbrev(parent->object.oid.hash,
-                                                 abbrev_commit));
+                              find_unique_abbrev(&parent->object.oid, abbrev_commit));
                fputs(diff_get_color_opt(&opt->diffopt, DIFF_RESET), opt->diffopt.file);
                show_decorations(opt, commit);
                if (opt->commit_format == CMIT_FMT_ONELINE) {
@@ -806,7 +808,7 @@ static int log_tree_diff(struct rev_info *opt, struct commit *commit, struct log
                return 0;
 
        parse_commit_or_die(commit);
-       oid = &commit->tree->object.oid;
+       oid = get_commit_tree_oid(commit);
 
        /* Root commit? */
        parents = get_saved_parents(opt, commit);
@@ -831,7 +833,7 @@ static int log_tree_diff(struct rev_info *opt, struct commit *commit, struct log
                         * we merged _in_.
                         */
                        parse_commit_or_die(parents->item);
-                       diff_tree_oid(&parents->item->tree->object.oid,
+                       diff_tree_oid(get_commit_tree_oid(parents->item),
                                      oid, "", &opt->diffopt);
                        log_tree_diff_flush(opt);
                        return !opt->loginfo;
@@ -846,7 +848,7 @@ static int log_tree_diff(struct rev_info *opt, struct commit *commit, struct log
                struct commit *parent = parents->item;
 
                parse_commit_or_die(parent);
-               diff_tree_oid(&parent->tree->object.oid,
+               diff_tree_oid(get_commit_tree_oid(parent),
                              oid, "", &opt->diffopt);
                log_tree_diff_flush(opt);
 
diff --git a/log-tree.h b/log-tree.h
index deba035187185bc28a83520b47480c3141d8398e..e66862807463a12e28573be04cbcfcf2d200e6f3 100644 (file)
--- a/log-tree.h
+++ b/log-tree.h
@@ -27,7 +27,8 @@ void format_decorations_extended(struct strbuf *sb, const struct commit *commit,
 void show_decorations(struct rev_info *opt, struct commit *commit);
 void log_write_email_headers(struct rev_info *opt, struct commit *commit,
                             const char **extra_headers_p,
-                            int *need_8bit_cte_p);
+                            int *need_8bit_cte_p,
+                            int maybe_multipart);
 void load_ref_decorations(struct decoration_filter *filter, int flags);
 
 #define FORMAT_PATCH_NAME_MAX 64
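
The new maybe_multipart flag lets a caller keep a message single-part even when a MIME boundary is configured; show_log() above passes 1 for ordinary per-commit output. A hedged sketch of the opposite case (the caller shown here is illustrative, not part of this hunk):

    #include "cache.h"
    #include "revision.h"
    #include "log-tree.h"

    /*
     * Sketch only: emit e-mail headers for a message that must never grow
     * multipart/mixed boundaries (a cover letter, say), by passing 0 for
     * maybe_multipart.
     */
    static void write_single_part_headers(struct rev_info *opt, struct commit *commit)
    {
            const char *extra_headers = NULL;
            int need_8bit_cte = 0;

            log_write_email_headers(opt, commit, &extra_headers, &need_8bit_cte, 0);
    }
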
diff --git a/ls-refs.c b/ls-refs.c
new file mode 100644 (file)
index 0000000..a06f12e
--- /dev/null
+++ b/ls-refs.c
@@ -0,0 +1,96 @@
+#include "cache.h"
+#include "repository.h"
+#include "refs.h"
+#include "remote.h"
+#include "argv-array.h"
+#include "ls-refs.h"
+#include "pkt-line.h"
+
+/*
+ * Check if one of the prefixes is a prefix of the ref.
+ * If no prefixes were provided, all refs match.
+ */
+static int ref_match(const struct argv_array *prefixes, const char *refname)
+{
+       int i;
+
+       if (!prefixes->argc)
+               return 1; /* no restriction */
+
+       for (i = 0; i < prefixes->argc; i++) {
+               const char *prefix = prefixes->argv[i];
+
+               if (starts_with(refname, prefix))
+                       return 1;
+       }
+
+       return 0;
+}
+
+struct ls_refs_data {
+       unsigned peel;
+       unsigned symrefs;
+       struct argv_array prefixes;
+};
+
+static int send_ref(const char *refname, const struct object_id *oid,
+                   int flag, void *cb_data)
+{
+       struct ls_refs_data *data = cb_data;
+       const char *refname_nons = strip_namespace(refname);
+       struct strbuf refline = STRBUF_INIT;
+
+       if (!ref_match(&data->prefixes, refname))
+               return 0;
+
+       strbuf_addf(&refline, "%s %s", oid_to_hex(oid), refname_nons);
+       if (data->symrefs && flag & REF_ISSYMREF) {
+               struct object_id unused;
+               const char *symref_target = resolve_ref_unsafe(refname, 0,
+                                                              &unused,
+                                                              &flag);
+
+               if (!symref_target)
+                       die("'%s' is a symref but it is not?", refname);
+
+               strbuf_addf(&refline, " symref-target:%s", symref_target);
+       }
+
+       if (data->peel) {
+               struct object_id peeled;
+               if (!peel_ref(refname, &peeled))
+                       strbuf_addf(&refline, " peeled:%s", oid_to_hex(&peeled));
+       }
+
+       strbuf_addch(&refline, '\n');
+       packet_write(1, refline.buf, refline.len);
+
+       strbuf_release(&refline);
+       return 0;
+}
+
+int ls_refs(struct repository *r, struct argv_array *keys,
+           struct packet_reader *request)
+{
+       struct ls_refs_data data;
+
+       memset(&data, 0, sizeof(data));
+
+       while (packet_reader_read(request) != PACKET_READ_FLUSH) {
+               const char *arg = request->line;
+               const char *out;
+
+               if (!strcmp("peel", arg))
+                       data.peel = 1;
+               else if (!strcmp("symrefs", arg))
+                       data.symrefs = 1;
+               else if (skip_prefix(arg, "ref-prefix ", &out))
+                       argv_array_push(&data.prefixes, out);
+       }
+
+       head_ref_namespaced(send_ref, &data);
+       for_each_namespaced_ref(send_ref, &data);
+       packet_flush(1);
+       argv_array_clear(&data.prefixes);
+       return 0;
+}
diff --git a/ls-refs.h b/ls-refs.h
new file mode 100644 (file)
index 0000000..b62877e
--- /dev/null
+++ b/ls-refs.h
@@ -0,0 +1,10 @@
+#ifndef LS_REFS_H
+#define LS_REFS_H
+
+struct repository;
+struct argv_array;
+struct packet_reader;
+extern int ls_refs(struct repository *r, struct argv_array *keys,
+                  struct packet_reader *request);
+
+#endif /* LS_REFS_H */
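
ls_refs() reads its arguments as plain pkt-lines ("peel", "symrefs", "ref-prefix <prefix>") and then advertises only matching refs. The prefix semantics are restated below as a standalone sketch; the helper name is illustrative and simply mirrors the static ref_match() above:

    #include "cache.h"
    #include "argv-array.h"

    /*
     * Sketch only: a ref is advertised if no "ref-prefix" argument was
     * given, or if its full name starts with any of the given prefixes.
     */
    static int would_advertise(const struct argv_array *prefixes, const char *refname)
    {
            int i;

            if (!prefixes->argc)
                    return 1;
            for (i = 0; i < prefixes->argc; i++)
                    if (starts_with(refname, prefixes->argv[i]))
                            return 1;
            return 0;
    }

    /*
     * With prefixes {"refs/heads/", "refs/tags/"}:
     *   would_advertise(&p, "refs/heads/master") -> 1
     *   would_advertise(&p, "refs/changes/1/1")  -> 0
     */
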
diff --git a/mailmap.c b/mailmap.c
index cb921b4db676e3db918ee16f419cd2b78e0bf57e..13f0d2884e25edef3fde5bdf72b89a770111e472 100644 (file)
--- a/mailmap.c
+++ b/mailmap.c
@@ -224,7 +224,7 @@ static int read_mailmap_blob(struct string_list *map,
        if (get_oid(name, &oid) < 0)
                return 0;
 
-       buf = read_sha1_file(oid.hash, &type, &size);
+       buf = read_object_file(&oid, &type, &size);
        if (!buf)
                return error("unable to read mailmap object at %s", name);
        if (type != OBJ_BLOB)
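
read_sha1_file(oid.hash, ...) calls are converted to the object_id-based read_object_file() throughout. A minimal sketch of the new call shape; the helper below is illustrative, the hunk itself only shows the call-site conversion:

    #include "cache.h"

    /*
     * Sketch only: load a blob by object id and reject other types,
     * following the pattern used by the converted call sites.
     */
    static void *read_blob_or_null(const struct object_id *oid, unsigned long *size)
    {
            enum object_type type;
            void *buf = read_object_file(oid, &type, size);

            if (buf && type != OBJ_BLOB) {
                    free(buf);
                    return NULL;
            }
            return buf;
    }
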
diff --git a/match-trees.c b/match-trees.c
index 0ca99d51626f49b5d4a88b9edc43c533c970dd0e..72cc2baa3f96b2cbfa296335c7f0ff1094b6e96c 100644 (file)
--- a/match-trees.c
+++ b/match-trees.c
@@ -54,7 +54,7 @@ static void *fill_tree_desc_strict(struct tree_desc *desc,
        enum object_type type;
        unsigned long size;
 
-       buffer = read_sha1_file(hash->hash, &type, &size);
+       buffer = read_object_file(hash, &type, &size);
        if (!buffer)
                die("unable to read tree (%s)", oid_to_hex(hash));
        if (type != OBJ_TREE)
@@ -180,7 +180,7 @@ static int splice_tree(const struct object_id *oid1, const char *prefix,
        if (*subpath)
                subpath++;
 
-       buf = read_sha1_file(oid1->hash, &type, &sz);
+       buf = read_object_file(oid1, &type, &sz);
        if (!buf)
                die("cannot read tree %s", oid_to_hex(oid1));
        init_tree_desc(&desc, buf, sz);
@@ -269,7 +269,7 @@ void shift_tree(const struct object_id *hash1,
                if (!*del_prefix)
                        return;
 
-               if (get_tree_entry(hash2->hash, del_prefix, shifted->hash, &mode))
+               if (get_tree_entry(hash2, del_prefix, shifted, &mode))
                        die("cannot find path %s in tree %s",
                            del_prefix, oid_to_hex(hash2));
                return;
@@ -296,12 +296,12 @@ void shift_tree_by(const struct object_id *hash1,
        unsigned candidate = 0;
 
        /* Can hash2 be a tree at shift_prefix in tree hash1? */
-       if (!get_tree_entry(hash1->hash, shift_prefix, sub1.hash, &mode1) &&
+       if (!get_tree_entry(hash1, shift_prefix, &sub1, &mode1) &&
            S_ISDIR(mode1))
                candidate |= 1;
 
        /* Can hash1 be a tree at shift_prefix in tree hash2? */
-       if (!get_tree_entry(hash2->hash, shift_prefix, sub2.hash, &mode2) &&
+       if (!get_tree_entry(hash2, shift_prefix, &sub2, &mode2) &&
            S_ISDIR(mode2))
                candidate |= 2;
 
diff --git a/mem-pool.c b/mem-pool.c
new file mode 100644 (file)
index 0000000..389d7af
--- /dev/null
+++ b/mem-pool.c
@@ -0,0 +1,55 @@
+/*
+ * Memory Pool implementation logic.
+ */
+
+#include "cache.h"
+#include "mem-pool.h"
+
+static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t block_alloc)
+{
+       struct mp_block *p;
+
+       mem_pool->pool_alloc += sizeof(struct mp_block) + block_alloc;
+       p = xmalloc(st_add(sizeof(struct mp_block), block_alloc));
+       p->next_block = mem_pool->mp_block;
+       p->next_free = (char *)p->space;
+       p->end = p->next_free + block_alloc;
+       mem_pool->mp_block = p;
+
+       return p;
+}
+
+void *mem_pool_alloc(struct mem_pool *mem_pool, size_t len)
+{
+       struct mp_block *p;
+       void *r;
+
+       /* round up to a 'uintmax_t' alignment */
+       if (len & (sizeof(uintmax_t) - 1))
+               len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1));
+
+       for (p = mem_pool->mp_block; p; p = p->next_block)
+               if (p->end - p->next_free >= len)
+                       break;
+
+       if (!p) {
+               if (len >= (mem_pool->block_alloc / 2)) {
+                       mem_pool->pool_alloc += len;
+                       return xmalloc(len);
+               }
+
+               p = mem_pool_alloc_block(mem_pool, mem_pool->block_alloc);
+       }
+
+       r = p->next_free;
+       p->next_free += len;
+       return r;
+}
+
+void *mem_pool_calloc(struct mem_pool *mem_pool, size_t count, size_t size)
+{
+       size_t len = st_mult(count, size);
+       void *r = mem_pool_alloc(mem_pool, len);
+       memset(r, 0, len);
+       return r;
+}
diff --git a/mem-pool.h b/mem-pool.h
new file mode 100644 (file)
index 0000000..829ad58
--- /dev/null
+++ b/mem-pool.h
@@ -0,0 +1,34 @@
+#ifndef MEM_POOL_H
+#define MEM_POOL_H
+
+struct mp_block {
+       struct mp_block *next_block;
+       char *next_free;
+       char *end;
+       uintmax_t space[FLEX_ARRAY]; /* more */
+};
+
+struct mem_pool {
+       struct mp_block *mp_block;
+
+       /*
+        * The amount of available memory to grow the pool by.
+        * This size does not include the overhead for the mp_block.
+        */
+       size_t block_alloc;
+
+       /* The total amount of memory allocated by the pool. */
+       size_t pool_alloc;
+};
+
+/*
+ * Alloc memory from the mem_pool.
+ */
+void *mem_pool_alloc(struct mem_pool *pool, size_t len);
+
+/*
+ * Allocate and zero memory from the memory pool.
+ */
+void *mem_pool_calloc(struct mem_pool *pool, size_t count, size_t size);
+
+#endif
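
A minimal usage sketch of the pool API declared above; zeroing the struct by hand and the 1 MiB block size are assumptions for the example, not something the header mandates:

    #include "cache.h"
    #include "mem-pool.h"

    /*
     * Sketch only: allocations come from blocks owned by the pool, so the
     * individual results are never free()d one by one.
     */
    static void mem_pool_demo(void)
    {
            struct mem_pool pool;
            char *name;
            int *counters;

            memset(&pool, 0, sizeof(pool));
            pool.block_alloc = 1024 * 1024;     /* grow the pool 1 MiB at a time */

            name = mem_pool_alloc(&pool, 64);                    /* uninitialized */
            counters = mem_pool_calloc(&pool, 16, sizeof(int));  /* zero-filled   */

            xsnprintf(name, 64, "entry-%d", counters[0]);
    }

Note that the header declares no release function, so a pool's blocks live until the process exits.
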
diff --git a/merge-blobs.c b/merge-blobs.c
index 9b6eac22e4256d8f2bf82961b6e4f320d89fdeba..fa49c17287f4120b4bbb75acc6b92b3d339710cd 100644 (file)
--- a/merge-blobs.c
+++ b/merge-blobs.c
@@ -11,7 +11,7 @@ static int fill_mmfile_blob(mmfile_t *f, struct blob *obj)
        unsigned long size;
        enum object_type type;
 
-       buf = read_sha1_file(obj->object.oid.hash, &type, &size);
+       buf = read_object_file(&obj->object.oid, &type, &size);
        if (!buf)
                return -1;
        if (type != OBJ_BLOB) {
@@ -66,7 +66,7 @@ void *merge_blobs(const char *path, struct blob *base, struct blob *our, struct
                        return NULL;
                if (!our)
                        our = their;
-               return read_sha1_file(our->object.oid.hash, &type, size);
+               return read_object_file(&our->object.oid, &type, size);
        }
 
        if (fill_mmfile_blob(&f1, our) < 0)
diff --git a/merge-recursive.c b/merge-recursive.c
index 869092f7b9bc0ce1104a1c43c7201acdee2eb4f8..35df695fa420073d04997a3792f1c3a45d278b5d 100644 (file)
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -49,6 +49,67 @@ static unsigned int path_hash(const char *path)
        return ignore_case ? strihash(path) : strhash(path);
 }
 
+static struct dir_rename_entry *dir_rename_find_entry(struct hashmap *hashmap,
+                                                     char *dir)
+{
+       struct dir_rename_entry key;
+
+       if (dir == NULL)
+               return NULL;
+       hashmap_entry_init(&key, strhash(dir));
+       key.dir = dir;
+       return hashmap_get(hashmap, &key, NULL);
+}
+
+static int dir_rename_cmp(const void *unused_cmp_data,
+                         const void *entry,
+                         const void *entry_or_key,
+                         const void *unused_keydata)
+{
+       const struct dir_rename_entry *e1 = entry;
+       const struct dir_rename_entry *e2 = entry_or_key;
+
+       return strcmp(e1->dir, e2->dir);
+}
+
+static void dir_rename_init(struct hashmap *map)
+{
+       hashmap_init(map, dir_rename_cmp, NULL, 0);
+}
+
+static void dir_rename_entry_init(struct dir_rename_entry *entry,
+                                 char *directory)
+{
+       hashmap_entry_init(entry, strhash(directory));
+       entry->dir = directory;
+       entry->non_unique_new_dir = 0;
+       strbuf_init(&entry->new_dir, 0);
+       string_list_init(&entry->possible_new_dirs, 0);
+}
+
+static struct collision_entry *collision_find_entry(struct hashmap *hashmap,
+                                                   char *target_file)
+{
+       struct collision_entry key;
+
+       hashmap_entry_init(&key, strhash(target_file));
+       key.target_file = target_file;
+       return hashmap_get(hashmap, &key, NULL);
+}
+
+static int collision_cmp(void *unused_cmp_data,
+                        const struct collision_entry *e1,
+                        const struct collision_entry *e2,
+                        const void *unused_keydata)
+{
+       return strcmp(e1->target_file, e2->target_file);
+}
+
+static void collision_init(struct hashmap *map)
+{
+       hashmap_init(map, (hashmap_cmp_fn) collision_cmp, NULL, 0);
+}
+
 static void flush_output(struct merge_options *o)
 {
        if (o->buffer_output < 2 && o->obuf.len) {
@@ -101,7 +162,7 @@ static struct commit *make_virtual_commit(struct tree *tree, const char *comment
        struct commit *commit = alloc_commit_node();
 
        set_merge_remote_desc(commit, comment, (struct object *)commit);
-       commit->tree = tree;
+       commit->maybe_tree = tree;
        commit->object.parsed = 1;
        return commit;
 }
@@ -119,6 +180,7 @@ static int oid_eq(const struct object_id *a, const struct object_id *b)
 
 enum rename_type {
        RENAME_NORMAL = 0,
+       RENAME_DIR,
        RENAME_DELETE,
        RENAME_ONE_FILE_TO_ONE,
        RENAME_ONE_FILE_TO_TWO,
@@ -228,7 +290,7 @@ static void output_commit_title(struct merge_options *o, struct commit *commit)
                strbuf_addf(&o->obuf, "virtual %s\n",
                        merge_remote_util(commit)->name);
        else {
-               strbuf_add_unique_abbrev(&o->obuf, commit->object.oid.hash,
+               strbuf_add_unique_abbrev(&o->obuf, &commit->object.oid,
                                         DEFAULT_ABBREV);
                strbuf_addch(&o->obuf, ' ');
                if (parse_commit(commit) != 0)
@@ -254,7 +316,7 @@ static int add_cacheinfo(struct merge_options *o,
 
        ce = make_cache_entry(mode, oid ? oid->hash : null_sha1, path, stage, 0);
        if (!ce)
-               return err(o, _("addinfo_cache failed for path '%s'"), path);
+               return err(o, _("add_cacheinfo failed for path '%s'; merge aborting."), path);
 
        ret = add_cache_entry(ce, options);
        if (refresh) {
@@ -262,7 +324,7 @@ static int add_cacheinfo(struct merge_options *o,
 
                nce = refresh_cache_entry(ce, CE_MATCH_REFRESH | CE_MATCH_IGNORE_MISSING);
                if (!nce)
-                       return err(o, _("addinfo_cache failed for path '%s'"), path);
+                       return err(o, _("add_cacheinfo failed to refresh for path '%s'; merge aborting."), path);
                if (nce != ce)
                        ret = add_cache_entry(nce, options);
        }
@@ -275,33 +337,44 @@ static void init_tree_desc_from_tree(struct tree_desc *desc, struct tree *tree)
        init_tree_desc(desc, tree->buffer, tree->size);
 }
 
-static int git_merge_trees(int index_only,
+static int git_merge_trees(struct merge_options *o,
                           struct tree *common,
                           struct tree *head,
                           struct tree *merge)
 {
        int rc;
        struct tree_desc t[3];
-       struct unpack_trees_options opts;
+       struct index_state tmp_index = { NULL };
 
-       memset(&opts, 0, sizeof(opts));
-       if (index_only)
-               opts.index_only = 1;
+       memset(&o->unpack_opts, 0, sizeof(o->unpack_opts));
+       if (o->call_depth)
+               o->unpack_opts.index_only = 1;
        else
-               opts.update = 1;
-       opts.merge = 1;
-       opts.head_idx = 2;
-       opts.fn = threeway_merge;
-       opts.src_index = &the_index;
-       opts.dst_index = &the_index;
-       setup_unpack_trees_porcelain(&opts, "merge");
+               o->unpack_opts.update = 1;
+       o->unpack_opts.merge = 1;
+       o->unpack_opts.head_idx = 2;
+       o->unpack_opts.fn = threeway_merge;
+       o->unpack_opts.src_index = &the_index;
+       o->unpack_opts.dst_index = &tmp_index;
+       setup_unpack_trees_porcelain(&o->unpack_opts, "merge");
 
        init_tree_desc_from_tree(t+0, common);
        init_tree_desc_from_tree(t+1, head);
        init_tree_desc_from_tree(t+2, merge);
 
-       rc = unpack_trees(3, t, &opts);
+       rc = unpack_trees(3, t, &o->unpack_opts);
        cache_tree_free(&active_cache_tree);
+
+       /*
+        * Update the_index to match the new results, AFTER saving a copy
+        * in o->orig_index.  Update src_index to point to the saved copy.
+        * (verify_uptodate() checks src_index, and the original index is
+        * the one that had the necessary modification timestamps.)
+        */
+       o->orig_index = the_index;
+       the_index = tmp_index;
+       o->unpack_opts.src_index = &o->orig_index;
+
        return rc;
 }
 
@@ -335,7 +408,7 @@ struct tree *write_tree_from_memory(struct merge_options *o)
        return result;
 }
 
-static int save_files_dirs(const unsigned char *sha1,
+static int save_files_dirs(const struct object_id *oid,
                struct strbuf *base, const char *path,
                unsigned int mode, int stage, void *context)
 {
@@ -360,6 +433,21 @@ static void get_files_dirs(struct merge_options *o, struct tree *tree)
        read_tree_recursive(tree, "", 0, 0, &match_all, save_files_dirs, o);
 }
 
+static int get_tree_entry_if_blob(const struct object_id *tree,
+                                 const char *path,
+                                 struct object_id *hashy,
+                                 unsigned int *mode_o)
+{
+       int ret;
+
+       ret = get_tree_entry(tree, path, hashy, mode_o);
+       if (S_ISDIR(*mode_o)) {
+               oidcpy(hashy, &null_oid);
+               *mode_o = 0;
+       }
+       return ret;
+}
+
 /*
  * Returns an index_entry instance which doesn't have to correspond to
  * a real cache entry in Git's index.
@@ -370,12 +458,12 @@ static struct stage_data *insert_stage_data(const char *path,
 {
        struct string_list_item *item;
        struct stage_data *e = xcalloc(1, sizeof(struct stage_data));
-       get_tree_entry(o->object.oid.hash, path,
-                       e->stages[1].oid.hash, &e->stages[1].mode);
-       get_tree_entry(a->object.oid.hash, path,
-                       e->stages[2].oid.hash, &e->stages[2].mode);
-       get_tree_entry(b->object.oid.hash, path,
-                       e->stages[3].oid.hash, &e->stages[3].mode);
+       get_tree_entry_if_blob(&o->object.oid, path,
+                              &e->stages[1].oid, &e->stages[1].mode);
+       get_tree_entry_if_blob(&a->object.oid, path,
+                              &e->stages[2].oid, &e->stages[2].mode);
+       get_tree_entry_if_blob(&b->object.oid, path,
+                              &e->stages[3].oid, &e->stages[3].mode);
        item = string_list_insert(entries, path);
        item->util = e;
        return e;
@@ -534,78 +622,10 @@ struct rename {
         */
        struct stage_data *src_entry;
        struct stage_data *dst_entry;
+       unsigned add_turned_into_rename:1;
        unsigned processed:1;
 };
 
-/*
- * Get information of all renames which occurred between 'o_tree' and
- * 'tree'. We need the three trees in the merge ('o_tree', 'a_tree' and
- * 'b_tree') to be able to associate the correct cache entries with
- * the rename information. 'tree' is always equal to either a_tree or b_tree.
- */
-static struct string_list *get_renames(struct merge_options *o,
-                                      struct tree *tree,
-                                      struct tree *o_tree,
-                                      struct tree *a_tree,
-                                      struct tree *b_tree,
-                                      struct string_list *entries)
-{
-       int i;
-       struct string_list *renames;
-       struct diff_options opts;
-
-       renames = xcalloc(1, sizeof(struct string_list));
-       if (!o->detect_rename)
-               return renames;
-
-       diff_setup(&opts);
-       opts.flags.recursive = 1;
-       opts.flags.rename_empty = 0;
-       opts.detect_rename = DIFF_DETECT_RENAME;
-       opts.rename_limit = o->merge_rename_limit >= 0 ? o->merge_rename_limit :
-                           o->diff_rename_limit >= 0 ? o->diff_rename_limit :
-                           1000;
-       opts.rename_score = o->rename_score;
-       opts.show_rename_progress = o->show_rename_progress;
-       opts.output_format = DIFF_FORMAT_NO_OUTPUT;
-       diff_setup_done(&opts);
-       diff_tree_oid(&o_tree->object.oid, &tree->object.oid, "", &opts);
-       diffcore_std(&opts);
-       if (opts.needed_rename_limit > o->needed_rename_limit)
-               o->needed_rename_limit = opts.needed_rename_limit;
-       for (i = 0; i < diff_queued_diff.nr; ++i) {
-               struct string_list_item *item;
-               struct rename *re;
-               struct diff_filepair *pair = diff_queued_diff.queue[i];
-               if (pair->status != 'R') {
-                       diff_free_filepair(pair);
-                       continue;
-               }
-               re = xmalloc(sizeof(*re));
-               re->processed = 0;
-               re->pair = pair;
-               item = string_list_lookup(entries, re->pair->one->path);
-               if (!item)
-                       re->src_entry = insert_stage_data(re->pair->one->path,
-                                       o_tree, a_tree, b_tree, entries);
-               else
-                       re->src_entry = item->util;
-
-               item = string_list_lookup(entries, re->pair->two->path);
-               if (!item)
-                       re->dst_entry = insert_stage_data(re->pair->two->path,
-                                       o_tree, a_tree, b_tree, entries);
-               else
-                       re->dst_entry = item->util;
-               item = string_list_insert(renames, pair->one->path);
-               item->util = re;
-       }
-       opts.output_format = DIFF_FORMAT_NO_OUTPUT;
-       diff_queued_diff.nr = 0;
-       diff_flush(&opts);
-       return renames;
-}
-
 static int update_stages(struct merge_options *opt, const char *path,
                         const struct diff_filespec *o,
                         const struct diff_filespec *a,
@@ -637,6 +657,27 @@ static int update_stages(struct merge_options *opt, const char *path,
        return 0;
 }
 
+static int update_stages_for_stage_data(struct merge_options *opt,
+                                       const char *path,
+                                       const struct stage_data *stage_data)
+{
+       struct diff_filespec o, a, b;
+
+       o.mode = stage_data->stages[1].mode;
+       oidcpy(&o.oid, &stage_data->stages[1].oid);
+
+       a.mode = stage_data->stages[2].mode;
+       oidcpy(&a.oid, &stage_data->stages[2].oid);
+
+       b.mode = stage_data->stages[3].mode;
+       oidcpy(&b.oid, &stage_data->stages[3].oid);
+
+       return update_stages(opt, path,
+                            is_null_oid(&o.oid) ? NULL : &o,
+                            is_null_oid(&a.oid) ? NULL : &a,
+                            is_null_oid(&b.oid) ? NULL : &b);
+}
+
 static void update_entry(struct stage_data *entry,
                         struct diff_filespec *o,
                         struct diff_filespec *a,
@@ -738,31 +779,92 @@ static int dir_in_way(const char *path, int check_working_copy, int empty_ok)
                !(empty_ok && is_empty_dir(path));
 }
 
-static int was_tracked(const char *path)
+/*
+ * Returns whether path was tracked in the index before the merge started,
+ * and its oid and mode match the specified values
+ */
+static int was_tracked_and_matches(struct merge_options *o, const char *path,
+                                  const struct object_id *oid, unsigned mode)
 {
-       int pos = cache_name_pos(path, strlen(path));
+       int pos = index_name_pos(&o->orig_index, path, strlen(path));
+       struct cache_entry *ce;
+
+       if (0 > pos)
+               /* we were not tracking this path before the merge */
+               return 0;
+
+       /* See if the file we were tracking before matches */
+       ce = o->orig_index.cache[pos];
+       return (oid_eq(&ce->oid, oid) && ce->ce_mode == mode);
+}
+
+/*
+ * Returns whether path was tracked in the index before the merge started
+ */
+static int was_tracked(struct merge_options *o, const char *path)
+{
+       int pos = index_name_pos(&o->orig_index, path, strlen(path));
 
        if (0 <= pos)
-               /* we have been tracking this path */
+               /* we were tracking this path before the merge */
                return 1;
 
-       /*
-        * Look for an unmerged entry for the path,
-        * specifically stage #2, which would indicate
-        * that "our" side before the merge started
-        * had the path tracked (and resulted in a conflict).
-        */
-       for (pos = -1 - pos;
-            pos < active_nr && !strcmp(path, active_cache[pos]->name);
-            pos++)
-               if (ce_stage(active_cache[pos]) == 2)
-                       return 1;
        return 0;
 }
 
 static int would_lose_untracked(const char *path)
 {
-       return !was_tracked(path) && file_exists(path);
+       /*
+        * This may look like it can be simplified to:
+        *   return !was_tracked(o, path) && file_exists(path)
+        * but it can't.  This function needs to know whether path was in
+        * the working tree due to EITHER having been tracked in the index
+        * before the merge OR having been put into the working copy and
+        * index by unpack_trees().  Due to that either-or requirement, we
+        * check the current index instead of the original one.
+        *
+        * Note that we do not need to worry about merge-recursive itself
+        * updating the index after unpack_trees() and before calling this
+        * function, because we strictly require all code paths in
+        * merge-recursive to update the working tree first and the index
+        * second.  Doing otherwise would break
+        * update_file()/would_lose_untracked(); see every comment in this
+        * file which mentions "update_stages".
+        */
+       int pos = cache_name_pos(path, strlen(path));
+
+       if (pos < 0)
+               pos = -1 - pos;
+       while (pos < active_nr &&
+              !strcmp(path, active_cache[pos]->name)) {
+               /*
+                * If stage #0, it is definitely tracked.
+                * If it has stage #2 then it was tracked
+                * before this merge started.  All other
+                * cases the path was not tracked.
+                */
+               switch (ce_stage(active_cache[pos])) {
+               case 0:
+               case 2:
+                       return 0;
+               }
+               pos++;
+       }
+       return file_exists(path);
+}
+
+static int was_dirty(struct merge_options *o, const char *path)
+{
+       struct cache_entry *ce;
+       int dirty = 1;
+
+       if (o->call_depth || !was_tracked(o, path))
+               return !dirty;
+
+       ce = index_file_exists(o->unpack_opts.src_index,
+                              path, strlen(path), ignore_case);
+       dirty = verify_uptodate(ce, &o->unpack_opts) != 0;
+       return dirty;
 }
 
 static int make_room_for_path(struct merge_options *o, const char *path)
@@ -842,7 +944,7 @@ static int update_file_flags(struct merge_options *o,
                        goto update_index;
                }
 
-               buf = read_sha1_file(oid->hash, &type, &size);
+               buf = read_object_file(oid, &type, &size);
                if (!buf)
                        return err(o, _("cannot read object %s '%s'"), oid_to_hex(oid), path);
                if (type != OBJ_BLOB) {
@@ -893,7 +995,9 @@ static int update_file_flags(struct merge_options *o,
        }
  update_index:
        if (!ret && update_cache)
-               add_cacheinfo(o, mode, oid, path, 0, update_wd, ADD_CACHE_OK_TO_ADD);
+               if (add_cacheinfo(o, mode, oid, path, 0, update_wd,
+                                 ADD_CACHE_OK_TO_ADD))
+                       return -1;
        return ret;
 }
 
@@ -978,12 +1082,13 @@ static int merge_3way(struct merge_options *o,
 }
 
 static int merge_file_1(struct merge_options *o,
-                                          const struct diff_filespec *one,
-                                          const struct diff_filespec *a,
-                                          const struct diff_filespec *b,
-                                          const char *branch1,
-                                          const char *branch2,
-                                          struct merge_file_info *result)
+                       const struct diff_filespec *one,
+                       const struct diff_filespec *a,
+                       const struct diff_filespec *b,
+                       const char *filename,
+                       const char *branch1,
+                       const char *branch2,
+                       struct merge_file_info *result)
 {
        result->merge = 0;
        result->clean = 1;
@@ -1063,18 +1168,22 @@ static int merge_file_1(struct merge_options *o,
                        die("BUG: unsupported object type in the tree");
        }
 
+       if (result->merge)
+               output(o, 2, _("Auto-merging %s"), filename);
+
        return 0;
 }
 
 static int merge_file_special_markers(struct merge_options *o,
-                          const struct diff_filespec *one,
-                          const struct diff_filespec *a,
-                          const struct diff_filespec *b,
-                          const char *branch1,
-                          const char *filename1,
-                          const char *branch2,
-                          const char *filename2,
-                          struct merge_file_info *mfi)
+                                     const struct diff_filespec *one,
+                                     const struct diff_filespec *a,
+                                     const struct diff_filespec *b,
+                                     const char *target_filename,
+                                     const char *branch1,
+                                     const char *filename1,
+                                     const char *branch2,
+                                     const char *filename2,
+                                     struct merge_file_info *mfi)
 {
        char *side1 = NULL;
        char *side2 = NULL;
@@ -1085,22 +1194,23 @@ static int merge_file_special_markers(struct merge_options *o,
        if (filename2)
                side2 = xstrfmt("%s:%s", branch2, filename2);
 
-       ret = merge_file_1(o, one, a, b,
+       ret = merge_file_1(o, one, a, b, target_filename,
                           side1 ? side1 : branch1,
                           side2 ? side2 : branch2, mfi);
+
        free(side1);
        free(side2);
        return ret;
 }
 
 static int merge_file_one(struct merge_options *o,
-                                        const char *path,
-                                        const struct object_id *o_oid, int o_mode,
-                                        const struct object_id *a_oid, int a_mode,
-                                        const struct object_id *b_oid, int b_mode,
-                                        const char *branch1,
-                                        const char *branch2,
-                                        struct merge_file_info *mfi)
+                         const char *path,
+                         const struct object_id *o_oid, int o_mode,
+                         const struct object_id *a_oid, int a_mode,
+                         const struct object_id *b_oid, int b_mode,
+                         const char *branch1,
+                         const char *branch2,
+                         struct merge_file_info *mfi)
 {
        struct diff_filespec one, a, b;
 
@@ -1111,7 +1221,39 @@ static int merge_file_one(struct merge_options *o,
        a.mode = a_mode;
        oidcpy(&b.oid, b_oid);
        b.mode = b_mode;
-       return merge_file_1(o, &one, &a, &b, branch1, branch2, mfi);
+       return merge_file_1(o, &one, &a, &b, path, branch1, branch2, mfi);
+}
+
+static int conflict_rename_dir(struct merge_options *o,
+                              struct diff_filepair *pair,
+                              const char *rename_branch,
+                              const char *other_branch)
+{
+       const struct diff_filespec *dest = pair->two;
+
+       if (!o->call_depth && would_lose_untracked(dest->path)) {
+               char *alt_path = unique_path(o, dest->path, rename_branch);
+
+               output(o, 1, _("Error: Refusing to lose untracked file at %s; "
+                              "writing to %s instead."),
+                      dest->path, alt_path);
+               /*
+                * Write the file in worktree at alt_path, but not in the
+                * index.  Instead, write to dest->path for the index but
+                * only at the higher appropriate stage.
+                */
+               if (update_file(o, 0, &dest->oid, dest->mode, alt_path))
+                       return -1;
+               free(alt_path);
+               return update_stages(o, dest->path, NULL,
+                                    rename_branch == o->branch1 ? dest : NULL,
+                                    rename_branch == o->branch1 ? NULL : dest);
+       }
+
+       /* Update dest->path both in index and in worktree */
+       if (update_file(o, 1, &dest->oid, dest->mode, dest->path))
+               return -1;
+       return 0;
 }
 
 static int handle_change_delete(struct merge_options *o,
@@ -1127,7 +1269,8 @@ static int handle_change_delete(struct merge_options *o,
        const char *update_path = path;
        int ret = 0;
 
-       if (dir_in_way(path, !o->call_depth, 0)) {
+       if (dir_in_way(path, !o->call_depth, 0) ||
+           (!o->call_depth && would_lose_untracked(path))) {
                update_path = alt_path = unique_path(o, path, change_branch);
        }
 
@@ -1242,17 +1385,34 @@ static int handle_file(struct merge_options *o,
 
        add = filespec_from_entry(&other, dst_entry, stage ^ 1);
        if (add) {
+               int ren_src_was_dirty = was_dirty(o, rename->path);
                char *add_name = unique_path(o, rename->path, other_branch);
                if (update_file(o, 0, &add->oid, add->mode, add_name))
                        return -1;
 
-               remove_file(o, 0, rename->path, 0);
+               if (ren_src_was_dirty) {
+                       output(o, 1, _("Refusing to lose dirty file at %s"),
+                              rename->path);
+               }
+               /*
+                * Because the double negatives somehow keep confusing me...
+                *    1) update_wd iff !ren_src_was_dirty.
+                *    2) no_wd iff !update_wd
+                *    3) so, no_wd == !!ren_src_was_dirty == ren_src_was_dirty
+                */
+               remove_file(o, 0, rename->path, ren_src_was_dirty);
                dst_name = unique_path(o, rename->path, cur_branch);
        } else {
                if (dir_in_way(rename->path, !o->call_depth, 0)) {
                        dst_name = unique_path(o, rename->path, cur_branch);
                        output(o, 1, _("%s is a directory in %s adding as %s instead"),
                               rename->path, other_branch, dst_name);
+               } else if (!o->call_depth &&
+                          would_lose_untracked(rename->path)) {
+                       dst_name = unique_path(o, rename->path, cur_branch);
+                       output(o, 1, _("Refusing to lose untracked file at %s; "
+                                      "adding as %s instead"),
+                              rename->path, dst_name);
                }
        }
        if ((ret = update_file(o, 0, &rename->oid, rename->mode, dst_name)))
@@ -1339,6 +1499,8 @@ static int conflict_rename_rename_2to1(struct merge_options *o,
        struct diff_filespec *c1 = ci->pair1->two;
        struct diff_filespec *c2 = ci->pair2->two;
        char *path = c1->path; /* == c2->path */
+       char *path_side_1_desc;
+       char *path_side_2_desc;
        struct merge_file_info mfi_c1;
        struct merge_file_info mfi_c2;
        int ret;
@@ -1352,13 +1514,19 @@ static int conflict_rename_rename_2to1(struct merge_options *o,
        remove_file(o, 1, a->path, o->call_depth || would_lose_untracked(a->path));
        remove_file(o, 1, b->path, o->call_depth || would_lose_untracked(b->path));
 
+       path_side_1_desc = xstrfmt("%s (was %s)", path, a->path);
+       path_side_2_desc = xstrfmt("%s (was %s)", path, b->path);
        if (merge_file_special_markers(o, a, c1, &ci->ren1_other,
+                                      path_side_1_desc,
                                       o->branch1, c1->path,
                                       o->branch2, ci->ren1_other.path, &mfi_c1) ||
            merge_file_special_markers(o, b, &ci->ren2_other, c2,
+                                      path_side_2_desc,
                                       o->branch1, ci->ren2_other.path,
                                       o->branch2, c2->path, &mfi_c2))
                return -1;
+       free(path_side_1_desc);
+       free(path_side_2_desc);
 
        if (o->call_depth) {
                /*
@@ -1378,11 +1546,43 @@ static int conflict_rename_rename_2to1(struct merge_options *o,
                char *new_path2 = unique_path(o, path, ci->branch2);
                output(o, 1, _("Renaming %s to %s and %s to %s instead"),
                       a->path, new_path1, b->path, new_path2);
-               remove_file(o, 0, path, 0);
+               if (was_dirty(o, path))
+                       output(o, 1, _("Refusing to lose dirty file at %s"),
+                              path);
+               else if (would_lose_untracked(path))
+                       /*
+                        * Only way we get here is if both renames were from
+                        * a directory rename AND user had an untracked file
+                        * at the location where both files end up after the
+                        * two directory renames.  See testcase 10d of t6043.
+                        */
+                       output(o, 1, _("Refusing to lose untracked file at "
+                                      "%s, even though it's in the way."),
+                              path);
+               else
+                       remove_file(o, 0, path, 0);
                ret = update_file(o, 0, &mfi_c1.oid, mfi_c1.mode, new_path1);
                if (!ret)
                        ret = update_file(o, 0, &mfi_c2.oid, mfi_c2.mode,
                                          new_path2);
+               /*
+                * unpack_trees() actually populates the index for us for
+                * "normal" rename/rename(2to1) situtations so that the
+                * correct entries are at the higher stages, which would
+                * make the call below to update_stages_for_stage_data
+                * unnecessary.  However, if either of the renames came
+                * from a directory rename, then unpack_trees() will not
+                * have gotten the right data loaded into the index, so we
+                * need to do so now.  (While it'd be tempting to move this
+                * call to update_stages_for_stage_data() to
+                * apply_directory_rename_modifications(), that would break
+                * our intermediate calls to would_lose_untracked() since
+                * those rely on the current in-memory index.  See also the
+                * big "NOTE" in update_stages()).
+                */
+               if (update_stages_for_stage_data(o, path, ci->dst_entry1))
+                       ret = -1;
+
                free(new_path2);
                free(new_path1);
        }
@@ -1390,6 +1590,754 @@ static int conflict_rename_rename_2to1(struct merge_options *o,
        return ret;
 }
 
+/*
+ * Get the diff_filepairs changed between o_tree and tree.
+ */
+static struct diff_queue_struct *get_diffpairs(struct merge_options *o,
+                                              struct tree *o_tree,
+                                              struct tree *tree)
+{
+       struct diff_queue_struct *ret;
+       struct diff_options opts;
+
+       diff_setup(&opts);
+       opts.flags.recursive = 1;
+       opts.flags.rename_empty = 0;
+       opts.detect_rename = DIFF_DETECT_RENAME;
+       opts.rename_limit = o->merge_rename_limit >= 0 ? o->merge_rename_limit :
+                           o->diff_rename_limit >= 0 ? o->diff_rename_limit :
+                           1000;
+       opts.rename_score = o->rename_score;
+       opts.show_rename_progress = o->show_rename_progress;
+       opts.output_format = DIFF_FORMAT_NO_OUTPUT;
+       diff_setup_done(&opts);
+       diff_tree_oid(&o_tree->object.oid, &tree->object.oid, "", &opts);
+       diffcore_std(&opts);
+       if (opts.needed_rename_limit > o->needed_rename_limit)
+               o->needed_rename_limit = opts.needed_rename_limit;
+
+       ret = xmalloc(sizeof(*ret));
+       *ret = diff_queued_diff;
+
+       opts.output_format = DIFF_FORMAT_NO_OUTPUT;
+       diff_queued_diff.nr = 0;
+       diff_queued_diff.queue = NULL;
+       diff_flush(&opts);
+       return ret;
+}
+
+static int tree_has_path(struct tree *tree, const char *path)
+{
+       struct object_id hashy;
+       unsigned int mode_o;
+
+       return !get_tree_entry(&tree->object.oid, path,
+                              &hashy, &mode_o);
+}
+
+/*
+ * Return a new string that replaces the beginning portion (which matches
+ * entry->dir), with entry->new_dir.  In perl-speak:
+ *   new_path_name = (old_path =~ s/entry->dir/entry->new_dir/);
+ * NOTE:
+ *   Caller must ensure that old_path starts with entry->dir + '/'.
+ */
+static char *apply_dir_rename(struct dir_rename_entry *entry,
+                             const char *old_path)
+{
+       struct strbuf new_path = STRBUF_INIT;
+       int oldlen, newlen;
+
+       if (entry->non_unique_new_dir)
+               return NULL;
+
+       oldlen = strlen(entry->dir);
+       newlen = entry->new_dir.len + (strlen(old_path) - oldlen) + 1;
+       strbuf_grow(&new_path, newlen);
+       strbuf_addbuf(&new_path, &entry->new_dir);
+       strbuf_addstr(&new_path, &old_path[oldlen]);
+
+       return strbuf_detach(&new_path, NULL);
+}
+
+static void get_renamed_dir_portion(const char *old_path, const char *new_path,
+                                   char **old_dir, char **new_dir)
+{
+       char *end_of_old, *end_of_new;
+       int old_len, new_len;
+
+       *old_dir = NULL;
+       *new_dir = NULL;
+
+       /*
+        * For
+        *    "a/b/c/d/e/foo.c" -> "a/b/some/thing/else/e/foo.c"
+        * the "e/foo.c" part is the same, we just want to know that
+        *    "a/b/c/d" was renamed to "a/b/some/thing/else"
+        * so, for this example, this function returns "a/b/c/d" in
+        * *old_dir and "a/b/some/thing/else" in *new_dir.
+        *
+        * Also, if the basename of the file changed, we don't care.  We
+        * want to know which portion of the directory, if any, changed.
+        */
+       end_of_old = strrchr(old_path, '/');
+       end_of_new = strrchr(new_path, '/');
+
+       if (end_of_old == NULL || end_of_new == NULL)
+               return;
+       while (*--end_of_new == *--end_of_old &&
+              end_of_old != old_path &&
+              end_of_new != new_path)
+               ; /* Do nothing; all in the while loop */
+       /*
+        * We've found the first non-matching character in the directory
+        * paths.  That means the current directory we were comparing
+        * represents the rename.  Move end_of_old and end_of_new back
+        * to the full directory name.
+        */
+       if (*end_of_old == '/')
+               end_of_old++;
+       if (*end_of_old != '/')
+               end_of_new++;
+       end_of_old = strchr(end_of_old, '/');
+       end_of_new = strchr(end_of_new, '/');
+
+       /*
+        * It may have been the case that old_path and new_path were the same
+        * directory all along.  Don't claim a rename if they're the same.
+        */
+       old_len = end_of_old - old_path;
+       new_len = end_of_new - new_path;
+
+       if (old_len != new_len || strncmp(old_path, new_path, old_len)) {
+               *old_dir = xstrndup(old_path, old_len);
+               *new_dir = xstrndup(new_path, new_len);
+       }
+}
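As a rough, self-contained sketch of what get_renamed_dir_portion() is after (hypothetical helper, plain libc): compare the directory portions of the old and new paths and report them when they differ.  Unlike the real function, this simplification does not back up over trailing directory components shared by both sides, so for the example in the comment above it reports the full directory names ("a/b/c/d/e" -> "a/b/some/thing/else/e") rather than only the deepest renamed portion.

    #include <stdio.h>
    #include <string.h>

    /*
     * Simplified, hypothetical sketch: print the directory parts of the
     * old and new paths when they differ.  The real helper additionally
     * trims the common trailing components.
     */
    static void show_dir_rename(const char *old_path, const char *new_path)
    {
            const char *old_slash = strrchr(old_path, '/');
            const char *new_slash = strrchr(new_path, '/');
            int old_len, new_len;

            if (!old_slash || !new_slash)
                    return;  /* at least one path has no directory part */

            old_len = (int)(old_slash - old_path);
            new_len = (int)(new_slash - new_path);
            if (old_len == new_len && !strncmp(old_path, new_path, old_len))
                    return;  /* same directory; not a directory rename */

            printf("%.*s -> %.*s\n", old_len, old_path, new_len, new_path);
    }

    int main(void)
    {
            /* prints "a/b/c/d/e -> a/b/some/thing/else/e" */
            show_dir_rename("a/b/c/d/e/foo.c", "a/b/some/thing/else/e/foo.c");
            return 0;
    }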
+
+static void remove_hashmap_entries(struct hashmap *dir_renames,
+                                  struct string_list *items_to_remove)
+{
+       int i;
+       struct dir_rename_entry *entry;
+
+       for (i = 0; i < items_to_remove->nr; i++) {
+               entry = items_to_remove->items[i].util;
+               hashmap_remove(dir_renames, entry, NULL);
+       }
+       string_list_clear(items_to_remove, 0);
+}
+
+/*
+ * See if there is a directory rename for path, and if there are any file
+ * level conflicts for the renamed location.  If there is a rename and
+ * there are no conflicts, return the new name.  Otherwise, return NULL.
+ */
+static char *handle_path_level_conflicts(struct merge_options *o,
+                                        const char *path,
+                                        struct dir_rename_entry *entry,
+                                        struct hashmap *collisions,
+                                        struct tree *tree)
+{
+       char *new_path = NULL;
+       struct collision_entry *collision_ent;
+       int clean = 1;
+       struct strbuf collision_paths = STRBUF_INIT;
+
+       /*
+        * entry has the mapping of old directory name to new directory name
+        * that we want to apply to path.
+        */
+       new_path = apply_dir_rename(entry, path);
+
+       if (!new_path) {
+               /* This should only happen when entry->non_unique_new_dir is set */
+               if (!entry->non_unique_new_dir)
+                       BUG("entry->non_unique_new_dir not set and !new_path");
+               output(o, 1, _("CONFLICT (directory rename split): "
+                              "Unclear where to place %s because directory "
+                              "%s was renamed to multiple other directories, "
+                              "with no destination getting a majority of the "
+                              "files."),
+                      path, entry->dir);
+               clean = 0;
+               return NULL;
+       }
+
+       /*
+        * The caller needs to have ensured that it has pre-populated
+        * collisions with all paths that map to new_path.  Do a quick check
+        * to ensure that's the case.
+        */
+       collision_ent = collision_find_entry(collisions, new_path);
+       if (collision_ent == NULL)
+               BUG("collision_ent is NULL");
+
+       /*
+        * Check for one-sided add/add/.../add conflicts, i.e. cases
+        * where implicit renames caused by the other side's directory
+        * rename(s) would put multiple paths from this side of history
+        * into the same location.  Warn and bail on directory renames
+        * for such paths.
+        */
+       if (collision_ent->reported_already) {
+               clean = 0;
+       } else if (tree_has_path(tree, new_path)) {
+               collision_ent->reported_already = 1;
+               strbuf_add_separated_string_list(&collision_paths, ", ",
+                                                &collision_ent->source_files);
+               output(o, 1, _("CONFLICT (implicit dir rename): Existing "
+                              "file/dir at %s in the way of implicit "
+                              "directory rename(s) putting the following "
+                              "path(s) there: %s."),
+                      new_path, collision_paths.buf);
+               clean = 0;
+       } else if (collision_ent->source_files.nr > 1) {
+               collision_ent->reported_already = 1;
+               strbuf_add_separated_string_list(&collision_paths, ", ",
+                                                &collision_ent->source_files);
+               output(o, 1, _("CONFLICT (implicit dir rename): Cannot map "
+                              "more than one path to %s; implicit directory "
+                              "renames tried to put these paths there: %s"),
+                      new_path, collision_paths.buf);
+               clean = 0;
+       }
+
+       /* Free memory we no longer need */
+       strbuf_release(&collision_paths);
+       if (!clean && new_path) {
+               free(new_path);
+               return NULL;
+       }
+
+       return new_path;
+}
+
+/*
+ * There are a couple things we want to do at the directory level:
+ *   1. Check for both sides renaming to the same thing, in order to avoid
+ *      implicit renaming of files that should be left in place.  (See
+ *      testcase 6b in t6043 for details.)
+ *   2. Prune directory renames if there are still files left in
+ *      the original directory.  These represent a partial directory rename,
+ *      i.e. a rename where only some of the files within the directory
+ *      were renamed elsewhere.  (Technically, this could be done earlier
+ *      in get_directory_renames(), except that would prevent us from
+ *      doing the previous check and thus failing testcase 6b.)
+ *   3. Check for rename/rename(1to2) conflicts (at the directory level).
+ *      In the future, we could potentially record this info as well and
+ *      omit reporting rename/rename(1to2) conflicts for each path within
+ *      the affected directories, thus cleaning up the merge output.
+ *   NOTE: We do NOT check for rename/rename(2to1) conflicts at the
+ *         directory level, because merging directories is fine.  If it
+ *         causes conflicts for files within those merged directories, then
+ *         that should be detected at the individual path level.
+ */
+static void handle_directory_level_conflicts(struct merge_options *o,
+                                            struct hashmap *dir_re_head,
+                                            struct tree *head,
+                                            struct hashmap *dir_re_merge,
+                                            struct tree *merge)
+{
+       struct hashmap_iter iter;
+       struct dir_rename_entry *head_ent;
+       struct dir_rename_entry *merge_ent;
+
+       struct string_list remove_from_head = STRING_LIST_INIT_NODUP;
+       struct string_list remove_from_merge = STRING_LIST_INIT_NODUP;
+
+       hashmap_iter_init(dir_re_head, &iter);
+       while ((head_ent = hashmap_iter_next(&iter))) {
+               merge_ent = dir_rename_find_entry(dir_re_merge, head_ent->dir);
+               if (merge_ent &&
+                   !head_ent->non_unique_new_dir &&
+                   !merge_ent->non_unique_new_dir &&
+                   !strbuf_cmp(&head_ent->new_dir, &merge_ent->new_dir)) {
+                       /* 1. Renamed identically; remove it from both sides */
+                       string_list_append(&remove_from_head,
+                                          head_ent->dir)->util = head_ent;
+                       strbuf_release(&head_ent->new_dir);
+                       string_list_append(&remove_from_merge,
+                                          merge_ent->dir)->util = merge_ent;
+                       strbuf_release(&merge_ent->new_dir);
+               } else if (tree_has_path(head, head_ent->dir)) {
+                       /* 2. This wasn't a directory rename after all */
+                       string_list_append(&remove_from_head,
+                                          head_ent->dir)->util = head_ent;
+                       strbuf_release(&head_ent->new_dir);
+               }
+       }
+
+       remove_hashmap_entries(dir_re_head, &remove_from_head);
+       remove_hashmap_entries(dir_re_merge, &remove_from_merge);
+
+       hashmap_iter_init(dir_re_merge, &iter);
+       while ((merge_ent = hashmap_iter_next(&iter))) {
+               head_ent = dir_rename_find_entry(dir_re_head, merge_ent->dir);
+               if (tree_has_path(merge, merge_ent->dir)) {
+                       /* 2. This wasn't a directory rename after all */
+                       string_list_append(&remove_from_merge,
+                                          merge_ent->dir)->util = merge_ent;
+               } else if (head_ent &&
+                          !head_ent->non_unique_new_dir &&
+                          !merge_ent->non_unique_new_dir) {
+                       /* 3. rename/rename(1to2) */
+                       /*
+                        * We can assume it's not rename/rename(1to1) because
+                        * that was case (1), already checked above.  So we
+                        * know that head_ent->new_dir and merge_ent->new_dir
+                        * are different strings.
+                        */
+                       output(o, 1, _("CONFLICT (rename/rename): "
+                                      "Rename directory %s->%s in %s. "
+                                      "Rename directory %s->%s in %s"),
+                              head_ent->dir, head_ent->new_dir.buf, o->branch1,
+                              head_ent->dir, merge_ent->new_dir.buf, o->branch2);
+                       string_list_append(&remove_from_head,
+                                          head_ent->dir)->util = head_ent;
+                       strbuf_release(&head_ent->new_dir);
+                       string_list_append(&remove_from_merge,
+                                          merge_ent->dir)->util = merge_ent;
+                       strbuf_release(&merge_ent->new_dir);
+               }
+       }
+
+       remove_hashmap_entries(dir_re_head, &remove_from_head);
+       remove_hashmap_entries(dir_re_merge, &remove_from_merge);
+}
+
+static struct hashmap *get_directory_renames(struct diff_queue_struct *pairs,
+                                            struct tree *tree)
+{
+       struct hashmap *dir_renames;
+       struct hashmap_iter iter;
+       struct dir_rename_entry *entry;
+       int i;
+
+       /*
+        * Typically, we think of a directory rename as all files from a
+        * certain directory being moved to a target directory.  However,
+        * what if someone first moved two files from the original
+        * directory in one commit, and then renamed the directory
+        * somewhere else in a later commit?  At merge time, we just know
+        * that files from the original directory went to two different
+        * places, and that the bulk of them ended up in the same place.
+        * We want each directory rename to represent where the bulk of the
+        * files from that directory end up; this function exists to find
+        * where the bulk of the files went.
+        *
+        * The first loop below simply iterates through the list of file
+        * renames, finding out how often each directory rename pair
+        * possibility occurs.
+        */
+       dir_renames = xmalloc(sizeof(*dir_renames));
+       dir_rename_init(dir_renames);
+       for (i = 0; i < pairs->nr; ++i) {
+               struct string_list_item *item;
+               int *count;
+               struct diff_filepair *pair = pairs->queue[i];
+               char *old_dir, *new_dir;
+
+               /* File not part of directory rename if it wasn't renamed */
+               if (pair->status != 'R')
+                       continue;
+
+               get_renamed_dir_portion(pair->one->path, pair->two->path,
+                                       &old_dir,        &new_dir);
+               if (!old_dir)
+                       /* Directory didn't change at all; ignore this one. */
+                       continue;
+
+               entry = dir_rename_find_entry(dir_renames, old_dir);
+               if (!entry) {
+                       entry = xmalloc(sizeof(*entry));
+                       dir_rename_entry_init(entry, old_dir);
+                       hashmap_put(dir_renames, entry);
+               } else {
+                       free(old_dir);
+               }
+               item = string_list_lookup(&entry->possible_new_dirs, new_dir);
+               if (!item) {
+                       item = string_list_insert(&entry->possible_new_dirs,
+                                                 new_dir);
+                       item->util = xcalloc(1, sizeof(int));
+               } else {
+                       free(new_dir);
+               }
+               count = item->util;
+               *count += 1;
+       }
+
+       /*
+        * For each directory with files moved out of it, we find out which
+        * target directory received the most files so we can declare it to
+        * be the "winning" target location for the directory rename.  This
+        * winner gets recorded in new_dir.  If there is no winner
+        * (multiple target directories received the same number of files),
+        * we set non_unique_new_dir.  Once we've determined the winner (or
+        * that there is no winner), we no longer need possible_new_dirs.
+        */
+       hashmap_iter_init(dir_renames, &iter);
+       while ((entry = hashmap_iter_next(&iter))) {
+               int max = 0;
+               int bad_max = 0;
+               char *best = NULL;
+
+               for (i = 0; i < entry->possible_new_dirs.nr; i++) {
+                       int *count = entry->possible_new_dirs.items[i].util;
+
+                       if (*count == max)
+                               bad_max = max;
+                       else if (*count > max) {
+                               max = *count;
+                               best = entry->possible_new_dirs.items[i].string;
+                       }
+               }
+               if (bad_max == max)
+                       entry->non_unique_new_dir = 1;
+               else {
+                       assert(entry->new_dir.len == 0);
+                       strbuf_addstr(&entry->new_dir, best);
+               }
+               /*
+                * The relevant directory sub-portions of the original full
+                * filepaths were xstrndup'ed before being inserted into
+                * possible_new_dirs; instead of manually iterating the
+                * list and freeing each one, just lie and tell
+                * possible_new_dirs that it did the strdup'ing so that it
+                * will free them for us.
+                */
+               entry->possible_new_dirs.strdup_strings = 1;
+               string_list_clear(&entry->possible_new_dirs, 1);
+       }
+
+       return dir_renames;
+}
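A minimal sketch of the winner selection just described (the candidate directories and counts below are invented for illustration): the target directory that received the most files wins; a tie means there is no unique winner, which corresponds to setting non_unique_new_dir.

    #include <stdio.h>

    int main(void)
    {
            const char *dirs[] = { "newdir", "otherdir", "thirddir" };
            int counts[] = { 5, 2, 5 };  /* files moved into each candidate */
            int i, max = 0, bad_max = 0;
            const char *best = NULL;

            for (i = 0; i < 3; i++) {
                    if (counts[i] == max)
                            bad_max = max;  /* remember the tie */
                    else if (counts[i] > max) {
                            max = counts[i];
                            best = dirs[i];
                    }
            }
            if (bad_max == max)
                    printf("no unique winner (non_unique_new_dir)\n");
            else
                    printf("winner: %s\n", best);
            return 0;
    }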
+
+static struct dir_rename_entry *check_dir_renamed(const char *path,
+                                                 struct hashmap *dir_renames)
+{
+       char temp[PATH_MAX];
+       char *end;
+       struct dir_rename_entry *entry;
+
+       strcpy(temp, path);
+       while ((end = strrchr(temp, '/'))) {
+               *end = '\0';
+               entry = dir_rename_find_entry(dir_renames, temp);
+               if (entry)
+                       return entry;
+       }
+       return NULL;
+}
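A small standalone sketch of the ancestor walk in check_dir_renamed() (the renamed directory and sample path are made up): trailing path components are stripped one at a time until an ancestor directory known to have been renamed is found, or the path runs out of components.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char temp[4096];
            char *end;
            const char *renamed_dir = "a/b";  /* pretend this dir was renamed */

            strcpy(temp, "a/b/c/file.c");
            while ((end = strrchr(temp, '/'))) {
                    *end = '\0';  /* temp becomes "a/b/c", then "a/b", ... */
                    if (!strcmp(temp, renamed_dir)) {
                            printf("path is inside renamed directory %s\n", temp);
                            break;
                    }
            }
            return 0;
    }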
+
+static void compute_collisions(struct hashmap *collisions,
+                              struct hashmap *dir_renames,
+                              struct diff_queue_struct *pairs)
+{
+       int i;
+
+       /*
+        * Multiple files can be mapped to the same path due to directory
+        * renames done by the other side of history.  Since that other
+        * side of history could have merged multiple directories into one,
+        * if our side of history added the same file basename to each of
+        * those directories, then all N of them would get implicitly
+        * renamed by the directory rename detection into the same path,
+        * and we'd get an add/add/.../add conflict, with all of those adds
+        * coming from *this* side of history.  This is not representable in the
+        * index, and users aren't going to easily be able to make sense of
+        * it.  So we need to provide a good warning about what's
+        * happening, and fall back to no-directory-rename detection
+        * behavior for those paths.
+        *
+        * See testcases 9e and all of section 5 from t6043 for examples.
+        */
+       collision_init(collisions);
+
+       for (i = 0; i < pairs->nr; ++i) {
+               struct dir_rename_entry *dir_rename_ent;
+               struct collision_entry *collision_ent;
+               char *new_path;
+               struct diff_filepair *pair = pairs->queue[i];
+
+               if (pair->status != 'A' && pair->status != 'R')
+                       continue;
+               dir_rename_ent = check_dir_renamed(pair->two->path,
+                                                  dir_renames);
+               if (!dir_rename_ent)
+                       continue;
+
+               new_path = apply_dir_rename(dir_rename_ent, pair->two->path);
+               if (!new_path)
+                       /*
+                        * dir_rename_ent->non_unique_new_dir is true, which
+                        * means there is no directory rename for us to use,
+                        * which means it won't cause us any additional
+                        * collisions.
+                        */
+                       continue;
+               collision_ent = collision_find_entry(collisions, new_path);
+               if (!collision_ent) {
+                       collision_ent = xcalloc(1,
+                                               sizeof(struct collision_entry));
+                       hashmap_entry_init(collision_ent, strhash(new_path));
+                       hashmap_put(collisions, collision_ent);
+                       collision_ent->target_file = new_path;
+               } else {
+                       free(new_path);
+               }
+               string_list_insert(&collision_ent->source_files,
+                                  pair->two->path);
+       }
+}
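A toy sketch of the collision idea described above (the hard-coded target paths are invented): when two or more source files would be implicitly renamed onto the same post-rename path, that path is a collision and directory rename detection must back off for it.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* post-rename target paths for three hypothetical source files */
            const char *targets[] = { "newdir/file.c", "newdir/file.c", "newdir/other.c" };
            int i, j, n = 3;

            for (i = 0; i < n; i++)
                    for (j = i + 1; j < n; j++)
                            if (!strcmp(targets[i], targets[j]))
                                    printf("collision at %s\n", targets[i]);
            return 0;
    }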
+
+static char *check_for_directory_rename(struct merge_options *o,
+                                       const char *path,
+                                       struct tree *tree,
+                                       struct hashmap *dir_renames,
+                                       struct hashmap *dir_rename_exclusions,
+                                       struct hashmap *collisions,
+                                       int *clean_merge)
+{
+       char *new_path = NULL;
+       struct dir_rename_entry *entry = check_dir_renamed(path, dir_renames);
+       struct dir_rename_entry *oentry = NULL;
+
+       if (!entry)
+               return new_path;
+
+       /*
+        * This next part is a little weird.  We do not want to do an
+        * implicit rename into a directory we renamed on our side, because
+        * that will result in a spurious rename/rename(1to2) conflict.  An
+        * example:
+        *   Base commit: dumbdir/afile, otherdir/bfile
+        *   Side 1:      smrtdir/afile, otherdir/bfile
+        *   Side 2:      dumbdir/afile, dumbdir/bfile
+        * Here, while working on Side 1, we could notice that otherdir was
+        * renamed/merged to dumbdir, and change the diff_filepair for
+        * otherdir/bfile into a rename into dumbdir/bfile.  However, Side
+        * 2 will notice the rename from dumbdir to smrtdir, and do the
+        * transitive rename to move it from dumbdir/bfile to
+        * smrtdir/bfile.  That gives us bfile in dumbdir vs being in
+        * smrtdir, a rename/rename(1to2) conflict.  We really just want
+        * the file to end up in smrtdir.  And the way to achieve that is
+        * to not let Side1 do the rename to dumbdir, since we know that is
+        * the source of one of our directory renames.
+        *
+        * That's why oentry and dir_rename_exclusions are here.
+        *
+        * As it turns out, this also prevents N-way transient rename
+        * confusion; see testcases 9c and 9d of t6043.
+        */
+       oentry = dir_rename_find_entry(dir_rename_exclusions, entry->new_dir.buf);
+       if (oentry) {
+               output(o, 1, _("WARNING: Avoiding applying %s -> %s rename "
+                              "to %s, because %s itself was renamed."),
+                      entry->dir, entry->new_dir.buf, path, entry->new_dir.buf);
+       } else {
+               new_path = handle_path_level_conflicts(o, path, entry,
+                                                      collisions, tree);
+               *clean_merge &= (new_path != NULL);
+       }
+
+       return new_path;
+}
+
+static void apply_directory_rename_modifications(struct merge_options *o,
+                                                struct diff_filepair *pair,
+                                                char *new_path,
+                                                struct rename *re,
+                                                struct tree *tree,
+                                                struct tree *o_tree,
+                                                struct tree *a_tree,
+                                                struct tree *b_tree,
+                                                struct string_list *entries,
+                                                int *clean)
+{
+       struct string_list_item *item;
+       int stage = (tree == a_tree ? 2 : 3);
+       int update_wd;
+
+       /*
+        * In all cases where we can do directory rename detection,
+        * unpack_trees() will have read pair->two->path into the
+        * index and the working copy.  We need to remove it so that
+        * we can instead place it at new_path.  It is guaranteed to
+        * not be untracked (unpack_trees() would have errored out
+        * saying the file would have been overwritten), but it might
+        * still be dirty.
+        */
+       update_wd = !was_dirty(o, pair->two->path);
+       if (!update_wd)
+               output(o, 1, _("Refusing to lose dirty file at %s"),
+                      pair->two->path);
+       remove_file(o, 1, pair->two->path, !update_wd);
+
+       /* Find or create a new re->dst_entry */
+       item = string_list_lookup(entries, new_path);
+       if (item) {
+               /*
+                * Since we're renaming on this side of history, and it's
+                * due to a directory rename on the other side of history
+                * (which we only allow when the directory in question no
+                * longer exists on the other side of history), the
+                * original entry for re->dst_entry is no longer
+                * necessary...
+                */
+               re->dst_entry->processed = 1;
+
+               /*
+                * ...because we'll be using this new one.
+                */
+               re->dst_entry = item->util;
+       } else {
+               /*
+                * re->dst_entry is for the before-dir-rename path, and we
+                * need it to hold information for the after-dir-rename
+                * path.  Before creating a new entry, we need to mark the
+                * old one as unnecessary (...unless it is shared by
+                * src_entry, i.e. this didn't use to be a rename, in which
+                * case we can just allow the normal processing to happen
+                * for it).
+                */
+               if (pair->status == 'R')
+                       re->dst_entry->processed = 1;
+
+               re->dst_entry = insert_stage_data(new_path,
+                                                 o_tree, a_tree, b_tree,
+                                                 entries);
+               item = string_list_insert(entries, new_path);
+               item->util = re->dst_entry;
+       }
+
+       /*
+        * Update the stage_data with the information about the path we are
+        * moving into place.  That slot will be empty and available for us
+        * to write to because of the collision checks in
+        * handle_path_level_conflicts().  In other words,
+        * re->dst_entry->stages[stage].oid will be the null_oid, so it's
+        * open for us to write to.
+        *
+        * It may be tempting to actually update the index at this point as
+        * well, using update_stages_for_stage_data(), but as per the big
+        * "NOTE" in update_stages(), doing so will modify the current
+        * in-memory index which will break calls to would_lose_untracked()
+        * that we need to make.  Instead, we need to just make sure that
+        * the various conflict_rename_*() functions update the index
+        * explicitly rather than relying on unpack_trees() to have done it.
+        */
+       get_tree_entry(&tree->object.oid,
+                      pair->two->path,
+                      &re->dst_entry->stages[stage].oid,
+                      &re->dst_entry->stages[stage].mode);
+
+       /* Update pair status */
+       if (pair->status == 'A') {
+               /*
+                * Recording rename information for this add makes it look
+                * like a rename/delete conflict.  Make sure we can
+                * correctly handle this as an add that was moved to a new
+                * directory instead of reporting a rename/delete conflict.
+                */
+               re->add_turned_into_rename = 1;
+       }
+       /*
+        * We don't actually look at pair->status again, but it seems
+        * pedagogically correct to adjust it.
+        */
+       pair->status = 'R';
+
+       /*
+        * Finally, record the new location.
+        */
+       pair->two->path = new_path;
+}
+
+/*
+ * Get information about all renames that occurred in 'pairs', making use of
+ * any implicit directory renames inferred from the other side of history.
+ * We need the three trees in the merge ('o_tree', 'a_tree' and 'b_tree')
+ * to be able to associate the correct cache entries with the rename
+ * information; tree is always equal to either a_tree or b_tree.
+ */
+static struct string_list *get_renames(struct merge_options *o,
+                                      struct diff_queue_struct *pairs,
+                                      struct hashmap *dir_renames,
+                                      struct hashmap *dir_rename_exclusions,
+                                      struct tree *tree,
+                                      struct tree *o_tree,
+                                      struct tree *a_tree,
+                                      struct tree *b_tree,
+                                      struct string_list *entries,
+                                      int *clean_merge)
+{
+       int i;
+       struct hashmap collisions;
+       struct hashmap_iter iter;
+       struct collision_entry *e;
+       struct string_list *renames;
+
+       compute_collisions(&collisions, dir_renames, pairs);
+       renames = xcalloc(1, sizeof(struct string_list));
+
+       for (i = 0; i < pairs->nr; ++i) {
+               struct string_list_item *item;
+               struct rename *re;
+               struct diff_filepair *pair = pairs->queue[i];
+               char *new_path; /* non-NULL only with directory renames */
+
+               if (pair->status != 'A' && pair->status != 'R') {
+                       diff_free_filepair(pair);
+                       continue;
+               }
+               new_path = check_for_directory_rename(o, pair->two->path, tree,
+                                                     dir_renames,
+                                                     dir_rename_exclusions,
+                                                     &collisions,
+                                                     clean_merge);
+               if (pair->status != 'R' && !new_path) {
+                       diff_free_filepair(pair);
+                       continue;
+               }
+
+               re = xmalloc(sizeof(*re));
+               re->processed = 0;
+               re->add_turned_into_rename = 0;
+               re->pair = pair;
+               item = string_list_lookup(entries, re->pair->one->path);
+               if (!item)
+                       re->src_entry = insert_stage_data(re->pair->one->path,
+                                       o_tree, a_tree, b_tree, entries);
+               else
+                       re->src_entry = item->util;
+
+               item = string_list_lookup(entries, re->pair->two->path);
+               if (!item)
+                       re->dst_entry = insert_stage_data(re->pair->two->path,
+                                       o_tree, a_tree, b_tree, entries);
+               else
+                       re->dst_entry = item->util;
+               item = string_list_insert(renames, pair->one->path);
+               item->util = re;
+               if (new_path)
+                       apply_directory_rename_modifications(o, pair, new_path,
+                                                            re, tree, o_tree,
+                                                            a_tree, b_tree,
+                                                            entries,
+                                                            clean_merge);
+       }
+
+       hashmap_iter_init(&collisions, &iter);
+       while ((e = hashmap_iter_next(&iter))) {
+               free(e->target_file);
+               string_list_clear(&e->source_files, 0);
+       }
+       hashmap_free(&collisions, 1);
+       return renames;
+}
+
 static int process_renames(struct merge_options *o,
                           struct string_list *a_renames,
                           struct string_list *b_renames)
@@ -1538,7 +2486,7 @@ static int process_renames(struct merge_options *o,
                         * add-source case).
                         */
                        remove_file(o, 1, ren1_src,
-                                   renamed_stage == 2 || !was_tracked(ren1_src));
+                                   renamed_stage == 2 || !was_tracked(o, ren1_src));
 
                        oidcpy(&src_other.oid,
                               &ren1->src_entry->stages[other_stage].oid);
@@ -1548,7 +2496,19 @@ static int process_renames(struct merge_options *o,
                        dst_other.mode = ren1->dst_entry->stages[other_stage].mode;
                        try_merge = 0;
 
-                       if (oid_eq(&src_other.oid, &null_oid)) {
+                       if (oid_eq(&src_other.oid, &null_oid) &&
+                           ren1->add_turned_into_rename) {
+                               setup_rename_conflict_info(RENAME_DIR,
+                                                          ren1->pair,
+                                                          NULL,
+                                                          branch1,
+                                                          branch2,
+                                                          ren1->dst_entry,
+                                                          NULL,
+                                                          o,
+                                                          NULL,
+                                                          NULL);
+                       } else if (oid_eq(&src_other.oid, &null_oid)) {
                                setup_rename_conflict_info(RENAME_DELETE,
                                                           ren1->pair,
                                                           NULL,
@@ -1645,6 +2605,105 @@ static int process_renames(struct merge_options *o,
        return clean_merge;
 }
 
+struct rename_info {
+       struct string_list *head_renames;
+       struct string_list *merge_renames;
+};
+
+static void initial_cleanup_rename(struct diff_queue_struct *pairs,
+                                  struct hashmap *dir_renames)
+{
+       struct hashmap_iter iter;
+       struct dir_rename_entry *e;
+
+       hashmap_iter_init(dir_renames, &iter);
+       while ((e = hashmap_iter_next(&iter))) {
+               free(e->dir);
+               strbuf_release(&e->new_dir);
+               /* possible_new_dirs already cleared in get_directory_renames */
+       }
+       hashmap_free(dir_renames, 1);
+       free(dir_renames);
+
+       free(pairs->queue);
+       free(pairs);
+}
+
+static int handle_renames(struct merge_options *o,
+                         struct tree *common,
+                         struct tree *head,
+                         struct tree *merge,
+                         struct string_list *entries,
+                         struct rename_info *ri)
+{
+       struct diff_queue_struct *head_pairs, *merge_pairs;
+       struct hashmap *dir_re_head, *dir_re_merge;
+       int clean = 1;
+
+       ri->head_renames = NULL;
+       ri->merge_renames = NULL;
+
+       if (!o->detect_rename)
+               return 1;
+
+       head_pairs = get_diffpairs(o, common, head);
+       merge_pairs = get_diffpairs(o, common, merge);
+
+       dir_re_head = get_directory_renames(head_pairs, head);
+       dir_re_merge = get_directory_renames(merge_pairs, merge);
+
+       handle_directory_level_conflicts(o,
+                                        dir_re_head, head,
+                                        dir_re_merge, merge);
+
+       ri->head_renames  = get_renames(o, head_pairs,
+                                       dir_re_merge, dir_re_head, head,
+                                       common, head, merge, entries,
+                                       &clean);
+       if (clean < 0)
+               goto cleanup;
+       ri->merge_renames = get_renames(o, merge_pairs,
+                                       dir_re_head, dir_re_merge, merge,
+                                       common, head, merge, entries,
+                                       &clean);
+       if (clean < 0)
+               goto cleanup;
+       clean &= process_renames(o, ri->head_renames, ri->merge_renames);
+
+cleanup:
+       /*
+        * Some cleanup is deferred until cleanup_renames() because the
+        * data structures are still needed and referenced in
+        * process_entry().  But there are a few things we can free now.
+        */
+       initial_cleanup_rename(head_pairs, dir_re_head);
+       initial_cleanup_rename(merge_pairs, dir_re_merge);
+
+       return clean;
+}
+
+static void final_cleanup_rename(struct string_list *rename)
+{
+       const struct rename *re;
+       int i;
+
+       if (rename == NULL)
+               return;
+
+       for (i = 0; i < rename->nr; i++) {
+               re = rename->items[i].util;
+               diff_free_filepair(re->pair);
+       }
+       string_list_clear(rename, 1);
+       free(rename);
+}
+
+static void final_cleanup_renames(struct rename_info *re_info)
+{
+       final_cleanup_rename(re_info->head_renames);
+       final_cleanup_rename(re_info->merge_renames);
+}
+
 static struct object_id *stage_oid(const struct object_id *oid, unsigned mode)
 {
        return (is_null_oid(oid) || mode == 0) ? NULL: (struct object_id *)oid;
@@ -1656,7 +2715,7 @@ static int read_oid_strbuf(struct merge_options *o,
        void *buf;
        enum object_type type;
        unsigned long size;
-       buf = read_sha1_file(oid->hash, &type, &size);
+       buf = read_object_file(oid, &type, &size);
        if (!buf)
                return err(o, _("cannot read object %s"), oid_to_hex(oid));
        if (type != OBJ_BLOB) {
@@ -1735,6 +2794,7 @@ static int handle_modify_delete(struct merge_options *o,
 
 static int merge_content(struct merge_options *o,
                         const char *path,
+                        int is_dirty,
                         struct object_id *o_oid, int o_mode,
                         struct object_id *a_oid, int a_mode,
                         struct object_id *b_oid, int b_mode,
@@ -1775,29 +2835,26 @@ static int merge_content(struct merge_options *o,
                               S_ISGITLINK(pair1->two->mode)))
                        df_conflict_remains = 1;
        }
-       if (merge_file_special_markers(o, &one, &a, &b,
+       if (merge_file_special_markers(o, &one, &a, &b, path,
                                       o->branch1, path1,
                                       o->branch2, path2, &mfi))
                return -1;
 
-       if (mfi.clean && !df_conflict_remains &&
-           oid_eq(&mfi.oid, a_oid) && mfi.mode == a_mode) {
-               int path_renamed_outside_HEAD;
+       /*
+        * We can skip updating the working tree file iff:
+        *   a) The merge is clean
+        *   b) The merge matches what was in HEAD (content, mode, pathname)
+        *   c) The target path is usable (i.e. not involved in D/F conflict)
+        */
+       if (mfi.clean &&
+           was_tracked_and_matches(o, path, &mfi.oid, mfi.mode) &&
+           !df_conflict_remains) {
                output(o, 3, _("Skipped %s (merged same as existing)"), path);
-               /*
-                * The content merge resulted in the same file contents we
-                * already had.  We can return early if those file contents
-                * are recorded at the correct path (which may not be true
-                * if the merge involves a rename).
-                */
-               path_renamed_outside_HEAD = !path2 || !strcmp(path, path2);
-               if (!path_renamed_outside_HEAD) {
-                       add_cacheinfo(o, mfi.mode, &mfi.oid, path,
-                                     0, (!o->call_depth), 0);
-                       return mfi.clean;
-               }
-       } else
-               output(o, 2, _("Auto-merging %s"), path);
+               if (add_cacheinfo(o, mfi.mode, &mfi.oid, path,
+                                 0, (!o->call_depth && !is_dirty), 0))
+                       return -1;
+               return mfi.clean;
+       }
 
        if (!mfi.clean) {
                if (S_ISGITLINK(mfi.mode))
@@ -1809,7 +2866,7 @@ static int merge_content(struct merge_options *o,
                                return -1;
        }
 
-       if (df_conflict_remains) {
+       if (df_conflict_remains || is_dirty) {
                char *new_path;
                if (o->call_depth) {
                        remove_file_from_cache(path);
@@ -1818,7 +2875,7 @@ static int merge_content(struct merge_options *o,
                                if (update_stages(o, path, &one, &a, &b))
                                        return -1;
                        } else {
-                               int file_from_stage2 = was_tracked(path);
+                               int file_from_stage2 = was_tracked(o, path);
                                struct diff_filespec merged;
                                oidcpy(&merged.oid, &mfi.oid);
                                merged.mode = mfi.mode;
@@ -1831,6 +2888,10 @@ static int merge_content(struct merge_options *o,
 
                }
                new_path = unique_path(o, path, rename_conflict_info->branch1);
+               if (is_dirty) {
+                       output(o, 1, _("Refusing to lose dirty file at %s"),
+                              path);
+               }
                output(o, 1, _("Adding as %s instead"), new_path);
                if (update_file(o, 0, &mfi.oid, mfi.mode, new_path)) {
                        free(new_path);
@@ -1840,7 +2901,20 @@ static int merge_content(struct merge_options *o,
                mfi.clean = 0;
        } else if (update_file(o, mfi.clean, &mfi.oid, mfi.mode, path))
                return -1;
-       return mfi.clean;
+       return !is_dirty && mfi.clean;
+}
+
+static int conflict_rename_normal(struct merge_options *o,
+                                 const char *path,
+                                 struct object_id *o_oid, unsigned int o_mode,
+                                 struct object_id *a_oid, unsigned int a_mode,
+                                 struct object_id *b_oid, unsigned int b_mode,
+                                 struct rename_conflict_info *ci)
+{
+       /* Merge the content and write it out */
+       return merge_content(o, path, was_dirty(o, path),
+                            o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
+                            ci);
 }
 
 /* Per entry merge function */
@@ -1862,9 +2936,20 @@ static int process_entry(struct merge_options *o,
                switch (conflict_info->rename_type) {
                case RENAME_NORMAL:
                case RENAME_ONE_FILE_TO_ONE:
-                       clean_merge = merge_content(o, path,
-                                                   o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
-                                                   conflict_info);
+                       clean_merge = conflict_rename_normal(o,
+                                                            path,
+                                                            o_oid, o_mode,
+                                                            a_oid, a_mode,
+                                                            b_oid, b_mode,
+                                                            conflict_info);
+                       break;
+               case RENAME_DIR:
+                       clean_merge = 1;
+                       if (conflict_rename_dir(o,
+                                               conflict_info->pair1,
+                                               conflict_info->branch1,
+                                               conflict_info->branch2))
+                               clean_merge = -1;
                        break;
                case RENAME_DELETE:
                        clean_merge = 0;
@@ -1952,7 +3037,8 @@ static int process_entry(struct merge_options *o,
        } else if (a_oid && b_oid) {
                /* Case C: Added in both (check for same permissions) and */
                /* case D: Modified in both, but differently. */
-               clean_merge = merge_content(o, path,
+               int is_dirty = 0; /* unpack_trees would have bailed if dirty */
+               clean_merge = merge_content(o, path, is_dirty,
                                            o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
                                            NULL);
        } else if (!o_oid && !a_oid && !b_oid) {
@@ -1993,7 +3079,7 @@ int merge_trees(struct merge_options *o,
                return 1;
        }
 
-       code = git_merge_trees(o->call_depth, common, head, merge);
+       code = git_merge_trees(o, common, head, merge);
 
        if (code != 0) {
                if (show(o, 4) || o->call_depth)
@@ -2004,7 +3090,8 @@ int merge_trees(struct merge_options *o,
        }
 
        if (unmerged_cache()) {
-               struct string_list *entries, *re_head, *re_merge;
+               struct string_list *entries;
+               struct rename_info re_info;
                int i;
                /*
                 * Only need the hashmap while processing entries, so
@@ -2018,9 +3105,8 @@ int merge_trees(struct merge_options *o,
                get_files_dirs(o, merge);
 
                entries = get_unmerged();
-               re_head  = get_renames(o, head, common, head, merge, entries);
-               re_merge = get_renames(o, merge, common, head, merge, entries);
-               clean = process_renames(o, re_head, re_merge);
+               clean = handle_renames(o, common, head, merge, entries,
+                                      &re_info);
                record_df_conflict_files(o, entries);
                if (clean < 0)
                        goto cleanup;
@@ -2045,22 +3131,28 @@ int merge_trees(struct merge_options *o,
                }
 
 cleanup:
-               string_list_clear(re_merge, 0);
-               string_list_clear(re_head, 0);
+               final_cleanup_renames(&re_info);
+
                string_list_clear(entries, 1);
+               free(entries);
 
                hashmap_free(&o->current_file_dir_set, 1);
 
-               free(re_merge);
-               free(re_head);
-               free(entries);
-
                if (clean < 0)
                        return clean;
        }
        else
                clean = 1;
 
+       /* Free the extra index left from git_merge_trees() */
+       /*
+        * FIXME: Need to also free data allocated by
+        * setup_unpack_trees_porcelain() tucked away in o->unpack_opts.msgs,
+        * but the problem is that only half of it refers to dynamically
+        * allocated data, while the other half points at static strings.
+        */
+       discard_index(&o->orig_index);
+
        if (o->call_depth && !(*result = write_tree_from_memory(o)))
                return -1;
 
@@ -2154,7 +3246,8 @@ int merge_recursive(struct merge_options *o,
                read_cache();
 
        o->ancestor = "merged common ancestors";
-       clean = merge_trees(o, h1->tree, h2->tree, merged_common_ancestors->tree,
+       clean = merge_trees(o, get_commit_tree(h1), get_commit_tree(h2),
+                           get_commit_tree(merged_common_ancestors),
                            &mrtree);
        if (clean < 0) {
                flush_output(o);
index 80d69d140195cc3ba1054050569e56bfc0277b56..248093e407c7744b1e3e9a5bd5780375a1c6c07f 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef MERGE_RECURSIVE_H
 #define MERGE_RECURSIVE_H
 
+#include "unpack-trees.h"
 #include "string-list.h"
 
 struct merge_options {
@@ -27,6 +28,33 @@ struct merge_options {
        struct strbuf obuf;
        struct hashmap current_file_dir_set;
        struct string_list df_conflict_file_set;
+       struct unpack_trees_options unpack_opts;
+       struct index_state orig_index;
+};
+
+/*
+ * For dir_rename_entry, directory names are stored as a full path from the
+ * toplevel of the repository and do not include a trailing '/'.  Also:
+ *
+ *   dir:                original name of directory being renamed
+ *   non_unique_new_dir: if true, could not determine new_dir
+ *   new_dir:            final name of directory being renamed
+ *   possible_new_dirs:  temporary list used to help determine new_dir; see comments
+ *                       in get_directory_renames() for details
+ */
+struct dir_rename_entry {
+       struct hashmap_entry ent; /* must be the first member! */
+       char *dir;
+       unsigned non_unique_new_dir:1;
+       struct strbuf new_dir;
+       struct string_list possible_new_dirs;
+};
+
+struct collision_entry {
+       struct hashmap_entry ent; /* must be the first member! */
+       char *target_file;
+       struct string_list source_files;
+       unsigned reported_already:1;
 };
 
 /* merge_trees() but with recursive ancestor consolidation */
diff --git a/mergetools/guiffy b/mergetools/guiffy
new file mode 100644 (file)
index 0000000..8b23a13
--- /dev/null
@@ -0,0 +1,18 @@
+diff_cmd () {
+       "$merge_tool_path" "$LOCAL" "$REMOTE"
+}
+
+merge_cmd () {
+       if $base_present
+       then
+               "$merge_tool_path" -s "$LOCAL" \
+               "$REMOTE" "$BASE" "$MERGED"
+       else
+               "$merge_tool_path" -m "$LOCAL" \
+               "$REMOTE" "$MERGED"
+       fi
+}
+
+exit_code_trustable () {
+       true
+}
index 398e61d5e943b1e6e143f159739f83fc0df9b6b8..e61988e503b0b2097afeb6716123d88429c0717d 100644 (file)
@@ -77,7 +77,7 @@ char *notes_cache_get(struct notes_cache *c, struct object_id *key_oid,
        value_oid = get_note(&c->tree, key_oid);
        if (!value_oid)
                return NULL;
-       value = read_sha1_file(value_oid->hash, &type, &size);
+       value = read_object_file(value_oid, &type, &size);
 
        *outsize = size;
        return value;
index c09c5e0e474a30b0a8bf3d91063c8bbc61e63557..e06d71ea47c00b9faf2d427752c6d3584df8c453 100644 (file)
@@ -322,7 +322,7 @@ static void write_note_to_worktree(const struct object_id *obj,
 {
        enum object_type type;
        unsigned long size;
-       void *buf = read_sha1_file(note->hash, &type, &size);
+       void *buf = read_object_file(note, &type, &size);
 
        if (!buf)
                die("cannot read note %s for object %s",
@@ -600,14 +600,14 @@ int notes_merge(struct notes_merge_options *o,
                        printf("No merge base found; doing history-less merge\n");
        } else if (!bases->next) {
                base_oid = &bases->item->object.oid;
-               base_tree_oid = &bases->item->tree->object.oid;
+               base_tree_oid = get_commit_tree_oid(bases->item);
                if (o->verbosity >= 4)
                        printf("One merge base found (%.7s)\n",
                               oid_to_hex(base_oid));
        } else {
                /* TODO: How to handle multiple merge-bases? */
                base_oid = &bases->item->object.oid;
-               base_tree_oid = &bases->item->tree->object.oid;
+               base_tree_oid = get_commit_tree_oid(bases->item);
                if (o->verbosity >= 3)
                        printf("Multiple merge bases found. Using the first "
                                "(%.7s)\n", oid_to_hex(base_oid));
@@ -634,8 +634,9 @@ int notes_merge(struct notes_merge_options *o,
                goto found_result;
        }
 
-       result = merge_from_diffs(o, base_tree_oid, &local->tree->object.oid,
-                                 &remote->tree->object.oid, local_tree);
+       result = merge_from_diffs(o, base_tree_oid,
+                                 get_commit_tree_oid(local),
+                                 get_commit_tree_oid(remote), local_tree);
 
        if (result != 0) { /* non-trivial merge (with or without conflicts) */
                /* Commit (partial) result */
diff --git a/notes.c b/notes.c
index ce9a8f53f8668bbbf790f3b2d20f641fc034c301..a386d450c4c812ef30d0fc661fe2c03e1d062a83 100644 (file)
--- a/notes.c
+++ b/notes.c
@@ -796,13 +796,13 @@ int combine_notes_concatenate(struct object_id *cur_oid,
 
        /* read in both note blob objects */
        if (!is_null_oid(new_oid))
-               new_msg = read_sha1_file(new_oid->hash, &new_type, &new_len);
+               new_msg = read_object_file(new_oid, &new_type, &new_len);
        if (!new_msg || !new_len || new_type != OBJ_BLOB) {
                free(new_msg);
                return 0;
        }
        if (!is_null_oid(cur_oid))
-               cur_msg = read_sha1_file(cur_oid->hash, &cur_type, &cur_len);
+               cur_msg = read_object_file(cur_oid, &cur_type, &cur_len);
        if (!cur_msg || !cur_len || cur_type != OBJ_BLOB) {
                free(cur_msg);
                free(new_msg);
@@ -858,7 +858,7 @@ static int string_list_add_note_lines(struct string_list *list,
                return 0;
 
        /* read_sha1_file NUL-terminates */
-       data = read_sha1_file(oid->hash, &t, &len);
+       data = read_object_file(oid, &t, &len);
        if (t != OBJ_BLOB || !data || !len) {
                free(data);
                return t != OBJ_BLOB || !data;
@@ -1012,7 +1012,7 @@ void init_notes(struct notes_tree *t, const char *notes_ref,
                return;
        if (flags & NOTES_INIT_WRITABLE && read_ref(notes_ref, &object_oid))
                die("Cannot use notes ref %s", notes_ref);
-       if (get_tree_entry(object_oid.hash, "", oid.hash, &mode))
+       if (get_tree_entry(&object_oid, "", &oid, &mode))
                die("Failed to read notes tree referenced by %s (%s)",
                    notes_ref, oid_to_hex(&object_oid));
 
@@ -1217,7 +1217,7 @@ static void format_note(struct notes_tree *t, const struct object_id *object_oid
        if (!oid)
                return;
 
-       if (!(msg = read_sha1_file(oid->hash, &type, &msglen)) || type != OBJ_BLOB) {
+       if (!(msg = read_object_file(oid, &type, &msglen)) || type != OBJ_BLOB) {
                free(msg);
                return;
        }
diff --git a/object-store.h b/object-store.h
new file mode 100644 (file)
index 0000000..d683112
--- /dev/null
@@ -0,0 +1,142 @@
+#ifndef OBJECT_STORE_H
+#define OBJECT_STORE_H
+
+#include "oidmap.h"
+
+struct alternate_object_database {
+       struct alternate_object_database *next;
+
+       /* see alt_scratch_buf() */
+       struct strbuf scratch;
+       size_t base_len;
+
+       /*
+        * Used to store the results of readdir(3) calls when searching
+        * for unique abbreviated hashes.  This cache is never
+        * invalidated, thus it's racy and not necessarily accurate.
+        * That's fine for its purpose; don't use it for tasks requiring
+        * greater accuracy!
+        */
+       char loose_objects_subdir_seen[256];
+       struct oid_array loose_objects_cache;
+
+       /*
+        * Path to the alternative object store. If this is a relative path,
+        * it is relative to the current working directory.
+        */
+       char path[FLEX_ARRAY];
+};
+void prepare_alt_odb(struct repository *r);
+char *compute_alternate_path(const char *path, struct strbuf *err);
+typedef int alt_odb_fn(struct alternate_object_database *, void *);
+int foreach_alt_odb(alt_odb_fn, void*);
+
+/*
+ * Allocate a "struct alternate_object_database" but do _not_ actually
+ * add it to the list of alternates.
+ */
+struct alternate_object_database *alloc_alt_odb(const char *dir);
+
+/*
+ * Add the directory to the on-disk alternates file; the new entry will also
+ * take effect in the current process.
+ */
+void add_to_alternates_file(const char *dir);
+
+/*
+ * Add the directory to the in-memory list of alternates (along with any
+ * recursive alternates it points to), but do not modify the on-disk alternates
+ * file.
+ */
+void add_to_alternates_memory(const char *dir);
+
+/*
+ * Returns a scratch strbuf pre-filled with the alternate object directory,
+ * including a trailing slash, which can be used to access paths in the
+ * alternate. Always use this over direct access to alt->scratch, as it
+ * cleans up any previous use of the scratch buffer.
+ */
+struct strbuf *alt_scratch_buf(struct alternate_object_database *alt);
+
+struct packed_git {
+       struct packed_git *next;
+       struct list_head mru;
+       struct pack_window *windows;
+       off_t pack_size;
+       const void *index_data;
+       size_t index_size;
+       uint32_t num_objects;
+       uint32_t num_bad_objects;
+       unsigned char *bad_object_sha1;
+       int index_version;
+       time_t mtime;
+       int pack_fd;
+       int index;              /* for builtin/pack-objects.c */
+       unsigned pack_local:1,
+                pack_keep:1,
+                pack_keep_in_core:1,
+                freshened:1,
+                do_not_close:1,
+                pack_promisor:1;
+       unsigned char sha1[20];
+       struct revindex_entry *revindex;
+       /* something like ".git/objects/pack/xxxxx.pack" */
+       char pack_name[FLEX_ARRAY]; /* more */
+};
+
+struct raw_object_store {
+       /*
+        * Path to the repository's object store.
+        * Cannot be NULL after initialization.
+        */
+       char *objectdir;
+
+       /* Path to extra alternate object database if not NULL */
+       char *alternate_db;
+
+       struct alternate_object_database *alt_odb_list;
+       struct alternate_object_database **alt_odb_tail;
+
+       /*
+        * Objects that should be substituted by other objects
+        * (see git-replace(1)).
+        */
+       struct oidmap *replace_map;
+
+       /*
+        * private data
+        *
+        * should only be accessed directly by packfile.c
+        */
+
+       struct packed_git *packed_git;
+       /* A most-recently-used ordered version of the packed_git list. */
+       struct list_head packed_git_mru;
+
+       /*
+        * A fast, rough count of the number of objects in the repository.
+        * These two fields are not meant for direct access. Use
+        * approximate_object_count() instead.
+        */
+       unsigned long approximate_object_count;
+       unsigned approximate_object_count_valid : 1;
+
+       /*
+        * Whether packed_git has already been populated with this repository's
+        * packs.
+        */
+       unsigned packed_git_initialized : 1;
+};
+
+struct raw_object_store *raw_object_store_new(void);
+void raw_object_store_clear(struct raw_object_store *o);
+
+/*
+ * Put in `buf` the name of the file in the local object database that
+ * would be used to store a loose object with the specified sha1.
+ */
+void sha1_file_name(struct repository *r, struct strbuf *buf, const unsigned char *sha1);
+
+void *map_sha1_file(struct repository *r, const unsigned char *sha1, unsigned long *size);
+
+#endif /* OBJECT_STORE_H */
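As an aside on sha1_file_name() declared above: loose objects live under the object directory in a fan-out layout, with the first two hex digits of the object name forming a subdirectory and the remaining digits forming the file name.  A tiny sketch (the object directory and hash value are just examples):

    #include <stdio.h>

    int main(void)
    {
            const char *objectdir = ".git/objects";  /* assumed location */
            const char *hex = "e6ad3f61f03a98aa82dd901d96cbc230381c2e90";

            /* prints .git/objects/e6/ad3f61f03a98aa82dd901d96cbc230381c2e90 */
            printf("%s/%.2s/%s\n", objectdir, hex, hex + 2);
            return 0;
    }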
index e6ad3f61f03a98aa82dd901d96cbc230381c2e90..f7f4de3aaf6d42a6dde5811b37aa8239ac183754 100644 (file)
--- a/object.c
+++ b/object.c
@@ -1,9 +1,12 @@
 #include "cache.h"
 #include "object.h"
+#include "replace-object.h"
 #include "blob.h"
 #include "tree.h"
 #include "commit.h"
 #include "tag.h"
+#include "object-store.h"
+#include "packfile.h"
 
 static struct object **obj_hash;
 static int nr_objs, obj_hash_size;
@@ -244,7 +247,7 @@ struct object *parse_object(const struct object_id *oid)
        unsigned long size;
        enum object_type type;
        int eaten;
-       const unsigned char *repl = lookup_replace_object(oid->hash);
+       const struct object_id *repl = lookup_replace_object(the_repository, oid);
        void *buffer;
        struct object *obj;
 
@@ -254,8 +257,8 @@ struct object *parse_object(const struct object_id *oid)
 
        if ((obj && obj->type == OBJ_BLOB && has_object_file(oid)) ||
            (!obj && has_object_file(oid) &&
-            sha1_object_info(oid->hash, NULL) == OBJ_BLOB)) {
-               if (check_sha1_signature(repl, NULL, 0, NULL) < 0) {
+            oid_object_info(the_repository, oid, NULL) == OBJ_BLOB)) {
+               if (check_object_signature(repl, NULL, 0, NULL) < 0) {
                        error("sha1 mismatch %s", oid_to_hex(oid));
                        return NULL;
                }
@@ -263,11 +266,11 @@ struct object *parse_object(const struct object_id *oid)
                return lookup_object(oid->hash);
        }
 
-       buffer = read_sha1_file(oid->hash, &type, &size);
+       buffer = read_object_file(oid, &type, &size);
        if (buffer) {
-               if (check_sha1_signature(repl, buffer, size, type_name(type)) < 0) {
+               if (check_object_signature(repl, buffer, size, type_name(type)) < 0) {
                        free(buffer);
-                       error("sha1 mismatch %s", sha1_to_hex(repl));
+                       error("sha1 mismatch %s", oid_to_hex(repl));
                        return NULL;
                }
 
@@ -445,3 +448,46 @@ void clear_commit_marks_all(unsigned int flags)
                        obj->flags &= ~flags;
        }
 }
+
+struct raw_object_store *raw_object_store_new(void)
+{
+       struct raw_object_store *o = xmalloc(sizeof(*o));
+
+       memset(o, 0, sizeof(*o));
+       INIT_LIST_HEAD(&o->packed_git_mru);
+       return o;
+}
+
+static void free_alt_odb(struct alternate_object_database *alt)
+{
+       strbuf_release(&alt->scratch);
+       oid_array_clear(&alt->loose_objects_cache);
+       free(alt);
+}
+
+static void free_alt_odbs(struct raw_object_store *o)
+{
+       while (o->alt_odb_list) {
+               struct alternate_object_database *next;
+
+               next = o->alt_odb_list->next;
+               free_alt_odb(o->alt_odb_list);
+               o->alt_odb_list = next;
+       }
+}
+
+void raw_object_store_clear(struct raw_object_store *o)
+{
+       FREE_AND_NULL(o->objectdir);
+       FREE_AND_NULL(o->alternate_db);
+
+       oidmap_free(o->replace_map, 1);
+       FREE_AND_NULL(o->replace_map);
+
+       free_alt_odbs(o);
+       o->alt_odb_tail = NULL;
+
+       INIT_LIST_HEAD(&o->packed_git_mru);
+       close_all_packs(o);
+       o->packed_git = NULL;
+}
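
A minimal sketch of the allocate/clear pairing added here, assuming the caller owns the struct (the path below is made up): raw_object_store_clear() releases what the store owns, but not the struct itself.

    #include "cache.h"
    #include "object-store.h"

    static void object_store_lifecycle_demo(void)
    {
            struct raw_object_store *o = raw_object_store_new();

            o->objectdir = xstrdup("/tmp/demo/.git/objects"); /* hypothetical path */

            raw_object_store_clear(o);   /* frees objectdir, alternates, packs... */
            free(o);                     /* ...but the struct stays with the caller */
    }
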
index f13f85b2a94e3afc15debfbaf89416b5cda45acb..5c13955000cdaec252f29a3b93599560c35deb8a 100644 (file)
--- a/object.h
+++ b/object.h
@@ -25,7 +25,6 @@ struct object_array {
 
 #define OBJECT_ARRAY_INIT { 0, 0, NULL }
 
-#define TYPE_BITS   3
 /*
  * object flag allocation:
  * revision.h:               0---------10                                26
@@ -37,7 +36,7 @@ struct object_array {
  * bundle.c:                                        16
  * http-push.c:                                     16-----19
  * commit.c:                                        16-----19
- * sha1_name.c:                                              20
+ * sha1-name.c:                                              20
  * list-objects-filter.c:                                      21
  * builtin/fsck.c:           0--3
  * builtin/index-pack.c:                                     2021
index e01f9928840488b85db2ebc62527d8e4aab81ffc..72d9daec7e62bb924940c4e20763effaa347ea5c 100644 (file)
@@ -48,7 +48,8 @@ void bitmap_writer_show_progress(int show)
 /**
  * Build the initial type index for the packfile
  */
-void bitmap_writer_build_type_index(struct pack_idx_entry **index,
+void bitmap_writer_build_type_index(struct packing_data *to_pack,
+                                   struct pack_idx_entry **index,
                                    uint32_t index_nr)
 {
        uint32_t i;
@@ -57,24 +58,25 @@ void bitmap_writer_build_type_index(struct pack_idx_entry **index,
        writer.trees = ewah_new();
        writer.blobs = ewah_new();
        writer.tags = ewah_new();
+       ALLOC_ARRAY(to_pack->in_pack_pos, to_pack->nr_objects);
 
        for (i = 0; i < index_nr; ++i) {
                struct object_entry *entry = (struct object_entry *)index[i];
                enum object_type real_type;
 
-               entry->in_pack_pos = i;
+               oe_set_in_pack_pos(to_pack, entry, i);
 
-               switch (entry->type) {
+               switch (oe_type(entry)) {
                case OBJ_COMMIT:
                case OBJ_TREE:
                case OBJ_BLOB:
                case OBJ_TAG:
-                       real_type = entry->type;
+                       real_type = oe_type(entry);
                        break;
 
                default:
-                       real_type = sha1_object_info(entry->idx.oid.hash,
-                                                    NULL);
+                       real_type = oid_object_info(the_repository,
+                                                   &entry->idx.oid, NULL);
                        break;
                }
 
@@ -98,7 +100,7 @@ void bitmap_writer_build_type_index(struct pack_idx_entry **index,
                default:
                        die("Missing type information for %s (%d/%d)",
                            oid_to_hex(&entry->idx.oid), real_type,
-                           entry->type);
+                           oe_type(entry));
                }
        }
 }
@@ -147,7 +149,7 @@ static uint32_t find_object_pos(const unsigned char *sha1)
                        "(object %s is missing)", sha1_to_hex(sha1));
        }
 
-       return entry->in_pack_pos;
+       return oe_in_pack_pos(writer.to_pack, entry);
 }
 
 static void show_object(struct object *object, const char *name, void *data)
@@ -535,7 +537,7 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
        if (options & BITMAP_OPT_HASH_CACHE)
                write_hash_cache(f, index, index_nr);
 
-       hashclose(f, NULL, CSUM_FSYNC);
+       finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
 
        if (adjust_shared_perm(tmp_file.buf))
                die_errno("unable to make temporary bitmap file readable");
index 9270983e5f581e40f894a8885396e43d13e71015..c9e90d1bb530cae500fec403cf716c959c74cbcb 100644 (file)
@@ -10,6 +10,8 @@
 #include "pack-revindex.h"
 #include "pack-objects.h"
 #include "packfile.h"
+#include "repository.h"
+#include "object-store.h"
 
 /*
  * An entry on the bitmap index, representing the bitmap for a given
@@ -334,8 +336,7 @@ static int open_pack_bitmap(void)
 
        assert(!bitmap_git.map && !bitmap_git.loaded);
 
-       prepare_packed_git();
-       for (p = packed_git; p; p = p->next) {
+       for (p = get_packed_git(the_repository); p; p = p->next) {
                if (open_pack_bitmap_1(p) == 0)
                        ret = 0;
        }
@@ -1032,7 +1033,7 @@ int rebuild_existing_bitmaps(struct packing_data *mapping,
                oe = packlist_find(mapping, sha1, NULL);
 
                if (oe)
-                       reposition[i] = oe->in_pack_pos + 1;
+                       reposition[i] = oe_in_pack_pos(mapping, oe) + 1;
        }
 
        rebuild = bitmap_new();
index 3742a00e14a0d4da335253b2a76f978edb499d35..5ded2f139a6ccdab725ed5e568b1100906f4b736 100644 (file)
@@ -44,7 +44,9 @@ int rebuild_existing_bitmaps(struct packing_data *mapping, khash_sha1 *reused_bi
 
 void bitmap_writer_show_progress(int show);
 void bitmap_writer_set_checksum(unsigned char *sha1);
-void bitmap_writer_build_type_index(struct pack_idx_entry **index, uint32_t index_nr);
+void bitmap_writer_build_type_index(struct packing_data *to_pack,
+                                   struct pack_idx_entry **index,
+                                   uint32_t index_nr);
 void bitmap_writer_reuse_bitmaps(struct packing_data *to_pack);
 void bitmap_writer_select_commits(struct commit **indexed_commits,
                unsigned int indexed_commits_nr, int max_bitmaps);
index 8fc7dd1694cf1a67da69bc010879e0c54ee403be..d3a57df34f2d2bf0ef9935a51f171f24d8ed6a72 100644 (file)
@@ -1,8 +1,10 @@
 #include "cache.h"
+#include "repository.h"
 #include "pack.h"
 #include "pack-revindex.h"
 #include "progress.h"
 #include "packfile.h"
+#include "object-store.h"
 
 struct idx_entry {
        off_t                offset;
@@ -126,14 +128,14 @@ static int verify_packfile(struct packed_git *p,
 
                if (type == OBJ_BLOB && big_file_threshold <= size) {
                        /*
-                        * Let check_sha1_signature() check it with
+                        * Let check_object_signature() check it with
                         * the streaming interface; no point slurping
                         * the data in-core only to discard.
                         */
                        data = NULL;
                        data_valid = 0;
                } else {
-                       data = unpack_entry(p, entries[i].offset, &type, &size);
+                       data = unpack_entry(the_repository, p, entries[i].offset, &type, &size);
                        data_valid = 1;
                }
 
@@ -141,7 +143,7 @@ static int verify_packfile(struct packed_git *p,
                        err = error("cannot unpack %s from %s at offset %"PRIuMAX"",
                                    oid_to_hex(entries[i].oid.oid), p->pack_name,
                                    (uintmax_t)entries[i].offset);
-               else if (check_sha1_signature(entries[i].oid.hash, data, size, type_name(type)))
+               else if (check_object_signature(entries[i].oid.oid, data, size, type_name(type)))
                        err = error("packed %s from %s is corrupt",
                                    oid_to_hex(entries[i].oid.oid), p->pack_name);
                else if (fn) {
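
A sketch of the renamed verification helper used above, assuming a caller that already holds the buffer; passing a NULL buffer instead lets it stream the object, which is what verify_packfile() relies on for blobs above big_file_threshold. The wrapper itself is hypothetical.

    #include "cache.h"

    /* Hypothetical wrapper; the real callers are in pack-check.c and object.c. */
    static int verify_copy(const struct object_id *oid, void *buf,
                           unsigned long size, enum object_type type)
    {
            return check_object_signature(oid, buf, size, type_name(type));
    }
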
index 9558d13834e2842d32fa4015f2e3c00538d52dcd..e0c460056801b466231fe0daefc950e349058f5a 100644 (file)
@@ -2,6 +2,8 @@
 #include "object.h"
 #include "pack.h"
 #include "pack-objects.h"
+#include "packfile.h"
+#include "config.h"
 
 static uint32_t locate_object_entry_hash(struct packing_data *pdata,
                                         const unsigned char *sha1,
@@ -86,6 +88,66 @@ struct object_entry *packlist_find(struct packing_data *pdata,
        return &pdata->objects[pdata->index[i] - 1];
 }
 
+static void prepare_in_pack_by_idx(struct packing_data *pdata)
+{
+       struct packed_git **mapping, *p;
+       int cnt = 0, nr = 1U << OE_IN_PACK_BITS;
+
+       ALLOC_ARRAY(mapping, nr);
+       /*
+        * oe_in_pack() on an all-zero'd object_entry
+        * (i.e. in_pack_idx also zero) should return NULL.
+        */
+       mapping[cnt++] = NULL;
+       for (p = get_packed_git(the_repository); p; p = p->next, cnt++) {
+               if (cnt == nr) {
+                       free(mapping);
+                       return;
+               }
+               p->index = cnt;
+               mapping[cnt] = p;
+       }
+       pdata->in_pack_by_idx = mapping;
+}
+
+/*
+ * A new pack appears after prepare_in_pack_by_idx() has been
+ * run. This is likely a race.
+ *
+ * We could map this new pack to in_pack_by_idx[] array, but then we
+ * have to deal with full array anyway. And since it's hard to test
+ * this fall back code, just stay simple and fall back to using
+ * in_pack[] array.
+ */
+void oe_map_new_pack(struct packing_data *pack,
+                    struct packed_git *p)
+{
+       uint32_t i;
+
+       REALLOC_ARRAY(pack->in_pack, pack->nr_alloc);
+
+       for (i = 0; i < pack->nr_objects; i++)
+               pack->in_pack[i] = oe_in_pack(pack, pack->objects + i);
+
+       FREE_AND_NULL(pack->in_pack_by_idx);
+}
+
+/* assume pdata is already zero'd by caller */
+void prepare_packing_data(struct packing_data *pdata)
+{
+       if (git_env_bool("GIT_TEST_FULL_IN_PACK_ARRAY", 0)) {
+               /*
+                * do not initialize in_pack_by_idx[] to force the
+                * slow path in oe_in_pack()
+                */
+       } else {
+               prepare_in_pack_by_idx(pdata);
+       }
+
+       pdata->oe_size_limit = git_env_ulong("GIT_TEST_OE_SIZE",
+                                            1U << OE_SIZE_BITS);
+}
+
 struct object_entry *packlist_alloc(struct packing_data *pdata,
                                    const unsigned char *sha1,
                                    uint32_t index_pos)
@@ -95,6 +157,9 @@ struct object_entry *packlist_alloc(struct packing_data *pdata,
        if (pdata->nr_objects >= pdata->nr_alloc) {
                pdata->nr_alloc = (pdata->nr_alloc  + 1024) * 3 / 2;
                REALLOC_ARRAY(pdata->objects, pdata->nr_alloc);
+
+               if (!pdata->in_pack_by_idx)
+                       REALLOC_ARRAY(pdata->in_pack, pdata->nr_alloc);
        }
 
        new_entry = pdata->objects + pdata->nr_objects++;
@@ -107,5 +172,8 @@ struct object_entry *packlist_alloc(struct packing_data *pdata,
        else
                pdata->index[index_pos] = pdata->nr_objects;
 
+       if (pdata->in_pack)
+               pdata->in_pack[pdata->nr_objects - 1] = NULL;
+
        return new_entry;
 }
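
A sketch of the intended call order for the new setup hook, with a caller-provided object id: packing_data must be zeroed first, and the GIT_TEST_* variables only change which internal representation gets picked. The demo function is not part of this change.

    #include "cache.h"
    #include "pack-objects.h"

    static void packing_data_demo(const struct object_id *oid)
    {
            struct packing_data to_pack;
            uint32_t index_pos = 0;

            memset(&to_pack, 0, sizeof(to_pack));
            prepare_packing_data(&to_pack); /* reads GIT_TEST_FULL_IN_PACK_ARRAY, GIT_TEST_OE_SIZE */

            if (!packlist_find(&to_pack, oid->hash, &index_pos))
                    packlist_alloc(&to_pack, oid->hash, index_pos);
    }
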
index 03f1191659dab55b2c4c440c347101a3cdbd4650..edf74dabddfdb2b67bad803d1c898e93a3af4d8b 100644 (file)
 #ifndef PACK_OBJECTS_H
 #define PACK_OBJECTS_H
 
+#include "object-store.h"
+
+#define DEFAULT_DELTA_CACHE_SIZE (256 * 1024 * 1024)
+
+#define OE_DFS_STATE_BITS      2
+#define OE_DEPTH_BITS          12
+#define OE_IN_PACK_BITS                10
+#define OE_Z_DELTA_BITS                20
+/*
+ * Note that oe_set_size() becomes expensive when the given size is
+ * above this limit. Don't lower it too much.
+ */
+#define OE_SIZE_BITS           31
+#define OE_DELTA_SIZE_BITS     20
+
+/*
+ * State flags for depth-first search used for analyzing delta cycles.
+ *
+ * The depth is measured in delta-links to the base (so if A is a delta
+ * against B, then A has a depth of 1, and B a depth of 0).
+ */
+enum dfs_state {
+       DFS_NONE = 0,
+       DFS_ACTIVE,
+       DFS_DONE,
+       DFS_NUM_STATES
+};
+
+/*
+ * The size of struct nearly determines pack-objects's memory
+ * consumption. This struct is packed tight for that reason. When you
+ * add or reorder something in this struct, think a bit about this.
+ *
+ * basic object info
+ * -----------------
+ * idx.oid is filled up before delta searching starts. idx.crc32 is
+ * only valid after the object is written out and will be used for
+ * generating the index. idx.offset will be both gradually set and
+ * used in writing phase (base objects get offset first, then deltas
+ * refer to them)
+ *
+ * "size" is the uncompressed object size. Compressed size of the raw
+ * data for an object in a pack is not stored anywhere but is computed
+ * and made available when reverse .idx is made. Note that when a
+ * delta is reused, "size" is the uncompressed _delta_ size, not the
+ * canonical one after the delta has been applied.
+ *
+ * "hash" contains a path name hash which is used for sorting the
+ * delta list and also during delta searching. Once prepare_pack()
+ * returns it's no longer needed.
+ *
+ * source pack info
+ * ----------------
+ * The (in_pack, in_pack_offset) tuple contains the location of the
+ * object in the source pack. in_pack_header_size allows quickly
+ * skipping the header and going straight to the zlib stream.
+ *
+ * "type" and "in_pack_type" both describe object type. in_pack_type
+ * may contain a delta type, while type is always the canonical type.
+ *
+ * deltas
+ * ------
+ * Delta links (delta, delta_child and delta_sibling) are created to
+ * reflect that delta graph from the source pack then updated or added
+ * during delta searching phase when we find better deltas.
+ *
+ * delta_child and delta_sibling are last needed in
+ * compute_write_order(). "delta" and "delta_size" must remain valid
+ * at object writing phase in case the delta is not cached.
+ *
+ * If a delta is cached in memory and is compressed, delta_data points
+ * to the data and z_delta_size contains the compressed size. If it's
+ * uncompressed [1], z_delta_size must be zero. delta_size is always
+ * the uncompressed size and must be valid even if the delta is not
+ * cached.
+ *
+ * [1] during try_delta phase we don't bother with compressing because
+ * the delta could be quickly replaced with a better one.
+ */
 struct object_entry {
        struct pack_idx_entry idx;
-       unsigned long size;     /* uncompressed size */
-       struct packed_git *in_pack;     /* already in pack */
-       off_t in_pack_offset;
-       struct object_entry *delta;     /* delta base object */
-       struct object_entry *delta_child; /* deltified objects who bases me */
-       struct object_entry *delta_sibling; /* other deltified objects who
-                                            * uses the same base as me
-                                            */
        void *delta_data;       /* cached delta (uncompressed) */
-       unsigned long delta_size;       /* delta data size (uncompressed) */
-       unsigned long z_delta_size;     /* delta data size (compressed) */
-       enum object_type type;
-       enum object_type in_pack_type;  /* could be delta */
+       off_t in_pack_offset;
        uint32_t hash;                  /* name hint hash */
-       unsigned int in_pack_pos;
-       unsigned char in_pack_header_size;
+       unsigned size_:OE_SIZE_BITS;
+       unsigned size_valid:1;
+       uint32_t delta_idx;     /* delta base object */
+       uint32_t delta_child_idx; /* deltified objects who bases me */
+       uint32_t delta_sibling_idx; /* other deltified objects who
+                                    * uses the same base as me
+                                    */
+       unsigned delta_size_:OE_DELTA_SIZE_BITS; /* delta data size (uncompressed) */
+       unsigned delta_size_valid:1;
+       unsigned in_pack_idx:OE_IN_PACK_BITS;   /* already in pack */
+       unsigned z_delta_size:OE_Z_DELTA_BITS;
+       unsigned type_valid:1;
+       unsigned type_:TYPE_BITS;
+       unsigned no_try_delta:1;
+       unsigned in_pack_type:TYPE_BITS; /* could be delta */
        unsigned preferred_base:1; /*
                                    * we do not pack this, but is available
                                    * to be used as the base object to delta
                                    * objects against.
                                    */
-       unsigned no_try_delta:1;
        unsigned tagged:1; /* near the very tip of refs */
        unsigned filled:1; /* assigned write-order */
+       unsigned dfs_state:OE_DFS_STATE_BITS;
+       unsigned char in_pack_header_size;
+       unsigned depth:OE_DEPTH_BITS;
 
        /*
-        * State flags for depth-first search used for analyzing delta cycles.
+        * pahole results on 64-bit linux (gcc and clang)
+        *
+        *   size: 80, bit_padding: 20 bits, holes: 8 bits
+        *
+        * and on 32-bit (gcc)
         *
-        * The depth is measured in delta-links to the base (so if A is a delta
-        * against B, then A has a depth of 1, and B a depth of 0).
+        *   size: 76, bit_padding: 20 bits, holes: 8 bits
         */
-       enum {
-               DFS_NONE = 0,
-               DFS_ACTIVE,
-               DFS_DONE
-       } dfs_state;
-       int depth;
 };
 
 struct packing_data {
@@ -48,8 +128,22 @@ struct packing_data {
 
        int32_t *index;
        uint32_t index_size;
+
+       unsigned int *in_pack_pos;
+
+       /*
+        * Only one of these can be non-NULL and they have different
+        * sizes. if in_pack_by_idx is allocated, oe_in_pack() returns
+        * the pack of an object using in_pack_idx field. If not,
+        * in_pack[] array is used the same way as in_pack_pos[]
+        */
+       struct packed_git **in_pack_by_idx;
+       struct packed_git **in_pack;
+
+       uintmax_t oe_size_limit;
 };
 
+void prepare_packing_data(struct packing_data *pdata);
 struct object_entry *packlist_alloc(struct packing_data *pdata,
                                    const unsigned char *sha1,
                                    uint32_t index_pos);
@@ -78,4 +172,178 @@ static inline uint32_t pack_name_hash(const char *name)
        return hash;
 }
 
+static inline enum object_type oe_type(const struct object_entry *e)
+{
+       return e->type_valid ? e->type_ : OBJ_BAD;
+}
+
+static inline void oe_set_type(struct object_entry *e,
+                              enum object_type type)
+{
+       if (type >= OBJ_ANY)
+               BUG("OBJ_ANY cannot be set in pack-objects code");
+
+       e->type_valid = type >= OBJ_NONE;
+       e->type_ = (unsigned)type;
+}
+
+static inline unsigned int oe_in_pack_pos(const struct packing_data *pack,
+                                         const struct object_entry *e)
+{
+       return pack->in_pack_pos[e - pack->objects];
+}
+
+static inline void oe_set_in_pack_pos(const struct packing_data *pack,
+                                     const struct object_entry *e,
+                                     unsigned int pos)
+{
+       pack->in_pack_pos[e - pack->objects] = pos;
+}
+
+static inline struct packed_git *oe_in_pack(const struct packing_data *pack,
+                                           const struct object_entry *e)
+{
+       if (pack->in_pack_by_idx)
+               return pack->in_pack_by_idx[e->in_pack_idx];
+       else
+               return pack->in_pack[e - pack->objects];
+}
+
+void oe_map_new_pack(struct packing_data *pack,
+                    struct packed_git *p);
+static inline void oe_set_in_pack(struct packing_data *pack,
+                                 struct object_entry *e,
+                                 struct packed_git *p)
+{
+       if (!p->index)
+               oe_map_new_pack(pack, p);
+       if (pack->in_pack_by_idx)
+               e->in_pack_idx = p->index;
+       else
+               pack->in_pack[e - pack->objects] = p;
+}
+
+static inline struct object_entry *oe_delta(
+               const struct packing_data *pack,
+               const struct object_entry *e)
+{
+       if (e->delta_idx)
+               return &pack->objects[e->delta_idx - 1];
+       return NULL;
+}
+
+static inline void oe_set_delta(struct packing_data *pack,
+                               struct object_entry *e,
+                               struct object_entry *delta)
+{
+       if (delta)
+               e->delta_idx = (delta - pack->objects) + 1;
+       else
+               e->delta_idx = 0;
+}
+
+static inline struct object_entry *oe_delta_child(
+               const struct packing_data *pack,
+               const struct object_entry *e)
+{
+       if (e->delta_child_idx)
+               return &pack->objects[e->delta_child_idx - 1];
+       return NULL;
+}
+
+static inline void oe_set_delta_child(struct packing_data *pack,
+                                     struct object_entry *e,
+                                     struct object_entry *delta)
+{
+       if (delta)
+               e->delta_child_idx = (delta - pack->objects) + 1;
+       else
+               e->delta_child_idx = 0;
+}
+
+static inline struct object_entry *oe_delta_sibling(
+               const struct packing_data *pack,
+               const struct object_entry *e)
+{
+       if (e->delta_sibling_idx)
+               return &pack->objects[e->delta_sibling_idx - 1];
+       return NULL;
+}
+
+static inline void oe_set_delta_sibling(struct packing_data *pack,
+                                       struct object_entry *e,
+                                       struct object_entry *delta)
+{
+       if (delta)
+               e->delta_sibling_idx = (delta - pack->objects) + 1;
+       else
+               e->delta_sibling_idx = 0;
+}
+
+unsigned long oe_get_size_slow(struct packing_data *pack,
+                              const struct object_entry *e);
+static inline unsigned long oe_size(struct packing_data *pack,
+                                   const struct object_entry *e)
+{
+       if (e->size_valid)
+               return e->size_;
+
+       return oe_get_size_slow(pack, e);
+}
+
+static inline int oe_size_less_than(struct packing_data *pack,
+                                   const struct object_entry *lhs,
+                                   unsigned long rhs)
+{
+       if (lhs->size_valid)
+               return lhs->size_ < rhs;
+       if (rhs < pack->oe_size_limit) /* rhs < 2^x <= lhs ? */
+               return 0;
+       return oe_get_size_slow(pack, lhs) < rhs;
+}
+
+static inline int oe_size_greater_than(struct packing_data *pack,
+                                      const struct object_entry *lhs,
+                                      unsigned long rhs)
+{
+       if (lhs->size_valid)
+               return lhs->size_ > rhs;
+       if (rhs < pack->oe_size_limit) /* rhs < 2^x <= lhs ? */
+               return 1;
+       return oe_get_size_slow(pack, lhs) > rhs;
+}
+
+static inline void oe_set_size(struct packing_data *pack,
+                              struct object_entry *e,
+                              unsigned long size)
+{
+       if (size < pack->oe_size_limit) {
+               e->size_ = size;
+               e->size_valid = 1;
+       } else {
+               e->size_valid = 0;
+               if (oe_get_size_slow(pack, e) != size)
+                       BUG("'size' is supposed to be the object size!");
+       }
+}
+
+static inline unsigned long oe_delta_size(struct packing_data *pack,
+                                         const struct object_entry *e)
+{
+       if (e->delta_size_valid)
+               return e->delta_size_;
+       return oe_size(pack, e);
+}
+
+static inline void oe_set_delta_size(struct packing_data *pack,
+                                    struct object_entry *e,
+                                    unsigned long size)
+{
+       e->delta_size_ = size;
+       e->delta_size_valid = e->delta_size_ == size;
+       if (!e->delta_size_valid && size != oe_size(pack, e))
+               BUG("this can only happen in check_object() "
+                   "where delta size is the same as entry size");
+}
+
 #endif
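
A sketch of the size-accessor contract defined above: sizes below oe_size_limit are cached in the entry's bitfield, while larger ones leave size_valid at 0 so later reads go through oe_get_size_slow(), which re-derives the value from the object store (the entry must already know its source pack for that to work). The helper and the 1 MiB threshold are made up.

    #include "cache.h"
    #include "pack-objects.h"

    static void record_size(struct packing_data *pack, struct object_entry *e,
                            unsigned long size)
    {
            oe_set_size(pack, e, size);     /* may leave e->size_valid == 0 */

            if (oe_size_greater_than(pack, e, 1UL << 20))
                    trace_printf("entry larger than 1 MiB\n");
    }
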
index ff5f62c03326a7f01926c72a19be5029ae4c1a8b..bb521cf7fb2911bc2d3653f46224880f8540f9ab 100644 (file)
@@ -1,5 +1,6 @@
 #include "cache.h"
 #include "pack-revindex.h"
+#include "object-store.h"
 
 /*
  * Pack index for existing packs give us easy access to the offsets into
index d775c7406dd5a869a1ce4d28f6ef872e08476b77..a9d46bc03f63b27ff85cceecb763d4e39f47898f 100644 (file)
@@ -170,8 +170,9 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
        }
 
        hashwrite(f, sha1, the_hash_algo->rawsz);
-       hashclose(f, NULL, ((opts->flags & WRITE_IDX_VERIFY)
-                           ? CSUM_CLOSE : CSUM_FSYNC));
+       finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_CLOSE |
+                                   ((opts->flags & WRITE_IDX_VERIFY)
+                                   ? 0 : CSUM_FSYNC));
        return index_name;
 }
 
index 7c1a2519fcb9c45aacf5481811bc26f44736de59..9e7e693fb55d539aa7efcf909c6b07c6d153ae39 100644 (file)
@@ -1,6 +1,7 @@
 #include "cache.h"
 #include "list.h"
 #include "pack.h"
+#include "repository.h"
 #include "dir.h"
 #include "mergesort.h"
 #include "packfile.h"
@@ -13,6 +14,7 @@
 #include "tag.h"
 #include "tree-walk.h"
 #include "tree.h"
+#include "object-store.h"
 
 char *odb_pack_name(struct strbuf *buf,
                    const unsigned char *sha1,
@@ -44,8 +46,6 @@ static unsigned int pack_open_fds;
 static unsigned int pack_max_fds;
 static size_t peak_pack_mapped;
 static size_t pack_mapped;
-struct packed_git *packed_git;
-LIST_HEAD(packed_git_mru);
 
 #define SZ_FMT PRIuMAX
 static inline uintmax_t sz_fmt(size_t s) { return s; }
@@ -245,7 +245,7 @@ static int unuse_one_window(struct packed_git *current)
 
        if (current)
                scan_windows(current, &lru_p, &lru_w, &lru_l);
-       for (p = packed_git; p; p = p->next)
+       for (p = the_repository->objects->packed_git; p; p = p->next)
                scan_windows(p, &lru_p, &lru_w, &lru_l);
        if (lru_p) {
                munmap(lru_w->base, lru_w->len);
@@ -304,18 +304,18 @@ void close_pack_index(struct packed_git *p)
        }
 }
 
-static void close_pack(struct packed_git *p)
+void close_pack(struct packed_git *p)
 {
        close_pack_windows(p);
        close_pack_fd(p);
        close_pack_index(p);
 }
 
-void close_all_packs(void)
+void close_all_packs(struct raw_object_store *o)
 {
        struct packed_git *p;
 
-       for (p = packed_git; p; p = p->next)
+       for (p = o->packed_git; p; p = p->next)
                if (p->do_not_close)
                        die("BUG: want to close pack marked 'do-not-close'");
                else
@@ -383,7 +383,7 @@ static int close_one_pack(void)
        struct pack_window *mru_w = NULL;
        int accept_windows_inuse = 1;
 
-       for (p = packed_git; p; p = p->next) {
+       for (p = the_repository->objects->packed_git; p; p = p->next) {
                if (p->pack_fd == -1)
                        continue;
                find_lru_pack(p, &lru_p, &mru_w, &accept_windows_inuse);
@@ -680,13 +680,13 @@ struct packed_git *add_packed_git(const char *path, size_t path_len, int local)
        return p;
 }
 
-void install_packed_git(struct packed_git *pack)
+void install_packed_git(struct repository *r, struct packed_git *pack)
 {
        if (pack->pack_fd != -1)
                pack_open_fds++;
 
-       pack->next = packed_git;
-       packed_git = pack;
+       pack->next = r->objects->packed_git;
+       r->objects->packed_git = pack;
 }
 
 void (*report_garbage)(unsigned seen_bits, const char *path);
@@ -735,7 +735,7 @@ static void report_pack_garbage(struct string_list *list)
        report_helper(list, seen_bits, first, list->nr);
 }
 
-static void prepare_packed_git_one(char *objdir, int local)
+static void prepare_packed_git_one(struct repository *r, char *objdir, int local)
 {
        struct strbuf path = STRBUF_INIT;
        size_t dirnamelen;
@@ -768,7 +768,8 @@ static void prepare_packed_git_one(char *objdir, int local)
                base_len = path.len;
                if (strip_suffix_mem(path.buf, &base_len, ".idx")) {
                        /* Don't reopen a pack we already have. */
-                       for (p = packed_git; p; p = p->next) {
+                       for (p = r->objects->packed_git; p;
+                            p = p->next) {
                                size_t len;
                                if (strip_suffix(p->pack_name, ".pack", &len) &&
                                    len == base_len &&
@@ -781,7 +782,7 @@ static void prepare_packed_git_one(char *objdir, int local)
                             * corresponding .pack file that we can map.
                             */
                            (p = add_packed_git(path.buf, path.len, local)) != NULL)
-                               install_packed_git(p);
+                               install_packed_git(r, p);
                }
 
                if (!report_garbage)
@@ -802,8 +803,7 @@ static void prepare_packed_git_one(char *objdir, int local)
        strbuf_release(&path);
 }
 
-static int approximate_object_count_valid;
-
+static void prepare_packed_git(struct repository *r);
 /*
  * Give a fast, rough count of the number of objects in the repository. This
  * ignores loose objects completely. If you have a lot of them, then either
@@ -813,19 +813,20 @@ static int approximate_object_count_valid;
  */
 unsigned long approximate_object_count(void)
 {
-       static unsigned long count;
-       if (!approximate_object_count_valid) {
+       if (!the_repository->objects->approximate_object_count_valid) {
+               unsigned long count;
                struct packed_git *p;
 
-               prepare_packed_git();
+               prepare_packed_git(the_repository);
                count = 0;
-               for (p = packed_git; p; p = p->next) {
+               for (p = the_repository->objects->packed_git; p; p = p->next) {
                        if (open_pack_index(p))
                                continue;
                        count += p->num_objects;
                }
+               the_repository->objects->approximate_object_count = count;
        }
-       return count;
+       return the_repository->objects->approximate_object_count;
 }
 
 static void *get_next_packed_git(const void *p)
@@ -866,43 +867,55 @@ static int sort_pack(const void *a_, const void *b_)
        return -1;
 }
 
-static void rearrange_packed_git(void)
+static void rearrange_packed_git(struct repository *r)
 {
-       packed_git = llist_mergesort(packed_git, get_next_packed_git,
-                                    set_next_packed_git, sort_pack);
+       r->objects->packed_git = llist_mergesort(
+               r->objects->packed_git, get_next_packed_git,
+               set_next_packed_git, sort_pack);
 }
 
-static void prepare_packed_git_mru(void)
+static void prepare_packed_git_mru(struct repository *r)
 {
        struct packed_git *p;
 
-       INIT_LIST_HEAD(&packed_git_mru);
+       INIT_LIST_HEAD(&r->objects->packed_git_mru);
 
-       for (p = packed_git; p; p = p->next)
-               list_add_tail(&p->mru, &packed_git_mru);
+       for (p = r->objects->packed_git; p; p = p->next)
+               list_add_tail(&p->mru, &r->objects->packed_git_mru);
 }
 
-static int prepare_packed_git_run_once = 0;
-void prepare_packed_git(void)
+static void prepare_packed_git(struct repository *r)
 {
        struct alternate_object_database *alt;
 
-       if (prepare_packed_git_run_once)
+       if (r->objects->packed_git_initialized)
                return;
-       prepare_packed_git_one(get_object_directory(), 1);
-       prepare_alt_odb();
-       for (alt = alt_odb_list; alt; alt = alt->next)
-               prepare_packed_git_one(alt->path, 0);
-       rearrange_packed_git();
-       prepare_packed_git_mru();
-       prepare_packed_git_run_once = 1;
+       prepare_packed_git_one(r, r->objects->objectdir, 1);
+       prepare_alt_odb(r);
+       for (alt = r->objects->alt_odb_list; alt; alt = alt->next)
+               prepare_packed_git_one(r, alt->path, 0);
+       rearrange_packed_git(r);
+       prepare_packed_git_mru(r);
+       r->objects->packed_git_initialized = 1;
+}
+
+void reprepare_packed_git(struct repository *r)
+{
+       r->objects->approximate_object_count_valid = 0;
+       r->objects->packed_git_initialized = 0;
+       prepare_packed_git(r);
+}
+
+struct packed_git *get_packed_git(struct repository *r)
+{
+       prepare_packed_git(r);
+       return r->objects->packed_git;
 }
 
-void reprepare_packed_git(void)
+struct list_head *get_packed_git_mru(struct repository *r)
 {
-       approximate_object_count_valid = 0;
-       prepare_packed_git_run_once = 0;
-       prepare_packed_git();
+       prepare_packed_git(r);
+       return &r->objects->packed_git_mru;
 }
 
 unsigned long unpack_object_header_buffer(const unsigned char *buf,
@@ -1013,7 +1026,7 @@ const struct packed_git *has_packed_and_bad(const unsigned char *sha1)
        struct packed_git *p;
        unsigned i;
 
-       for (p = packed_git; p; p = p->next)
+       for (p = the_repository->objects->packed_git; p; p = p->next)
                for (i = 0; i < p->num_bad_objects; i++)
                        if (!hashcmp(sha1, p->bad_object_sha1 + 20 * i))
                                return p;
@@ -1091,17 +1104,19 @@ static const unsigned char *get_delta_base_sha1(struct packed_git *p,
                return NULL;
 }
 
-static int retry_bad_packed_offset(struct packed_git *p, off_t obj_offset)
+static int retry_bad_packed_offset(struct repository *r,
+                                  struct packed_git *p,
+                                  off_t obj_offset)
 {
        int type;
        struct revindex_entry *revidx;
-       const unsigned char *sha1;
+       struct object_id oid;
        revidx = find_pack_revindex(p, obj_offset);
        if (!revidx)
                return OBJ_BAD;
-       sha1 = nth_packed_object_sha1(p, revidx->nr);
-       mark_bad_packed_object(p, sha1);
-       type = sha1_object_info(sha1, NULL);
+       nth_packed_object_oid(&oid, p, revidx->nr);
+       mark_bad_packed_object(p, oid.hash);
+       type = oid_object_info(r, &oid, NULL);
        if (type <= OBJ_NONE)
                return OBJ_BAD;
        return type;
@@ -1109,7 +1124,8 @@ static int retry_bad_packed_offset(struct packed_git *p, off_t obj_offset)
 
 #define POI_STACK_PREALLOC 64
 
-static enum object_type packed_to_object_type(struct packed_git *p,
+static enum object_type packed_to_object_type(struct repository *r,
+                                             struct packed_git *p,
                                              off_t obj_offset,
                                              enum object_type type,
                                              struct pack_window **w_curs,
@@ -1140,7 +1156,7 @@ static enum object_type packed_to_object_type(struct packed_git *p,
                if (type <= OBJ_NONE) {
                        /* If getting the base itself fails, we first
                         * retry the base, otherwise unwind */
-                       type = retry_bad_packed_offset(p, base_offset);
+                       type = retry_bad_packed_offset(r, p, base_offset);
                        if (type > OBJ_NONE)
                                goto out;
                        goto unwind;
@@ -1168,7 +1184,7 @@ static enum object_type packed_to_object_type(struct packed_git *p,
 unwind:
        while (poi_stack_nr) {
                obj_offset = poi_stack[--poi_stack_nr];
-               type = retry_bad_packed_offset(p, obj_offset);
+               type = retry_bad_packed_offset(r, p, obj_offset);
                if (type > OBJ_NONE)
                        goto out;
        }
@@ -1255,14 +1271,15 @@ static void detach_delta_base_cache_entry(struct delta_base_cache_entry *ent)
        free(ent);
 }
 
-static void *cache_or_unpack_entry(struct packed_git *p, off_t base_offset,
-       unsigned long *base_size, enum object_type *type)
+static void *cache_or_unpack_entry(struct repository *r, struct packed_git *p,
+                                  off_t base_offset, unsigned long *base_size,
+                                  enum object_type *type)
 {
        struct delta_base_cache_entry *ent;
 
        ent = get_delta_base_cache_entry(p, base_offset);
        if (!ent)
-               return unpack_entry(p, base_offset, type, base_size);
+               return unpack_entry(r, p, base_offset, type, base_size);
 
        if (type)
                *type = ent->type;
@@ -1316,8 +1333,8 @@ static void add_delta_base_cache(struct packed_git *p, off_t base_offset,
        hashmap_add(&delta_base_cache, ent);
 }
 
-int packed_object_info(struct packed_git *p, off_t obj_offset,
-                      struct object_info *oi)
+int packed_object_info(struct repository *r, struct packed_git *p,
+                      off_t obj_offset, struct object_info *oi)
 {
        struct pack_window *w_curs = NULL;
        unsigned long size;
@@ -1329,7 +1346,7 @@ int packed_object_info(struct packed_git *p, off_t obj_offset,
         * a "real" type later if the caller is interested.
         */
        if (oi->contentp) {
-               *oi->contentp = cache_or_unpack_entry(p, obj_offset, oi->sizep,
+               *oi->contentp = cache_or_unpack_entry(r, p, obj_offset, oi->sizep,
                                                      &type);
                if (!*oi->contentp)
                        type = OBJ_BAD;
@@ -1363,8 +1380,8 @@ int packed_object_info(struct packed_git *p, off_t obj_offset,
 
        if (oi->typep || oi->type_name) {
                enum object_type ptot;
-               ptot = packed_to_object_type(p, obj_offset, type, &w_curs,
-                                            curpos);
+               ptot = packed_to_object_type(r, p, obj_offset,
+                                            type, &w_curs, curpos);
                if (oi->typep)
                        *oi->typep = ptot;
                if (oi->type_name) {
@@ -1452,7 +1469,9 @@ struct unpack_entry_stack_ent {
        unsigned long size;
 };
 
-static void *read_object(const unsigned char *sha1, enum object_type *type,
+static void *read_object(struct repository *r,
+                        const struct object_id *oid,
+                        enum object_type *type,
                         unsigned long *size)
 {
        struct object_info oi = OBJECT_INFO_INIT;
@@ -1461,12 +1480,12 @@ static void *read_object(const unsigned char *sha1, enum object_type *type,
        oi.sizep = size;
        oi.contentp = &content;
 
-       if (sha1_object_info_extended(sha1, &oi, 0) < 0)
+       if (oid_object_info_extended(r, oid, &oi, 0) < 0)
                return NULL;
        return content;
 }
 
-void *unpack_entry(struct packed_git *p, off_t obj_offset,
+void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset,
                   enum object_type *final_type, unsigned long *final_size)
 {
        struct pack_window *w_curs = NULL;
@@ -1501,11 +1520,11 @@ void *unpack_entry(struct packed_git *p, off_t obj_offset,
                        struct revindex_entry *revidx = find_pack_revindex(p, obj_offset);
                        off_t len = revidx[1].offset - obj_offset;
                        if (check_pack_crc(p, &w_curs, obj_offset, len, revidx->nr)) {
-                               const unsigned char *sha1 =
-                                       nth_packed_object_sha1(p, revidx->nr);
+                               struct object_id oid;
+                               nth_packed_object_oid(&oid, p, revidx->nr);
                                error("bad packed object CRC for %s",
-                                     sha1_to_hex(sha1));
-                               mark_bad_packed_object(p, sha1);
+                                     oid_to_hex(&oid));
+                               mark_bad_packed_object(p, oid.hash);
                                data = NULL;
                                goto out;
                        }
@@ -1588,16 +1607,16 @@ void *unpack_entry(struct packed_git *p, off_t obj_offset,
                         * of a corrupted pack, and is better than failing outright.
                         */
                        struct revindex_entry *revidx;
-                       const unsigned char *base_sha1;
+                       struct object_id base_oid;
                        revidx = find_pack_revindex(p, obj_offset);
                        if (revidx) {
-                               base_sha1 = nth_packed_object_sha1(p, revidx->nr);
+                               nth_packed_object_oid(&base_oid, p, revidx->nr);
                                error("failed to read delta base object %s"
                                      " at offset %"PRIuMAX" from %s",
-                                     sha1_to_hex(base_sha1), (uintmax_t)obj_offset,
+                                     oid_to_hex(&base_oid), (uintmax_t)obj_offset,
                                      p->pack_name);
-                               mark_bad_packed_object(p, base_sha1);
-                               base = read_object(base_sha1, &type, &base_size);
+                               mark_bad_packed_object(p, base_oid.hash);
+                               base = read_object(r, &base_oid, &type, &base_size);
                                external_base = base;
                        }
                }
@@ -1654,6 +1673,29 @@ void *unpack_entry(struct packed_git *p, off_t obj_offset,
        return data;
 }
 
+int bsearch_pack(const struct object_id *oid, const struct packed_git *p, uint32_t *result)
+{
+       const unsigned char *index_fanout = p->index_data;
+       const unsigned char *index_lookup;
+       int index_lookup_width;
+
+       if (!index_fanout)
+               BUG("bsearch_pack called without a valid pack-index");
+
+       index_lookup = index_fanout + 4 * 256;
+       if (p->index_version == 1) {
+               index_lookup_width = 24;
+               index_lookup += 4;
+       } else {
+               index_lookup_width = 20;
+               index_fanout += 8;
+               index_lookup += 8;
+       }
+
+       return bsearch_hash(oid->hash, (const uint32_t*)index_fanout,
+                           index_lookup, index_lookup_width, result);
+}
+
 const unsigned char *nth_packed_object_sha1(struct packed_git *p,
                                            uint32_t n)
 {
@@ -1720,30 +1762,17 @@ off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n)
 off_t find_pack_entry_one(const unsigned char *sha1,
                                  struct packed_git *p)
 {
-       const uint32_t *level1_ofs = p->index_data;
        const unsigned char *index = p->index_data;
-       unsigned stride;
+       struct object_id oid;
        uint32_t result;
 
        if (!index) {
                if (open_pack_index(p))
                        return 0;
-               level1_ofs = p->index_data;
-               index = p->index_data;
-       }
-       if (p->index_version > 1) {
-               level1_ofs += 2;
-               index += 8;
-       }
-       index += 4 * 256;
-       if (p->index_version > 1) {
-               stride = 20;
-       } else {
-               stride = 24;
-               index += 4;
        }
 
-       if (bsearch_hash(sha1, level1_ofs, index, stride, &result))
+       hashcpy(oid.hash, sha1);
+       if (bsearch_pack(&oid, p, &result))
                return nth_packed_object_offset(p, result);
        return 0;
 }
@@ -1814,22 +1843,18 @@ static int fill_pack_entry(const unsigned char *sha1,
        return 1;
 }
 
-/*
- * Iff a pack file contains the object named by sha1, return true and
- * store its location to e.
- */
-int find_pack_entry(const unsigned char *sha1, struct pack_entry *e)
+int find_pack_entry(struct repository *r, const unsigned char *sha1, struct pack_entry *e)
 {
        struct list_head *pos;
 
-       prepare_packed_git();
-       if (!packed_git)
+       prepare_packed_git(r);
+       if (!r->objects->packed_git)
                return 0;
 
-       list_for_each(pos, &packed_git_mru) {
+       list_for_each(pos, &r->objects->packed_git_mru) {
                struct packed_git *p = list_entry(pos, struct packed_git, mru);
                if (fill_pack_entry(sha1, e, p)) {
-                       list_move(&p->mru, &packed_git_mru);
+                       list_move(&p->mru, &r->objects->packed_git_mru);
                        return 1;
                }
        }
@@ -1839,7 +1864,7 @@ int find_pack_entry(const unsigned char *sha1, struct pack_entry *e)
 int has_sha1_pack(const unsigned char *sha1)
 {
        struct pack_entry e;
-       return find_pack_entry(sha1, &e);
+       return find_pack_entry(the_repository, sha1, &e);
 }
 
 int has_pack_index(const unsigned char *sha1)
@@ -1850,7 +1875,7 @@ int has_pack_index(const unsigned char *sha1)
        return 1;
 }
 
-static int for_each_object_in_pack(struct packed_git *p, each_packed_object_fn cb, void *data)
+int for_each_object_in_pack(struct packed_git *p, each_packed_object_fn cb, void *data)
 {
        uint32_t i;
        int r = 0;
@@ -1875,8 +1900,8 @@ int for_each_packed_object(each_packed_object_fn cb, void *data, unsigned flags)
        int r = 0;
        int pack_errors = 0;
 
-       prepare_packed_git();
-       for (p = packed_git; p; p = p->next) {
+       prepare_packed_git(the_repository);
+       for (p = the_repository->objects->packed_git; p; p = p->next) {
                if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
                        continue;
                if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) &&
@@ -1907,7 +1932,7 @@ static int add_promisor_object(const struct object_id *oid,
 
        /*
         * If this is a tree, commit, or tag, the objects it refers
-        * to are also promisor objects. (Blobs refer to no objects.)
+        * to are also promisor objects. (Blobs refer to no objects->)
         */
        if (obj->type == OBJ_TREE) {
                struct tree *tree = (struct tree *)obj;
@@ -1925,7 +1950,7 @@ static int add_promisor_object(const struct object_id *oid,
                struct commit *commit = (struct commit *) obj;
                struct commit_list *parents = commit->parents;
 
-               oidset_insert(set, &commit->tree->object.oid);
+               oidset_insert(set, get_commit_tree_oid(commit));
                for (; parents; parents = parents->next)
                        oidset_insert(set, &parents->item->object.oid);
        } else if (obj->type == OBJ_TAG) {
index a7fca598d672b73010a5fb99e4507da4634002ff..bfd0b5399de090c2e404a4e210eeaa59586ca7cc 100644 (file)
@@ -34,9 +34,11 @@ extern struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_
 #define PACKDIR_FILE_GARBAGE 4
 extern void (*report_garbage)(unsigned seen_bits, const char *path);
 
-extern void prepare_packed_git(void);
-extern void reprepare_packed_git(void);
-extern void install_packed_git(struct packed_git *pack);
+extern void reprepare_packed_git(struct repository *r);
+extern void install_packed_git(struct repository *r, struct packed_git *pack);
+
+struct packed_git *get_packed_git(struct repository *r);
+struct list_head *get_packed_git_mru(struct repository *r);
 
 /*
  * Give a rough count of objects in the repository. This sacrifices accuracy
@@ -63,7 +65,8 @@ extern void close_pack_index(struct packed_git *);
 
 extern unsigned char *use_pack(struct packed_git *, struct pack_window **, off_t, unsigned long *);
 extern void close_pack_windows(struct packed_git *);
-extern void close_all_packs(void);
+extern void close_pack(struct packed_git *);
+extern void close_all_packs(struct raw_object_store *o);
 extern void unuse_pack(struct pack_window **);
 extern void clear_delta_base_cache(void);
 extern struct packed_git *add_packed_git(const char *path, size_t path_len, int local);
@@ -78,6 +81,14 @@ extern struct packed_git *add_packed_git(const char *path, size_t path_len, int
  */
 extern void check_pack_index_ptr(const struct packed_git *p, const void *ptr);
 
+/*
+ * Perform binary search on a pack-index for a given oid. Packfile is expected to
+ * have a valid pack-index.
+ *
+ * See 'bsearch_hash' for more information.
+ */
+int bsearch_pack(const struct object_id *oid, const struct packed_git *p, uint32_t *result);
+
 /*
  * Return the SHA-1 of the nth object within the specified packfile.
  * Open the index if it is not already open.  The return value points
@@ -105,7 +116,7 @@ extern off_t nth_packed_object_offset(const struct packed_git *, uint32_t n);
 extern off_t find_pack_entry_one(const unsigned char *sha1, struct packed_git *);
 
 extern int is_pack_valid(struct packed_git *);
-extern void *unpack_entry(struct packed_git *, off_t, enum object_type *, unsigned long *);
+extern void *unpack_entry(struct repository *r, struct packed_git *, off_t, enum object_type *, unsigned long *);
 extern unsigned long unpack_object_header_buffer(const unsigned char *buf, unsigned long len, enum object_type *type, unsigned long *sizep);
 extern unsigned long get_size_from_delta(struct packed_git *, struct pack_window **, off_t);
 extern int unpack_object_header(struct packed_git *, struct pack_window **, off_t *, unsigned long *);
@@ -115,12 +126,18 @@ extern void release_pack_memory(size_t);
 /* global flag to enable extra checks when accessing packed objects */
 extern int do_check_packed_object_crc;
 
-extern int packed_object_info(struct packed_git *pack, off_t offset, struct object_info *);
+extern int packed_object_info(struct repository *r,
+                             struct packed_git *pack,
+                             off_t offset, struct object_info *);
 
 extern void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1);
 extern const struct packed_git *has_packed_and_bad(const unsigned char *sha1);
 
-extern int find_pack_entry(const unsigned char *sha1, struct pack_entry *e);
+/*
+ * Iff a pack file in the given repository contains the object named by sha1,
+ * return true and store its location to e.
+ */
+extern int find_pack_entry(struct repository *r, const unsigned char *sha1, struct pack_entry *e);
 
 extern int has_sha1_pack(const unsigned char *sha1);
 
@@ -140,6 +157,7 @@ typedef int each_packed_object_fn(const struct object_id *oid,
                                  struct packed_git *pack,
                                  uint32_t pos,
                                  void *data);
+extern int for_each_object_in_pack(struct packed_git *p, each_packed_object_fn, void *data);
 extern int for_each_packed_object(each_packed_object_fn, void *, unsigned flags);
 
 /*
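
With for_each_object_in_pack() now exported, a per-pack walk looks roughly like this; the counting callback is hypothetical, and its signature follows the each_packed_object_fn typedef above.

    #include "cache.h"
    #include "repository.h"
    #include "object-store.h"
    #include "packfile.h"

    static int count_cb(const struct object_id *oid, struct packed_git *pack,
                        uint32_t pos, void *data)
    {
            unsigned long *n = data;
            (*n)++;
            return 0;       /* non-zero would stop the iteration */
    }

    static unsigned long count_objects_in_first_pack(struct repository *r)
    {
            struct packed_git *p = get_packed_git(r);
            unsigned long n = 0;

            if (p)
                    for_each_object_in_pack(p, count_cb, &n);
            return n;
    }
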
diff --git a/pager.c b/pager.c
index 92b23e6cd1d44a26c86afeeb748ddc0aee3f9154..226828f178a0c1a876710326634e83288863af3d 100644 (file)
--- a/pager.c
+++ b/pager.c
@@ -109,10 +109,15 @@ void setup_pager(void)
                return;
 
        /*
-        * force computing the width of the terminal before we redirect
-        * the standard output to the pager.
+        * After we redirect standard output, we won't be able to use an ioctl
+        * to get the terminal size. Let's grab it now, and then set $COLUMNS
+        * to communicate it to any sub-processes.
         */
-       (void) term_columns();
+       {
+               char buf[64];
+               xsnprintf(buf, sizeof(buf), "%d", term_columns());
+               setenv("COLUMNS", buf, 0);
+       }
 
        setenv("GIT_PAGER_IN_USE", "true", 1);
 
index c6679cb2cdee15981ee9ef31c402c358a3727d1e..0f9f311a7a93350a584125c90da643fce96c34e7 100644 (file)
@@ -38,7 +38,11 @@ int parse_opt_approxidate_cb(const struct option *opt, const char *arg,
 int parse_opt_expiry_date_cb(const struct option *opt, const char *arg,
                             int unset)
 {
-       return parse_expiry_date(arg, (timestamp_t *)opt->value);
+       if (unset)
+               arg = "never";
+       if (parse_expiry_date(arg, (timestamp_t *)opt->value))
+               die(_("malformed expiration date '%s'"), arg);
+       return 0;
 }
 
 int parse_opt_color_flag_cb(const struct option *opt, const char *arg,
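
A sketch of wiring the callback into an option table; the option name and variable are hypothetical. With the change above, "--no-expire" now maps to "never", and a malformed date dies instead of returning an unchecked error code.

    #include "cache.h"
    #include "parse-options.h"

    static timestamp_t expire_at;

    static const struct option demo_options[] = {
            OPT_CALLBACK(0, "expire", &expire_at, N_("date"),
                         N_("expire entries older than <date>"),
                         parse_opt_expiry_date_cb),
            OPT_END()
    };
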
index 125e84f98451b4eb12e9d8a6cb4da58b2d8db51e..0f7059a8ab32a624775026d7dc2289245c87c192 100644 (file)
@@ -317,14 +317,16 @@ static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg,
                return get_value(p, options, all_opts, flags ^ opt_flags);
        }
 
-       if (ambiguous_option)
-               return error("Ambiguous option: %s "
+       if (ambiguous_option) {
+               error("Ambiguous option: %s "
                        "(could be --%s%s or --%s%s)",
                        arg,
                        (ambiguous_flags & OPT_UNSET) ?  "no-" : "",
                        ambiguous_option->long_name,
                        (abbrev_flags & OPT_UNSET) ?  "no-" : "",
                        abbrev_option->long_name);
+               return -3;
+       }
        if (abbrev_option)
                return get_value(p, abbrev_option, all_opts, abbrev_flags);
        return -2;
@@ -476,7 +478,6 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
                       const char * const usagestr[])
 {
        int internal_help = !(ctx->flags & PARSE_OPT_NO_INTERNAL_HELP);
-       int err = 0;
 
        /* we must reset ->opt, unknown short option leave it dangling */
        ctx->opt = NULL;
@@ -505,7 +506,7 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
                        ctx->opt = arg + 1;
                        switch (parse_short_opt(ctx, options)) {
                        case -1:
-                               goto show_usage_error;
+                               return PARSE_OPT_ERROR;
                        case -2:
                                if (ctx->opt)
                                        check_typos(arg + 1, options);
@@ -518,7 +519,7 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
                        while (ctx->opt) {
                                switch (parse_short_opt(ctx, options)) {
                                case -1:
-                                       goto show_usage_error;
+                                       return PARSE_OPT_ERROR;
                                case -2:
                                        if (internal_help && *ctx->opt == 'h')
                                                goto show_usage;
@@ -550,9 +551,11 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
                        goto show_usage;
                switch (parse_long_opt(ctx, arg + 2, options)) {
                case -1:
-                       goto show_usage_error;
+                       return PARSE_OPT_ERROR;
                case -2:
                        goto unknown;
+               case -3:
+                       goto show_usage;
                }
                continue;
 unknown:
@@ -563,10 +566,8 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
        }
        return PARSE_OPT_DONE;
 
- show_usage_error:
-       err = 1;
  show_usage:
-       return usage_with_options_internal(ctx, usagestr, options, 0, err);
+       return usage_with_options_internal(ctx, usagestr, options, 0, 0);
 }
 
 int parse_options_end(struct parse_opt_ctx_t *ctx)
@@ -585,6 +586,7 @@ int parse_options(int argc, const char **argv, const char *prefix,
        parse_options_start(&ctx, argc, argv, prefix, options, flags);
        switch (parse_options_step(&ctx, options, usagestr)) {
        case PARSE_OPT_HELP:
+       case PARSE_OPT_ERROR:
                exit(129);
        case PARSE_OPT_NON_OPTION:
        case PARSE_OPT_DONE:
index ab1cc362bf2918c28a14dd851c1b1a13dfa0c863..dd14911a297a5b10705ecb31243c55a7dc2f193c 100644 (file)
@@ -200,6 +200,7 @@ enum {
        PARSE_OPT_HELP = -1,
        PARSE_OPT_DONE,
        PARSE_OPT_NON_OPTION,
+       PARSE_OPT_ERROR,
        PARSE_OPT_UNKNOWN
 };
 
diff --git a/path.c b/path.c
index da8b655730d363dda5010bdf2d53bd76abb82931..3308b7b95828821fdda6806d9d19228cbf8e61ed 100644 (file)
--- a/path.c
+++ b/path.c
@@ -10,6 +10,7 @@
 #include "submodule-config.h"
 #include "path.h"
 #include "packfile.h"
+#include "object-store.h"
 
 static int get_st_mode_bits(const char *path, int *mode)
 {
@@ -382,7 +383,7 @@ static void adjust_git_path(const struct repository *repo,
                strbuf_splice(buf, 0, buf->len,
                              repo->index_file, strlen(repo->index_file));
        else if (dir_prefix(base, "objects"))
-               replace_dir(buf, git_dir_len + 7, repo->objectdir);
+               replace_dir(buf, git_dir_len + 7, repo->objects->objectdir);
        else if (git_hooks_path && dir_prefix(base, "hooks"))
                replace_dir(buf, git_dir_len + 5, git_hooks_path);
        else if (repo->different_commondir)
index 16ebcc612ce4acb4fba6511d5b388184934cb22a..d856930b2e5f31bb7b1e7aef46e8e056068bb431 100644 (file)
@@ -554,7 +554,7 @@ sub get_record {
        my ($fh, $rs) = @_;
        local $/ = $rs;
        my $rec = <$fh>;
-       chomp $rec if defined $rs;
+       chomp $rec if defined $rec;
        $rec;
 }
 
index dba96fff0aecef6eac83aacdaa54b46806cdb0a4..bfb4fb67a13f4530aae2d974e579b9ba45e20cdb 100644 (file)
@@ -18,7 +18,7 @@ BEGIN
 
 sub __bootstrap_locale_messages {
        our $TEXTDOMAIN = 'git';
-       our $TEXTDOMAINDIR = $ENV{GIT_TEXTDOMAINDIR} || '@@LOCALEDIR@@';
+       our $TEXTDOMAINDIR ||= $ENV{GIT_TEXTDOMAINDIR} || '@@LOCALEDIR@@';
 
        require POSIX;
        POSIX->import(qw(setlocale));
index 991a5885e9230b1f55bd6f3b7f7b53321bf9e562..76b29659057d329670ce07b102de37caf133e97f 100644 (file)
@@ -1482,7 +1482,6 @@ sub call_authors_prog {
        }
        if ($author =~ /^\s*(.+?)\s*<(.*)>\s*$/) {
                my ($name, $email) = ($1, $2);
-               $email = undef if length $2 == 0;
                return [$name, $email];
        } else {
                die "Author: $orig_author: $::_authors_prog returned "
@@ -2020,8 +2019,8 @@ sub make_log_entry {
                remove_username($full_url);
                $log_entry{metadata} = "$full_url\@$r $uuid";
                $log_entry{svm_revision} = $r;
-               $email ||= "$author\@$uuid";
-               $commit_email ||= "$author\@$uuid";
+               $email = "$author\@$uuid" unless defined $email;
+               $commit_email = "$author\@$uuid" unless defined $commit_email;
        } elsif ($self->use_svnsync_props) {
                my $full_url = canonicalize_url(
                        add_path_to_url( $self->svnsync->{url}, $self->path )
@@ -2029,15 +2028,15 @@ sub make_log_entry {
                remove_username($full_url);
                my $uuid = $self->svnsync->{uuid};
                $log_entry{metadata} = "$full_url\@$rev $uuid";
-               $email ||= "$author\@$uuid";
-               $commit_email ||= "$author\@$uuid";
+               $email = "$author\@$uuid" unless defined $email;
+               $commit_email = "$author\@$uuid" unless defined $commit_email;
        } else {
                my $url = $self->metadata_url;
                remove_username($url);
                my $uuid = $self->rewrite_uuid || $self->ra->get_uuid;
                $log_entry{metadata} = "$url\@$rev " . $uuid;
-               $email ||= "$author\@" . $uuid;
-               $commit_email ||= "$author\@" . $uuid;
+               $email = "$author\@$uuid" unless defined $email;
+               $commit_email = "$author\@$uuid" unless defined $commit_email;
        }
        $log_entry{name} = $name;
        $log_entry{email} = $email;
diff --git a/perl/header_templates/fixed_prefix.template.pl b/perl/header_templates/fixed_prefix.template.pl
new file mode 100644 (file)
index 0000000..857b439
--- /dev/null
@@ -0,0 +1 @@
+use lib (split(/@@PATHSEP@@/, $ENV{GITPERLLIB} || '@@INSTLIBDIR@@'));
diff --git a/perl/header_templates/runtime_prefix.template.pl b/perl/header_templates/runtime_prefix.template.pl
new file mode 100644 (file)
index 0000000..9d28b3d
--- /dev/null
@@ -0,0 +1,42 @@
+# BEGIN RUNTIME_PREFIX generated code.
+#
+# This finds our Git::* libraries relative to the script's runtime path.
+sub __git_system_path {
+       my ($relpath) = @_;
+       my $gitexecdir_relative = '@@GITEXECDIR_REL@@';
+
+       # GIT_EXEC_PATH is supplied by `git` or the test suite.
+       my $exec_path;
+       if (exists $ENV{GIT_EXEC_PATH}) {
+               $exec_path = $ENV{GIT_EXEC_PATH};
+       } else {
+               # This can happen if this script is being directly invoked instead of run
+               # by "git".
+               require FindBin;
+               $exec_path = $FindBin::Bin;
+       }
+
+       # Trim off the relative gitexecdir path to get the system path.
+       (my $prefix = $exec_path) =~ s/\Q$gitexecdir_relative\E$//;
+
+       require File::Spec;
+       return File::Spec->catdir($prefix, $relpath);
+}
+
+BEGIN {
+       use lib split /@@PATHSEP@@/,
+       (
+               $ENV{GITPERLLIB} ||
+               do {
+                       my $perllibdir = __git_system_path('@@PERLLIBDIR_REL@@');
+                       (-e $perllibdir) || die("Invalid system path: $perllibdir");
+                       $perllibdir;
+               }
+       );
+
+       # Export the system locale directory to the I18N module. The locale directory
+       # is only installed if NO_GETTEXT is unset.
+       $Git::I18N::TEXTDOMAINDIR = __git_system_path('@@LOCALEDIR_REL@@');
+}
+
+# END RUNTIME_PREFIX generated code.
index 2827ca772a3703f71bc588d0f6cacd5caa318fe7..555eb2a50746bb8f129a8c14c9b6d61169093436 100644 (file)
@@ -91,6 +91,12 @@ void packet_flush(int fd)
        write_or_die(fd, "0000", 4);
 }
 
+void packet_delim(int fd)
+{
+       packet_trace("0001", 4, 1);
+       write_or_die(fd, "0001", 4);
+}
+
 int packet_flush_gently(int fd)
 {
        packet_trace("0000", 4, 1);
@@ -105,6 +111,12 @@ void packet_buf_flush(struct strbuf *buf)
        strbuf_add(buf, "0000", 4);
 }
 
+void packet_buf_delim(struct strbuf *buf)
+{
+       packet_trace("0001", 4, 1);
+       strbuf_add(buf, "0001", 4);
+}
+
 static void set_packet_header(char *buf, const int size)
 {
        static char hexchar[] = "0123456789abcdef";
@@ -203,6 +215,22 @@ void packet_buf_write(struct strbuf *buf, const char *fmt, ...)
        va_end(args);
 }
 
+void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len)
+{
+       size_t orig_len, n;
+
+       orig_len = buf->len;
+       strbuf_addstr(buf, "0000");
+       strbuf_add(buf, data, len);
+       n = buf->len - orig_len;
+
+       if (n > LARGE_PACKET_MAX)
+               die("protocol error: impossibly long line");
+
+       set_packet_header(&buf->buf[orig_len], n);
+       packet_trace(data, len, 1);
+}
+
 int write_packetized_from_fd(int fd_in, int fd_out)
 {
        static char buf[LARGE_PACKET_DATA_MAX];
@@ -280,28 +308,43 @@ static int packet_length(const char *linelen)
        return (val < 0) ? val : (val << 8) | hex2chr(linelen + 2);
 }
 
-int packet_read(int fd, char **src_buf, size_t *src_len,
-               char *buffer, unsigned size, int options)
+enum packet_read_status packet_read_with_status(int fd, char **src_buffer,
+                                               size_t *src_len, char *buffer,
+                                               unsigned size, int *pktlen,
+                                               int options)
 {
-       int len, ret;
+       int len;
        char linelen[4];
 
-       ret = get_packet_data(fd, src_buf, src_len, linelen, 4, options);
-       if (ret < 0)
-               return ret;
+       if (get_packet_data(fd, src_buffer, src_len, linelen, 4, options) < 0) {
+               *pktlen = -1;
+               return PACKET_READ_EOF;
+       }
+
        len = packet_length(linelen);
-       if (len < 0)
+
+       if (len < 0) {
                die("protocol error: bad line length character: %.4s", linelen);
-       if (!len) {
+       } else if (!len) {
                packet_trace("0000", 4, 0);
-               return 0;
+               *pktlen = 0;
+               return PACKET_READ_FLUSH;
+       } else if (len == 1) {
+               packet_trace("0001", 4, 0);
+               *pktlen = 0;
+               return PACKET_READ_DELIM;
+       } else if (len < 4) {
+               die("protocol error: bad line length %d", len);
        }
+
        len -= 4;
-       if (len >= size)
+       if ((unsigned)len >= size)
                die("protocol error: bad line length %d", len);
-       ret = get_packet_data(fd, src_buf, src_len, buffer, len, options);
-       if (ret < 0)
-               return ret;
+
+       if (get_packet_data(fd, src_buffer, src_len, buffer, len, options) < 0) {
+               *pktlen = -1;
+               return PACKET_READ_EOF;
+       }
 
        if ((options & PACKET_READ_CHOMP_NEWLINE) &&
            len && buffer[len-1] == '\n')
@@ -309,7 +352,19 @@ int packet_read(int fd, char **src_buf, size_t *src_len,
 
        buffer[len] = 0;
        packet_trace(buffer, len, 0);
-       return len;
+       *pktlen = len;
+       return PACKET_READ_NORMAL;
+}
+
+int packet_read(int fd, char **src_buffer, size_t *src_len,
+               char *buffer, unsigned size, int options)
+{
+       int pktlen = -1;
+
+       packet_read_with_status(fd, src_buffer, src_len, buffer, size,
+                               &pktlen, options);
+
+       return pktlen;
 }
 
 static char *packet_read_line_generic(int fd,
@@ -377,3 +432,53 @@ ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out)
        }
        return sb_out->len - orig_len;
 }
+
+/* Packet Reader Functions */
+void packet_reader_init(struct packet_reader *reader, int fd,
+                       char *src_buffer, size_t src_len,
+                       int options)
+{
+       memset(reader, 0, sizeof(*reader));
+
+       reader->fd = fd;
+       reader->src_buffer = src_buffer;
+       reader->src_len = src_len;
+       reader->buffer = packet_buffer;
+       reader->buffer_size = sizeof(packet_buffer);
+       reader->options = options;
+}
+
+enum packet_read_status packet_reader_read(struct packet_reader *reader)
+{
+       if (reader->line_peeked) {
+               reader->line_peeked = 0;
+               return reader->status;
+       }
+
+       reader->status = packet_read_with_status(reader->fd,
+                                                &reader->src_buffer,
+                                                &reader->src_len,
+                                                reader->buffer,
+                                                reader->buffer_size,
+                                                &reader->pktlen,
+                                                reader->options);
+
+       if (reader->status == PACKET_READ_NORMAL)
+               reader->line = reader->buffer;
+       else
+               reader->line = NULL;
+
+       return reader->status;
+}
+
+enum packet_read_status packet_reader_peek(struct packet_reader *reader)
+{
+       /* Only allow peeking a single line */
+       if (reader->line_peeked)
+               return reader->status;
+
+       /* Peek a line by reading it and setting peeked flag */
+       packet_reader_read(reader);
+       reader->line_peeked = 1;
+       return reader->status;
+}
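
To make the new "0001" packets above easier to follow: every pkt-line starts with a four-hex-digit length that counts itself, and the values 0000 and 0001 are reserved as the flush and (new, for protocol v2) delimiter packets rather than real lengths. A standalone sketch of that framing with a hypothetical request (not Git's code):

/*
 * Standalone sketch of pkt-line framing, not Git's implementation.
 * A data packet is "XXXX" + payload, where XXXX is payload length + 4
 * in lowercase hex; "0000" is a flush packet, "0001" a delimiter.
 */
#include <stdio.h>
#include <string.h>

static void write_pkt(const char *payload)
{
	printf("%04x%s", (unsigned)(strlen(payload) + 4), payload);
}

int main(void)
{
	write_pkt("command=ls-refs\n");	/* hypothetical v2-style request */
	fputs("0001", stdout);		/* delimiter between sections */
	write_pkt("peel\n");
	fputs("0000", stdout);		/* flush ends the request */
	putchar('\n');
	return 0;
}
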
index 3dad583e2d02264c4a831c939ae0e13a54de2ff6..5b28d43472db41a59f0a44845953f163748593b0 100644 (file)
  * side can't, we stay with pure read/write interfaces.
  */
 void packet_flush(int fd);
+void packet_delim(int fd);
 void packet_write_fmt(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
 void packet_buf_flush(struct strbuf *buf);
+void packet_buf_delim(struct strbuf *buf);
 void packet_write(int fd_out, const char *buf, size_t size);
 void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
+void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len);
 int packet_flush_gently(int fd);
 int packet_write_fmt_gently(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
 int write_packetized_from_fd(int fd_in, int fd_out);
@@ -65,6 +68,23 @@ int write_packetized_from_buf(const char *src_in, size_t len, int fd_out);
 int packet_read(int fd, char **src_buffer, size_t *src_len, char
                *buffer, unsigned size, int options);
 
+/*
+ * Read a packetized line into a buffer like the 'packet_read()' function, but
+ * return an 'enum packet_read_status' which indicates the status of the read.
+ * The number of bytes read will be assigned to *pktlen if the status of the
+ * read was 'PACKET_READ_NORMAL'.
+ */
+enum packet_read_status {
+       PACKET_READ_EOF,
+       PACKET_READ_NORMAL,
+       PACKET_READ_FLUSH,
+       PACKET_READ_DELIM,
+};
+enum packet_read_status packet_read_with_status(int fd, char **src_buffer,
+                                               size_t *src_len, char *buffer,
+                                               unsigned size, int *pktlen,
+                                               int options);
+
 /*
  * Convenience wrapper for packet_read that is not gentle, and sets the
  * CHOMP_NEWLINE option. The return value is NULL for a flush packet,
@@ -96,6 +116,64 @@ char *packet_read_line_buf(char **src_buf, size_t *src_len, int *size);
  */
 ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out);
 
+struct packet_reader {
+       /* source file descriptor */
+       int fd;
+
+       /* source buffer and its size */
+       char *src_buffer;
+       size_t src_len;
+
+       /* buffer that pkt-lines are read into and its size */
+       char *buffer;
+       unsigned buffer_size;
+
+       /* options to be used during reads */
+       int options;
+
+       /* status of the last read */
+       enum packet_read_status status;
+
+       /* length of data read during the last read */
+       int pktlen;
+
+       /* the last line read */
+       const char *line;
+
+       /* indicates if a line has been peeked */
+       int line_peeked;
+};
+
+/*
+ * Initialize a 'struct packet_reader' object which is an
+ * abstraction around the 'packet_read_with_status()' function.
+ */
+extern void packet_reader_init(struct packet_reader *reader, int fd,
+                              char *src_buffer, size_t src_len,
+                              int options);
+
+/*
+ * Perform a packet read and return the status of the read.
+ * The values of 'pktlen' and 'line' are updated based on the status of the
+ * read as follows:
+ *
+ * PACKET_READ_EOF: 'pktlen' is set to '-1' and 'line' is set to NULL
+ * PACKET_READ_NORMAL: 'pktlen' is set to the number of bytes read
+ *                    'line' is set to point at the read line
+ * PACKET_READ_FLUSH: 'pktlen' is set to '0' and 'line' is set to NULL
+ */
+extern enum packet_read_status packet_reader_read(struct packet_reader *reader);
+
+/*
+ * Peek the next packet line without consuming it and return the status.
+ * The next call to 'packet_reader_read()' will perform a read of the same line
+ * that was peeked, consuming the line.
+ *
+ * Peeking multiple times without calling 'packet_reader_read()' will return
+ * the same result.
+ */
+extern enum packet_read_status packet_reader_peek(struct packet_reader *reader);
+
 #define DEFAULT_PACKET_MAX 1000
 #define LARGE_PACKET_MAX 65520
 #define LARGE_PACKET_DATA_MAX (LARGE_PACKET_MAX - 4)
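
A hedged sketch of how the reader declared above is meant to be driven; it assumes compilation inside the Git tree (for pkt-line.h and the read-option flags), and the dumping loop itself is hypothetical:

/*
 * Sketch only: iterate over one pkt-line request using the packet_reader
 * API above. The peek returns the same line the next read will consume.
 */
#include "cache.h"
#include "pkt-line.h"

static void dump_request(int fd)
{
	struct packet_reader reader;

	packet_reader_init(&reader, fd, NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE |
			   PACKET_READ_GENTLE_ON_EOF);

	if (packet_reader_peek(&reader) == PACKET_READ_NORMAL)
		fprintf(stderr, "first line: %s\n", reader.line);

	for (;;) {
		switch (packet_reader_read(&reader)) {
		case PACKET_READ_NORMAL:
			fprintf(stderr, "data: %s\n", reader.line);
			break;
		case PACKET_READ_DELIM:
			fprintf(stderr, "-- delimiter --\n");
			break;
		case PACKET_READ_FLUSH:
			fprintf(stderr, "-- flush --\n");
			return;
		case PACKET_READ_EOF:
			return;
		}
	}
}
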
index f7ce4902301490d73bdd79bd396cf7bbe5f893ea..703fa6ff7bf297e9d0dd91586f951f715032e06a 100644 (file)
--- a/pretty.c
+++ b/pretty.c
@@ -549,7 +549,7 @@ static void add_merge_info(const struct pretty_print_context *pp,
                struct object_id *oidp = &parent->item->object.oid;
                strbuf_addch(sb, ' ');
                if (pp->abbrev)
-                       strbuf_add_unique_abbrev(sb, oidp->hash, pp->abbrev);
+                       strbuf_add_unique_abbrev(sb, oidp, pp->abbrev);
                else
                        strbuf_addstr(sb, oid_to_hex(oidp));
                parent = parent->next;
@@ -1156,15 +1156,16 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
                return 1;
        case 'h':               /* abbreviated commit hash */
                strbuf_addstr(sb, diff_get_color(c->auto_color, DIFF_COMMIT));
-               strbuf_add_unique_abbrev(sb, commit->object.oid.hash,
+               strbuf_add_unique_abbrev(sb, &commit->object.oid,
                                         c->pretty_ctx->abbrev);
                strbuf_addstr(sb, diff_get_color(c->auto_color, DIFF_RESET));
                return 1;
        case 'T':               /* tree hash */
-               strbuf_addstr(sb, oid_to_hex(&commit->tree->object.oid));
+               strbuf_addstr(sb, oid_to_hex(get_commit_tree_oid(commit)));
                return 1;
        case 't':               /* abbreviated tree hash */
-               strbuf_add_unique_abbrev(sb, commit->tree->object.oid.hash,
+               strbuf_add_unique_abbrev(sb,
+                                        get_commit_tree_oid(commit),
                                         c->pretty_ctx->abbrev);
                return 1;
        case 'P':               /* parent hashes */
@@ -1178,7 +1179,7 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
                for (p = commit->parents; p; p = p->next) {
                        if (p != commit->parents)
                                strbuf_addch(sb, ' ');
-                       strbuf_add_unique_abbrev(sb, p->item->object.oid.hash,
+                       strbuf_add_unique_abbrev(sb, &p->item->object.oid,
                                                 c->pretty_ctx->abbrev);
                }
                return 1;
index 43012b7eb6e18bc48d93e6b96a3f3f57fe8ef79f..5e636785d14f8ef5283561d4606fface080bf3c0 100644 (file)
@@ -8,6 +8,8 @@ static enum protocol_version parse_protocol_version(const char *value)
                return protocol_v0;
        else if (!strcmp(value, "1"))
                return protocol_v1;
+       else if (!strcmp(value, "2"))
+               return protocol_v2;
        else
                return protocol_unknown_version;
 }
index 1b2bc94a8d9f3c008bb76a0dfcdd640f99756fe3..2ad35e433c1e6f5f0d08c9d7500dc35782429f51 100644 (file)
@@ -5,6 +5,7 @@ enum protocol_version {
        protocol_unknown_version = -1,
        protocol_v0 = 0,
        protocol_v1 = 1,
+       protocol_v2 = 2,
 };
 
 /*
index 191ebe3e6a99d26913a510967a9545f53db58bc8..ffb976c33c6936a7b178c7c478bcf4cd2840d472 100644 (file)
@@ -11,6 +11,7 @@
 #include "list-objects.h"
 #include "packfile.h"
 #include "worktree.h"
+#include "object-store.h"
 
 struct connectivity_progress {
        struct progress *progress;
@@ -77,7 +78,7 @@ static void add_recent_object(const struct object_id *oid,
         * later processing, and the revision machinery expects
         * commits and tags to have been parsed.
         */
-       type = sha1_object_info(oid->hash, NULL);
+       type = oid_object_info(the_repository, oid, NULL);
        if (type < 0)
                die("unable to get object info for %s", oid_to_hex(oid));
 
index 59a73f4a81d76a19b8a2280e9643f7c1e715a5d4..fa3df2e72e0a520b2bc1ad5fdcb93beed445bfa0 100644 (file)
@@ -185,7 +185,7 @@ static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
        if (strbuf_readlink(&sb, ce->name, expected_size))
                return -1;
 
-       buffer = read_sha1_file(ce->oid.hash, &type, &size);
+       buffer = read_object_file(&ce->oid, &type, &size);
        if (buffer) {
                if (size == sb.len)
                        match = memcmp(buffer, sb.buf, size);
@@ -2268,7 +2268,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
 
        if (!istate->version) {
                istate->version = get_index_format_default();
-               if (getenv("GIT_TEST_SPLIT_INDEX"))
+               if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0))
                        init_split_index(istate);
        }
 
@@ -2559,7 +2559,7 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock,
                goto out;
        }
 
-       if (getenv("GIT_TEST_SPLIT_INDEX")) {
+       if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0)) {
                int v = si->base_sha1[0];
                if ((v & 15) < 6)
                        istate->cache_changed |= SPLIT_INDEX_ORDERED;
@@ -2693,7 +2693,7 @@ void *read_blob_data_from_index(const struct index_state *istate,
        }
        if (pos < 0)
                return NULL;
-       data = read_sha1_file(istate->cache[pos]->oid.hash, &type, &sz);
+       data = read_object_file(&istate->cache[pos]->oid, &type, &sz);
        if (!data || type != OBJ_BLOB) {
                free(data);
                return NULL;
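
The GIT_TEST_SPLIT_INDEX hunks above switch from "is the variable set at all" to "does the variable parse as true", so exporting the variable as false or 0 no longer flips the split-index code path on. A standalone sketch of the difference (env_bool here is a simplified stand-in, not Git's git_env_bool()):

/*
 * Standalone sketch: plain getenv() treats any set value as truthy, while
 * parsing the value as a boolean honours "false"/"0". Simplified; Git's
 * real helper accepts more spellings and rejects garbage values.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

static int env_bool(const char *name, int def)
{
	const char *v = getenv(name);

	if (!v)
		return def;
	if (!strcmp(v, "0") || !strcasecmp(v, "false") || !strcasecmp(v, "no"))
		return 0;
	return 1;
}

int main(void)
{
	setenv("GIT_TEST_SPLIT_INDEX", "false", 1);

	printf("getenv():   %s\n",
	       getenv("GIT_TEST_SPLIT_INDEX") ? "enabled" : "disabled");
	printf("env_bool(): %s\n",
	       env_bool("GIT_TEST_SPLIT_INDEX", 0) ? "enabled" : "disabled");
	return 0;
}
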
index 45fc56216aaa8fd084a10514f6f4912878ab1627..dba826e71803d44d648ecdcf793bd01e55ecb830 100644 (file)
@@ -101,22 +101,38 @@ static struct used_atom {
 } *used_atom;
 static int used_atom_cnt, need_tagged, need_symref;
 
-static void color_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *color_value)
+/*
+ * Expand string, append it to strbuf *sb, then return error code ret.
+ * Allows callers to save a few lines of code.
+ */
+static int strbuf_addf_ret(struct strbuf *sb, int ret, const char *fmt, ...)
+{
+       va_list ap;
+       va_start(ap, fmt);
+       strbuf_vaddf(sb, fmt, ap);
+       va_end(ap);
+       return ret;
+}
+
+static int color_atom_parser(const struct ref_format *format, struct used_atom *atom,
+                            const char *color_value, struct strbuf *err)
 {
        if (!color_value)
-               die(_("expected format: %%(color:<color>)"));
+               return strbuf_addf_ret(err, -1, _("expected format: %%(color:<color>)"));
        if (color_parse(color_value, atom->u.color) < 0)
-               die(_("unrecognized color: %%(color:%s)"), color_value);
+               return strbuf_addf_ret(err, -1, _("unrecognized color: %%(color:%s)"),
+                                      color_value);
        /*
         * We check this after we've parsed the color, which lets us complain
         * about syntactically bogus color names even if they won't be used.
         */
        if (!want_color(format->use_color))
                color_parse("", atom->u.color);
+       return 0;
 }
 
-static void refname_atom_parser_internal(struct refname_atom *atom,
-                                        const char *arg, const char *name)
+static int refname_atom_parser_internal(struct refname_atom *atom, const char *arg,
+                                        const char *name, struct strbuf *err)
 {
        if (!arg)
                atom->option = R_NORMAL;
@@ -126,16 +142,18 @@ static void refname_atom_parser_internal(struct refname_atom *atom,
                 skip_prefix(arg, "strip=", &arg)) {
                atom->option = R_LSTRIP;
                if (strtol_i(arg, 10, &atom->lstrip))
-                       die(_("Integer value expected refname:lstrip=%s"), arg);
+                       return strbuf_addf_ret(err, -1, _("Integer value expected refname:lstrip=%s"), arg);
        } else if (skip_prefix(arg, "rstrip=", &arg)) {
                atom->option = R_RSTRIP;
                if (strtol_i(arg, 10, &atom->rstrip))
-                       die(_("Integer value expected refname:rstrip=%s"), arg);
+                       return strbuf_addf_ret(err, -1, _("Integer value expected refname:rstrip=%s"), arg);
        } else
-               die(_("unrecognized %%(%s) argument: %s"), name, arg);
+               return strbuf_addf_ret(err, -1, _("unrecognized %%(%s) argument: %s"), name, arg);
+       return 0;
 }
 
-static void remote_ref_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int remote_ref_atom_parser(const struct ref_format *format, struct used_atom *atom,
+                                 const char *arg, struct strbuf *err)
 {
        struct string_list params = STRING_LIST_INIT_DUP;
        int i;
@@ -145,9 +163,8 @@ static void remote_ref_atom_parser(const struct ref_format *format, struct used_
 
        if (!arg) {
                atom->u.remote_ref.option = RR_REF;
-               refname_atom_parser_internal(&atom->u.remote_ref.refname,
-                                            arg, atom->name);
-               return;
+               return refname_atom_parser_internal(&atom->u.remote_ref.refname,
+                                                   arg, atom->name, err);
        }
 
        atom->u.remote_ref.nobracket = 0;
@@ -170,29 +187,38 @@ static void remote_ref_atom_parser(const struct ref_format *format, struct used_
                        atom->u.remote_ref.push_remote = 1;
                } else {
                        atom->u.remote_ref.option = RR_REF;
-                       refname_atom_parser_internal(&atom->u.remote_ref.refname,
-                                                    arg, atom->name);
+                       if (refname_atom_parser_internal(&atom->u.remote_ref.refname,
+                                                        arg, atom->name, err)) {
+                               string_list_clear(&params, 0);
+                               return -1;
+                       }
                }
        }
 
        string_list_clear(&params, 0);
+       return 0;
 }
 
-static void body_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int body_atom_parser(const struct ref_format *format, struct used_atom *atom,
+                           const char *arg, struct strbuf *err)
 {
        if (arg)
-               die(_("%%(body) does not take arguments"));
+               return strbuf_addf_ret(err, -1, _("%%(body) does not take arguments"));
        atom->u.contents.option = C_BODY_DEP;
+       return 0;
 }
 
-static void subject_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int subject_atom_parser(const struct ref_format *format, struct used_atom *atom,
+                              const char *arg, struct strbuf *err)
 {
        if (arg)
-               die(_("%%(subject) does not take arguments"));
+               return strbuf_addf_ret(err, -1, _("%%(subject) does not take arguments"));
        atom->u.contents.option = C_SUB;
+       return 0;
 }
 
-static void trailers_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int trailers_atom_parser(const struct ref_format *format, struct used_atom *atom,
+                               const char *arg, struct strbuf *err)
 {
        struct string_list params = STRING_LIST_INIT_DUP;
        int i;
@@ -205,15 +231,20 @@ static void trailers_atom_parser(const struct ref_format *format, struct used_at
                                atom->u.contents.trailer_opts.unfold = 1;
                        else if (!strcmp(s, "only"))
                                atom->u.contents.trailer_opts.only_trailers = 1;
-                       else
-                               die(_("unknown %%(trailers) argument: %s"), s);
+                       else {
+                               strbuf_addf(err, _("unknown %%(trailers) argument: %s"), s);
+                               string_list_clear(&params, 0);
+                               return -1;
+                       }
                }
        }
        atom->u.contents.option = C_TRAILERS;
        string_list_clear(&params, 0);
+       return 0;
 }
 
-static void contents_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int contents_atom_parser(const struct ref_format *format, struct used_atom *atom,
+                               const char *arg, struct strbuf *err)
 {
        if (!arg)
                atom->u.contents.option = C_BARE;
@@ -225,16 +256,19 @@ static void contents_atom_parser(const struct ref_format *format, struct used_at
                atom->u.contents.option = C_SUB;
        else if (skip_prefix(arg, "trailers", &arg)) {
                skip_prefix(arg, ":", &arg);
-               trailers_atom_parser(format, atom, *arg ? arg : NULL);
+               if (trailers_atom_parser(format, atom, *arg ? arg : NULL, err))
+                       return -1;
        } else if (skip_prefix(arg, "lines=", &arg)) {
                atom->u.contents.option = C_LINES;
                if (strtoul_ui(arg, 10, &atom->u.contents.nlines))
-                       die(_("positive value expected contents:lines=%s"), arg);
+                       return strbuf_addf_ret(err, -1, _("positive value expected contents:lines=%s"), arg);
        } else
-               die(_("unrecognized %%(contents) argument: %s"), arg);
+               return strbuf_addf_ret(err, -1, _("unrecognized %%(contents) argument: %s"), arg);
+       return 0;
 }
 
-static void objectname_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int objectname_atom_parser(const struct ref_format *format, struct used_atom *atom,
+                                 const char *arg, struct strbuf *err)
 {
        if (!arg)
                atom->u.objectname.option = O_FULL;
@@ -244,16 +278,18 @@ static void objectname_atom_parser(const struct ref_format *format, struct used_
                atom->u.objectname.option = O_LENGTH;
                if (strtoul_ui(arg, 10, &atom->u.objectname.length) ||
                    atom->u.objectname.length == 0)
-                       die(_("positive value expected objectname:short=%s"), arg);
+                       return strbuf_addf_ret(err, -1, _("positive value expected objectname:short=%s"), arg);
                if (atom->u.objectname.length < MINIMUM_ABBREV)
                        atom->u.objectname.length = MINIMUM_ABBREV;
        } else
-               die(_("unrecognized %%(objectname) argument: %s"), arg);
+               return strbuf_addf_ret(err, -1, _("unrecognized %%(objectname) argument: %s"), arg);
+       return 0;
 }
 
-static void refname_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int refname_atom_parser(const struct ref_format *format, struct used_atom *atom,
+                              const char *arg, struct strbuf *err)
 {
-       refname_atom_parser_internal(&atom->u.refname, arg, atom->name);
+       return refname_atom_parser_internal(&atom->u.refname, arg, atom->name, err);
 }
 
 static align_type parse_align_position(const char *s)
@@ -267,7 +303,8 @@ static align_type parse_align_position(const char *s)
        return -1;
 }
 
-static void align_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int align_atom_parser(const struct ref_format *format, struct used_atom *atom,
+                            const char *arg, struct strbuf *err)
 {
        struct align *align = &atom->u.align;
        struct string_list params = STRING_LIST_INIT_DUP;
@@ -275,7 +312,7 @@ static void align_atom_parser(const struct ref_format *format, struct used_atom
        unsigned int width = ~0U;
 
        if (!arg)
-               die(_("expected format: %%(align:<width>,<position>)"));
+               return strbuf_addf_ret(err, -1, _("expected format: %%(align:<width>,<position>)"));
 
        align->position = ALIGN_LEFT;
 
@@ -286,49 +323,65 @@ static void align_atom_parser(const struct ref_format *format, struct used_atom
 
                if (skip_prefix(s, "position=", &s)) {
                        position = parse_align_position(s);
-                       if (position < 0)
-                               die(_("unrecognized position:%s"), s);
+                       if (position < 0) {
+                               strbuf_addf(err, _("unrecognized position:%s"), s);
+                               string_list_clear(&params, 0);
+                               return -1;
+                       }
                        align->position = position;
                } else if (skip_prefix(s, "width=", &s)) {
-                       if (strtoul_ui(s, 10, &width))
-                               die(_("unrecognized width:%s"), s);
+                       if (strtoul_ui(s, 10, &width)) {
+                               strbuf_addf(err, _("unrecognized width:%s"), s);
+                               string_list_clear(&params, 0);
+                               return -1;
+                       }
                } else if (!strtoul_ui(s, 10, &width))
                        ;
                else if ((position = parse_align_position(s)) >= 0)
                        align->position = position;
-               else
-                       die(_("unrecognized %%(align) argument: %s"), s);
+               else {
+                       strbuf_addf(err, _("unrecognized %%(align) argument: %s"), s);
+                       string_list_clear(&params, 0);
+                       return -1;
+               }
        }
 
-       if (width == ~0U)
-               die(_("positive width expected with the %%(align) atom"));
+       if (width == ~0U) {
+               string_list_clear(&params, 0);
+               return strbuf_addf_ret(err, -1, _("positive width expected with the %%(align) atom"));
+       }
        align->width = width;
        string_list_clear(&params, 0);
+       return 0;
 }
 
-static void if_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int if_atom_parser(const struct ref_format *format, struct used_atom *atom,
+                         const char *arg, struct strbuf *err)
 {
        if (!arg) {
                atom->u.if_then_else.cmp_status = COMPARE_NONE;
-               return;
+               return 0;
        } else if (skip_prefix(arg, "equals=", &atom->u.if_then_else.str)) {
                atom->u.if_then_else.cmp_status = COMPARE_EQUAL;
        } else if (skip_prefix(arg, "notequals=", &atom->u.if_then_else.str)) {
                atom->u.if_then_else.cmp_status = COMPARE_UNEQUAL;
-       } else {
-               die(_("unrecognized %%(if) argument: %s"), arg);
-       }
+       } else
+               return strbuf_addf_ret(err, -1, _("unrecognized %%(if) argument: %s"), arg);
+       return 0;
 }
 
-static void head_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int head_atom_parser(const struct ref_format *format, struct used_atom *atom,
+                           const char *arg, struct strbuf *unused_err)
 {
        atom->u.head = resolve_refdup("HEAD", RESOLVE_REF_READING, NULL, NULL);
+       return 0;
 }
 
 static struct {
        const char *name;
        cmp_type cmp_type;
-       void (*parser)(const struct ref_format *format, struct used_atom *atom, const char *arg);
+       int (*parser)(const struct ref_format *format, struct used_atom *atom,
+                     const char *arg, struct strbuf *err);
 } valid_atom[] = {
        { "refname" , FIELD_STR, refname_atom_parser },
        { "objecttype" },
@@ -387,7 +440,8 @@ struct ref_formatting_state {
 
 struct atom_value {
        const char *s;
-       void (*handler)(struct atom_value *atomv, struct ref_formatting_state *state);
+       int (*handler)(struct atom_value *atomv, struct ref_formatting_state *state,
+                      struct strbuf *err);
        uintmax_t value; /* used for sorting when not FIELD_STR */
        struct used_atom *atom;
 };
@@ -396,7 +450,8 @@ struct atom_value {
  * Used to parse format string and sort specifiers
  */
 static int parse_ref_filter_atom(const struct ref_format *format,
-                                const char *atom, const char *ep)
+                                const char *atom, const char *ep,
+                                struct strbuf *err)
 {
        const char *sp;
        const char *arg;
@@ -406,7 +461,8 @@ static int parse_ref_filter_atom(const struct ref_format *format,
        if (*sp == '*' && sp < ep)
                sp++; /* deref */
        if (ep <= sp)
-               die(_("malformed field name: %.*s"), (int)(ep-atom), atom);
+               return strbuf_addf_ret(err, -1, _("malformed field name: %.*s"),
+                                      (int)(ep-atom), atom);
 
        /* Do we have the atom already used elsewhere? */
        for (i = 0; i < used_atom_cnt; i++) {
@@ -432,7 +488,8 @@ static int parse_ref_filter_atom(const struct ref_format *format,
        }
 
        if (ARRAY_SIZE(valid_atom) <= i)
-               die(_("unknown field name: %.*s"), (int)(ep-atom), atom);
+               return strbuf_addf_ret(err, -1, _("unknown field name: %.*s"),
+                                      (int)(ep-atom), atom);
 
        /* Add it in, including the deref prefix */
        at = used_atom_cnt;
@@ -451,8 +508,8 @@ static int parse_ref_filter_atom(const struct ref_format *format,
                }
        }
        memset(&used_atom[at].u, 0, sizeof(used_atom[at].u));
-       if (valid_atom[i].parser)
-               valid_atom[i].parser(format, &used_atom[at], arg);
+       if (valid_atom[i].parser && valid_atom[i].parser(format, &used_atom[at], arg, err))
+               return -1;
        if (*atom == '*')
                need_tagged = 1;
        if (!strcmp(valid_atom[i].name, "symref"))
@@ -481,7 +538,8 @@ static void quote_formatting(struct strbuf *s, const char *str, int quote_style)
        }
 }
 
-static void append_atom(struct atom_value *v, struct ref_formatting_state *state)
+static int append_atom(struct atom_value *v, struct ref_formatting_state *state,
+                      struct strbuf *unused_err)
 {
        /*
         * Quote formatting is only done when the stack has a single
@@ -493,6 +551,7 @@ static void append_atom(struct atom_value *v, struct ref_formatting_state *state
                quote_formatting(&state->stack->output, v->s, state->quote_style);
        else
                strbuf_addstr(&state->stack->output, v->s);
+       return 0;
 }
 
 static void push_stack_element(struct ref_formatting_stack **stack)
@@ -527,7 +586,8 @@ static void end_align_handler(struct ref_formatting_stack **stack)
        strbuf_release(&s);
 }
 
-static void align_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
+static int align_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state,
+                             struct strbuf *unused_err)
 {
        struct ref_formatting_stack *new_stack;
 
@@ -535,6 +595,7 @@ static void align_atom_handler(struct atom_value *atomv, struct ref_formatting_s
        new_stack = state->stack;
        new_stack->at_end = end_align_handler;
        new_stack->at_end_data = &atomv->atom->u.align;
+       return 0;
 }
 
 static void if_then_else_handler(struct ref_formatting_stack **stack)
@@ -572,7 +633,8 @@ static void if_then_else_handler(struct ref_formatting_stack **stack)
        free(if_then_else);
 }
 
-static void if_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
+static int if_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state,
+                          struct strbuf *unused_err)
 {
        struct ref_formatting_stack *new_stack;
        struct if_then_else *if_then_else = xcalloc(sizeof(struct if_then_else), 1);
@@ -584,6 +646,7 @@ static void if_atom_handler(struct atom_value *atomv, struct ref_formatting_stat
        new_stack = state->stack;
        new_stack->at_end = if_then_else_handler;
        new_stack->at_end_data = if_then_else;
+       return 0;
 }
 
 static int is_empty(const char *s)
@@ -596,7 +659,8 @@ static int is_empty(const char *s)
        return 1;
 }
 
-static void then_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
+static int then_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state,
+                            struct strbuf *err)
 {
        struct ref_formatting_stack *cur = state->stack;
        struct if_then_else *if_then_else = NULL;
@@ -604,11 +668,11 @@ static void then_atom_handler(struct atom_value *atomv, struct ref_formatting_st
        if (cur->at_end == if_then_else_handler)
                if_then_else = (struct if_then_else *)cur->at_end_data;
        if (!if_then_else)
-               die(_("format: %%(then) atom used without an %%(if) atom"));
+               return strbuf_addf_ret(err, -1, _("format: %%(then) atom used without an %%(if) atom"));
        if (if_then_else->then_atom_seen)
-               die(_("format: %%(then) atom used more than once"));
+               return strbuf_addf_ret(err, -1, _("format: %%(then) atom used more than once"));
        if (if_then_else->else_atom_seen)
-               die(_("format: %%(then) atom used after %%(else)"));
+               return strbuf_addf_ret(err, -1, _("format: %%(then) atom used after %%(else)"));
        if_then_else->then_atom_seen = 1;
        /*
         * If the 'equals' or 'notequals' attribute is used then
@@ -624,9 +688,11 @@ static void then_atom_handler(struct atom_value *atomv, struct ref_formatting_st
        } else if (cur->output.len && !is_empty(cur->output.buf))
                if_then_else->condition_satisfied = 1;
        strbuf_reset(&cur->output);
+       return 0;
 }
 
-static void else_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
+static int else_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state,
+                            struct strbuf *err)
 {
        struct ref_formatting_stack *prev = state->stack;
        struct if_then_else *if_then_else = NULL;
@@ -634,24 +700,26 @@ static void else_atom_handler(struct atom_value *atomv, struct ref_formatting_st
        if (prev->at_end == if_then_else_handler)
                if_then_else = (struct if_then_else *)prev->at_end_data;
        if (!if_then_else)
-               die(_("format: %%(else) atom used without an %%(if) atom"));
+               return strbuf_addf_ret(err, -1, _("format: %%(else) atom used without an %%(if) atom"));
        if (!if_then_else->then_atom_seen)
-               die(_("format: %%(else) atom used without a %%(then) atom"));
+               return strbuf_addf_ret(err, -1, _("format: %%(else) atom used without a %%(then) atom"));
        if (if_then_else->else_atom_seen)
-               die(_("format: %%(else) atom used more than once"));
+               return strbuf_addf_ret(err, -1, _("format: %%(else) atom used more than once"));
        if_then_else->else_atom_seen = 1;
        push_stack_element(&state->stack);
        state->stack->at_end_data = prev->at_end_data;
        state->stack->at_end = prev->at_end;
+       return 0;
 }
 
-static void end_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
+static int end_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state,
+                           struct strbuf *err)
 {
        struct ref_formatting_stack *current = state->stack;
        struct strbuf s = STRBUF_INIT;
 
        if (!current->at_end)
-               die(_("format: %%(end) atom used without corresponding atom"));
+               return strbuf_addf_ret(err, -1, _("format: %%(end) atom used without corresponding atom"));
        current->at_end(&state->stack);
 
        /*  Stack may have been popped within at_end(), hence reset the current pointer */
@@ -668,6 +736,7 @@ static void end_atom_handler(struct atom_value *atomv, struct ref_formatting_sta
        }
        strbuf_release(&s);
        pop_stack_element(&state->stack);
+       return 0;
 }
 
 /*
@@ -702,17 +771,21 @@ int verify_ref_format(struct ref_format *format)
 
        format->need_color_reset_at_eol = 0;
        for (cp = format->format; *cp && (sp = find_next(cp)); ) {
+               struct strbuf err = STRBUF_INIT;
                const char *color, *ep = strchr(sp, ')');
                int at;
 
                if (!ep)
                        return error(_("malformed format string %s"), sp);
                /* sp points at "%(" and ep points at the closing ")" */
-               at = parse_ref_filter_atom(format, sp + 2, ep);
+               at = parse_ref_filter_atom(format, sp + 2, ep, &err);
+               if (at < 0)
+                       die("%s", err.buf);
                cp = ep + 1;
 
                if (skip_prefix(used_atom[at].name, "color:", &color))
                        format->need_color_reset_at_eol = !!strcmp(color, "reset");
+               strbuf_release(&err);
        }
        if (format->need_color_reset_at_eol && !want_color(format->use_color))
                format->need_color_reset_at_eol = 0;
@@ -728,7 +801,7 @@ int verify_ref_format(struct ref_format *format)
 static void *get_obj(const struct object_id *oid, struct object **obj, unsigned long *sz, int *eaten)
 {
        enum object_type type;
-       void *buf = read_sha1_file(oid->hash, &type, sz);
+       void *buf = read_object_file(oid, &type, sz);
 
        if (buf)
                *obj = parse_object_buffer(oid, type, *sz, buf, eaten);
@@ -737,18 +810,18 @@ static void *get_obj(const struct object_id *oid, struct object **obj, unsigned
        return buf;
 }
 
-static int grab_objectname(const char *name, const unsigned char *sha1,
+static int grab_objectname(const char *name, const struct object_id *oid,
                           struct atom_value *v, struct used_atom *atom)
 {
        if (starts_with(name, "objectname")) {
                if (atom->u.objectname.option == O_SHORT) {
-                       v->s = xstrdup(find_unique_abbrev(sha1, DEFAULT_ABBREV));
+                       v->s = xstrdup(find_unique_abbrev(oid, DEFAULT_ABBREV));
                        return 1;
                } else if (atom->u.objectname.option == O_FULL) {
-                       v->s = xstrdup(sha1_to_hex(sha1));
+                       v->s = xstrdup(oid_to_hex(oid));
                        return 1;
                } else if (atom->u.objectname.option == O_LENGTH) {
-                       v->s = xstrdup(find_unique_abbrev(sha1, atom->u.objectname.length));
+                       v->s = xstrdup(find_unique_abbrev(oid, atom->u.objectname.length));
                        return 1;
                } else
                        die("BUG: unknown %%(objectname) option");
@@ -775,7 +848,7 @@ static void grab_common_values(struct atom_value *val, int deref, struct object
                        v->s = xstrfmt("%lu", sz);
                }
                else if (deref)
-                       grab_objectname(name, obj->oid.hash, v, &used_atom[i]);
+                       grab_objectname(name, &obj->oid, v, &used_atom[i]);
        }
 }
 
@@ -815,7 +888,7 @@ static void grab_commit_values(struct atom_value *val, int deref, struct object
                if (deref)
                        name++;
                if (!strcmp(name, "tree")) {
-                       v->s = xstrdup(oid_to_hex(&commit->tree->object.oid));
+                       v->s = xstrdup(oid_to_hex(get_commit_tree_oid(commit)));
                }
                else if (!strcmp(name, "numparent")) {
                        v->value = commit_list_count(commit->parents);
@@ -1309,10 +1382,14 @@ char *get_head_description(void)
        memset(&state, 0, sizeof(state));
        wt_status_get_state(&state, 1);
        if (state.rebase_in_progress ||
-           state.rebase_interactive_in_progress)
-               strbuf_addf(&desc, _("(no branch, rebasing %s)"),
-                           state.branch);
-       else if (state.bisect_in_progress)
+           state.rebase_interactive_in_progress) {
+               if (state.branch)
+                       strbuf_addf(&desc, _("(no branch, rebasing %s)"),
+                                   state.branch);
+               else
+                       strbuf_addf(&desc, _("(no branch, rebasing detached HEAD %s)"),
+                                   state.detached_from);
+       } else if (state.bisect_in_progress)
                strbuf_addf(&desc, _("(no branch, bisect started on %s)"),
                            state.branch);
        else if (state.detached_from) {
@@ -1354,28 +1431,30 @@ static const char *get_refname(struct used_atom *atom, struct ref_array_item *re
        return show_ref(&atom->u.refname, ref->refname);
 }
 
-static void get_object(struct ref_array_item *ref, const struct object_id *oid,
-                      int deref, struct object **obj)
+static int get_object(struct ref_array_item *ref, const struct object_id *oid,
+                      int deref, struct object **obj, struct strbuf *err)
 {
        int eaten;
+       int ret = 0;
        unsigned long size;
        void *buf = get_obj(oid, obj, &size, &eaten);
        if (!buf)
-               die(_("missing object %s for %s"),
-                   oid_to_hex(oid), ref->refname);
-       if (!*obj)
-               die(_("parse_object_buffer failed on %s for %s"),
-                   oid_to_hex(oid), ref->refname);
-
-       grab_values(ref->value, deref, *obj, buf, size);
+               ret = strbuf_addf_ret(err, -1, _("missing object %s for %s"),
+                                     oid_to_hex(oid), ref->refname);
+       else if (!*obj)
+               ret = strbuf_addf_ret(err, -1, _("parse_object_buffer failed on %s for %s"),
+                                     oid_to_hex(oid), ref->refname);
+       else
+               grab_values(ref->value, deref, *obj, buf, size);
        if (!eaten)
                free(buf);
+       return ret;
 }
 
 /*
  * Parse the object referred by ref, and grab needed value.
  */
-static void populate_value(struct ref_array_item *ref)
+static int populate_value(struct ref_array_item *ref, struct strbuf *err)
 {
        struct object *obj;
        int i;
@@ -1455,7 +1534,7 @@ static void populate_value(struct ref_array_item *ref)
                                v->s = xstrdup(buf + 1);
                        }
                        continue;
-               } else if (!deref && grab_objectname(name, ref->objectname.hash, v, atom)) {
+               } else if (!deref && grab_objectname(name, &ref->objectname, v, atom)) {
                        continue;
                } else if (!strcmp(name, "HEAD")) {
                        if (atom->u.head && !strcmp(ref->refname, atom->u.head))
@@ -1497,16 +1576,17 @@ static void populate_value(struct ref_array_item *ref)
                        break;
        }
        if (used_atom_cnt <= i)
-               return;
+               return 0;
 
-       get_object(ref, &ref->objectname, 0, &obj);
+       if (get_object(ref, &ref->objectname, 0, &obj, err))
+               return -1;
 
        /*
         * If there is no atom that wants to know about tagged
         * object, we are done.
         */
        if (!need_tagged || (obj->type != OBJ_TAG))
-               return;
+               return 0;
 
        /*
         * If it is a tag object, see if we use a value that derefs
@@ -1520,20 +1600,23 @@ static void populate_value(struct ref_array_item *ref)
         * is not consistent with what deref_tag() does
         * which peels the onion to the core.
         */
-       get_object(ref, tagged, 1, &obj);
+       return get_object(ref, tagged, 1, &obj, err);
 }
 
 /*
  * Given a ref, return the value for the atom.  This lazily gets value
  * out of the object by calling populate value.
  */
-static void get_ref_atom_value(struct ref_array_item *ref, int atom, struct atom_value **v)
+static int get_ref_atom_value(struct ref_array_item *ref, int atom,
+                             struct atom_value **v, struct strbuf *err)
 {
        if (!ref->value) {
-               populate_value(ref);
+               if (populate_value(ref, err))
+                       return -1;
                fill_missing_values(ref->value);
        }
        *v = &ref->value[atom];
+       return 0;
 }
 
 /*
@@ -1824,15 +1907,30 @@ static const struct object_id *match_points_at(struct oid_array *points_at,
        return NULL;
 }
 
-/* Allocate space for a new ref_array_item and copy the objectname and flag to it */
+/*
+ * Allocate space for a new ref_array_item and copy the name and oid to it.
+ *
+ * Callers can then fill in other struct members at their leisure.
+ */
 static struct ref_array_item *new_ref_array_item(const char *refname,
-                                                const unsigned char *objectname,
-                                                int flag)
+                                                const struct object_id *oid)
 {
        struct ref_array_item *ref;
+
        FLEX_ALLOC_STR(ref, refname, refname);
-       hashcpy(ref->objectname.hash, objectname);
-       ref->flag = flag;
+       oidcpy(&ref->objectname, oid);
+
+       return ref;
+}
+
+struct ref_array_item *ref_array_push(struct ref_array *array,
+                                     const char *refname,
+                                     const struct object_id *oid)
+{
+       struct ref_array_item *ref = new_ref_array_item(refname, oid);
+
+       ALLOC_GROW(array->items, array->nr + 1, array->alloc);
+       array->items[array->nr++] = ref;
 
        return ref;
 }
@@ -1927,12 +2025,11 @@ static int ref_filter_handler(const char *refname, const struct object_id *oid,
         * to do its job and the resulting list may yet to be pruned
         * by maxcount logic.
         */
-       ref = new_ref_array_item(refname, oid->hash, flag);
+       ref = ref_array_push(ref_cbdata->array, refname, oid);
        ref->commit = commit;
-
-       REALLOC_ARRAY(ref_cbdata->array->items, ref_cbdata->array->nr + 1);
-       ref_cbdata->array->items[ref_cbdata->array->nr++] = ref;
+       ref->flag = flag;
        ref->kind = kind;
+
        return 0;
 }
 
@@ -2057,9 +2154,13 @@ static int cmp_ref_sorting(struct ref_sorting *s, struct ref_array_item *a, stru
        int cmp;
        cmp_type cmp_type = used_atom[s->atom].type;
        int (*cmp_fn)(const char *, const char *);
+       struct strbuf err = STRBUF_INIT;
 
-       get_ref_atom_value(a, s->atom, &va);
-       get_ref_atom_value(b, s->atom, &vb);
+       if (get_ref_atom_value(a, s->atom, &va, &err))
+               die("%s", err.buf);
+       if (get_ref_atom_value(b, s->atom, &vb, &err))
+               die("%s", err.buf);
+       strbuf_release(&err);
        cmp_fn = s->ignore_case ? strcasecmp : strcmp;
        if (s->version)
                cmp = versioncmp(va->s, vb->s);
@@ -2118,9 +2219,10 @@ static void append_literal(const char *cp, const char *ep, struct ref_formatting
        }
 }
 
-void format_ref_array_item(struct ref_array_item *info,
+int format_ref_array_item(struct ref_array_item *info,
                           const struct ref_format *format,
-                          struct strbuf *final_buf)
+                          struct strbuf *final_buf,
+                          struct strbuf *error_buf)
 {
        const char *cp, *sp, *ep;
        struct ref_formatting_state state = REF_FORMATTING_STATE_INIT;
@@ -2130,14 +2232,17 @@ void format_ref_array_item(struct ref_array_item *info,
 
        for (cp = format->format; *cp && (sp = find_next(cp)); cp = ep + 1) {
                struct atom_value *atomv;
+               int pos;
 
                ep = strchr(sp, ')');
                if (cp < sp)
                        append_literal(cp, sp, &state);
-               get_ref_atom_value(info,
-                                  parse_ref_filter_atom(format, sp + 2, ep),
-                                  &atomv);
-               atomv->handler(atomv, &state);
+               pos = parse_ref_filter_atom(format, sp + 2, ep, error_buf);
+               if (pos < 0 || get_ref_atom_value(info, pos, &atomv, error_buf) ||
+                   atomv->handler(atomv, &state, error_buf)) {
+                       pop_stack_element(&state.stack);
+                       return -1;
+               }
        }
        if (*cp) {
                sp = cp + strlen(cp);
@@ -2146,30 +2251,39 @@ void format_ref_array_item(struct ref_array_item *info,
        if (format->need_color_reset_at_eol) {
                struct atom_value resetv;
                resetv.s = GIT_COLOR_RESET;
-               append_atom(&resetv, &state);
+               if (append_atom(&resetv, &state, error_buf)) {
+                       pop_stack_element(&state.stack);
+                       return -1;
+               }
+       }
+       if (state.stack->prev) {
+               pop_stack_element(&state.stack);
+               return strbuf_addf_ret(error_buf, -1, _("format: %%(end) atom missing"));
        }
-       if (state.stack->prev)
-               die(_("format: %%(end) atom missing"));
        strbuf_addbuf(final_buf, &state.stack->output);
        pop_stack_element(&state.stack);
+       return 0;
 }
 
 void show_ref_array_item(struct ref_array_item *info,
                         const struct ref_format *format)
 {
        struct strbuf final_buf = STRBUF_INIT;
+       struct strbuf error_buf = STRBUF_INIT;
 
-       format_ref_array_item(info, format, &final_buf);
+       if (format_ref_array_item(info, format, &final_buf, &error_buf))
+               die("%s", error_buf.buf);
        fwrite(final_buf.buf, 1, final_buf.len, stdout);
+       strbuf_release(&error_buf);
        strbuf_release(&final_buf);
        putchar('\n');
 }
 
-void pretty_print_ref(const char *name, const unsigned char *sha1,
+void pretty_print_ref(const char *name, const struct object_id *oid,
                      const struct ref_format *format)
 {
        struct ref_array_item *ref_item;
-       ref_item = new_ref_array_item(name, sha1, 0);
+       ref_item = new_ref_array_item(name, oid);
        ref_item->kind = ref_kind_from_refname(name);
        show_ref_array_item(ref_item, format);
        free_array_item(ref_item);
@@ -2183,7 +2297,12 @@ static int parse_sorting_atom(const char *atom)
         */
        struct ref_format dummy = REF_FORMAT_INIT;
        const char *end = atom + strlen(atom);
-       return parse_ref_filter_atom(&dummy, atom, end);
+       struct strbuf err = STRBUF_INIT;
+       int res = parse_ref_filter_atom(&dummy, atom, end, &err);
+       if (res < 0)
+               die("%s", err.buf);
+       strbuf_release(&err);
+       return res;
 }
 
 /*  If no sorting option is given, use refname to sort as default */
diff --git a/ref-filter.h b/ref-filter.h
index 0d98342b343196387c0f4e2dcd5978a9361d8edb..85c8ebc3b904e9b44bed8b164b9cdf62839d6dae 100644 (file)
@@ -110,9 +110,10 @@ int verify_ref_format(struct ref_format *format);
 /*  Sort the given ref_array as per the ref_sorting provided */
 void ref_array_sort(struct ref_sorting *sort, struct ref_array *array);
 /*  Based on the given format and quote_style, fill the strbuf */
-void format_ref_array_item(struct ref_array_item *info,
-                          const struct ref_format *format,
-                          struct strbuf *final_buf);
+int format_ref_array_item(struct ref_array_item *info,
+                         const struct ref_format *format,
+                         struct strbuf *final_buf,
+                         struct strbuf *error_buf);
 /*  Print the ref using the given format and quote_style */
 void show_ref_array_item(struct ref_array_item *info, const struct ref_format *format);
 /*  Parse a single sort specifier and add it to the list */
@@ -132,7 +133,15 @@ void setup_ref_filter_porcelain_msg(void);
  * Print a single ref, outside of any ref-filter. Note that the
  * name must be a fully qualified refname.
  */
-void pretty_print_ref(const char *name, const unsigned char *sha1,
+void pretty_print_ref(const char *name, const struct object_id *oid,
                      const struct ref_format *format);
 
+/*
+ * Push a single ref onto the array; this can be used to construct your own
+ * ref_array without using filter_refs().
+ */
+struct ref_array_item *ref_array_push(struct ref_array *array,
+                                     const char *refname,
+                                     const struct object_id *oid);
+
 #endif /*  REF_FILTER_H  */
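
ref_array_push() is the piece that lets callers outside ref-filter build a ref_array by hand instead of going through filter_refs(). A rough usage sketch, assuming the caller already has an object id in `oid`; the refname and kind below are made up for illustration:

        struct ref_array array = { 0 };
        struct ref_format format = REF_FORMAT_INIT;
        struct ref_array_item *item;

        format.format = "%(refname) %(objectname:short)";
        if (verify_ref_format(&format))
                die("bad format");

        item = ref_array_push(&array, "refs/heads/topic", &oid);
        item->kind = FILTER_REFS_BRANCHES;

        show_ref_array_item(item, &format);
        ref_array_clear(&array);
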
diff --git a/refs.c b/refs.c
index 20ba82b4343ff2ef72cea32deec8a8d7fbd6def7..1f31e6cf00dcb17c1fdf24a98e9575dbd1ba62fc 100644 (file)
--- a/refs.c
+++ b/refs.c
@@ -13,6 +13,8 @@
 #include "tag.h"
 #include "submodule.h"
 #include "worktree.h"
+#include "argv-array.h"
+#include "repository.h"
 
 /*
  * List of all available backends
@@ -206,7 +208,7 @@ char *refs_resolve_refdup(struct ref_store *refs,
 char *resolve_refdup(const char *refname, int resolve_flags,
                     struct object_id *oid, int *flags)
 {
-       return refs_resolve_refdup(get_main_ref_store(),
+       return refs_resolve_refdup(get_main_ref_store(the_repository),
                                   refname, resolve_flags,
                                   oid, flags);
 }
@@ -228,7 +230,7 @@ int refs_read_ref_full(struct ref_store *refs, const char *refname,
 
 int read_ref_full(const char *refname, int resolve_flags, struct object_id *oid, int *flags)
 {
-       return refs_read_ref_full(get_main_ref_store(), refname,
+       return refs_read_ref_full(get_main_ref_store(the_repository), refname,
                                  resolve_flags, oid, flags);
 }
 
@@ -301,7 +303,7 @@ enum peel_status peel_object(const struct object_id *name, struct object_id *oid
        struct object *o = lookup_unknown_object(name->hash);
 
        if (o->type == OBJ_NONE) {
-               int type = sha1_object_info(name->hash, NULL);
+               int type = oid_object_info(the_repository, name, NULL);
                if (type < 0 || !object_as_type(o, type, 0))
                        return PEEL_INVALID;
        }
@@ -375,7 +377,7 @@ int refs_for_each_tag_ref(struct ref_store *refs, each_ref_fn fn, void *cb_data)
 
 int for_each_tag_ref(each_ref_fn fn, void *cb_data)
 {
-       return refs_for_each_tag_ref(get_main_ref_store(), fn, cb_data);
+       return refs_for_each_tag_ref(get_main_ref_store(the_repository), fn, cb_data);
 }
 
 int refs_for_each_branch_ref(struct ref_store *refs, each_ref_fn fn, void *cb_data)
@@ -385,7 +387,7 @@ int refs_for_each_branch_ref(struct ref_store *refs, each_ref_fn fn, void *cb_da
 
 int for_each_branch_ref(each_ref_fn fn, void *cb_data)
 {
-       return refs_for_each_branch_ref(get_main_ref_store(), fn, cb_data);
+       return refs_for_each_branch_ref(get_main_ref_store(the_repository), fn, cb_data);
 }
 
 int refs_for_each_remote_ref(struct ref_store *refs, each_ref_fn fn, void *cb_data)
@@ -395,7 +397,7 @@ int refs_for_each_remote_ref(struct ref_store *refs, each_ref_fn fn, void *cb_da
 
 int for_each_remote_ref(each_ref_fn fn, void *cb_data)
 {
-       return refs_for_each_remote_ref(get_main_ref_store(), fn, cb_data);
+       return refs_for_each_remote_ref(get_main_ref_store(the_repository), fn, cb_data);
 }
 
 int head_ref_namespaced(each_ref_fn fn, void *cb_data)
@@ -501,6 +503,19 @@ int refname_match(const char *abbrev_name, const char *full_name)
        return 0;
 }
 
+/*
+ * Given a 'prefix' expand it by the rules in 'ref_rev_parse_rules' and add
+ * the results to 'prefixes'
+ */
+void expand_ref_prefix(struct argv_array *prefixes, const char *prefix)
+{
+       const char **p;
+       int len = strlen(prefix);
+
+       for (p = ref_rev_parse_rules; *p; p++)
+               argv_array_pushf(prefixes, *p, len, prefix);
+}
+
 /*
  * *string and *len will only be substituted, and *string returned (for
  * later free()ing) if the string passed in is a magic short-hand form
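expand_ref_prefix() reuses the same ref_rev_parse_rules table that "git rev-parse" uses to disambiguate short names, so a single prefix fans out into every place the ref could live. A small sketch of the expansion (the exact rule list lives in refs.c):

        struct argv_array prefixes = ARGV_ARRAY_INIT;

        expand_ref_prefix(&prefixes, "v2.18.0");
        /*
         * prefixes now contains, in rule order:
         *   v2.18.0
         *   refs/v2.18.0
         *   refs/tags/v2.18.0
         *   refs/heads/v2.18.0
         *   refs/remotes/v2.18.0
         *   refs/remotes/v2.18.0/HEAD
         */
        argv_array_clear(&prefixes);
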
@@ -600,7 +615,8 @@ int dwim_log(const char *str, int len, struct object_id *oid, char **log)
 static int is_per_worktree_ref(const char *refname)
 {
        return !strcmp(refname, "HEAD") ||
-               starts_with(refname, "refs/bisect/");
+               starts_with(refname, "refs/bisect/") ||
+               starts_with(refname, "refs/rewritten/");
 }
 
 static int is_pseudoref_syntax(const char *refname)
@@ -730,7 +746,7 @@ int refs_delete_ref(struct ref_store *refs, const char *msg,
        struct strbuf err = STRBUF_INIT;
 
        if (ref_type(refname) == REF_TYPE_PSEUDOREF) {
-               assert(refs == get_main_ref_store());
+               assert(refs == get_main_ref_store(the_repository));
                return delete_pseudoref(refname, old_oid);
        }
 
@@ -752,7 +768,7 @@ int refs_delete_ref(struct ref_store *refs, const char *msg,
 int delete_ref(const char *msg, const char *refname,
               const struct object_id *old_oid, unsigned int flags)
 {
-       return refs_delete_ref(get_main_ref_store(), msg, refname,
+       return refs_delete_ref(get_main_ref_store(the_repository), msg, refname,
                               old_oid, flags);
 }
 
@@ -928,7 +944,7 @@ struct ref_transaction *ref_store_transaction_begin(struct ref_store *refs,
 
 struct ref_transaction *ref_transaction_begin(struct strbuf *err)
 {
-       return ref_store_transaction_begin(get_main_ref_store(), err);
+       return ref_store_transaction_begin(get_main_ref_store(the_repository), err);
 }
 
 void ref_transaction_free(struct ref_transaction *transaction)
@@ -1060,7 +1076,7 @@ int refs_update_ref(struct ref_store *refs, const char *msg,
        int ret = 0;
 
        if (ref_type(refname) == REF_TYPE_PSEUDOREF) {
-               assert(refs == get_main_ref_store());
+               assert(refs == get_main_ref_store(the_repository));
                ret = write_pseudoref(refname, new_oid, old_oid, &err);
        } else {
                t = ref_store_transaction_begin(refs, &err);
@@ -1099,7 +1115,7 @@ int update_ref(const char *msg, const char *refname,
               const struct object_id *old_oid,
               unsigned int flags, enum action_on_err onerr)
 {
-       return refs_update_ref(get_main_ref_store(), msg, refname, new_oid,
+       return refs_update_ref(get_main_ref_store(the_repository), msg, refname, new_oid,
                               old_oid, flags, onerr);
 }
 
@@ -1320,7 +1336,7 @@ int refs_head_ref(struct ref_store *refs, each_ref_fn fn, void *cb_data)
 
 int head_ref(each_ref_fn fn, void *cb_data)
 {
-       return refs_head_ref(get_main_ref_store(), fn, cb_data);
+       return refs_head_ref(get_main_ref_store(the_repository), fn, cb_data);
 }
 
 struct ref_iterator *refs_ref_iterator_begin(
@@ -1379,7 +1395,7 @@ int refs_for_each_ref(struct ref_store *refs, each_ref_fn fn, void *cb_data)
 
 int for_each_ref(each_ref_fn fn, void *cb_data)
 {
-       return refs_for_each_ref(get_main_ref_store(), fn, cb_data);
+       return refs_for_each_ref(get_main_ref_store(the_repository), fn, cb_data);
 }
 
 int refs_for_each_ref_in(struct ref_store *refs, const char *prefix,
@@ -1390,7 +1406,7 @@ int refs_for_each_ref_in(struct ref_store *refs, const char *prefix,
 
 int for_each_ref_in(const char *prefix, each_ref_fn fn, void *cb_data)
 {
-       return refs_for_each_ref_in(get_main_ref_store(), prefix, fn, cb_data);
+       return refs_for_each_ref_in(get_main_ref_store(the_repository), prefix, fn, cb_data);
 }
 
 int for_each_fullref_in(const char *prefix, each_ref_fn fn, void *cb_data, unsigned int broken)
@@ -1399,7 +1415,7 @@ int for_each_fullref_in(const char *prefix, each_ref_fn fn, void *cb_data, unsig
 
        if (broken)
                flag = DO_FOR_EACH_INCLUDE_BROKEN;
-       return do_for_each_ref(get_main_ref_store(),
+       return do_for_each_ref(get_main_ref_store(the_repository),
                               prefix, fn, 0, flag, cb_data);
 }
 
@@ -1414,9 +1430,9 @@ int refs_for_each_fullref_in(struct ref_store *refs, const char *prefix,
        return do_for_each_ref(refs, prefix, fn, 0, flag, cb_data);
 }
 
-int for_each_replace_ref(each_ref_fn fn, void *cb_data)
+int for_each_replace_ref(struct repository *r, each_ref_fn fn, void *cb_data)
 {
-       return do_for_each_ref(get_main_ref_store(),
+       return do_for_each_ref(get_main_ref_store(r),
                               git_replace_ref_base, fn,
                               strlen(git_replace_ref_base),
                               DO_FOR_EACH_INCLUDE_BROKEN, cb_data);
@@ -1427,7 +1443,7 @@ int for_each_namespaced_ref(each_ref_fn fn, void *cb_data)
        struct strbuf buf = STRBUF_INIT;
        int ret;
        strbuf_addf(&buf, "%srefs/", get_git_namespace());
-       ret = do_for_each_ref(get_main_ref_store(),
+       ret = do_for_each_ref(get_main_ref_store(the_repository),
                              buf.buf, fn, 0, 0, cb_data);
        strbuf_release(&buf);
        return ret;
@@ -1441,7 +1457,7 @@ int refs_for_each_rawref(struct ref_store *refs, each_ref_fn fn, void *cb_data)
 
 int for_each_rawref(each_ref_fn fn, void *cb_data)
 {
-       return refs_for_each_rawref(get_main_ref_store(), fn, cb_data);
+       return refs_for_each_rawref(get_main_ref_store(the_repository), fn, cb_data);
 }
 
 int refs_read_raw_ref(struct ref_store *ref_store,
@@ -1547,7 +1563,7 @@ const char *refs_resolve_ref_unsafe(struct ref_store *refs,
 /* backend functions */
 int refs_init_db(struct strbuf *err)
 {
-       struct ref_store *refs = get_main_ref_store();
+       struct ref_store *refs = get_main_ref_store(the_repository);
 
        return refs->be->init_db(refs, err);
 }
@@ -1555,7 +1571,7 @@ int refs_init_db(struct strbuf *err)
 const char *resolve_ref_unsafe(const char *refname, int resolve_flags,
                               struct object_id *oid, int *flags)
 {
-       return refs_resolve_ref_unsafe(get_main_ref_store(), refname,
+       return refs_resolve_ref_unsafe(get_main_ref_store(the_repository), refname,
                                       resolve_flags, oid, flags);
 }
 
@@ -1607,9 +1623,6 @@ static struct ref_store_hash_entry *alloc_ref_store_hash_entry(
        return entry;
 }
 
-/* A pointer to the ref_store for the main repository: */
-static struct ref_store *main_ref_store;
-
 /* A hashmap of ref_stores, stored by submodule name: */
 static struct hashmap submodule_ref_stores;
 
@@ -1651,13 +1664,16 @@ static struct ref_store *ref_store_init(const char *gitdir,
        return refs;
 }
 
-struct ref_store *get_main_ref_store(void)
+struct ref_store *get_main_ref_store(struct repository *r)
 {
-       if (main_ref_store)
-               return main_ref_store;
+       if (r->refs)
+               return r->refs;
+
+       if (!r->gitdir)
+               BUG("attempting to get main_ref_store outside of repository");
 
-       main_ref_store = ref_store_init(get_git_dir(), REF_STORE_ALL_CAPS);
-       return main_ref_store;
+       r->refs = ref_store_init(r->gitdir, REF_STORE_ALL_CAPS);
+       return r->refs;
 }
 
 /*
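With the ref store hanging off "struct repository" and initialized lazily from r->gitdir, code that operates on a repository other than the_repository can reach its refs without any global state. A hedged sketch (show_one_ref is a hypothetical each_ref_fn; subrepo is assumed to have been set up with repo_init() or repo_submodule_init()):

        struct ref_store *refs = get_main_ref_store(&subrepo);

        refs_for_each_ref(refs, show_one_ref, NULL);
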
@@ -1726,7 +1742,7 @@ struct ref_store *get_worktree_ref_store(const struct worktree *wt)
        const char *id;
 
        if (wt->is_current)
-               return get_main_ref_store();
+               return get_main_ref_store(the_repository);
 
        id = wt->id ? wt->id : "/";
        refs = lookup_ref_store_map(&worktree_ref_stores, id);
@@ -1782,7 +1798,7 @@ int refs_peel_ref(struct ref_store *refs, const char *refname,
 
 int peel_ref(const char *refname, struct object_id *oid)
 {
-       return refs_peel_ref(get_main_ref_store(), refname, oid);
+       return refs_peel_ref(get_main_ref_store(the_repository), refname, oid);
 }
 
 int refs_create_symref(struct ref_store *refs,
@@ -1798,7 +1814,7 @@ int refs_create_symref(struct ref_store *refs,
 int create_symref(const char *ref_target, const char *refs_heads_master,
                  const char *logmsg)
 {
-       return refs_create_symref(get_main_ref_store(), ref_target,
+       return refs_create_symref(get_main_ref_store(the_repository), ref_target,
                                  refs_heads_master, logmsg);
 }
 
@@ -2006,7 +2022,7 @@ int refs_for_each_reflog(struct ref_store *refs, each_ref_fn fn, void *cb_data)
 
 int for_each_reflog(each_ref_fn fn, void *cb_data)
 {
-       return refs_for_each_reflog(get_main_ref_store(), fn, cb_data);
+       return refs_for_each_reflog(get_main_ref_store(the_repository), fn, cb_data);
 }
 
 int refs_for_each_reflog_ent_reverse(struct ref_store *refs,
@@ -2021,7 +2037,7 @@ int refs_for_each_reflog_ent_reverse(struct ref_store *refs,
 int for_each_reflog_ent_reverse(const char *refname, each_reflog_ent_fn fn,
                                void *cb_data)
 {
-       return refs_for_each_reflog_ent_reverse(get_main_ref_store(),
+       return refs_for_each_reflog_ent_reverse(get_main_ref_store(the_repository),
                                                refname, fn, cb_data);
 }
 
@@ -2034,7 +2050,7 @@ int refs_for_each_reflog_ent(struct ref_store *refs, const char *refname,
 int for_each_reflog_ent(const char *refname, each_reflog_ent_fn fn,
                        void *cb_data)
 {
-       return refs_for_each_reflog_ent(get_main_ref_store(), refname,
+       return refs_for_each_reflog_ent(get_main_ref_store(the_repository), refname,
                                        fn, cb_data);
 }
 
@@ -2045,7 +2061,7 @@ int refs_reflog_exists(struct ref_store *refs, const char *refname)
 
 int reflog_exists(const char *refname)
 {
-       return refs_reflog_exists(get_main_ref_store(), refname);
+       return refs_reflog_exists(get_main_ref_store(the_repository), refname);
 }
 
 int refs_create_reflog(struct ref_store *refs, const char *refname,
@@ -2057,7 +2073,7 @@ int refs_create_reflog(struct ref_store *refs, const char *refname,
 int safe_create_reflog(const char *refname, int force_create,
                       struct strbuf *err)
 {
-       return refs_create_reflog(get_main_ref_store(), refname,
+       return refs_create_reflog(get_main_ref_store(the_repository), refname,
                                  force_create, err);
 }
 
@@ -2068,7 +2084,7 @@ int refs_delete_reflog(struct ref_store *refs, const char *refname)
 
 int delete_reflog(const char *refname)
 {
-       return refs_delete_reflog(get_main_ref_store(), refname);
+       return refs_delete_reflog(get_main_ref_store(the_repository), refname);
 }
 
 int refs_reflog_expire(struct ref_store *refs,
@@ -2091,7 +2107,7 @@ int reflog_expire(const char *refname, const struct object_id *oid,
                  reflog_expiry_cleanup_fn cleanup_fn,
                  void *policy_cb_data)
 {
-       return refs_reflog_expire(get_main_ref_store(),
+       return refs_reflog_expire(get_main_ref_store(the_repository),
                                  refname, oid, flags,
                                  prepare_fn, should_prune_fn,
                                  cleanup_fn, policy_cb_data);
@@ -2114,7 +2130,7 @@ int refs_delete_refs(struct ref_store *refs, const char *msg,
 int delete_refs(const char *msg, struct string_list *refnames,
                unsigned int flags)
 {
-       return refs_delete_refs(get_main_ref_store(), msg, refnames, flags);
+       return refs_delete_refs(get_main_ref_store(the_repository), msg, refnames, flags);
 }
 
 int refs_rename_ref(struct ref_store *refs, const char *oldref,
@@ -2125,7 +2141,7 @@ int refs_rename_ref(struct ref_store *refs, const char *oldref,
 
 int rename_ref(const char *oldref, const char *newref, const char *logmsg)
 {
-       return refs_rename_ref(get_main_ref_store(), oldref, newref, logmsg);
+       return refs_rename_ref(get_main_ref_store(the_repository), oldref, newref, logmsg);
 }
 
 int refs_copy_existing_ref(struct ref_store *refs, const char *oldref,
@@ -2136,5 +2152,5 @@ int refs_copy_existing_ref(struct ref_store *refs, const char *oldref,
 
 int copy_existing_ref(const char *oldref, const char *newref, const char *logmsg)
 {
-       return refs_copy_existing_ref(get_main_ref_store(), oldref, newref, logmsg);
+       return refs_copy_existing_ref(get_main_ref_store(the_repository), oldref, newref, logmsg);
 }
diff --git a/refs.h b/refs.h
index 01be5ae32fb01298ff6c0738ac4adfe42643b682..cc2fb4c68c0e194dc51e3846192911c2c6949c6b 100644 (file)
--- a/refs.h
+++ b/refs.h
@@ -139,6 +139,13 @@ int resolve_gitlink_ref(const char *submodule, const char *refname,
  */
 int refname_match(const char *abbrev_name, const char *full_name);
 
+/*
+ * Given a 'prefix' expand it by the rules in 'ref_rev_parse_rules' and add
+ * the results to 'prefixes'
+ */
+struct argv_array;
+void expand_ref_prefix(struct argv_array *prefixes, const char *prefix);
+
 int expand_ref(const char *str, int len, struct object_id *oid, char **ref);
 int dwim_ref(const char *str, int len, struct object_id *oid, char **ref);
 int dwim_log(const char *str, int len, struct object_id *oid, char **ref);
@@ -300,7 +307,7 @@ int for_each_fullref_in(const char *prefix, each_ref_fn fn, void *cb_data,
 int for_each_tag_ref(each_ref_fn fn, void *cb_data);
 int for_each_branch_ref(each_ref_fn fn, void *cb_data);
 int for_each_remote_ref(each_ref_fn fn, void *cb_data);
-int for_each_replace_ref(each_ref_fn fn, void *cb_data);
+int for_each_replace_ref(struct repository *r, each_ref_fn fn, void *cb_data);
 int for_each_glob_ref(each_ref_fn fn, const char *pattern, void *cb_data);
 int for_each_glob_ref_in(each_ref_fn fn, const char *pattern,
                         const char *prefix, void *cb_data);
@@ -758,7 +765,7 @@ int reflog_expire(const char *refname, const struct object_id *oid,
 
 int ref_storage_backend_exists(const char *name);
 
-struct ref_store *get_main_ref_store(void);
+struct ref_store *get_main_ref_store(struct repository *r);
 /*
  * Return the ref_store instance for the specified submodule. For the
  * main repository, use submodule==NULL; such a call cannot fail. For
diff --git a/refs/files-backend.c b/refs/files-backend.c
index bec8e30e9e3e2995739dfff7cc971f8de623610d..49d8f67bf132c2357fddcca4b449e28a54ce8f7c 100644 (file)
@@ -9,6 +9,7 @@
 #include "../lockfile.h"
 #include "../object.h"
 #include "../dir.h"
+#include "../chdir-notify.h"
 
 /*
  * This backend uses the following flags in `ref_update::flags` for
@@ -61,10 +62,6 @@ struct ref_lock {
        struct object_id old_oid;
 };
 
-/*
- * Future: need to be in "struct repository"
- * when doing a full libification.
- */
 struct files_ref_store {
        struct ref_store base;
        unsigned int store_flags;
@@ -106,6 +103,11 @@ static struct ref_store *files_ref_store_create(const char *gitdir,
        refs->packed_ref_store = packed_ref_store_create(sb.buf, flags);
        strbuf_release(&sb);
 
+       chdir_notify_reparent("files-backend $GIT_DIR",
+                             &refs->gitdir);
+       chdir_notify_reparent("files-backend $GIT_COMMONDIR",
+                             &refs->gitcommondir);
+
        return ref_store;
 }
 
diff --git a/refs/packed-backend.c b/refs/packed-backend.c
index 65288c647278aa27790b13c0360f756686dadf7a..369c34f886fc5532de2afbd1590823eed0758a33 100644 (file)
@@ -5,6 +5,7 @@
 #include "packed-backend.h"
 #include "../iterator.h"
 #include "../lockfile.h"
+#include "../chdir-notify.h"
 
 enum mmap_strategy {
        /*
@@ -202,6 +203,8 @@ struct ref_store *packed_ref_store_create(const char *path,
        refs->store_flags = store_flags;
 
        refs->path = xstrdup(path);
+       chdir_notify_reparent("packed-refs", &refs->path);
+
        return ref_store;
 }
 
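Both ref backends now hand their path strings to the chdir-notify API, so that a later chdir() into the worktree (as setup_work_tree() does) rewrites any relative $GIT_DIR-based paths instead of silently invalidating them. A rough sketch of the same pattern for any long-lived path, assuming the two-argument chdir_notify_reparent(name, &path) interface shown above:

        struct my_store {
                char *path;     /* may be relative to the original cwd */
        };

        static void my_store_init(struct my_store *s, const char *path)
        {
                s->path = xstrdup(path);
                /* keep s->path pointing at the same file across a chdir() */
                chdir_notify_reparent("my-store path", &s->path);
        }
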
diff --git a/remote-curl.c b/remote-curl.c
index a7c4c9b5ff4822e36bfc43a59d113c624537297e..ceb05347bd18c3c0ea4e7cfd7ffe09a072b722ce 100644 (file)
@@ -1,10 +1,11 @@
 #include "cache.h"
 #include "config.h"
 #include "remote.h"
+#include "connect.h"
 #include "strbuf.h"
 #include "walker.h"
 #include "http.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "run-command.h"
 #include "pkt-line.h"
 #include "string-list.h"
@@ -13,6 +14,7 @@
 #include "credential.h"
 #include "sha1-array.h"
 #include "send-pack.h"
+#include "protocol.h"
 #include "quote.h"
 
 static struct remote *remote;
@@ -184,12 +186,13 @@ static int set_option(const char *name, const char *value)
 }
 
 struct discovery {
-       const char *service;
+       char *service;
        char *buf_alloc;
        char *buf;
        size_t len;
        struct ref *refs;
        struct oid_array shallow;
+       enum protocol_version version;
        unsigned proto_git : 1;
 };
 static struct discovery *last_discovery;
@@ -197,8 +200,31 @@ static struct discovery *last_discovery;
 static struct ref *parse_git_refs(struct discovery *heads, int for_push)
 {
        struct ref *list = NULL;
-       get_remote_heads(-1, heads->buf, heads->len, &list,
-                        for_push ? REF_NORMAL : 0, NULL, &heads->shallow);
+       struct packet_reader reader;
+
+       packet_reader_init(&reader, -1, heads->buf, heads->len,
+                          PACKET_READ_CHOMP_NEWLINE |
+                          PACKET_READ_GENTLE_ON_EOF);
+
+       heads->version = discover_version(&reader);
+       switch (heads->version) {
+       case protocol_v2:
+               /*
+                * Do nothing.  This isn't a list of refs but rather a
+                * capability advertisement.  Client would have run
+                * 'stateless-connect' so we'll dump this capability listing
+                * and let them request the refs themselves.
+                */
+               break;
+       case protocol_v1:
+       case protocol_v0:
+               get_remote_heads(&reader, &list, for_push ? REF_NORMAL : 0,
+                                NULL, &heads->shallow);
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
+       }
+
        return list;
 }
 
@@ -259,6 +285,7 @@ static void free_discovery(struct discovery *d)
                free(d->shallow.oid);
                free(d->buf_alloc);
                free_refs(d->refs);
+               free(d->service);
                free(d);
        }
 }
@@ -290,6 +317,19 @@ static int show_http_message(struct strbuf *type, struct strbuf *charset,
        return 0;
 }
 
+static int get_protocol_http_header(enum protocol_version version,
+                                   struct strbuf *header)
+{
+       if (version > 0) {
+               strbuf_addf(header, GIT_PROTOCOL_HEADER ": version=%d",
+                           version);
+
+               return 1;
+       }
+
+       return 0;
+}
+
 static struct discovery *discover_refs(const char *service, int for_push)
 {
        struct strbuf exp = STRBUF_INIT;
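get_protocol_http_header() is how the configured wire-protocol version is carried over HTTP: for anything newer than v0 the helper adds an extra request header, which git-http-backend on the server side maps back into the GIT_PROTOCOL environment variable. A small sketch of what ends up on the request, assuming GIT_PROTOCOL_HEADER expands to "Git-Protocol" as in protocol.h:

        struct strbuf header = STRBUF_INIT;

        if (get_protocol_http_header(protocol_v2, &header))
                string_list_append(&extra_headers, header.buf);
        /* the request now carries:  Git-Protocol: version=2 */
        strbuf_release(&header);
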
@@ -298,9 +338,12 @@ static struct discovery *discover_refs(const char *service, int for_push)
        struct strbuf buffer = STRBUF_INIT;
        struct strbuf refs_url = STRBUF_INIT;
        struct strbuf effective_url = STRBUF_INIT;
+       struct strbuf protocol_header = STRBUF_INIT;
+       struct string_list extra_headers = STRING_LIST_INIT_DUP;
        struct discovery *last = last_discovery;
        int http_ret, maybe_smart = 0;
        struct http_get_options http_options;
+       enum protocol_version version = get_protocol_version_config();
 
        if (last && !strcmp(service, last->service))
                return last;
@@ -317,11 +360,24 @@ static struct discovery *discover_refs(const char *service, int for_push)
                strbuf_addf(&refs_url, "service=%s", service);
        }
 
+       /*
+        * NEEDSWORK: If we are trying to use protocol v2 and we are planning
+        * to perform a push, then fallback to v0 since the client doesn't know
+        * how to push yet using v2.
+        */
+       if (version == protocol_v2 && !strcmp("git-receive-pack", service))
+               version = protocol_v0;
+
+       /* Add the extra Git-Protocol header */
+       if (get_protocol_http_header(version, &protocol_header))
+               string_list_append(&extra_headers, protocol_header.buf);
+
        memset(&http_options, 0, sizeof(http_options));
        http_options.content_type = &type;
        http_options.charset = &charset;
        http_options.effective_url = &effective_url;
        http_options.base_url = &url;
+       http_options.extra_headers = &extra_headers;
        http_options.initial_request = 1;
        http_options.no_cache = 1;
        http_options.keep_error = 1;
@@ -345,7 +401,7 @@ static struct discovery *discover_refs(const char *service, int for_push)
                warning(_("redirecting to %s"), url.buf);
 
        last= xcalloc(1, sizeof(*last_discovery));
-       last->service = service;
+       last->service = xstrdup(service);
        last->buf_alloc = strbuf_detach(&buffer, &last->len);
        last->buf = last->buf_alloc;
 
@@ -377,6 +433,9 @@ static struct discovery *discover_refs(const char *service, int for_push)
                        ;
 
                last->proto_git = 1;
+       } else if (maybe_smart &&
+                  last->len > 5 && starts_with(last->buf + 4, "version 2")) {
+               last->proto_git = 1;
        }
 
        if (last->proto_git)
@@ -390,6 +449,8 @@ static struct discovery *discover_refs(const char *service, int for_push)
        strbuf_release(&charset);
        strbuf_release(&effective_url);
        strbuf_release(&buffer);
+       strbuf_release(&protocol_header);
+       string_list_clear(&extra_headers, 0);
        last_discovery = last;
        return last;
 }
@@ -426,6 +487,7 @@ struct rpc_state {
        char *service_url;
        char *hdr_content_type;
        char *hdr_accept;
+       char *protocol_header;
        char *buf;
        size_t alloc;
        size_t len;
@@ -612,6 +674,10 @@ static int post_rpc(struct rpc_state *rpc)
        headers = curl_slist_append(headers, needs_100_continue ?
                "Expect: 100-continue" : "Expect:");
 
+       /* Add the extra Git-Protocol header */
+       if (rpc->protocol_header)
+               headers = curl_slist_append(headers, rpc->protocol_header);
+
 retry:
        slot = get_active_slot();
 
@@ -752,6 +818,11 @@ static int rpc_service(struct rpc_state *rpc, struct discovery *heads)
        strbuf_addf(&buf, "Accept: application/x-%s-result", svc);
        rpc->hdr_accept = strbuf_detach(&buf, NULL);
 
+       if (get_protocol_http_header(heads->version, &buf))
+               rpc->protocol_header = strbuf_detach(&buf, NULL);
+       else
+               rpc->protocol_header = NULL;
+
        while (!err) {
                int n = packet_read(rpc->out, NULL, NULL, rpc->buf, rpc->alloc, 0);
                if (!n)
@@ -779,6 +850,7 @@ static int rpc_service(struct rpc_state *rpc, struct discovery *heads)
        free(rpc->service_url);
        free(rpc->hdr_content_type);
        free(rpc->hdr_accept);
+       free(rpc->protocol_header);
        free(rpc->buf);
        strbuf_release(&buf);
        return err;
@@ -797,9 +869,6 @@ static int fetch_dumb(int nr_heads, struct ref **to_fetch)
                targets[i] = xstrdup(oid_to_hex(&to_fetch[i]->old_oid));
 
        walker = get_http_walker(url.buf);
-       walker->get_all = 1;
-       walker->get_tree = 1;
-       walker->get_history = 1;
        walker->get_verbosely = options.verbosity >= 3;
        walker->get_recover = 0;
        ret = walker_fetch(walker, nr_heads, targets, NULL, NULL);
@@ -1056,6 +1125,202 @@ static void parse_push(struct strbuf *buf)
        free(specs);
 }
 
+/*
+ * Used to represent the state of a connection to an HTTP server when
+ * communicating using git's wire-protocol version 2.
+ */
+struct proxy_state {
+       char *service_name;
+       char *service_url;
+       struct curl_slist *headers;
+       struct strbuf request_buffer;
+       int in;
+       int out;
+       struct packet_reader reader;
+       size_t pos;
+       int seen_flush;
+};
+
+static void proxy_state_init(struct proxy_state *p, const char *service_name,
+                            enum protocol_version version)
+{
+       struct strbuf buf = STRBUF_INIT;
+
+       memset(p, 0, sizeof(*p));
+       p->service_name = xstrdup(service_name);
+
+       p->in = 0;
+       p->out = 1;
+       strbuf_init(&p->request_buffer, 0);
+
+       strbuf_addf(&buf, "%s%s", url.buf, p->service_name);
+       p->service_url = strbuf_detach(&buf, NULL);
+
+       p->headers = http_copy_default_headers();
+
+       strbuf_addf(&buf, "Content-Type: application/x-%s-request", p->service_name);
+       p->headers = curl_slist_append(p->headers, buf.buf);
+       strbuf_reset(&buf);
+
+       strbuf_addf(&buf, "Accept: application/x-%s-result", p->service_name);
+       p->headers = curl_slist_append(p->headers, buf.buf);
+       strbuf_reset(&buf);
+
+       p->headers = curl_slist_append(p->headers, "Transfer-Encoding: chunked");
+
+       /* Add the Git-Protocol header */
+       if (get_protocol_http_header(version, &buf))
+               p->headers = curl_slist_append(p->headers, buf.buf);
+
+       packet_reader_init(&p->reader, p->in, NULL, 0,
+                          PACKET_READ_GENTLE_ON_EOF);
+
+       strbuf_release(&buf);
+}
+
+static void proxy_state_clear(struct proxy_state *p)
+{
+       free(p->service_name);
+       free(p->service_url);
+       curl_slist_free_all(p->headers);
+       strbuf_release(&p->request_buffer);
+}
+
+/*
+ * CURLOPT_READFUNCTION callback function.
+ * Attempts to copy over a single packet-line at a time into the
+ * curl provided buffer.
+ */
+static size_t proxy_in(char *buffer, size_t eltsize,
+                      size_t nmemb, void *userdata)
+{
+       size_t max;
+       struct proxy_state *p = userdata;
+       size_t avail = p->request_buffer.len - p->pos;
+
+
+       if (eltsize != 1)
+               BUG("curl read callback called with size = %"PRIuMAX" != 1",
+                   (uintmax_t)eltsize);
+       max = nmemb;
+
+       if (!avail) {
+               if (p->seen_flush) {
+                       p->seen_flush = 0;
+                       return 0;
+               }
+
+               strbuf_reset(&p->request_buffer);
+               switch (packet_reader_read(&p->reader)) {
+               case PACKET_READ_EOF:
+                       die("unexpected EOF when reading from parent process");
+               case PACKET_READ_NORMAL:
+                       packet_buf_write_len(&p->request_buffer, p->reader.line,
+                                            p->reader.pktlen);
+                       break;
+               case PACKET_READ_DELIM:
+                       packet_buf_delim(&p->request_buffer);
+                       break;
+               case PACKET_READ_FLUSH:
+                       packet_buf_flush(&p->request_buffer);
+                       p->seen_flush = 1;
+                       break;
+               }
+               p->pos = 0;
+               avail = p->request_buffer.len;
+       }
+
+       if (max < avail)
+               avail = max;
+       memcpy(buffer, p->request_buffer.buf + p->pos, avail);
+       p->pos += avail;
+       return avail;
+}
+
+static size_t proxy_out(char *buffer, size_t eltsize,
+                       size_t nmemb, void *userdata)
+{
+       size_t size;
+       struct proxy_state *p = userdata;
+
+       if (eltsize != 1)
+               BUG("curl read callback called with size = %"PRIuMAX" != 1",
+                   (uintmax_t)eltsize);
+       size = nmemb;
+
+       write_or_die(p->out, buffer, size);
+       return size;
+}
+
+/* Issues a request to the HTTP server configured in `p` */
+static int proxy_request(struct proxy_state *p)
+{
+       struct active_request_slot *slot;
+
+       slot = get_active_slot();
+
+       curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 0);
+       curl_easy_setopt(slot->curl, CURLOPT_POST, 1);
+       curl_easy_setopt(slot->curl, CURLOPT_URL, p->service_url);
+       curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, p->headers);
+
+       /* Setup function to read request from client */
+       curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, proxy_in);
+       curl_easy_setopt(slot->curl, CURLOPT_READDATA, p);
+
+       /* Setup function to write server response to client */
+       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, proxy_out);
+       curl_easy_setopt(slot->curl, CURLOPT_WRITEDATA, p);
+
+       if (run_slot(slot, NULL) != HTTP_OK)
+               return -1;
+
+       return 0;
+}
+
+static int stateless_connect(const char *service_name)
+{
+       struct discovery *discover;
+       struct proxy_state p;
+
+       /*
+        * Run the info/refs request and see if the server supports protocol
+        * v2.  If and only if the server supports v2 can we successfully
+        * establish a stateless connection, otherwise we need to tell the
+        * client to fallback to using other transport helper functions to
+        * complete their request.
+        */
+       discover = discover_refs(service_name, 0);
+       if (discover->version != protocol_v2) {
+               printf("fallback\n");
+               fflush(stdout);
+               return -1;
+       } else {
+               /* Stateless Connection established */
+               printf("\n");
+               fflush(stdout);
+       }
+
+       proxy_state_init(&p, service_name, discover->version);
+
+       /*
+        * Dump the capability listing that we got from the server earlier
+        * during the info/refs request.
+        */
+       write_or_die(p.out, discover->buf, discover->len);
+
+       /* Peek the next packet line.  Until we see EOF keep sending POSTs */
+       while (packet_reader_peek(&p.reader) != PACKET_READ_EOF) {
+               if (proxy_request(&p)) {
+                       /* We would have an err here */
+                       break;
+               }
+       }
+
+       proxy_state_clear(&p);
+       return 0;
+}
+
 int cmd_main(int argc, const char **argv)
 {
        struct strbuf buf = STRBUF_INIT;
@@ -1124,12 +1389,16 @@ int cmd_main(int argc, const char **argv)
                        fflush(stdout);
 
                } else if (!strcmp(buf.buf, "capabilities")) {
+                       printf("stateless-connect\n");
                        printf("fetch\n");
                        printf("option\n");
                        printf("push\n");
                        printf("check-connectivity\n");
                        printf("\n");
                        fflush(stdout);
+               } else if (skip_prefix(buf.buf, "stateless-connect ", &arg)) {
+                       if (!stateless_connect(arg))
+                               break;
                } else {
                        error("remote-curl: unknown command '%s' from git", buf.buf);
                        return 1;
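
Putting the pieces together, the helper's conversation with git for a protocol v2 fetch looks roughly like this, in the transcript style of the gitremote-helpers documentation. The blank helper line after "stateless-connect" signals that the connection is established; a server without v2 support would get "fallback" instead:

        git> capabilities
        helper> stateless-connect
        helper> fetch
        helper> option
        helper> push
        helper> check-connectivity
        helper>
        git> stateless-connect git-upload-pack
        helper>
        (the capability advertisement from the info/refs request is dumped,
         then each pkt-line request from git is proxied as one chunked
         HTTP POST until git closes its end)
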
diff --git a/remote-testsvn.c b/remote-testsvn.c
index bcebb4c789567eb4017a3a0132ba55c59c427991..444d98059f681e21beeb3e66fe8539887cfb74d1 100644 (file)
@@ -3,7 +3,7 @@
 #include "remote.h"
 #include "strbuf.h"
 #include "url.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "run-command.h"
 #include "vcs-svn/svndump.h"
 #include "notes.h"
@@ -61,7 +61,7 @@ static char *read_ref_note(const struct object_id *oid)
        init_notes(NULL, notes_ref, NULL, 0);
        if (!(note_oid = get_note(NULL, oid)))
                return NULL;    /* note tree not found */
-       if (!(msg = read_sha1_file(note_oid->hash, &type, &msglen)))
+       if (!(msg = read_object_file(note_oid, &type, &msglen)))
                error("Empty notes tree. %s", notes_ref);
        else if (!msglen || type != OBJ_BLOB) {
                error("Note contains unusable content. "
@@ -108,7 +108,7 @@ static int note2mark_cb(const struct object_id *object_oid,
        enum object_type type;
        struct rev_note note;
 
-       if (!(msg = read_sha1_file(note_oid->hash, &type, &msglen)) ||
+       if (!(msg = read_object_file(note_oid, &type, &msglen)) ||
                        !msglen || type != OBJ_BLOB) {
                free(msg);
                return 1;
index c10d87c24615e9d6497b46a69a82a71d3c1735a6..481bf933f390af28d7ef46b33a53b04d0f000be1 100644 (file)
--- a/remote.c
+++ b/remote.c
@@ -1376,7 +1376,7 @@ static void add_missing_tags(struct ref *src, struct ref **dst, struct ref ***ds
                        continue; /* not a tag */
                if (string_list_has_string(&dst_tag, ref->name))
                        continue; /* they already have it */
-               if (sha1_object_info(ref->new_oid.hash, NULL) != OBJ_TAG)
+               if (oid_object_info(the_repository, &ref->new_oid, NULL) != OBJ_TAG)
                        continue; /* be conservative */
                item = string_list_append(&src_tag, ref->name);
                item->util = ref;
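
This is one instance of a rename that recurs throughout the series: the low-level object accessors stop taking bare sha1 buffers and instead take object ids, with oid_object_info() additionally carrying an explicit repository. Before/after, taken from the hunks in this patch:

        /* before */
        type = sha1_object_info(ref->new_oid.hash, NULL);
        msg  = read_sha1_file(note_oid->hash, &type, &msglen);

        /* after */
        type = oid_object_info(the_repository, &ref->new_oid, NULL);
        msg  = read_object_file(note_oid, &type, &msglen);
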
index f09c01969d6b0d701140ceb9cb2e8f9e68533c96..93dd97e25f75ff88090dce0d92e0cf18dbdc3295 100644 (file)
--- a/remote.h
+++ b/remote.h
@@ -151,10 +151,19 @@ int check_ref_type(const struct ref *ref, int flags);
 void free_refs(struct ref *ref);
 
 struct oid_array;
-extern struct ref **get_remote_heads(int in, char *src_buf, size_t src_len,
+struct packet_reader;
+struct argv_array;
+struct string_list;
+extern struct ref **get_remote_heads(struct packet_reader *reader,
                                     struct ref **list, unsigned int flags,
                                     struct oid_array *extra_have,
-                                    struct oid_array *shallow);
+                                    struct oid_array *shallow_points);
+
+/* Used for protocol v2 in order to retrieve refs from a remote */
+extern struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
+                                   struct ref **list, int for_push,
+                                   const struct argv_array *ref_prefixes,
+                                   const struct string_list *server_options);
 
 int resolve_remote_symref(struct ref *ref, struct ref *list);
 int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid);
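
get_remote_refs() is the protocol v2 counterpart of get_remote_heads(): instead of parsing a ref advertisement, the client issues an ls-refs request (optionally limited by ref prefixes) over the already-established connection. A hedged sketch of how a transport is expected to dispatch on the discovered version, mirroring the remote-curl hunk above; fd_in, fd_out, ref_prefixes and shallow are assumed to be set up by the caller:

        struct ref *refs = NULL;
        struct packet_reader reader;

        packet_reader_init(&reader, fd_in, NULL, 0,
                           PACKET_READ_CHOMP_NEWLINE |
                           PACKET_READ_GENTLE_ON_EOF);

        switch (discover_version(&reader)) {
        case protocol_v2:
                get_remote_refs(fd_out, &reader, &refs, 0,
                                &ref_prefixes, NULL);
                break;
        case protocol_v1:
        case protocol_v0:
                get_remote_heads(&reader, &refs, 0, NULL, &shallow);
                break;
        case protocol_unknown_version:
                BUG("unknown protocol version");
        }
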
diff --git a/replace-object.c b/replace-object.c
new file mode 100644 (file)
index 0000000..801b5c1
--- /dev/null
@@ -0,0 +1,73 @@
+#include "cache.h"
+#include "oidmap.h"
+#include "object-store.h"
+#include "replace-object.h"
+#include "refs.h"
+#include "repository.h"
+#include "commit.h"
+
+static int register_replace_ref(const char *refname,
+                               const struct object_id *oid,
+                               int flag, void *cb_data)
+{
+       /* Get sha1 from refname */
+       const char *slash = strrchr(refname, '/');
+       const char *hash = slash ? slash + 1 : refname;
+       struct replace_object *repl_obj = xmalloc(sizeof(*repl_obj));
+
+       if (get_oid_hex(hash, &repl_obj->original.oid)) {
+               free(repl_obj);
+               warning("bad replace ref name: %s", refname);
+               return 0;
+       }
+
+       /* Copy sha1 from the read ref */
+       oidcpy(&repl_obj->replacement, oid);
+
+       /* Register new object */
+       if (oidmap_put(the_repository->objects->replace_map, repl_obj))
+               die("duplicate replace ref: %s", refname);
+
+       return 0;
+}
+
+static void prepare_replace_object(struct repository *r)
+{
+       if (r->objects->replace_map)
+               return;
+
+       r->objects->replace_map =
+               xmalloc(sizeof(*r->objects->replace_map));
+       oidmap_init(r->objects->replace_map, 0);
+
+       for_each_replace_ref(r, register_replace_ref, NULL);
+}
+
+/* We allow "recursive" replacement. Only within reason, though */
+#define MAXREPLACEDEPTH 5
+
+/*
+ * If a replacement for object oid has been set up, return the
+ * replacement object's name (replaced recursively, if necessary).
+ * The return value is either oid or a pointer to a
+ * permanently-allocated value.  This function always respects replace
+ * references, regardless of the value of check_replace_refs.
+ */
+const struct object_id *do_lookup_replace_object(struct repository *r,
+                                                const struct object_id *oid)
+{
+       int depth = MAXREPLACEDEPTH;
+       const struct object_id *cur = oid;
+
+       prepare_replace_object(r);
+
+       /* Try to recursively replace the object */
+       while (depth-- > 0) {
+               struct replace_object *repl_obj =
+                       oidmap_get(r->objects->replace_map, cur);
+               if (!repl_obj)
+                       return cur;
+               cur = &repl_obj->replacement;
+       }
+       die("replace depth too high for object %s", oid_to_hex(oid));
+}
diff --git a/replace-object.h b/replace-object.h
new file mode 100644 (file)
index 0000000..f996de3
--- /dev/null
@@ -0,0 +1,36 @@
+#ifndef REPLACE_OBJECT_H
+#define REPLACE_OBJECT_H
+
+#include "oidmap.h"
+#include "repository.h"
+#include "object-store.h"
+
+struct replace_object {
+       struct oidmap_entry original;
+       struct object_id replacement;
+};
+
+/*
+ * This internal function is only declared here for the benefit of
+ * lookup_replace_object().  Please do not call it directly.
+ */
+extern const struct object_id *do_lookup_replace_object(struct repository *r,
+                                                       const struct object_id *oid);
+
+/*
+ * If object sha1 should be replaced, return the replacement object's
+ * name (replaced recursively, if necessary).  The return value is
+ * either sha1 or a pointer to a permanently-allocated value.  When
+ * object replacement is suppressed, always return sha1.
+ */
+static inline const struct object_id *lookup_replace_object(struct repository *r,
+                                                           const struct object_id *oid)
+{
+       if (!check_replace_refs ||
+           (r->objects->replace_map &&
+            r->objects->replace_map->map.tablesize == 0))
+               return oid;
+       return do_lookup_replace_object(r, oid);
+}
+
+#endif /* REPLACE_OBJECT_H */
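
The lookup itself is keyed by the object name embedded in the ref: a ref named refs/replace/<original-oid> whose value is the replacement oid becomes one oidmap entry. Typical use is to resolve the replacement before reading the object, roughly as follows (read_object_file as used elsewhere in this series; oid, type and size are assumed locals):

        const struct object_id *real =
                lookup_replace_object(the_repository, &oid);
        void *buf = read_object_file(real, &type, &size);
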
diff --git a/replace_object.c b/replace_object.c
deleted file mode 100644 (file)
index 3e49965..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-#include "cache.h"
-#include "sha1-lookup.h"
-#include "refs.h"
-#include "commit.h"
-
-/*
- * An array of replacements.  The array is kept sorted by the original
- * sha1.
- */
-static struct replace_object {
-       unsigned char original[20];
-       unsigned char replacement[20];
-} **replace_object;
-
-static int replace_object_alloc, replace_object_nr;
-
-static const unsigned char *replace_sha1_access(size_t index, void *table)
-{
-       struct replace_object **replace = table;
-       return replace[index]->original;
-}
-
-static int replace_object_pos(const unsigned char *sha1)
-{
-       return sha1_pos(sha1, replace_object, replace_object_nr,
-                       replace_sha1_access);
-}
-
-static int register_replace_object(struct replace_object *replace,
-                                  int ignore_dups)
-{
-       int pos = replace_object_pos(replace->original);
-
-       if (0 <= pos) {
-               if (ignore_dups)
-                       free(replace);
-               else {
-                       free(replace_object[pos]);
-                       replace_object[pos] = replace;
-               }
-               return 1;
-       }
-       pos = -pos - 1;
-       ALLOC_GROW(replace_object, replace_object_nr + 1, replace_object_alloc);
-       replace_object_nr++;
-       if (pos < replace_object_nr)
-               MOVE_ARRAY(replace_object + pos + 1, replace_object + pos,
-                          replace_object_nr - pos - 1);
-       replace_object[pos] = replace;
-       return 0;
-}
-
-static int register_replace_ref(const char *refname,
-                               const struct object_id *oid,
-                               int flag, void *cb_data)
-{
-       /* Get sha1 from refname */
-       const char *slash = strrchr(refname, '/');
-       const char *hash = slash ? slash + 1 : refname;
-       struct replace_object *repl_obj = xmalloc(sizeof(*repl_obj));
-
-       if (strlen(hash) != 40 || get_sha1_hex(hash, repl_obj->original)) {
-               free(repl_obj);
-               warning("bad replace ref name: %s", refname);
-               return 0;
-       }
-
-       /* Copy sha1 from the read ref */
-       hashcpy(repl_obj->replacement, oid->hash);
-
-       /* Register new object */
-       if (register_replace_object(repl_obj, 1))
-               die("duplicate replace ref: %s", refname);
-
-       return 0;
-}
-
-static void prepare_replace_object(void)
-{
-       static int replace_object_prepared;
-
-       if (replace_object_prepared)
-               return;
-
-       for_each_replace_ref(register_replace_ref, NULL);
-       replace_object_prepared = 1;
-       if (!replace_object_nr)
-               check_replace_refs = 0;
-}
-
-/* We allow "recursive" replacement. Only within reason, though */
-#define MAXREPLACEDEPTH 5
-
-/*
- * If a replacement for object sha1 has been set up, return the
- * replacement object's name (replaced recursively, if necessary).
- * The return value is either sha1 or a pointer to a
- * permanently-allocated value.  This function always respects replace
- * references, regardless of the value of check_replace_refs.
- */
-const unsigned char *do_lookup_replace_object(const unsigned char *sha1)
-{
-       int pos, depth = MAXREPLACEDEPTH;
-       const unsigned char *cur = sha1;
-
-       prepare_replace_object();
-
-       /* Try to recursively replace the object */
-       do {
-               if (--depth < 0)
-                       die("replace depth too high for object %s",
-                           sha1_to_hex(sha1));
-
-               pos = replace_object_pos(cur);
-               if (0 <= pos)
-                       cur = replace_object[pos]->replacement;
-       } while (0 <= pos);
-
-       return cur;
-}
diff --git a/repository.c b/repository.c
index 4ffbe9bc94edc18314cb49c945038e2f20a40922..beff3caa9e24a902e560372cdab3c337769c7c89 100644 (file)
@@ -1,67 +1,73 @@
 #include "cache.h"
 #include "repository.h"
+#include "object-store.h"
 #include "config.h"
 #include "submodule-config.h"
 
 /* The main repository */
-static struct repository the_repo = {
-       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &the_index, &hash_algos[GIT_HASH_SHA1], 0, 0
-};
-struct repository *the_repository = &the_repo;
+static struct repository the_repo;
+struct repository *the_repository;
 
-static char *git_path_from_env(const char *envvar, const char *git_dir,
-                              const char *path, int fromenv)
+void initialize_the_repository(void)
 {
-       if (fromenv) {
-               const char *value = getenv(envvar);
-               if (value)
-                       return xstrdup(value);
-       }
+       the_repository = &the_repo;
 
-       return xstrfmt("%s/%s", git_dir, path);
+       the_repo.index = &the_index;
+       the_repo.objects = raw_object_store_new();
+       repo_set_hash_algo(&the_repo, GIT_HASH_SHA1);
 }
 
-static int find_common_dir(struct strbuf *sb, const char *gitdir, int fromenv)
+static void expand_base_dir(char **out, const char *in,
+                           const char *base_dir, const char *def_in)
 {
-       if (fromenv) {
-               const char *value = getenv(GIT_COMMON_DIR_ENVIRONMENT);
-               if (value) {
-                       strbuf_addstr(sb, value);
-                       return 1;
-               }
-       }
-
-       return get_common_dir_noenv(sb, gitdir);
+       free(*out);
+       if (in)
+               *out = xstrdup(in);
+       else
+               *out = xstrfmt("%s/%s", base_dir, def_in);
 }
 
-static void repo_setup_env(struct repository *repo)
+static void repo_set_commondir(struct repository *repo,
+                              const char *commondir)
 {
        struct strbuf sb = STRBUF_INIT;
 
-       repo->different_commondir = find_common_dir(&sb, repo->gitdir,
-                                                   !repo->ignore_env);
        free(repo->commondir);
+
+       if (commondir) {
+               repo->different_commondir = 1;
+               repo->commondir = xstrdup(commondir);
+               return;
+       }
+
+       repo->different_commondir = get_common_dir_noenv(&sb, repo->gitdir);
        repo->commondir = strbuf_detach(&sb, NULL);
-       free(repo->objectdir);
-       repo->objectdir = git_path_from_env(DB_ENVIRONMENT, repo->commondir,
-                                           "objects", !repo->ignore_env);
-       free(repo->graft_file);
-       repo->graft_file = git_path_from_env(GRAFT_ENVIRONMENT, repo->commondir,
-                                            "info/grafts", !repo->ignore_env);
-       free(repo->index_file);
-       repo->index_file = git_path_from_env(INDEX_ENVIRONMENT, repo->gitdir,
-                                            "index", !repo->ignore_env);
 }
 
-void repo_set_gitdir(struct repository *repo, const char *path)
+void repo_set_gitdir(struct repository *repo,
+                    const char *root,
+                    const struct set_gitdir_args *o)
 {
-       const char *gitfile = read_gitfile(path);
+       const char *gitfile = read_gitfile(root);
+       /*
+        * repo->gitdir is saved because the caller could pass "root"
+        * that also points to repo->gitdir. We want to keep it alive
+        * until after xstrdup(root). Then we can free it.
+        */
        char *old_gitdir = repo->gitdir;
 
-       repo->gitdir = xstrdup(gitfile ? gitfile : path);
-       repo_setup_env(repo);
-
+       repo->gitdir = xstrdup(gitfile ? gitfile : root);
        free(old_gitdir);
+
+       repo_set_commondir(repo, o->commondir);
+       expand_base_dir(&repo->objects->objectdir, o->object_dir,
+                       repo->commondir, "objects");
+       free(repo->objects->alternate_db);
+       repo->objects->alternate_db = xstrdup_or_null(o->alternate_db);
+       expand_base_dir(&repo->graft_file, o->graft_file,
+                       repo->commondir, "info/grafts");
+       expand_base_dir(&repo->index_file, o->index_file,
+                       repo->gitdir, "index");
 }
 
 void repo_set_hash_algo(struct repository *repo, int hash_algo)
@@ -79,6 +85,7 @@ static int repo_init_gitdir(struct repository *repo, const char *gitdir)
        int error = 0;
        char *abspath = NULL;
        const char *resolved_gitdir;
+       struct set_gitdir_args args = { NULL };
 
        abspath = real_pathdup(gitdir, 0);
        if (!abspath) {
@@ -93,7 +100,7 @@ static int repo_init_gitdir(struct repository *repo, const char *gitdir)
                goto out;
        }
 
-       repo_set_gitdir(repo, resolved_gitdir);
+       repo_set_gitdir(repo, resolved_gitdir, &args);
 
 out:
        free(abspath);
@@ -128,12 +135,14 @@ static int read_and_verify_repository_format(struct repository_format *format,
  * Initialize 'repo' based on the provided 'gitdir'.
  * Return 0 upon success and a non-zero value upon failure.
  */
-int repo_init(struct repository *repo, const char *gitdir, const char *worktree)
+int repo_init(struct repository *repo,
+             const char *gitdir,
+             const char *worktree)
 {
        struct repository_format format;
        memset(repo, 0, sizeof(*repo));
 
-       repo->ignore_env = 1;
+       repo->objects = raw_object_store_new();
 
        if (repo_init_gitdir(repo, gitdir))
                goto error;
@@ -167,7 +176,7 @@ int repo_submodule_init(struct repository *submodule,
        struct strbuf worktree = STRBUF_INIT;
        int ret = 0;
 
-       sub = submodule_from_cache(superproject, &null_oid, path);
+       sub = submodule_from_path(superproject, &null_oid, path);
        if (!sub) {
                ret = -1;
                goto out;
@@ -209,12 +218,14 @@ void repo_clear(struct repository *repo)
 {
        FREE_AND_NULL(repo->gitdir);
        FREE_AND_NULL(repo->commondir);
-       FREE_AND_NULL(repo->objectdir);
        FREE_AND_NULL(repo->graft_file);
        FREE_AND_NULL(repo->index_file);
        FREE_AND_NULL(repo->worktree);
        FREE_AND_NULL(repo->submodule_prefix);
 
+       raw_object_store_clear(repo->objects);
+       FREE_AND_NULL(repo->objects);
+
        if (repo->config) {
                git_configset_clear(repo->config);
                FREE_AND_NULL(repo->config);
diff --git a/repository.h b/repository.h
index 0329e40c7f5e72dad3ba46328a8e3d6c29ed8e58..f2646f0c52aa83f6da8950cfd96c4308498cf417 100644 (file)
@@ -2,9 +2,10 @@
 #define REPOSITORY_H
 
 struct config_set;
+struct git_hash_algo;
 struct index_state;
+struct raw_object_store;
 struct submodule_cache;
-struct git_hash_algo;
 
 struct repository {
        /* Environment */
@@ -21,10 +22,12 @@ struct repository {
        char *commondir;
 
        /*
-        * Path to the repository's object store.
-        * Cannot be NULL after initialization.
+        * Holds any information related to accessing the raw object content.
         */
-       char *objectdir;
+       struct raw_object_store *objects;
+
+       /* The store in which the refs are held. */
+       struct ref_store *refs;
 
        /*
         * Path to the repository's graft file.
@@ -72,15 +75,6 @@ struct repository {
        const struct git_hash_algo *hash_algo;
 
        /* Configurations */
-       /*
-        * Bit used during initialization to indicate if repository state (like
-        * the location of the 'objectdir') should be read from the
-        * environment.  By default this bit will be set at the beginning of
-        * 'repo_init()' so that all repositories will ignore the environment.
-        * The exception to this is 'the_repository', which doesn't go through
-        * the normal 'repo_init()' process.
-        */
-       unsigned ignore_env:1;
 
        /* Indicate if a repository has a different 'commondir' from 'gitdir' */
        unsigned different_commondir:1;
@@ -88,10 +82,27 @@ struct repository {
 
 extern struct repository *the_repository;
 
-extern void repo_set_gitdir(struct repository *repo, const char *path);
+/*
+ * Define a custom repository layout. Any field can be NULL, which
+ * will default back to the path according to the default layout.
+ */
+struct set_gitdir_args {
+       const char *commondir;
+       const char *object_dir;
+       const char *graft_file;
+       const char *index_file;
+       const char *alternate_db;
+};
+
+extern void repo_set_gitdir(struct repository *repo,
+                           const char *root,
+                           const struct set_gitdir_args *extra_args);
 extern void repo_set_worktree(struct repository *repo, const char *path);
 extern void repo_set_hash_algo(struct repository *repo, int algo);
-extern int repo_init(struct repository *repo, const char *gitdir, const char *worktree);
+extern void initialize_the_repository(void);
+extern int repo_init(struct repository *r,
+                    const char *gitdir,
+                    const char *worktree);
 extern int repo_submodule_init(struct repository *submodule,
                               struct repository *superproject,
                               const char *path);
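
The struct above spells out the contract: a caller fills in only the fields it wants to override and leaves the rest NULL. A minimal, hedged sketch of the calling convention (illustrative paths, assuming git's internal headers and a repository whose raw object store is already allocated, as repo_init() and initialize_the_repository() arrange):

    struct set_gitdir_args args = { NULL };

    /* hypothetical overrides; NULL fields keep the default layout */
    args.object_dir = "/srv/shared/objects";
    args.index_file = "/tmp/throwaway-index";

    repo_set_gitdir(repo, "/srv/repo/.git", &args);

This is roughly how environment overrides such as GIT_OBJECT_DIRECTORY or GIT_INDEX_FILE can be threaded through for the main repository without the removed ignore_env bit.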
index ea24d4c2f47ab6495d97f38245dd420a1ad38391..18cae2d11c9a86aae0ed352a8f7606b142c5c183 100644 (file)
--- a/rerere.c
+++ b/rerere.c
@@ -979,8 +979,8 @@ static int handle_cache(const char *path, unsigned char *sha1, const char *outpu
                        break;
                i = ce_stage(ce) - 1;
                if (!mmfile[i].ptr) {
-                       mmfile[i].ptr = read_sha1_file(ce->oid.hash, &type,
-                                                      &size);
+                       mmfile[i].ptr = read_object_file(&ce->oid, &type,
+                                                        &size);
                        mmfile[i].size = size;
                }
        }
index b40f3173d3fe5ef5c06c00ff8994060a9078669d..aed95b4b35fbb187bb96242afcf6b4d8e3d2008b 100644 (file)
@@ -24,7 +24,7 @@ void record_resolve_undo(struct index_state *istate, struct cache_entry *ce)
        if (!lost->util)
                lost->util = xcalloc(1, sizeof(*ui));
        ui = lost->util;
-       hashcpy(ui->sha1[stage - 1], ce->oid.hash);
+       oidcpy(&ui->oid[stage - 1], &ce->oid);
        ui->mode[stage - 1] = ce->ce_mode;
 }
 
@@ -44,7 +44,7 @@ void resolve_undo_write(struct strbuf *sb, struct string_list *resolve_undo)
                for (i = 0; i < 3; i++) {
                        if (!ui->mode[i])
                                continue;
-                       strbuf_add(sb, ui->sha1[i], 20);
+                       strbuf_add(sb, ui->oid[i].hash, the_hash_algo->rawsz);
                }
        }
 }
@@ -55,6 +55,7 @@ struct string_list *resolve_undo_read(const char *data, unsigned long size)
        size_t len;
        char *endptr;
        int i;
+       const unsigned rawsz = the_hash_algo->rawsz;
 
        resolve_undo = xcalloc(1, sizeof(*resolve_undo));
        resolve_undo->strdup_strings = 1;
@@ -87,11 +88,11 @@ struct string_list *resolve_undo_read(const char *data, unsigned long size)
                for (i = 0; i < 3; i++) {
                        if (!ui->mode[i])
                                continue;
-                       if (size < 20)
+                       if (size < rawsz)
                                goto error;
-                       hashcpy(ui->sha1[i], (const unsigned char *)data);
-                       size -= 20;
-                       data += 20;
+                       memcpy(ui->oid[i].hash, (const unsigned char *)data, rawsz);
+                       size -= rawsz;
+                       data += rawsz;
                }
        }
        return resolve_undo;
@@ -145,7 +146,7 @@ int unmerge_index_entry_at(struct index_state *istate, int pos)
                struct cache_entry *nce;
                if (!ru->mode[i])
                        continue;
-               nce = make_cache_entry(ru->mode[i], ru->sha1[i],
+               nce = make_cache_entry(ru->mode[i], ru->oid[i].hash,
                                       name, i + 1, 0);
                if (matched)
                        nce->ce_flags |= CE_MATCHED;
index 46306455edddb94a554a7a2fcadf49a30861f599..87291904bd34e0e7f3a3601b6742f5345391824d 100644 (file)
@@ -3,7 +3,7 @@
 
 struct resolve_undo_info {
        unsigned int mode[3];
-       unsigned char sha1[3][20];
+       struct object_id oid[3];
 };
 
 extern void record_resolve_undo(struct index_state *, struct cache_entry *);
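
The reader in resolve-undo.c above now consumes the_hash_algo->rawsz bytes per stage instead of a hard-coded 20, so the same parser works for SHA-1 (20 bytes) and any future hash such as SHA-256 (32 bytes). A standalone, hedged sketch of that pattern with stand-in names, not git's own code:

    #include <string.h>
    #include <sys/types.h>

    /*
     * Copy the per-stage hashes (rawsz bytes each) out of 'data'.
     * Returns the number of bytes consumed, or -1 on a truncated record.
     */
    static ssize_t read_stage_hashes(const unsigned char *data, size_t size,
                                     size_t rawsz, const unsigned int mode[3],
                                     unsigned char out[3][32])
    {
            size_t consumed = 0;
            int i;

            for (i = 0; i < 3; i++) {
                    if (!mode[i])
                            continue;               /* stage not recorded */
                    if (size - consumed < rawsz)
                            return -1;              /* truncated */
                    memcpy(out[i], data + consumed, rawsz);
                    consumed += rawsz;
            }
            return (ssize_t)consumed;
    }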
index b42c836d7a64a67779c587954bcab90d919aaffb..4e0e193e57e0d1360c52ae566770df61677e554e 100644 (file)
@@ -6,6 +6,7 @@
 #include "diff.h"
 #include "refs.h"
 #include "revision.h"
+#include "repository.h"
 #include "graph.h"
 #include "grep.h"
 #include "reflog-walk.h"
@@ -440,8 +441,8 @@ static void file_change(struct diff_options *options,
 static int rev_compare_tree(struct rev_info *revs,
                            struct commit *parent, struct commit *commit)
 {
-       struct tree *t1 = parent->tree;
-       struct tree *t2 = commit->tree;
+       struct tree *t1 = get_commit_tree(parent);
+       struct tree *t2 = get_commit_tree(commit);
 
        if (!t1)
                return REV_TREE_NEW;
@@ -477,7 +478,7 @@ static int rev_compare_tree(struct rev_info *revs,
 static int rev_same_tree_as_empty(struct rev_info *revs, struct commit *commit)
 {
        int retval;
-       struct tree *t1 = commit->tree;
+       struct tree *t1 = get_commit_tree(commit);
 
        if (!t1)
                return 0;
@@ -615,7 +616,7 @@ static void try_to_simplify_commit(struct rev_info *revs, struct commit *commit)
        if (!revs->prune)
                return;
 
-       if (!commit->tree)
+       if (!get_commit_tree(commit))
                return;
 
        if (!commit->parents) {
@@ -1285,7 +1286,7 @@ void add_reflogs_to_pending(struct rev_info *revs, unsigned flags)
 
        cb.all_revs = revs;
        cb.all_flags = flags;
-       cb.refs = get_main_ref_store();
+       cb.refs = get_main_ref_store(the_repository);
        for_each_reflog(handle_one_reflog, &cb);
 
        if (!revs->single_worktree)
@@ -2176,7 +2177,7 @@ static int handle_revision_pseudo_opt(const char *submodule,
                        die("BUG: --single-worktree cannot be used together with submodule");
                refs = get_submodule_ref_store(submodule);
        } else
-               refs = get_main_ref_store();
+               refs = get_main_ref_store(the_repository);
 
        /*
         * NOTE!
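
Throughout revision.c the direct commit->tree dereferences are replaced by the get_commit_tree()/get_commit_tree_oid() accessors, so the revision walker no longer cares how or when the tree pointer is materialized. A generic, hedged sketch of that accessor pattern with stand-in types (not git's implementation, which may obtain the tree from object parsing or other sources):

    struct tree { int parsed; };
    struct commit { struct tree *maybe_tree; };

    /* stand-in for whatever actually loads the tree object */
    static struct tree *load_tree(struct commit *c)
    {
            static struct tree t = { 1 };
            (void)c;
            return &t;
    }

    static struct tree *get_commit_tree(struct commit *c)
    {
            if (!c->maybe_tree)
                    c->maybe_tree = load_tree(c);   /* resolve lazily */
            return c->maybe_tree;
    }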
index a483d5904a3ec1acae8908dd2e699fa00bcaaa9d..12c94c1dbe5a720a7581af81516b6884fc25d8d0 100644 (file)
@@ -1,6 +1,6 @@
 #include "cache.h"
 #include "run-command.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "sigchain.h"
 #include "argv-array.h"
 #include "thread-utils.h"
@@ -621,7 +621,7 @@ static void trace_run_command(const struct child_process *cp)
        if (!trace_want(&trace_default_key))
                return;
 
-       strbuf_addf(&buf, "trace: run_command:");
+       strbuf_addstr(&buf, "trace: run_command:");
        if (cp->dir) {
                strbuf_addstr(&buf, " cd ");
                sq_quote_buf_pretty(&buf, cp->dir);
index 8d9190f5e7815c6b2f18afd266643a8c862e526e..19025a7aca82a7066b9a2d40d4d50406a9749a5f 100644 (file)
@@ -37,14 +37,14 @@ int option_parse_push_signed(const struct option *opt,
        die("bad %s argument: %s", opt->long_name, arg);
 }
 
-static void feed_object(const unsigned char *sha1, FILE *fh, int negative)
+static void feed_object(const struct object_id *oid, FILE *fh, int negative)
 {
-       if (negative && !has_sha1_file(sha1))
+       if (negative && !has_sha1_file(oid->hash))
                return;
 
        if (negative)
                putc('^', fh);
-       fputs(sha1_to_hex(sha1), fh);
+       fputs(oid_to_hex(oid), fh);
        putc('\n', fh);
 }
 
@@ -89,13 +89,13 @@ static int pack_objects(int fd, struct ref *refs, struct oid_array *extra, struc
         */
        po_in = xfdopen(po.in, "w");
        for (i = 0; i < extra->nr; i++)
-               feed_object(extra->oid[i].hash, po_in, 1);
+               feed_object(&extra->oid[i], po_in, 1);
 
        while (refs) {
                if (!is_null_oid(&refs->old_oid))
-                       feed_object(refs->old_oid.hash, po_in, 1);
+                       feed_object(&refs->old_oid, po_in, 1);
                if (!is_null_oid(&refs->new_oid))
-                       feed_object(refs->new_oid.hash, po_in, 0);
+                       feed_object(&refs->new_oid, po_in, 0);
                refs = refs->next;
        }
 
index f9d1001dee9ad10e243aaeafc46fbdd13597fce7..1ce63261a32a2398c3916fd41f577db58478fe0f 100644 (file)
@@ -7,7 +7,7 @@
 #include "sequencer.h"
 #include "tag.h"
 #include "run-command.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "utf8.h"
 #include "cache-tree.h"
 #include "diff.h"
 #include "hashmap.h"
 #include "notes-utils.h"
 #include "sigchain.h"
+#include "unpack-trees.h"
+#include "worktree.h"
+#include "oidmap.h"
+#include "oidset.h"
 
 #define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
 
@@ -74,13 +78,6 @@ static GIT_PATH_FUNC(rebase_path_message, "rebase-merge/message")
  * previous commit and from the first squash/fixup commit are written
  * to it. The commit message for each subsequent squash/fixup commit
  * is appended to the file as it is processed.
- *
- * The first line of the file is of the form
- *     # This is a combination of $count commits.
- * where $count is the number of commits whose messages have been
- * written to the file so far (including the initial "pick" commit).
- * Each time that a commit message is processed, this line is read and
- * updated. It is deleted just before the combined commit is made.
  */
 static GIT_PATH_FUNC(rebase_path_squash_msg, "rebase-merge/message-squash")
 /*
@@ -91,6 +88,11 @@ static GIT_PATH_FUNC(rebase_path_squash_msg, "rebase-merge/message-squash")
  * commit without opening the editor.)
  */
 static GIT_PATH_FUNC(rebase_path_fixup_msg, "rebase-merge/message-fixup")
+/*
+ * This file contains the list of fixup/squash commands that have been
+ * accumulated into message-fixup or message-squash so far.
+ */
+static GIT_PATH_FUNC(rebase_path_current_fixups, "rebase-merge/current-fixups")
 /*
  * A script to set the GIT_AUTHOR_NAME, GIT_AUTHOR_EMAIL, and
  * GIT_AUTHOR_DATE that will be used for the commit that is currently
@@ -120,6 +122,13 @@ static GIT_PATH_FUNC(rebase_path_stopped_sha, "rebase-merge/stopped-sha")
 static GIT_PATH_FUNC(rebase_path_rewritten_list, "rebase-merge/rewritten-list")
 static GIT_PATH_FUNC(rebase_path_rewritten_pending,
        "rebase-merge/rewritten-pending")
+
+/*
+ * The path of the file listing refs that need to be deleted after the rebase
+ * finishes. This is used by the `label` command to record the need for cleanup.
+ */
+static GIT_PATH_FUNC(rebase_path_refs_to_delete, "rebase-merge/refs-to-delete")
+
 /*
  * The following files are written by git-rebase just after parsing the
  * command-line (and are only consumed, not modified, by the sequencer).
@@ -127,6 +136,7 @@ static GIT_PATH_FUNC(rebase_path_rewritten_pending,
 static GIT_PATH_FUNC(rebase_path_gpg_sign_opt, "rebase-merge/gpg_sign_opt")
 static GIT_PATH_FUNC(rebase_path_orig_head, "rebase-merge/orig-head")
 static GIT_PATH_FUNC(rebase_path_verbose, "rebase-merge/verbose")
+static GIT_PATH_FUNC(rebase_path_signoff, "rebase-merge/signoff")
 static GIT_PATH_FUNC(rebase_path_head_name, "rebase-merge/head-name")
 static GIT_PATH_FUNC(rebase_path_onto, "rebase-merge/onto")
 static GIT_PATH_FUNC(rebase_path_autostash, "rebase-merge/autostash")
@@ -244,18 +254,35 @@ static const char *gpg_sign_opt_quoted(struct replay_opts *opts)
 
 int sequencer_remove_state(struct replay_opts *opts)
 {
-       struct strbuf dir = STRBUF_INIT;
+       struct strbuf buf = STRBUF_INIT;
        int i;
 
+       if (is_rebase_i(opts) &&
+           strbuf_read_file(&buf, rebase_path_refs_to_delete(), 0) > 0) {
+               char *p = buf.buf;
+               while (*p) {
+                       char *eol = strchr(p, '\n');
+                       if (eol)
+                               *eol = '\0';
+                       if (delete_ref("(rebase -i) cleanup", p, NULL, 0) < 0)
+                               warning(_("could not delete '%s'"), p);
+                       if (!eol)
+                               break;
+                       p = eol + 1;
+               }
+       }
+
        free(opts->gpg_sign);
        free(opts->strategy);
        for (i = 0; i < opts->xopts_nr; i++)
                free(opts->xopts[i]);
        free(opts->xopts);
+       strbuf_release(&opts->current_fixups);
 
-       strbuf_addstr(&dir, get_dir(opts));
-       remove_dir_recursively(&dir, 0);
-       strbuf_release(&dir);
+       strbuf_reset(&buf);
+       strbuf_addstr(&buf, get_dir(opts));
+       remove_dir_recursively(&buf, 0);
+       strbuf_release(&buf);
 
        return 0;
 }
@@ -282,7 +309,7 @@ struct commit_message {
 
 static const char *short_commit_name(struct commit *commit)
 {
-       return find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV);
+       return find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV);
 }
 
 static int get_message(struct commit *commit, struct commit_message *out)
@@ -345,12 +372,14 @@ static int write_message(const void *buf, size_t len, const char *filename,
        if (msg_fd < 0)
                return error_errno(_("could not lock '%s'"), filename);
        if (write_in_full(msg_fd, buf, len) < 0) {
+               error_errno(_("could not write to '%s'"), filename);
                rollback_lock_file(&msg_file);
-               return error_errno(_("could not write to '%s'"), filename);
+               return -1;
        }
        if (append_eol && write(msg_fd, "\n", 1) < 0) {
+               error_errno(_("could not write eol to '%s'"), filename);
                rollback_lock_file(&msg_file);
-               return error_errno(_("could not write eol to '%s'"), filename);
+               return -1;
        }
        if (commit_lock_file(&msg_file) < 0)
                return error(_("failed to finalize '%s'"), filename);
@@ -499,8 +528,8 @@ static int do_recursive_merge(struct commit *base, struct commit *next,
        o.show_rename_progress = 1;
 
        head_tree = parse_tree_indirect(head);
-       next_tree = next ? next->tree : empty_tree();
-       base_tree = base ? base->tree : empty_tree();
+       next_tree = next ? get_commit_tree(next) : empty_tree();
+       base_tree = base ? get_commit_tree(base) : empty_tree();
 
        for (xopt = opts->xopts; xopt != opts->xopts + opts->xopts_nr; xopt++)
                parse_merge_opt(&o, *xopt);
@@ -561,7 +590,7 @@ static int is_index_unchanged(void)
                        return error(_("unable to update cache tree"));
 
        return !oidcmp(&active_cache_tree->oid,
-                      &head_commit->tree->object.oid);
+                      get_commit_tree_oid(head_commit));
 }
 
 static int write_author_script(const char *message)
@@ -717,6 +746,8 @@ static int run_git_commit(const char *defmsg, struct replay_opts *opts,
                argv_array_pushf(&cmd.args, "-S%s", opts->gpg_sign);
        if (defmsg)
                argv_array_pushl(&cmd.args, "-F", defmsg, NULL);
+       else if (!(flags & EDIT_MSG))
+               argv_array_pushl(&cmd.args, "-C", "HEAD", NULL);
        if ((flags & CLEANUP_MSG))
                argv_array_push(&cmd.args, "--cleanup=strip");
        if ((flags & EDIT_MSG))
@@ -1112,13 +1143,13 @@ static int try_to_commit(struct strbuf *msg, const char *author,
                commit_list_insert(current_head, &parents);
        }
 
-       if (write_cache_as_tree(tree.hash, 0, NULL)) {
+       if (write_cache_as_tree(&tree, 0, NULL)) {
                res = error(_("git write-tree failed to write a tree"));
                goto out;
        }
 
        if (!(flags & ALLOW_EMPTY) && !oidcmp(current_head ?
-                                             &current_head->tree->object.oid :
+                                             get_commit_tree_oid(current_head) :
                                              &empty_tree_oid, &tree)) {
                res = 1; /* run 'git commit' to display error message */
                goto out;
@@ -1148,6 +1179,8 @@ static int try_to_commit(struct strbuf *msg, const char *author,
                goto out;
        }
 
+       reset_ident_date();
+
        if (commit_tree_extended(msg->buf, msg->len, &tree, parents,
                                 oid, author, opts->gpg_sign, extra)) {
                res = error(_("failed to write commit object"));
@@ -1216,12 +1249,12 @@ static int is_original_commit_empty(struct commit *commit)
                if (parse_commit(parent))
                        return error(_("could not parse parent commit %s"),
                                oid_to_hex(&parent->object.oid));
-               ptree_oid = &parent->tree->object.oid;
+               ptree_oid = get_commit_tree_oid(parent);
        } else {
                ptree_oid = the_hash_algo->empty_tree; /* commit is root */
        }
 
-       return !oidcmp(ptree_oid, &commit->tree->object.oid);
+       return !oidcmp(ptree_oid, get_commit_tree_oid(commit));
 }
 
 /*
@@ -1277,6 +1310,9 @@ enum todo_command {
        TODO_SQUASH,
        /* commands that do something else than handling a single commit */
        TODO_EXEC,
+       TODO_LABEL,
+       TODO_RESET,
+       TODO_MERGE,
        /* commands that do nothing but are counted for reporting progress */
        TODO_NOOP,
        TODO_DROP,
@@ -1295,6 +1331,9 @@ static struct {
        { 'f', "fixup" },
        { 's', "squash" },
        { 'x', "exec" },
+       { 'l', "label" },
+       { 't', "reset" },
+       { 'm', "merge" },
        { 0,   "noop" },
        { 'd', "drop" },
        { 0,   NULL }
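
For orientation, a hedged example of a todo list using the new commands (hashes and subjects are placeholders, and the exact generated comments may differ):

    label onto

    # Branch topic
    reset onto
    pick 1234567 Some change on the topic branch
    label topic

    reset onto
    merge -C 89abcde topic # Merge branch 'topic'

Per the parsing added below, `merge -C <commit> <label>` reuses the message of the original merge commit, while `merge -c <commit> <label>` or a plain `merge <label>` sets TODO_EDIT_MERGE_MSG so the user gets to edit it.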
@@ -1328,34 +1367,23 @@ static int update_squash_messages(enum todo_command command,
                struct commit *commit, struct replay_opts *opts)
 {
        struct strbuf buf = STRBUF_INIT;
-       int count, res;
+       int res;
        const char *message, *body;
 
-       if (file_exists(rebase_path_squash_msg())) {
+       if (opts->current_fixup_count > 0) {
                struct strbuf header = STRBUF_INIT;
-               char *eol, *p;
+               char *eol;
 
-               if (strbuf_read_file(&buf, rebase_path_squash_msg(), 2048) <= 0)
+               if (strbuf_read_file(&buf, rebase_path_squash_msg(), 9) <= 0)
                        return error(_("could not read '%s'"),
                                rebase_path_squash_msg());
 
-               p = buf.buf + 1;
-               eol = strchrnul(buf.buf, '\n');
-               if (buf.buf[0] != comment_line_char ||
-                   (p += strcspn(p, "0123456789\n")) == eol)
-                       return error(_("unexpected 1st line of squash message:"
-                                      "\n\n\t%.*s"),
-                                    (int)(eol - buf.buf), buf.buf);
-               count = strtol(p, NULL, 10);
-
-               if (count < 1)
-                       return error(_("invalid 1st line of squash message:\n"
-                                      "\n\t%.*s"),
-                                    (int)(eol - buf.buf), buf.buf);
+               eol = buf.buf[0] != comment_line_char ?
+                       buf.buf : strchrnul(buf.buf, '\n');
 
                strbuf_addf(&header, "%c ", comment_line_char);
-               strbuf_addf(&header,
-                           _("This is a combination of %d commits."), ++count);
+               strbuf_addf(&header, _("This is a combination of %d commits."),
+                           opts->current_fixup_count + 2);
                strbuf_splice(&buf, 0, eol - buf.buf, header.buf, header.len);
                strbuf_release(&header);
        } else {
@@ -1378,10 +1406,8 @@ static int update_squash_messages(enum todo_command command,
                                     rebase_path_fixup_msg());
                }
 
-               count = 2;
                strbuf_addf(&buf, "%c ", comment_line_char);
-               strbuf_addf(&buf, _("This is a combination of %d commits."),
-                           count);
+               strbuf_addf(&buf, _("This is a combination of %d commits."), 2);
                strbuf_addf(&buf, "\n%c ", comment_line_char);
                strbuf_addstr(&buf, _("This is the 1st commit message:"));
                strbuf_addstr(&buf, "\n\n");
@@ -1398,13 +1424,14 @@ static int update_squash_messages(enum todo_command command,
        if (command == TODO_SQUASH) {
                unlink(rebase_path_fixup_msg());
                strbuf_addf(&buf, "\n%c ", comment_line_char);
-               strbuf_addf(&buf, _("This is the commit message #%d:"), count);
+               strbuf_addf(&buf, _("This is the commit message #%d:"),
+                           ++opts->current_fixup_count);
                strbuf_addstr(&buf, "\n\n");
                strbuf_addstr(&buf, body);
        } else if (command == TODO_FIXUP) {
                strbuf_addf(&buf, "\n%c ", comment_line_char);
                strbuf_addf(&buf, _("The commit message #%d will be skipped:"),
-                           count);
+                           ++opts->current_fixup_count);
                strbuf_addstr(&buf, "\n\n");
                strbuf_add_commented_lines(&buf, body, strlen(body));
        } else
@@ -1413,6 +1440,17 @@ static int update_squash_messages(enum todo_command command,
 
        res = write_message(buf.buf, buf.len, rebase_path_squash_msg(), 0);
        strbuf_release(&buf);
+
+       if (!res) {
+               strbuf_addf(&opts->current_fixups, "%s%s %s",
+                           opts->current_fixups.len ? "\n" : "",
+                           command_to_string(command),
+                           oid_to_hex(&commit->object.oid));
+               res = write_message(opts->current_fixups.buf,
+                                   opts->current_fixups.len,
+                                   rebase_path_current_fixups(), 0);
+       }
+
        return res;
 }
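
Read concretely (a hedged illustration of the arithmetic above): the header counts the original picked commit, every fixup/squash already recorded in current-fixups, and the command being processed, so with opts->current_fixup_count == 1 the first line is rewritten to read "This is a combination of 3 commits."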
 
@@ -1474,7 +1512,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
                 * that represents the "current" state for merge-recursive
                 * to work on.
                 */
-               if (write_cache_as_tree(head.hash, 0, NULL))
+               if (write_cache_as_tree(&head, 0, NULL))
                        return error(_("your index file is unmerged."));
        } else {
                unborn = get_oid("HEAD", &head);
@@ -1604,7 +1642,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
                }
        }
 
-       if (opts->signoff)
+       if (opts->signoff && !is_fixup(command))
                append_signoff(&msgbuf, 0, 0);
 
        if (is_rebase_i(opts) && write_author_script(msg.message) < 0)
@@ -1675,6 +1713,9 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
        if (!res && final_fixup) {
                unlink(rebase_path_fixup_msg());
                unlink(rebase_path_squash_msg());
+               unlink(rebase_path_current_fixups());
+               strbuf_reset(&opts->current_fixups);
+               opts->current_fixup_count = 0;
        }
 
 leave:
@@ -1722,9 +1763,14 @@ static int read_and_refresh_cache(struct replay_opts *opts)
        return 0;
 }
 
+enum todo_item_flags {
+       TODO_EDIT_MERGE_MSG = 1
+};
+
 struct todo_item {
        enum todo_command command;
        struct commit *commit;
+       unsigned int flags;
        const char *arg;
        int arg_len;
        size_t offset_in_buf;
@@ -1759,6 +1805,8 @@ static int parse_insn_line(struct todo_item *item, const char *bol, char *eol)
        char *end_of_object_name;
        int i, saved, status, padding;
 
+       item->flags = 0;
+
        /* left-trim */
        bol += strspn(bol, " \t");
 
@@ -1800,13 +1848,29 @@ static int parse_insn_line(struct todo_item *item, const char *bol, char *eol)
                return error(_("missing arguments for %s"),
                             command_to_string(item->command));
 
-       if (item->command == TODO_EXEC) {
+       if (item->command == TODO_EXEC || item->command == TODO_LABEL ||
+           item->command == TODO_RESET) {
                item->commit = NULL;
                item->arg = bol;
                item->arg_len = (int)(eol - bol);
                return 0;
        }
 
+       if (item->command == TODO_MERGE) {
+               if (skip_prefix(bol, "-C", &bol))
+                       bol += strspn(bol, " \t");
+               else if (skip_prefix(bol, "-c", &bol)) {
+                       bol += strspn(bol, " \t");
+                       item->flags |= TODO_EDIT_MERGE_MSG;
+               } else {
+                       item->flags |= TODO_EDIT_MERGE_MSG;
+                       item->commit = NULL;
+                       item->arg = bol;
+                       item->arg_len = (int)(eol - bol);
+                       return 0;
+               }
+       }
+
        end_of_object_name = (char *) bol + strcspn(bol, " \t\n");
        saved = *end_of_object_name;
        *end_of_object_name = '\0';
@@ -1868,6 +1932,23 @@ static int count_commands(struct todo_list *todo_list)
        return count;
 }
 
+static int get_item_line_offset(struct todo_list *todo_list, int index)
+{
+       return index < todo_list->nr ?
+               todo_list->items[index].offset_in_buf : todo_list->buf.len;
+}
+
+static const char *get_item_line(struct todo_list *todo_list, int index)
+{
+       return todo_list->buf.buf + get_item_line_offset(todo_list, index);
+}
+
+static int get_item_line_length(struct todo_list *todo_list, int index)
+{
+       return get_item_line_offset(todo_list, index + 1)
+               -  get_item_line_offset(todo_list, index);
+}
+
 static ssize_t strbuf_read_file_or_whine(struct strbuf *sb, const char *path)
 {
        int fd;
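
The three helpers added above treat the index one past the last todo item as pointing at the end of the buffer, so a command line's length is simply the difference of two offsets, and that works for the final line too. A standalone, hedged illustration of the trick with toy data (not git's types):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *buf = "pick 1234 one\npick 5678 two\n";
            /* start offset of each item, then the total length as a sentinel */
            size_t offsets[] = { 0, 14, strlen(buf) };
            int i;

            for (i = 0; i < 2; i++)
                    printf("item %d: %.*s", i,
                           (int)(offsets[i + 1] - offsets[i]),
                           buf + offsets[i]);
            return 0;
    }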
@@ -2043,9 +2124,24 @@ static int read_populate_opts(struct replay_opts *opts)
                if (file_exists(rebase_path_verbose()))
                        opts->verbose = 1;
 
+               if (file_exists(rebase_path_signoff())) {
+                       opts->allow_ff = 0;
+                       opts->signoff = 1;
+               }
+
                read_strategy_opts(opts, &buf);
                strbuf_release(&buf);
 
+               if (read_oneliner(&opts->current_fixups,
+                                 rebase_path_current_fixups(), 1)) {
+                       const char *p = opts->current_fixups.buf;
+                       opts->current_fixup_count = 1;
+                       while ((p = strchr(p, '\n'))) {
+                               opts->current_fixup_count++;
+                               p++;
+                       }
+               }
+
                return 0;
        }
 
@@ -2119,9 +2215,9 @@ static int save_head(const char *head)
        written = write_in_full(fd, buf.buf, buf.len);
        strbuf_release(&buf);
        if (written < 0) {
+               error_errno(_("could not write to '%s'"), git_path_head_file());
                rollback_lock_file(&head_lock);
-               return error_errno(_("could not write to '%s'"),
-                                  git_path_head_file());
+               return -1;
        }
        if (commit_lock_file(&head_lock) < 0)
                return error(_("failed to finalize '%s'"), git_path_head_file());
@@ -2242,29 +2338,27 @@ static int save_todo(struct todo_list *todo_list, struct replay_opts *opts)
        fd = hold_lock_file_for_update(&todo_lock, todo_path, 0);
        if (fd < 0)
                return error_errno(_("could not lock '%s'"), todo_path);
-       offset = next < todo_list->nr ?
-               todo_list->items[next].offset_in_buf : todo_list->buf.len;
+       offset = get_item_line_offset(todo_list, next);
        if (write_in_full(fd, todo_list->buf.buf + offset,
                        todo_list->buf.len - offset) < 0)
                return error_errno(_("could not write to '%s'"), todo_path);
        if (commit_lock_file(&todo_lock) < 0)
                return error(_("failed to finalize '%s'"), todo_path);
 
-       if (is_rebase_i(opts)) {
-               const char *done_path = rebase_path_done();
-               int fd = open(done_path, O_CREAT | O_WRONLY | O_APPEND, 0666);
-               int prev_offset = !next ? 0 :
-                       todo_list->items[next - 1].offset_in_buf;
+       if (is_rebase_i(opts) && next > 0) {
+               const char *done = rebase_path_done();
+               int fd = open(done, O_CREAT | O_WRONLY | O_APPEND, 0666);
+               int ret = 0;
 
-               if (fd >= 0 && offset > prev_offset &&
-                   write_in_full(fd, todo_list->buf.buf + prev_offset,
-                                 offset - prev_offset) < 0) {
-                       close(fd);
-                       return error_errno(_("could not write to '%s'"),
-                                          done_path);
-               }
-               if (fd >= 0)
-                       close(fd);
+               if (fd < 0)
+                       return 0;
+               if (write_in_full(fd, get_item_line(todo_list, next - 1),
+                                 get_item_line_length(todo_list, next - 1))
+                   < 0)
+                       ret = error_errno(_("could not write to '%s'"), done);
+               if (close(fd) < 0)
+                       ret = error_errno(_("failed to finalize '%s'"), done);
+               return ret;
        }
        return 0;
 }
@@ -2392,10 +2486,9 @@ static int error_with_patch(struct commit *commit,
 static int error_failed_squash(struct commit *commit,
        struct replay_opts *opts, int subject_len, const char *subject)
 {
-       if (rename(rebase_path_squash_msg(), rebase_path_message()))
-               return error(_("could not rename '%s' to '%s'"),
+       if (copy_file(rebase_path_message(), rebase_path_squash_msg(), 0666))
+               return error(_("could not copy '%s' to '%s'"),
                        rebase_path_squash_msg(), rebase_path_message());
-       unlink(rebase_path_fixup_msg());
        unlink(git_path_merge_msg());
        if (copy_file(git_path_merge_msg(), rebase_path_message(), 0666))
                return error(_("could not copy '%s' to '%s'"),
@@ -2448,6 +2541,349 @@ static int do_exec(const char *command_line)
        return status;
 }
 
+static int safe_append(const char *filename, const char *fmt, ...)
+{
+       va_list ap;
+       struct lock_file lock = LOCK_INIT;
+       int fd = hold_lock_file_for_update(&lock, filename,
+                                          LOCK_REPORT_ON_ERROR);
+       struct strbuf buf = STRBUF_INIT;
+
+       if (fd < 0)
+               return -1;
+
+       if (strbuf_read_file(&buf, filename, 0) < 0 && errno != ENOENT) {
+               error_errno(_("could not read '%s'"), filename);
+               rollback_lock_file(&lock);
+               return -1;
+       }
+       strbuf_complete(&buf, '\n');
+       va_start(ap, fmt);
+       strbuf_vaddf(&buf, fmt, ap);
+       va_end(ap);
+
+       if (write_in_full(fd, buf.buf, buf.len) < 0) {
+               error_errno(_("could not write to '%s'"), filename);
+               strbuf_release(&buf);
+               rollback_lock_file(&lock);
+               return -1;
+       }
+       if (commit_lock_file(&lock) < 0) {
+               strbuf_release(&buf);
+               rollback_lock_file(&lock);
+               return error(_("failed to finalize '%s'"), filename);
+       }
+
+       strbuf_release(&buf);
+       return 0;
+}
+
+static int do_label(const char *name, int len)
+{
+       struct ref_store *refs = get_main_ref_store(the_repository);
+       struct ref_transaction *transaction;
+       struct strbuf ref_name = STRBUF_INIT, err = STRBUF_INIT;
+       struct strbuf msg = STRBUF_INIT;
+       int ret = 0;
+       struct object_id head_oid;
+
+       if (len == 1 && *name == '#')
+               return error("Illegal label name: '%.*s'", len, name);
+
+       strbuf_addf(&ref_name, "refs/rewritten/%.*s", len, name);
+       strbuf_addf(&msg, "rebase -i (label) '%.*s'", len, name);
+
+       transaction = ref_store_transaction_begin(refs, &err);
+       if (!transaction) {
+               error("%s", err.buf);
+               ret = -1;
+       } else if (get_oid("HEAD", &head_oid)) {
+               error(_("could not read HEAD"));
+               ret = -1;
+       } else if (ref_transaction_update(transaction, ref_name.buf, &head_oid,
+                                         NULL, 0, msg.buf, &err) < 0 ||
+                  ref_transaction_commit(transaction, &err)) {
+               error("%s", err.buf);
+               ret = -1;
+       }
+       ref_transaction_free(transaction);
+       strbuf_release(&err);
+       strbuf_release(&msg);
+
+       if (!ret)
+               ret = safe_append(rebase_path_refs_to_delete(),
+                                 "%s\n", ref_name.buf);
+       strbuf_release(&ref_name);
+
+       return ret;
+}
+
+static const char *reflog_message(struct replay_opts *opts,
+       const char *sub_action, const char *fmt, ...);
+
+static int do_reset(const char *name, int len, struct replay_opts *opts)
+{
+       struct strbuf ref_name = STRBUF_INIT;
+       struct object_id oid;
+       struct lock_file lock = LOCK_INIT;
+       struct tree_desc desc;
+       struct tree *tree;
+       struct unpack_trees_options unpack_tree_opts;
+       int ret = 0, i;
+
+       if (hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0)
+               return -1;
+
+       /* Determine the length of the label */
+       for (i = 0; i < len; i++)
+               if (isspace(name[i]))
+                       len = i;
+
+       strbuf_addf(&ref_name, "refs/rewritten/%.*s", len, name);
+       if (get_oid(ref_name.buf, &oid) &&
+           get_oid(ref_name.buf + strlen("refs/rewritten/"), &oid)) {
+               error(_("could not read '%s'"), ref_name.buf);
+               rollback_lock_file(&lock);
+               strbuf_release(&ref_name);
+               return -1;
+       }
+
+       memset(&unpack_tree_opts, 0, sizeof(unpack_tree_opts));
+       setup_unpack_trees_porcelain(&unpack_tree_opts, "reset");
+       unpack_tree_opts.head_idx = 1;
+       unpack_tree_opts.src_index = &the_index;
+       unpack_tree_opts.dst_index = &the_index;
+       unpack_tree_opts.fn = oneway_merge;
+       unpack_tree_opts.merge = 1;
+       unpack_tree_opts.update = 1;
+
+       if (read_cache_unmerged()) {
+               rollback_lock_file(&lock);
+               strbuf_release(&ref_name);
+               return error_resolve_conflict(_(action_name(opts)));
+       }
+
+       if (!fill_tree_descriptor(&desc, &oid)) {
+               error(_("failed to find tree of %s"), oid_to_hex(&oid));
+               rollback_lock_file(&lock);
+               free((void *)desc.buffer);
+               strbuf_release(&ref_name);
+               return -1;
+       }
+
+       if (unpack_trees(1, &desc, &unpack_tree_opts)) {
+               rollback_lock_file(&lock);
+               free((void *)desc.buffer);
+               strbuf_release(&ref_name);
+               return -1;
+       }
+
+       tree = parse_tree_indirect(&oid);
+       prime_cache_tree(&the_index, tree);
+
+       if (write_locked_index(&the_index, &lock, COMMIT_LOCK) < 0)
+               ret = error(_("could not write index"));
+       free((void *)desc.buffer);
+
+       if (!ret)
+               ret = update_ref(reflog_message(opts, "reset", "'%.*s'",
+                                               len, name), "HEAD", &oid,
+                                NULL, 0, UPDATE_REFS_MSG_ON_ERR);
+
+       strbuf_release(&ref_name);
+       return ret;
+}
+
+static int do_merge(struct commit *commit, const char *arg, int arg_len,
+                   int flags, struct replay_opts *opts)
+{
+       int run_commit_flags = (flags & TODO_EDIT_MERGE_MSG) ?
+               EDIT_MSG | VERIFY_MSG : 0;
+       struct strbuf ref_name = STRBUF_INIT;
+       struct commit *head_commit, *merge_commit, *i;
+       struct commit_list *bases, *j, *reversed = NULL;
+       struct merge_options o;
+       int merge_arg_len, oneline_offset, can_fast_forward, ret;
+       static struct lock_file lock;
+       const char *p;
+
+       if (hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0) {
+               ret = -1;
+               goto leave_merge;
+       }
+
+       head_commit = lookup_commit_reference_by_name("HEAD");
+       if (!head_commit) {
+               ret = error(_("cannot merge without a current revision"));
+               goto leave_merge;
+       }
+
+       oneline_offset = arg_len;
+       merge_arg_len = strcspn(arg, " \t\n");
+       p = arg + merge_arg_len;
+       p += strspn(p, " \t\n");
+       if (*p == '#' && (!p[1] || isspace(p[1]))) {
+               p += 1 + strspn(p + 1, " \t\n");
+               oneline_offset = p - arg;
+       } else if (p - arg < arg_len)
+               BUG("octopus merges are not supported yet: '%s'", p);
+
+       strbuf_addf(&ref_name, "refs/rewritten/%.*s", merge_arg_len, arg);
+       merge_commit = lookup_commit_reference_by_name(ref_name.buf);
+       if (!merge_commit) {
+               /* fall back to non-rewritten ref or commit */
+               strbuf_splice(&ref_name, 0, strlen("refs/rewritten/"), "", 0);
+               merge_commit = lookup_commit_reference_by_name(ref_name.buf);
+       }
+
+       if (!merge_commit) {
+               ret = error(_("could not resolve '%s'"), ref_name.buf);
+               goto leave_merge;
+       }
+
+       if (commit) {
+               const char *message = get_commit_buffer(commit, NULL);
+               const char *body;
+               int len;
+
+               if (!message) {
+                       ret = error(_("could not get commit message of '%s'"),
+                                   oid_to_hex(&commit->object.oid));
+                       goto leave_merge;
+               }
+               write_author_script(message);
+               find_commit_subject(message, &body);
+               len = strlen(body);
+               ret = write_message(body, len, git_path_merge_msg(), 0);
+               unuse_commit_buffer(commit, message);
+               if (ret) {
+                       error_errno(_("could not write '%s'"),
+                                   git_path_merge_msg());
+                       goto leave_merge;
+               }
+       } else {
+               struct strbuf buf = STRBUF_INIT;
+               int len;
+
+               strbuf_addf(&buf, "author %s", git_author_info(0));
+               write_author_script(buf.buf);
+               strbuf_reset(&buf);
+
+               if (oneline_offset < arg_len) {
+                       p = arg + oneline_offset;
+                       len = arg_len - oneline_offset;
+               } else {
+                       strbuf_addf(&buf, "Merge branch '%.*s'",
+                                   merge_arg_len, arg);
+                       p = buf.buf;
+                       len = buf.len;
+               }
+
+               ret = write_message(p, len, git_path_merge_msg(), 0);
+               strbuf_release(&buf);
+               if (ret) {
+                       error_errno(_("could not write '%s'"),
+                                   git_path_merge_msg());
+                       goto leave_merge;
+               }
+       }
+
+       /*
+        * If HEAD is not identical to the first parent of the original merge
+        * commit, we cannot fast-forward.
+        */
+       can_fast_forward = opts->allow_ff && commit && commit->parents &&
+               !oidcmp(&commit->parents->item->object.oid,
+                       &head_commit->object.oid);
+
+       /*
+        * If the merge head is different from the original one, we cannot
+        * fast-forward.
+        */
+       if (can_fast_forward) {
+               struct commit_list *second_parent = commit->parents->next;
+
+               if (second_parent && !second_parent->next &&
+                   oidcmp(&merge_commit->object.oid,
+                          &second_parent->item->object.oid))
+                       can_fast_forward = 0;
+       }
+
+       if (can_fast_forward && commit->parents->next &&
+           !commit->parents->next->next &&
+           !oidcmp(&commit->parents->next->item->object.oid,
+                   &merge_commit->object.oid)) {
+               rollback_lock_file(&lock);
+               ret = fast_forward_to(&commit->object.oid,
+                                     &head_commit->object.oid, 0, opts);
+               goto leave_merge;
+       }
+
+       write_message(oid_to_hex(&merge_commit->object.oid), GIT_SHA1_HEXSZ,
+                     git_path_merge_head(), 0);
+       write_message("no-ff", 5, git_path_merge_mode(), 0);
+
+       bases = get_merge_bases(head_commit, merge_commit);
+       if (bases && !oidcmp(&merge_commit->object.oid,
+                            &bases->item->object.oid)) {
+               ret = 0;
+               /* skip merging an ancestor of HEAD */
+               goto leave_merge;
+       }
+
+       for (j = bases; j; j = j->next)
+               commit_list_insert(j->item, &reversed);
+       free_commit_list(bases);
+
+       read_cache();
+       init_merge_options(&o);
+       o.branch1 = "HEAD";
+       o.branch2 = ref_name.buf;
+       o.buffer_output = 2;
+
+       ret = merge_recursive(&o, head_commit, merge_commit, reversed, &i);
+       if (ret <= 0)
+               fputs(o.obuf.buf, stdout);
+       strbuf_release(&o.obuf);
+       if (ret < 0) {
+               error(_("could not even attempt to merge '%.*s'"),
+                     merge_arg_len, arg);
+               goto leave_merge;
+       }
+       /*
+        * The return value of merge_recursive() is 1 on clean, and 0 on
+        * unclean merge.
+        *
+        * Let's reverse that, so that do_merge() returns 0 upon success and
+        * 1 upon failed merge (keeping the return value -1 for the cases where
+        * we will want to reschedule the `merge` command).
+        */
+       ret = !ret;
+
+       if (active_cache_changed &&
+           write_locked_index(&the_index, &lock, COMMIT_LOCK)) {
+               ret = error(_("merge: Unable to write new index file"));
+               goto leave_merge;
+       }
+
+       rollback_lock_file(&lock);
+       if (ret)
+               rerere(opts->allow_rerere_auto);
+       else
+               /*
+                * In case of problems, we now want to return a positive
+                * value (a negative one would indicate that the `merge`
+                * command needs to be rescheduled).
+                */
+               ret = !!run_git_commit(git_path_merge_msg(), opts,
+                                    run_commit_flags);
+
+leave_merge:
+       strbuf_release(&ref_name);
+       rollback_lock_file(&lock);
+       return ret;
+}
+
 static int is_final_fixup(struct todo_list *todo_list)
 {
        int i = todo_list->current;
@@ -2538,9 +2974,20 @@ static const char *reflog_message(struct replay_opts *opts,
        return buf.buf;
 }
 
+static const char rescheduled_advice[] =
+N_("Could not execute the todo command\n"
+"\n"
+"    %.*s"
+"\n"
+"It has been rescheduled; to edit the command before continuing, please\n"
+"edit the todo list first:\n"
+"\n"
+"    git rebase --edit-todo\n"
+"    git rebase --continue\n");
+
 static int pick_commits(struct todo_list *todo_list, struct replay_opts *opts)
 {
-       int res = 0;
+       int res = 0, reschedule = 0;
 
        setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
        if (opts->allow_ff)
@@ -2583,6 +3030,11 @@ static int pick_commits(struct todo_list *todo_list, struct replay_opts *opts)
                                        opts, is_final_fixup(todo_list));
                        if (is_rebase_i(opts) && res < 0) {
                                /* Reschedule */
+                               advise(_(rescheduled_advice),
+                                      get_item_line_length(todo_list,
+                                                           todo_list->current),
+                                      get_item_line(todo_list,
+                                                    todo_list->current));
                                todo_list->current--;
                                if (save_todo(todo_list, opts))
                                        return -1;
@@ -2606,7 +3058,7 @@ static int pick_commits(struct todo_list *todo_list, struct replay_opts *opts)
                                        intend_to_amend();
                                return error_failed_squash(item->commit, opts,
                                        item->arg_len, item->arg);
-                       } else if (res && is_rebase_i(opts))
+                       } else if (res && is_rebase_i(opts) && item->commit)
                                return res | error_with_patch(item->commit,
                                        item->arg, item->arg_len, opts, res,
                                        item->command == TODO_REWORD);
@@ -2632,9 +3084,44 @@ static int pick_commits(struct todo_list *todo_list, struct replay_opts *opts)
                                /* `current` will be incremented below */
                                todo_list->current = -1;
                        }
+               } else if (item->command == TODO_LABEL) {
+                       if ((res = do_label(item->arg, item->arg_len)))
+                               reschedule = 1;
+               } else if (item->command == TODO_RESET) {
+                       if ((res = do_reset(item->arg, item->arg_len, opts)))
+                               reschedule = 1;
+               } else if (item->command == TODO_MERGE) {
+                       if ((res = do_merge(item->commit,
+                                           item->arg, item->arg_len,
+                                           item->flags, opts)) < 0)
+                               reschedule = 1;
+                       else if (item->commit)
+                               record_in_rewritten(&item->commit->object.oid,
+                                                   peek_command(todo_list, 1));
+                       if (res > 0)
+                               /* failed with merge conflicts */
+                               return error_with_patch(item->commit,
+                                                       item->arg,
+                                                       item->arg_len, opts,
+                                                       res, 0);
                } else if (!is_noop(item->command))
                        return error(_("unknown command %d"), item->command);
 
+               if (reschedule) {
+                       advise(_(rescheduled_advice),
+                              get_item_line_length(todo_list,
+                                                   todo_list->current),
+                              get_item_line(todo_list, todo_list->current));
+                       todo_list->current--;
+                       if (save_todo(todo_list, opts))
+                               return -1;
+                       if (item->commit)
+                               return error_with_patch(item->commit,
+                                                       item->arg,
+                                                       item->arg_len, opts,
+                                                       res, 0);
+               }
+
                todo_list->current++;
                if (res)
                        return res;
@@ -2761,19 +3248,16 @@ static int continue_single_pick(void)
        return run_command_v_opt(argv, RUN_GIT_CMD);
 }
 
-static int commit_staged_changes(struct replay_opts *opts)
+static int commit_staged_changes(struct replay_opts *opts,
+                                struct todo_list *todo_list)
 {
        unsigned int flags = ALLOW_EMPTY | EDIT_MSG;
+       unsigned int final_fixup = 0, is_clean;
 
        if (has_unstaged_changes(1))
                return error(_("cannot rebase: You have unstaged changes."));
-       if (!has_uncommitted_changes(0)) {
-               const char *cherry_pick_head = git_path_cherry_pick_head();
 
-               if (file_exists(cherry_pick_head) && unlink(cherry_pick_head))
-                       return error(_("could not remove CHERRY_PICK_HEAD"));
-               return 0;
-       }
+       is_clean = !has_uncommitted_changes(0);
 
        if (file_exists(rebase_path_amend())) {
                struct strbuf rev = STRBUF_INIT;
@@ -2786,19 +3270,107 @@ static int commit_staged_changes(struct replay_opts *opts)
                if (get_oid_hex(rev.buf, &to_amend))
                        return error(_("invalid contents: '%s'"),
                                rebase_path_amend());
-               if (oidcmp(&head, &to_amend))
+               if (!is_clean && oidcmp(&head, &to_amend))
                        return error(_("\nYou have uncommitted changes in your "
                                       "working tree. Please, commit them\n"
                                       "first and then run 'git rebase "
                                       "--continue' again."));
+               /*
+                * When skipping a failed fixup/squash, we need to edit the
+                * commit message, the current fixup list and count, and if it
+                * was the last fixup/squash in the chain, we need to clean up
+                * the commit message and if there was a squash, let the user
+                * edit it.
+                */
+               if (is_clean && !oidcmp(&head, &to_amend) &&
+                   opts->current_fixup_count > 0 &&
+                   file_exists(rebase_path_stopped_sha())) {
+                       const char *p = opts->current_fixups.buf;
+                       int len = opts->current_fixups.len;
+
+                       opts->current_fixup_count--;
+                       if (!len)
+                               BUG("Incorrect current_fixups:\n%s", p);
+                       while (len && p[len - 1] != '\n')
+                               len--;
+                       strbuf_setlen(&opts->current_fixups, len);
+                       if (write_message(p, len, rebase_path_current_fixups(),
+                                         0) < 0)
+                               return error(_("could not write file: '%s'"),
+                                            rebase_path_current_fixups());
+
+                       /*
+                        * If a fixup/squash in a fixup/squash chain failed, the
+                        * commit message is already correct, no need to commit
+                        * it again.
+                        *
+                        * Only if it is the final command in the fixup/squash
+                        * chain, and only if the chain is longer than a single
+                        * fixup/squash command (which was just skipped), do we
+                        * actually need to re-commit with a cleaned up commit
+                        * message.
+                        */
+                       if (opts->current_fixup_count > 0 &&
+                           !is_fixup(peek_command(todo_list, 0))) {
+                               final_fixup = 1;
+                               /*
+                                * If there was not a single "squash" in the
+                                * chain, we only need to clean up the commit
+                                * message, no need to bother the user with
+                                * opening the commit message in the editor.
+                                */
+                               if (!starts_with(p, "squash ") &&
+                                   !strstr(p, "\nsquash "))
+                                       flags = (flags & ~EDIT_MSG) | CLEANUP_MSG;
+                       } else if (is_fixup(peek_command(todo_list, 0))) {
+                               /*
+                                * We need to update the squash message to skip
+                                * the latest commit message.
+                                */
+                               struct commit *commit;
+                               const char *path = rebase_path_squash_msg();
+
+                               if (parse_head(&commit) ||
+                                   !(p = get_commit_buffer(commit, NULL)) ||
+                                   write_message(p, strlen(p), path, 0)) {
+                                       unuse_commit_buffer(commit, p);
+                                       return error(_("could not write file: "
+                                                      "'%s'"), path);
+                               }
+                               unuse_commit_buffer(commit, p);
+                       }
+               }
 
                strbuf_release(&rev);
                flags |= AMEND_MSG;
        }
 
-       if (run_git_commit(rebase_path_message(), opts, flags))
+       if (is_clean) {
+               const char *cherry_pick_head = git_path_cherry_pick_head();
+
+               if (file_exists(cherry_pick_head) && unlink(cherry_pick_head))
+                       return error(_("could not remove CHERRY_PICK_HEAD"));
+               if (!final_fixup)
+                       return 0;
+       }
+
+       if (run_git_commit(final_fixup ? NULL : rebase_path_message(),
+                          opts, flags))
                return error(_("could not commit staged changes."));
        unlink(rebase_path_amend());
+       if (final_fixup) {
+               unlink(rebase_path_fixup_msg());
+               unlink(rebase_path_squash_msg());
+       }
+       if (opts->current_fixup_count > 0) {
+               /*
+                * Whether final fixup or not, we just cleaned up the commit
+                * message...
+                */
+               unlink(rebase_path_current_fixups());
+               strbuf_reset(&opts->current_fixups);
+               opts->current_fixup_count = 0;
+       }
        return 0;
 }
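
When a failed fixup/squash is skipped, the block above pops the last entry off the newline-separated current-fixups list and decrements the counter before deciding whether the chain still needs a final cleanup commit. A standalone, hedged sketch of that bookkeeping with toy data:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* entries are joined with '\n' and carry no trailing newline */
            char fixups[] = "fixup 1111111\nsquash 2222222";
            int count = 2;
            size_t len = strlen(fixups);

            /* drop the last (just-skipped) entry */
            count--;
            while (len && fixups[len - 1] != '\n')
                    len--;
            fixups[len] = '\0';

            printf("%d pending, %zu bytes kept\n", count, len);
            return 0;
    }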
 
@@ -2810,14 +3382,16 @@ int sequencer_continue(struct replay_opts *opts)
        if (read_and_refresh_cache(opts))
                return -1;
 
+       if (read_populate_opts(opts))
+               return -1;
        if (is_rebase_i(opts)) {
-               if (commit_staged_changes(opts))
+               if ((res = read_populate_todo(&todo_list, opts)))
+                       goto release_todo_list;
+               if (commit_staged_changes(opts, &todo_list))
                        return -1;
        } else if (!file_exists(get_todo_path(opts)))
                return continue_single_pick();
-       if (read_populate_opts(opts))
-               return -1;
-       if ((res = read_populate_todo(&todo_list, opts)))
+       else if ((res = read_populate_todo(&todo_list, opts)))
                goto release_todo_list;
 
        if (!is_rebase_i(opts)) {
@@ -2876,7 +3450,9 @@ int sequencer_pick_revisions(struct replay_opts *opts)
 
                if (!get_oid(name, &oid)) {
                        if (!lookup_commit_reference_gently(&oid, 1)) {
-                               enum object_type type = sha1_object_info(oid.hash, NULL);
+                               enum object_type type = oid_object_info(the_repository,
+                                                                       &oid,
+                                                                       NULL);
                                return error(_("%s: can't cherry-pick a %s"),
                                        name, type_name(type));
                        }
@@ -2986,6 +3562,347 @@ void append_signoff(struct strbuf *msgbuf, int ignore_footer, unsigned flag)
        strbuf_release(&sob);
 }
 
+struct labels_entry {
+       struct hashmap_entry entry;
+       char label[FLEX_ARRAY];
+};
+
+static int labels_cmp(const void *fndata, const struct labels_entry *a,
+                     const struct labels_entry *b, const void *key)
+{
+       return key ? strcmp(a->label, key) : strcmp(a->label, b->label);
+}
+
+struct string_entry {
+       struct oidmap_entry entry;
+       char string[FLEX_ARRAY];
+};
+
+struct label_state {
+       struct oidmap commit2label;
+       struct hashmap labels;
+       struct strbuf buf;
+};
+
+static const char *label_oid(struct object_id *oid, const char *label,
+                            struct label_state *state)
+{
+       struct labels_entry *labels_entry;
+       struct string_entry *string_entry;
+       struct object_id dummy;
+       size_t len;
+       int i;
+
+       string_entry = oidmap_get(&state->commit2label, oid);
+       if (string_entry)
+               return string_entry->string;
+
+       /*
+        * For "uninteresting" commits, i.e. commits that are not to be
+        * rebased, and which can therefore not be labeled, we use a unique
+        * abbreviation of the commit name. This is slightly more complicated
+        * than calling find_unique_abbrev() because we also need to make
+        * sure that the abbreviation does not conflict with any other
+        * label.
+        *
+        * We disallow "interesting" commits to be labeled by a string that
+        * is a valid full-length hash, to ensure that we always can find an
+        * abbreviation for any uninteresting commit's name that does not
+        * clash with any other label.
+        */
+       if (!label) {
+               char *p;
+
+               strbuf_reset(&state->buf);
+               strbuf_grow(&state->buf, GIT_SHA1_HEXSZ);
+               label = p = state->buf.buf;
+
+               find_unique_abbrev_r(p, oid, default_abbrev);
+
+               /*
+                * We may need to extend the abbreviated hash so that there is
+                * no conflicting label.
+                */
+               if (hashmap_get_from_hash(&state->labels, strihash(p), p)) {
+                       size_t i = strlen(p) + 1;
+
+                       oid_to_hex_r(p, oid);
+                       for (; i < GIT_SHA1_HEXSZ; i++) {
+                               char save = p[i];
+                               p[i] = '\0';
+                               if (!hashmap_get_from_hash(&state->labels,
+                                                          strihash(p), p))
+                                       break;
+                               p[i] = save;
+                       }
+               }
+       } else if (((len = strlen(label)) == GIT_SHA1_HEXSZ &&
+                   !get_oid_hex(label, &dummy)) ||
+                  (len == 1 && *label == '#') ||
+                  hashmap_get_from_hash(&state->labels,
+                                        strihash(label), label)) {
+               /*
+                * If the label already exists, or if the label is a valid full
+                * OID, or the label is a '#' (which we use as a separator
+                * between merge heads and oneline), we append a dash and a
+                * number to make it unique.
+                */
+               struct strbuf *buf = &state->buf;
+
+               strbuf_reset(buf);
+               strbuf_add(buf, label, len);
+
+               for (i = 2; ; i++) {
+                       strbuf_setlen(buf, len);
+                       strbuf_addf(buf, "-%d", i);
+                       if (!hashmap_get_from_hash(&state->labels,
+                                                  strihash(buf->buf),
+                                                  buf->buf))
+                               break;
+               }
+
+               label = buf->buf;
+       }
+
+       FLEX_ALLOC_STR(labels_entry, label, label);
+       hashmap_entry_init(labels_entry, strihash(label));
+       hashmap_add(&state->labels, labels_entry);
+
+       FLEX_ALLOC_STR(string_entry, string, label);
+       oidcpy(&string_entry->entry.oid, oid);
+       oidmap_put(&state->commit2label, string_entry);
+
+       return string_entry->string;
+}
+
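The labeling rules above can be summarized with a small, illustrative sketch (the object IDs and the label "feature" are hypothetical; labels_cmp and struct label_state are the ones defined in this hunk):

    struct label_state state = { OIDMAP_INIT, { NULL }, STRBUF_INIT };
    struct object_id merge_one, merge_two, other; /* hypothetical commits, filled in elsewhere */
    const char *a, *b, *c;

    oidmap_init(&state.commit2label, 0);
    hashmap_init(&state.labels, (hashmap_cmp_fn) labels_cmp, NULL, 0);

    a = label_oid(&merge_one, "feature", &state); /* "feature" */
    b = label_oid(&merge_two, "feature", &state); /* "feature-2": suffixed to stay unique */
    c = label_oid(&other, NULL, &state);          /* unlabeled commit: unique hash abbreviation */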
+static int make_script_with_merges(struct pretty_print_context *pp,
+                                  struct rev_info *revs, FILE *out,
+                                  unsigned flags)
+{
+       int keep_empty = flags & TODO_LIST_KEEP_EMPTY;
+       int rebase_cousins = flags & TODO_LIST_REBASE_COUSINS;
+       struct strbuf buf = STRBUF_INIT, oneline = STRBUF_INIT;
+       struct strbuf label = STRBUF_INIT;
+       struct commit_list *commits = NULL, **tail = &commits, *iter;
+       struct commit_list *tips = NULL, **tips_tail = &tips;
+       struct commit *commit;
+       struct oidmap commit2todo = OIDMAP_INIT;
+       struct string_entry *entry;
+       struct oidset interesting = OIDSET_INIT, child_seen = OIDSET_INIT,
+               shown = OIDSET_INIT;
+       struct label_state state = { OIDMAP_INIT, { NULL }, STRBUF_INIT };
+
+       int abbr = flags & TODO_LIST_ABBREVIATE_CMDS;
+       const char *cmd_pick = abbr ? "p" : "pick",
+               *cmd_label = abbr ? "l" : "label",
+               *cmd_reset = abbr ? "t" : "reset",
+               *cmd_merge = abbr ? "m" : "merge";
+
+       oidmap_init(&commit2todo, 0);
+       oidmap_init(&state.commit2label, 0);
+       hashmap_init(&state.labels, (hashmap_cmp_fn) labels_cmp, NULL, 0);
+       strbuf_init(&state.buf, 32);
+
+       if (revs->cmdline.nr && (revs->cmdline.rev[0].flags & BOTTOM)) {
+               struct object_id *oid = &revs->cmdline.rev[0].item->oid;
+               FLEX_ALLOC_STR(entry, string, "onto");
+               oidcpy(&entry->entry.oid, oid);
+               oidmap_put(&state.commit2label, entry);
+       }
+
+       /*
+        * First phase:
+        * - get onelines for all commits
+        * - gather all branch tips (i.e. 2nd or later parents of merges)
+        * - label all branch tips
+        */
+       while ((commit = get_revision(revs))) {
+               struct commit_list *to_merge;
+               int is_octopus;
+               const char *p1, *p2;
+               struct object_id *oid;
+               int is_empty;
+
+               tail = &commit_list_insert(commit, tail)->next;
+               oidset_insert(&interesting, &commit->object.oid);
+
+               is_empty = is_original_commit_empty(commit);
+               if (!is_empty && (commit->object.flags & PATCHSAME))
+                       continue;
+
+               strbuf_reset(&oneline);
+               pretty_print_commit(pp, commit, &oneline);
+
+               to_merge = commit->parents ? commit->parents->next : NULL;
+               if (!to_merge) {
+                       /* non-merge commit: easy case */
+                       strbuf_reset(&buf);
+                       if (!keep_empty && is_empty)
+                               strbuf_addf(&buf, "%c ", comment_line_char);
+                       strbuf_addf(&buf, "%s %s %s", cmd_pick,
+                                   oid_to_hex(&commit->object.oid),
+                                   oneline.buf);
+
+                       FLEX_ALLOC_STR(entry, string, buf.buf);
+                       oidcpy(&entry->entry.oid, &commit->object.oid);
+                       oidmap_put(&commit2todo, entry);
+
+                       continue;
+               }
+
+               is_octopus = to_merge && to_merge->next;
+
+               if (is_octopus)
+                       BUG("Octopus merges not yet supported");
+
+               /* Create a label */
+               strbuf_reset(&label);
+               if (skip_prefix(oneline.buf, "Merge ", &p1) &&
+                   (p1 = strchr(p1, '\'')) &&
+                   (p2 = strchr(++p1, '\'')))
+                       strbuf_add(&label, p1, p2 - p1);
+               else if (skip_prefix(oneline.buf, "Merge pull request ",
+                                    &p1) &&
+                        (p1 = strstr(p1, " from ")))
+                       strbuf_addstr(&label, p1 + strlen(" from "));
+               else
+                       strbuf_addbuf(&label, &oneline);
+
+               for (p1 = label.buf; *p1; p1++)
+                       if (isspace(*p1))
+                               *(char *)p1 = '-';
+
+               strbuf_reset(&buf);
+               strbuf_addf(&buf, "%s -C %s",
+                           cmd_merge, oid_to_hex(&commit->object.oid));
+
+               /* label the tip of merged branch */
+               oid = &to_merge->item->object.oid;
+               strbuf_addch(&buf, ' ');
+
+               if (!oidset_contains(&interesting, oid))
+                       strbuf_addstr(&buf, label_oid(oid, NULL, &state));
+               else {
+                       tips_tail = &commit_list_insert(to_merge->item,
+                                                       tips_tail)->next;
+
+                       strbuf_addstr(&buf, label_oid(oid, label.buf, &state));
+               }
+               strbuf_addf(&buf, " # %s", oneline.buf);
+
+               FLEX_ALLOC_STR(entry, string, buf.buf);
+               oidcpy(&entry->entry.oid, &commit->object.oid);
+               oidmap_put(&commit2todo, entry);
+       }
+
+       /*
+        * Second phase:
+        * - label branch points
+        * - add HEAD to the branch tips
+        */
+       for (iter = commits; iter; iter = iter->next) {
+               struct commit_list *parent = iter->item->parents;
+               for (; parent; parent = parent->next) {
+                       struct object_id *oid = &parent->item->object.oid;
+                       if (!oidset_contains(&interesting, oid))
+                               continue;
+                       if (!oidset_contains(&child_seen, oid))
+                               oidset_insert(&child_seen, oid);
+                       else
+                               label_oid(oid, "branch-point", &state);
+               }
+
+               /* Add HEAD as implicit "tip of branch" */
+               if (!iter->next)
+                       tips_tail = &commit_list_insert(iter->item,
+                                                       tips_tail)->next;
+       }
+
+       /*
+        * Third phase: output the todo list. This is a bit tricky, as we
+        * want to avoid jumping back and forth between revisions. To
+        * accomplish that goal, we walk backwards from the branch tips,
+        * gathering commits not yet shown, reversing the list on the fly,
+        * then outputting that list (labeling revisions as needed).
+        */
+       fprintf(out, "%s onto\n", cmd_label);
+       for (iter = tips; iter; iter = iter->next) {
+               struct commit_list *list = NULL, *iter2;
+
+               commit = iter->item;
+               if (oidset_contains(&shown, &commit->object.oid))
+                       continue;
+               entry = oidmap_get(&state.commit2label, &commit->object.oid);
+
+               if (entry)
+                       fprintf(out, "\n# Branch %s\n", entry->string);
+               else
+                       fprintf(out, "\n");
+
+               while (oidset_contains(&interesting, &commit->object.oid) &&
+                      !oidset_contains(&shown, &commit->object.oid)) {
+                       commit_list_insert(commit, &list);
+                       if (!commit->parents) {
+                               commit = NULL;
+                               break;
+                       }
+                       commit = commit->parents->item;
+               }
+
+               if (!commit)
+                       fprintf(out, "%s onto\n", cmd_reset);
+               else {
+                       const char *to = NULL;
+
+                       entry = oidmap_get(&state.commit2label,
+                                          &commit->object.oid);
+                       if (entry)
+                               to = entry->string;
+                       else if (!rebase_cousins)
+                               to = label_oid(&commit->object.oid, NULL,
+                                              &state);
+
+                       if (!to || !strcmp(to, "onto"))
+                               fprintf(out, "%s onto\n", cmd_reset);
+                       else {
+                               strbuf_reset(&oneline);
+                               pretty_print_commit(pp, commit, &oneline);
+                               fprintf(out, "%s %s # %s\n",
+                                       cmd_reset, to, oneline.buf);
+                       }
+               }
+
+               for (iter2 = list; iter2; iter2 = iter2->next) {
+                       struct object_id *oid = &iter2->item->object.oid;
+                       entry = oidmap_get(&commit2todo, oid);
+                       /* only show if not already upstream */
+                       if (entry)
+                               fprintf(out, "%s\n", entry->string);
+                       entry = oidmap_get(&state.commit2label, oid);
+                       if (entry)
+                               fprintf(out, "%s %s\n",
+                                       cmd_label, entry->string);
+                       oidset_insert(&shown, oid);
+               }
+
+               free_commit_list(list);
+       }
+
+       free_commit_list(commits);
+       free_commit_list(tips);
+
+       strbuf_release(&label);
+       strbuf_release(&oneline);
+       strbuf_release(&buf);
+
+       oidmap_free(&commit2todo, 1);
+       oidmap_free(&state.commit2label, 1);
+       hashmap_free(&state.labels, 1);
+       strbuf_release(&state.buf);
+
+       return 0;
+}
+
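For orientation, a todo list generated by this function for a history with a single merged topic branch would look roughly like the following (hashes and subjects are made up; `merge -C` reuses the original merge commit's message, whereas `-c`, see the TODO_EDIT_MERGE_MSG handling in transform_todos() further down, opens it in the editor):

    label onto

    # Branch refactor-button
    reset onto
    pick 0123456 Extend the button class
    label refactor-button

    reset onto
    pick 89abcde Add sitemap
    merge -C fedcba9 refactor-button # Merge branch 'refactor-button'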
 int sequencer_make_script(FILE *out, int argc, const char **argv,
                          unsigned flags)
 {
@@ -2996,11 +3913,13 @@ int sequencer_make_script(FILE *out, int argc, const char **argv,
        struct commit *commit;
        int keep_empty = flags & TODO_LIST_KEEP_EMPTY;
        const char *insn = flags & TODO_LIST_ABBREVIATE_CMDS ? "p" : "pick";
+       int rebase_merges = flags & TODO_LIST_REBASE_MERGES;
 
        init_revisions(&revs, NULL);
        revs.verbose_header = 1;
-       revs.max_parents = 1;
-       revs.cherry_pick = 1;
+       if (!rebase_merges)
+               revs.max_parents = 1;
+       revs.cherry_mark = 1;
        revs.limited = 1;
        revs.reverse = 1;
        revs.right_only = 1;
@@ -3024,9 +3943,16 @@ int sequencer_make_script(FILE *out, int argc, const char **argv,
        if (prepare_revision_walk(&revs) < 0)
                return error(_("make_script: error preparing revisions"));
 
+       if (rebase_merges)
+               return make_script_with_merges(&pp, &revs, out, flags);
+
        while ((commit = get_revision(&revs))) {
+               int is_empty  = is_original_commit_empty(commit);
+
+               if (!is_empty && (commit->object.flags & PATCHSAME))
+                       continue;
                strbuf_reset(&buf);
-               if (!keep_empty && is_original_commit_empty(commit))
+               if (!keep_empty && is_empty)
                        strbuf_addf(&buf, "%c ", comment_line_char);
                strbuf_addf(&buf, "%s %s ", insn,
                            oid_to_hex(&commit->object.oid));
@@ -3113,8 +4039,16 @@ int transform_todos(unsigned flags)
                                          short_commit_name(item->commit) :
                                          oid_to_hex(&item->commit->object.oid);
 
+                       if (item->command == TODO_MERGE) {
+                               if (item->flags & TODO_EDIT_MERGE_MSG)
+                                       strbuf_addstr(&buf, " -c");
+                               else
+                                       strbuf_addstr(&buf, " -C");
+                       }
+
                        strbuf_addf(&buf, " %s", oid);
                }
+
                /* add all the rest */
                if (!item->arg_len)
                        strbuf_addch(&buf, '\n');
@@ -3294,8 +4228,7 @@ int skip_unnecessary_picks(void)
                oid = &item->commit->object.oid;
        }
        if (i > 0) {
-               int offset = i < todo_list.nr ?
-                       todo_list.items[i].offset_in_buf : todo_list.buf.len;
+               int offset = get_item_line_offset(&todo_list, i);
                const char *done_path = rebase_path_done();
 
                fd = open(done_path, O_CREAT | O_WRONLY | O_APPEND, 0666);
@@ -3390,7 +4323,7 @@ int rearrange_squash(void)
                struct subject2item_entry *entry;
 
                next[i] = tail[i] = -1;
-               if (item->command >= TODO_EXEC) {
+               if (!item->commit || item->command == TODO_DROP) {
                        subjects[i] = NULL;
                        continue;
                }
@@ -3475,12 +4408,10 @@ int rearrange_squash(void)
                                continue;
 
                        while (cur >= 0) {
-                               int offset = todo_list.items[cur].offset_in_buf;
-                               int end_offset = cur + 1 < todo_list.nr ?
-                                       todo_list.items[cur + 1].offset_in_buf :
-                                       todo_list.buf.len;
-                               char *bol = todo_list.buf.buf + offset;
-                               char *eol = todo_list.buf.buf + end_offset;
+                               const char *bol =
+                                       get_item_line(&todo_list, cur);
+                               const char *eol =
+                                       get_item_line(&todo_list, cur + 1);
 
                                /* replace 'pick', by 'fixup' or 'squash' */
                                command = todo_list.items[cur].command;
index e45b178dfc41d723bf186f20674c4515d7c7fa00..a800cb57558fab14d43cda47c9a69b3c5015e1a8 100644 (file)
@@ -44,10 +44,14 @@ struct replay_opts {
        char **xopts;
        size_t xopts_nr, xopts_alloc;
 
+       /* Used by fixup/squash */
+       struct strbuf current_fixups;
+       int current_fixup_count;
+
        /* Only used by REPLAY_NONE */
        struct rev_info *revs;
 };
-#define REPLAY_OPTS_INIT { -1 }
+#define REPLAY_OPTS_INIT { .action = -1, .current_fixups = STRBUF_INIT }
 
 /* Call this to setup defaults before parsing command line options */
 void sequencer_init_config(struct replay_opts *opts);
@@ -59,6 +63,13 @@ int sequencer_remove_state(struct replay_opts *opts);
 #define TODO_LIST_KEEP_EMPTY (1U << 0)
 #define TODO_LIST_SHORTEN_IDS (1U << 1)
 #define TODO_LIST_ABBREVIATE_CMDS (1U << 2)
+#define TODO_LIST_REBASE_MERGES (1U << 3)
+/*
+ * When rebasing merges, commits that do *not* have the base commit as ancestor
+ * ("cousins") are *not* rebased onto the new base by default. If those
+ * commits should be rebased onto the new base, this flag needs to be passed.
+ */
+#define TODO_LIST_REBASE_COUSINS (1U << 4)
 int sequencer_make_script(FILE *out, int argc, const char **argv,
                          unsigned flags);
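As a rough sketch of how these flags combine at the call site (the caller, the revision range and the error handling are illustrative; argv is handed to setup_revisions(), which skips argv[0]):

    const char *argv[] = { "make-script", "origin/master..HEAD" };
    unsigned flags = TODO_LIST_REBASE_MERGES;

    /* also rebase commits that do not have the new base as ancestor */
    flags |= TODO_LIST_REBASE_COUSINS;

    if (sequencer_make_script(stdout, 2, argv, flags))
            die("could not generate todo list");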
 
diff --git a/serve.c b/serve.c
new file mode 100644 (file)
index 0000000..bda085f
--- /dev/null
+++ b/serve.c
@@ -0,0 +1,258 @@
+#include "cache.h"
+#include "repository.h"
+#include "config.h"
+#include "pkt-line.h"
+#include "version.h"
+#include "argv-array.h"
+#include "ls-refs.h"
+#include "serve.h"
+#include "upload-pack.h"
+
+static int always_advertise(struct repository *r,
+                           struct strbuf *value)
+{
+       return 1;
+}
+
+static int agent_advertise(struct repository *r,
+                          struct strbuf *value)
+{
+       if (value)
+               strbuf_addstr(value, git_user_agent_sanitized());
+       return 1;
+}
+
+struct protocol_capability {
+       /*
+        * The name of the capability.  The server uses this name when
+        * advertising this capability, and the client uses this name to
+        * specify this capability.
+        */
+       const char *name;
+
+       /*
+        * Function queried to see if a capability should be advertised.
+        * Optionally a value can be specified by adding it to 'value'.
+        * If a value is added to 'value', the server will advertise this
+        * capability as "<name>=<value>" instead of "<name>".
+        */
+       int (*advertise)(struct repository *r, struct strbuf *value);
+
+       /*
+        * Function called when a client requests the capability as a command.
+        * The function will be provided the capabilities requested via 'keys'
+        * as well as a struct packet_reader 'request' which the command should
+        * use to read the command specific part of the request.  Every command
+        * MUST read until a flush packet is seen before sending a response.
+        *
+        * This field should be NULL for capabilities which are not commands.
+        */
+       int (*command)(struct repository *r,
+                      struct argv_array *keys,
+                      struct packet_reader *request);
+};
+
+static struct protocol_capability capabilities[] = {
+       { "agent", agent_advertise, NULL },
+       { "ls-refs", always_advertise, ls_refs },
+       { "fetch", upload_pack_advertise, upload_pack_v2 },
+       { "server-option", always_advertise, NULL },
+};
+
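To illustrate the advertise contract, a hypothetical extra entry might look like this (the "example.advertise" config key and all names are made up, not part of this series):

    static int example_advertise(struct repository *r, struct strbuf *value)
    {
            int enabled = 0;

            repo_config_get_bool(r, "example.advertise", &enabled);
            return enabled; /* non-zero: advertise the capability */
    }

    /* ... listed in capabilities[] as { "example", example_advertise, NULL } */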
+static void advertise_capabilities(void)
+{
+       struct strbuf capability = STRBUF_INIT;
+       struct strbuf value = STRBUF_INIT;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(capabilities); i++) {
+               struct protocol_capability *c = &capabilities[i];
+
+               if (c->advertise(the_repository, &value)) {
+                       strbuf_addstr(&capability, c->name);
+
+                       if (value.len) {
+                               strbuf_addch(&capability, '=');
+                               strbuf_addbuf(&capability, &value);
+                       }
+
+                       strbuf_addch(&capability, '\n');
+                       packet_write(1, capability.buf, capability.len);
+               }
+
+               strbuf_reset(&capability);
+               strbuf_reset(&value);
+       }
+
+       packet_flush(1);
+       strbuf_release(&capability);
+       strbuf_release(&value);
+}
+
+static struct protocol_capability *get_capability(const char *key)
+{
+       int i;
+
+       if (!key)
+               return NULL;
+
+       for (i = 0; i < ARRAY_SIZE(capabilities); i++) {
+               struct protocol_capability *c = &capabilities[i];
+               const char *out;
+               if (skip_prefix(key, c->name, &out) && (!*out || *out == '='))
+                       return c;
+       }
+
+       return NULL;
+}
+
+static int is_valid_capability(const char *key)
+{
+       const struct protocol_capability *c = get_capability(key);
+
+       return c && c->advertise(the_repository, NULL);
+}
+
+static int is_command(const char *key, struct protocol_capability **command)
+{
+       const char *out;
+
+       if (skip_prefix(key, "command=", &out)) {
+               struct protocol_capability *cmd = get_capability(out);
+
+               if (*command)
+                       die("command '%s' requested after already requesting command '%s'",
+                           out, (*command)->name);
+               if (!cmd || !cmd->advertise(the_repository, NULL) || !cmd->command)
+                       die("invalid command '%s'", out);
+
+               *command = cmd;
+               return 1;
+       }
+
+       return 0;
+}
+
+int has_capability(const struct argv_array *keys, const char *capability,
+                  const char **value)
+{
+       int i;
+       for (i = 0; i < keys->argc; i++) {
+               const char *out;
+               if (skip_prefix(keys->argv[i], capability, &out) &&
+                   (!*out || *out == '=')) {
+                       if (value) {
+                               if (*out == '=')
+                                       out++;
+                               *value = out;
+                       }
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
+enum request_state {
+       PROCESS_REQUEST_KEYS,
+       PROCESS_REQUEST_DONE,
+};
+
+static int process_request(void)
+{
+       enum request_state state = PROCESS_REQUEST_KEYS;
+       struct packet_reader reader;
+       struct argv_array keys = ARGV_ARRAY_INIT;
+       struct protocol_capability *command = NULL;
+
+       packet_reader_init(&reader, 0, NULL, 0,
+                          PACKET_READ_CHOMP_NEWLINE |
+                          PACKET_READ_GENTLE_ON_EOF);
+
+       /*
+        * Check to see if the client closed their end before sending another
+        * request.  If so we can terminate the connection.
+        */
+       if (packet_reader_peek(&reader) == PACKET_READ_EOF)
+               return 1;
+       reader.options = PACKET_READ_CHOMP_NEWLINE;
+
+       while (state != PROCESS_REQUEST_DONE) {
+               switch (packet_reader_peek(&reader)) {
+               case PACKET_READ_EOF:
+                       BUG("Should have already died when seeing EOF");
+               case PACKET_READ_NORMAL:
+                       /* collect request; a sequence of keys and values */
+                       if (is_command(reader.line, &command) ||
+                           is_valid_capability(reader.line))
+                               argv_array_push(&keys, reader.line);
+                       else
+                               die("unknown capability '%s'", reader.line);
+
+                       /* Consume the peeked line */
+                       packet_reader_read(&reader);
+                       break;
+               case PACKET_READ_FLUSH:
+                       /*
+                        * If no command and no keys were given then the client
+                        * wanted to terminate the connection.
+                        */
+                       if (!keys.argc)
+                               return 1;
+
+                       /*
+                        * The flush packet isn't consumed here like it is in
+                        * the other parts of this switch statement.  This is
+                        * so that the command can read the flush packet and
+                        * see the end of the request in the same way it would
+                        * if command specific arguments were provided after a
+                        * delim packet.
+                        */
+                       state = PROCESS_REQUEST_DONE;
+                       break;
+               case PACKET_READ_DELIM:
+                       /* Consume the peeked line */
+                       packet_reader_read(&reader);
+
+                       state = PROCESS_REQUEST_DONE;
+                       break;
+               }
+       }
+
+       if (!command)
+               die("no command requested");
+
+       command->command(the_repository, &keys, &reader);
+
+       argv_array_clear(&keys);
+       return 0;
+}
+
+/* Main serve loop for protocol version 2 */
+void serve(struct serve_options *options)
+{
+       if (options->advertise_capabilities || !options->stateless_rpc) {
+               /* serve by default supports v2 */
+               packet_write_fmt(1, "version 2\n");
+
+               advertise_capabilities();
+               /*
+                * If only the list of capabilities was requested, exit
+                * immediately after advertising capabilities.
+                */
+               if (options->advertise_capabilities)
+                       return;
+       }
+
+       /*
+        * If stateless-rpc was requested then exit after
+        * a single request/response exchange
+        */
+       if (options->stateless_rpc) {
+               process_request();
+       } else {
+               for (;;)
+                       if (process_request())
+                               break;
+       }
+}
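For reference, a request this loop accepts is a sequence of pkt-lines: capability keys (including exactly one command=...), a delim packet, command-specific arguments, and a flush packet. A rough sketch of the client side using the pkt-line helpers (the fd and the ls-refs arguments are illustrative):

    int fd = 1; /* the connection to the server */

    packet_write_fmt(fd, "command=ls-refs\n");  /* picked up by is_command() */
    packet_write_fmt(fd, "agent=git/2.18.0\n"); /* validated by is_valid_capability() */
    packet_delim(fd);                           /* ends the capability keys */
    packet_write_fmt(fd, "peel\n");             /* argument read by ls_refs() */
    packet_flush(fd);                           /* end of request */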
diff --git a/serve.h b/serve.h
new file mode 100644 (file)
index 0000000..fe65ba9
--- /dev/null
+++ b/serve.h
@@ -0,0 +1,15 @@
+#ifndef SERVE_H
+#define SERVE_H
+
+struct argv_array;
+extern int has_capability(const struct argv_array *keys, const char *capability,
+                         const char **value);
+
+struct serve_options {
+       unsigned advertise_capabilities;
+       unsigned stateless_rpc;
+};
+#define SERVE_OPTIONS_INIT { 0 }
+extern void serve(struct serve_options *options);
+
+#endif /* SERVE_H */
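A minimal sketch of driving this interface from a caller (which field is set here is illustrative; see serve.c above for the semantics):

    struct serve_options opts = SERVE_OPTIONS_INIT;

    opts.advertise_capabilities = 1; /* print "version 2" plus the capability list, then return */
    serve(&opts);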
index 26a6c20b7d420f679d7cceaeba4acdb8ae8c00e7..83460ec0d6f10b26719c076639a08a8a0ce803cc 100644 (file)
@@ -1,9 +1,11 @@
 #include "cache.h"
+#include "repository.h"
 #include "refs.h"
 #include "object.h"
 #include "commit.h"
 #include "tag.h"
 #include "packfile.h"
+#include "object-store.h"
 
 /*
  * Create the file "path" by writing to a temporary file and renaming
@@ -199,8 +201,7 @@ static void init_pack_info(const char *infofile, int force)
        objdir = get_object_directory();
        objdirlen = strlen(objdir);
 
-       prepare_packed_git();
-       for (p = packed_git; p; p = p->next) {
+       for (p = get_packed_git(the_repository); p; p = p->next) {
                /* we ignore things on alternate path since they are
                 * not available to the pullers in general.
                 */
@@ -210,7 +211,7 @@ static void init_pack_info(const char *infofile, int force)
        }
        num_pack = i;
        info = xcalloc(num_pack, sizeof(struct pack_info *));
-       for (i = 0, p = packed_git; p; p = p->next) {
+       for (i = 0, p = get_packed_git(the_repository); p; p = p->next) {
                if (!p->pack_local)
                        continue;
                info[i] = xcalloc(1, sizeof(struct pack_info));
diff --git a/setup.c b/setup.c
index 72877796420b06213665f5d357decb202e71fa91..3e03d442b6fad10c1b11fb8e8626f3c4fe444298 100644 (file)
--- a/setup.c
+++ b/setup.c
@@ -3,6 +3,7 @@
 #include "config.h"
 #include "dir.h"
 #include "string-list.h"
+#include "chdir-notify.h"
 
 static int inside_git_dir = -1;
 static int inside_work_tree = -1;
@@ -378,7 +379,7 @@ int is_inside_work_tree(void)
 
 void setup_work_tree(void)
 {
-       const char *work_tree, *git_dir;
+       const char *work_tree;
        static int initialized = 0;
 
        if (initialized)
@@ -388,10 +389,7 @@ void setup_work_tree(void)
                die(_("unable to set up work tree using invalid config"));
 
        work_tree = get_git_work_tree();
-       git_dir = get_git_dir();
-       if (!is_absolute_path(git_dir))
-               git_dir = real_path(get_git_dir());
-       if (!work_tree || chdir(work_tree))
+       if (!work_tree || chdir_notify(work_tree))
                die(_("this operation must be run in a work tree"));
 
        /*
@@ -401,7 +399,6 @@ void setup_work_tree(void)
        if (getenv(GIT_WORK_TREE_ENVIRONMENT))
                setenv(GIT_WORK_TREE_ENVIRONMENT, ".", 1);
 
-       set_git_dir(remove_leading_path(git_dir, work_tree));
        initialized = 1;
 }
 
@@ -1116,8 +1113,7 @@ const char *setup_git_directory_gently(int *nongit_ok)
                        const char *gitdir = getenv(GIT_DIR_ENVIRONMENT);
                        if (!gitdir)
                                gitdir = DEFAULT_GIT_DIR_ENVIRONMENT;
-                       repo_set_gitdir(the_repository, gitdir);
-                       setup_git_env();
+                       setup_git_env(gitdir);
                }
                if (startup_info->have_repository)
                        repo_set_hash_algo(the_repository, repo_fmt.hash_algo);
diff --git a/sha1-file.c b/sha1-file.c
new file mode 100644 (file)
index 0000000..f66059e
--- /dev/null
+++ b/sha1-file.c
@@ -0,0 +1,2260 @@
+/*
+ * GIT - The information manager from hell
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ *
+ * This handles basic git sha1 object files - packing, unpacking,
+ * creation etc.
+ */
+#include "cache.h"
+#include "config.h"
+#include "string-list.h"
+#include "lockfile.h"
+#include "delta.h"
+#include "pack.h"
+#include "blob.h"
+#include "commit.h"
+#include "run-command.h"
+#include "tag.h"
+#include "tree.h"
+#include "tree-walk.h"
+#include "refs.h"
+#include "pack-revindex.h"
+#include "sha1-lookup.h"
+#include "bulk-checkin.h"
+#include "repository.h"
+#include "replace-object.h"
+#include "streaming.h"
+#include "dir.h"
+#include "list.h"
+#include "mergesort.h"
+#include "quote.h"
+#include "packfile.h"
+#include "fetch-object.h"
+#include "object-store.h"
+
+/* The maximum size for an object header. */
+#define MAX_HEADER_LEN 32
+
+const unsigned char null_sha1[GIT_MAX_RAWSZ];
+const struct object_id null_oid;
+const struct object_id empty_tree_oid = {
+       EMPTY_TREE_SHA1_BIN_LITERAL
+};
+const struct object_id empty_blob_oid = {
+       EMPTY_BLOB_SHA1_BIN_LITERAL
+};
+
+static void git_hash_sha1_init(git_hash_ctx *ctx)
+{
+       git_SHA1_Init(&ctx->sha1);
+}
+
+static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len)
+{
+       git_SHA1_Update(&ctx->sha1, data, len);
+}
+
+static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx)
+{
+       git_SHA1_Final(hash, &ctx->sha1);
+}
+
+static void git_hash_unknown_init(git_hash_ctx *ctx)
+{
+       die("trying to init unknown hash");
+}
+
+static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
+{
+       die("trying to update unknown hash");
+}
+
+static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
+{
+       die("trying to finalize unknown hash");
+}
+
+const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
+       {
+               NULL,
+               0x00000000,
+               0,
+               0,
+               git_hash_unknown_init,
+               git_hash_unknown_update,
+               git_hash_unknown_final,
+               NULL,
+               NULL,
+       },
+       {
+               "sha-1",
+               /* "sha1", big-endian */
+               0x73686131,
+               GIT_SHA1_RAWSZ,
+               GIT_SHA1_HEXSZ,
+               git_hash_sha1_init,
+               git_hash_sha1_update,
+               git_hash_sha1_final,
+               &empty_tree_oid,
+               &empty_blob_oid,
+       },
+};
+
+/*
+ * This is meant to hold a *small* number of objects that you would
+ * want read_sha1_file() to be able to return, but yet you do not want
+ * to write them into the object store (e.g. a browse-only
+ * application).
+ */
+static struct cached_object {
+       unsigned char sha1[20];
+       enum object_type type;
+       void *buf;
+       unsigned long size;
+} *cached_objects;
+static int cached_object_nr, cached_object_alloc;
+
+static struct cached_object empty_tree = {
+       EMPTY_TREE_SHA1_BIN_LITERAL,
+       OBJ_TREE,
+       "",
+       0
+};
+
+static struct cached_object *find_cached_object(const unsigned char *sha1)
+{
+       int i;
+       struct cached_object *co = cached_objects;
+
+       for (i = 0; i < cached_object_nr; i++, co++) {
+               if (!hashcmp(co->sha1, sha1))
+                       return co;
+       }
+       if (!hashcmp(sha1, empty_tree.sha1))
+               return &empty_tree;
+       return NULL;
+}
+
+
+static int get_conv_flags(unsigned flags)
+{
+       if (flags & HASH_RENORMALIZE)
+               return CONV_EOL_RENORMALIZE;
+       else if (flags & HASH_WRITE_OBJECT)
+               return global_conv_flags_eol | CONV_WRITE_OBJECT;
+       else
+               return 0;
+}
+
+
+int mkdir_in_gitdir(const char *path)
+{
+       if (mkdir(path, 0777)) {
+               int saved_errno = errno;
+               struct stat st;
+               struct strbuf sb = STRBUF_INIT;
+
+               if (errno != EEXIST)
+                       return -1;
+               /*
+                * Are we looking at a path in a symlinked worktree
+                * whose original repository does not yet have it?
+                * e.g. .git/rr-cache pointing at its original
+                * repository in which the user hasn't performed any
+                * conflict resolution yet?
+                */
+               if (lstat(path, &st) || !S_ISLNK(st.st_mode) ||
+                   strbuf_readlink(&sb, path, st.st_size) ||
+                   !is_absolute_path(sb.buf) ||
+                   mkdir(sb.buf, 0777)) {
+                       strbuf_release(&sb);
+                       errno = saved_errno;
+                       return -1;
+               }
+               strbuf_release(&sb);
+       }
+       return adjust_shared_perm(path);
+}
+
+enum scld_error safe_create_leading_directories(char *path)
+{
+       char *next_component = path + offset_1st_component(path);
+       enum scld_error ret = SCLD_OK;
+
+       while (ret == SCLD_OK && next_component) {
+               struct stat st;
+               char *slash = next_component, slash_character;
+
+               while (*slash && !is_dir_sep(*slash))
+                       slash++;
+
+               if (!*slash)
+                       break;
+
+               next_component = slash + 1;
+               while (is_dir_sep(*next_component))
+                       next_component++;
+               if (!*next_component)
+                       break;
+
+               slash_character = *slash;
+               *slash = '\0';
+               if (!stat(path, &st)) {
+                       /* path exists */
+                       if (!S_ISDIR(st.st_mode)) {
+                               errno = ENOTDIR;
+                               ret = SCLD_EXISTS;
+                       }
+               } else if (mkdir(path, 0777)) {
+                       if (errno == EEXIST &&
+                           !stat(path, &st) && S_ISDIR(st.st_mode))
+                               ; /* somebody created it since we checked */
+                       else if (errno == ENOENT)
+                               /*
+                                * Either mkdir() failed because
+                                * somebody just pruned the containing
+                                * directory, or stat() failed because
+                                * the file that was in our way was
+                                * just removed.  Either way, inform
+                                * the caller that it might be worth
+                                * trying again:
+                                */
+                               ret = SCLD_VANISHED;
+                       else
+                               ret = SCLD_FAILED;
+               } else if (adjust_shared_perm(path)) {
+                       ret = SCLD_PERMS;
+               }
+               *slash = slash_character;
+       }
+       return ret;
+}
+
+enum scld_error safe_create_leading_directories_const(const char *path)
+{
+       int save_errno;
+       /* path points to cache entries, so xstrdup before messing with it */
+       char *buf = xstrdup(path);
+       enum scld_error result = safe_create_leading_directories(buf);
+
+       save_errno = errno;
+       free(buf);
+       errno = save_errno;
+       return result;
+}
+
+int raceproof_create_file(const char *path, create_file_fn fn, void *cb)
+{
+       /*
+        * The number of times we will try to remove empty directories
+        * in the way of path. This is only 1 because if another
+        * process is racily creating directories that conflict with
+        * us, we don't want to fight against them.
+        */
+       int remove_directories_remaining = 1;
+
+       /*
+        * The number of times that we will try to create the
+        * directories containing path. We are willing to attempt this
+        * more than once, because another process could be trying to
+        * clean up empty directories at the same time as we are
+        * trying to create them.
+        */
+       int create_directories_remaining = 3;
+
+       /* A scratch copy of path, filled lazily if we need it: */
+       struct strbuf path_copy = STRBUF_INIT;
+
+       int ret, save_errno;
+
+       /* Sanity check: */
+       assert(*path);
+
+retry_fn:
+       ret = fn(path, cb);
+       save_errno = errno;
+       if (!ret)
+               goto out;
+
+       if (errno == EISDIR && remove_directories_remaining-- > 0) {
+               /*
+                * A directory is in the way. Maybe it is empty; try
+                * to remove it:
+                */
+               if (!path_copy.len)
+                       strbuf_addstr(&path_copy, path);
+
+               if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY))
+                       goto retry_fn;
+       } else if (errno == ENOENT && create_directories_remaining-- > 0) {
+               /*
+                * Maybe the containing directory didn't exist, or
+                * maybe it was just deleted by a process that is
+                * racing with us to clean up empty directories. Try
+                * to create it:
+                */
+               enum scld_error scld_result;
+
+               if (!path_copy.len)
+                       strbuf_addstr(&path_copy, path);
+
+               do {
+                       scld_result = safe_create_leading_directories(path_copy.buf);
+                       if (scld_result == SCLD_OK)
+                               goto retry_fn;
+               } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0);
+       }
+
+out:
+       strbuf_release(&path_copy);
+       errno = save_errno;
+       return ret;
+}
+
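The callback contract can be sketched as follows (the marker-file name and the callback are hypothetical): the callback attempts the creation once and leaves errno set on failure, so the EISDIR/ENOENT retry logic above can take over.

    static int create_marker(const char *path, void *cb)
    {
            int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, 0666);

            if (fd < 0)
                    return -1; /* open() has set errno for us */
            return close(fd);
    }

    /* ... raceproof_create_file(git_path("MY_MARKER"), create_marker, NULL); */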
+static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1)
+{
+       int i;
+       for (i = 0; i < 20; i++) {
+               static char hex[] = "0123456789abcdef";
+               unsigned int val = sha1[i];
+               strbuf_addch(buf, hex[val >> 4]);
+               strbuf_addch(buf, hex[val & 0xf]);
+               if (!i)
+                       strbuf_addch(buf, '/');
+       }
+}
+
+void sha1_file_name(struct repository *r, struct strbuf *buf, const unsigned char *sha1)
+{
+       strbuf_addstr(buf, r->objects->objectdir);
+       strbuf_addch(buf, '/');
+       fill_sha1_path(buf, sha1);
+}
+
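As an illustration of the resulting layout (the hash bytes below are made up): the first byte of the hash becomes the fan-out directory, the remaining bytes the file name.

    struct strbuf path = STRBUF_INIT;
    unsigned char sha1[20] = { 0xab, 0xcd /* remaining bytes zero, for illustration */ };

    sha1_file_name(the_repository, &path, sha1);
    /* path.buf is now "<objectdir>/ab/cd0000...00" */
    strbuf_release(&path);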
+struct strbuf *alt_scratch_buf(struct alternate_object_database *alt)
+{
+       strbuf_setlen(&alt->scratch, alt->base_len);
+       return &alt->scratch;
+}
+
+static const char *alt_sha1_path(struct alternate_object_database *alt,
+                                const unsigned char *sha1)
+{
+       struct strbuf *buf = alt_scratch_buf(alt);
+       fill_sha1_path(buf, sha1);
+       return buf->buf;
+}
+
+/*
+ * Return non-zero iff the path is usable as an alternate object database.
+ */
+static int alt_odb_usable(struct raw_object_store *o,
+                         struct strbuf *path,
+                         const char *normalized_objdir)
+{
+       struct alternate_object_database *alt;
+
+       /* Detect cases where alternate disappeared */
+       if (!is_directory(path->buf)) {
+               error("object directory %s does not exist; "
+                     "check .git/objects/info/alternates.",
+                     path->buf);
+               return 0;
+       }
+
+       /*
+        * Prevent the common mistake of listing the same
+        * thing twice, or object directory itself.
+        */
+       for (alt = o->alt_odb_list; alt; alt = alt->next) {
+               if (!fspathcmp(path->buf, alt->path))
+                       return 0;
+       }
+       if (!fspathcmp(path->buf, normalized_objdir))
+               return 0;
+
+       return 1;
+}
+
+/*
+ * Prepare alternate object database registry.
+ *
+ * The variable alt_odb_list points at the list of struct
+ * alternate_object_database.  The elements on this list come from
+ * non-empty elements from colon separated ALTERNATE_DB_ENVIRONMENT
+ * environment variable, and $GIT_OBJECT_DIRECTORY/info/alternates,
+ * whose contents is similar to that environment variable but can be
+ * LF separated.  Its base points at a statically allocated buffer that
+ * contains "/the/directory/corresponding/to/.git/objects/...", while
+ * its name points just after the slash at the end of ".git/objects/"
+ * in the example above, and has enough space to hold 40-byte hex
+ * SHA1, an extra slash for the first level indirection, and the
+ * terminating NUL.
+ */
+static void read_info_alternates(struct repository *r,
+                                const char *relative_base,
+                                int depth);
+static int link_alt_odb_entry(struct repository *r, const char *entry,
+       const char *relative_base, int depth, const char *normalized_objdir)
+{
+       struct alternate_object_database *ent;
+       struct strbuf pathbuf = STRBUF_INIT;
+
+       if (!is_absolute_path(entry) && relative_base) {
+               strbuf_realpath(&pathbuf, relative_base, 1);
+               strbuf_addch(&pathbuf, '/');
+       }
+       strbuf_addstr(&pathbuf, entry);
+
+       if (strbuf_normalize_path(&pathbuf) < 0 && relative_base) {
+               error("unable to normalize alternate object path: %s",
+                     pathbuf.buf);
+               strbuf_release(&pathbuf);
+               return -1;
+       }
+
+       /*
+        * The trailing slash after the directory name is given by
+        * this function at the end. Remove duplicates.
+        */
+       while (pathbuf.len && pathbuf.buf[pathbuf.len - 1] == '/')
+               strbuf_setlen(&pathbuf, pathbuf.len - 1);
+
+       if (!alt_odb_usable(r->objects, &pathbuf, normalized_objdir)) {
+               strbuf_release(&pathbuf);
+               return -1;
+       }
+
+       ent = alloc_alt_odb(pathbuf.buf);
+
+       /* add the alternate entry */
+       *r->objects->alt_odb_tail = ent;
+       r->objects->alt_odb_tail = &(ent->next);
+       ent->next = NULL;
+
+       /* recursively add alternates */
+       read_info_alternates(r, pathbuf.buf, depth + 1);
+
+       strbuf_release(&pathbuf);
+       return 0;
+}
+
+static const char *parse_alt_odb_entry(const char *string,
+                                      int sep,
+                                      struct strbuf *out)
+{
+       const char *end;
+
+       strbuf_reset(out);
+
+       if (*string == '#') {
+               /* comment; consume up to next separator */
+               end = strchrnul(string, sep);
+       } else if (*string == '"' && !unquote_c_style(out, string, &end)) {
+               /*
+                * quoted path; unquote_c_style has copied the
+                * data for us and set "end". Broken quoting (e.g.,
+                * an entry that doesn't end with a quote) falls
+                * back to the unquoted case below.
+                */
+       } else {
+               /* normal, unquoted path */
+               end = strchrnul(string, sep);
+               strbuf_add(out, string, end - string);
+       }
+
+       if (*end)
+               end++;
+       return end;
+}
+
+static void link_alt_odb_entries(struct repository *r, const char *alt,
+                                int sep, const char *relative_base, int depth)
+{
+       struct strbuf objdirbuf = STRBUF_INIT;
+       struct strbuf entry = STRBUF_INIT;
+
+       if (!alt || !*alt)
+               return;
+
+       if (depth > 5) {
+               error("%s: ignoring alternate object stores, nesting too deep.",
+                               relative_base);
+               return;
+       }
+
+       strbuf_add_absolute_path(&objdirbuf, r->objects->objectdir);
+       if (strbuf_normalize_path(&objdirbuf) < 0)
+               die("unable to normalize object directory: %s",
+                   objdirbuf.buf);
+
+       while (*alt) {
+               alt = parse_alt_odb_entry(alt, sep, &entry);
+               if (!entry.len)
+                       continue;
+               link_alt_odb_entry(r, entry.buf,
+                                  relative_base, depth, objdirbuf.buf);
+       }
+       strbuf_release(&entry);
+       strbuf_release(&objdirbuf);
+}
+
+static void read_info_alternates(struct repository *r,
+                                const char *relative_base,
+                                int depth)
+{
+       char *path;
+       struct strbuf buf = STRBUF_INIT;
+
+       path = xstrfmt("%s/info/alternates", relative_base);
+       if (strbuf_read_file(&buf, path, 1024) < 0) {
+               warn_on_fopen_errors(path);
+               free(path);
+               return;
+       }
+
+       link_alt_odb_entries(r, buf.buf, '\n', relative_base, depth);
+       strbuf_release(&buf);
+       free(path);
+}
+
+struct alternate_object_database *alloc_alt_odb(const char *dir)
+{
+       struct alternate_object_database *ent;
+
+       FLEX_ALLOC_STR(ent, path, dir);
+       strbuf_init(&ent->scratch, 0);
+       strbuf_addf(&ent->scratch, "%s/", dir);
+       ent->base_len = ent->scratch.len;
+
+       return ent;
+}
+
+void add_to_alternates_file(const char *reference)
+{
+       struct lock_file lock = LOCK_INIT;
+       char *alts = git_pathdup("objects/info/alternates");
+       FILE *in, *out;
+       int found = 0;
+
+       hold_lock_file_for_update(&lock, alts, LOCK_DIE_ON_ERROR);
+       out = fdopen_lock_file(&lock, "w");
+       if (!out)
+               die_errno("unable to fdopen alternates lockfile");
+
+       in = fopen(alts, "r");
+       if (in) {
+               struct strbuf line = STRBUF_INIT;
+
+               while (strbuf_getline(&line, in) != EOF) {
+                       if (!strcmp(reference, line.buf)) {
+                               found = 1;
+                               break;
+                       }
+                       fprintf_or_die(out, "%s\n", line.buf);
+               }
+
+               strbuf_release(&line);
+               fclose(in);
+       }
+       else if (errno != ENOENT)
+               die_errno("unable to read alternates file");
+
+       if (found) {
+               rollback_lock_file(&lock);
+       } else {
+               fprintf_or_die(out, "%s\n", reference);
+               if (commit_lock_file(&lock))
+                       die_errno("unable to move new alternates file into place");
+               if (the_repository->objects->alt_odb_tail)
+                       link_alt_odb_entries(the_repository, reference,
+                                            '\n', NULL, 0);
+       }
+       free(alts);
+}
+
+void add_to_alternates_memory(const char *reference)
+{
+       /*
+        * Make sure alternates are initialized, or else our entry may be
+        * overwritten when they are.
+        */
+       prepare_alt_odb(the_repository);
+
+       link_alt_odb_entries(the_repository, reference,
+                            '\n', NULL, 0);
+}
+
+/*
+ * Compute the exact path an alternate is at and return it. In case of
+ * error, NULL is returned and a human-readable error is added to `err`.
+ * `path` may be relative and should point to $GITDIR.
+ * `err` must not be null.
+ */
+char *compute_alternate_path(const char *path, struct strbuf *err)
+{
+       char *ref_git = NULL;
+       const char *repo, *ref_git_s;
+       int seen_error = 0;
+
+       ref_git_s = real_path_if_valid(path);
+       if (!ref_git_s) {
+               seen_error = 1;
+               strbuf_addf(err, _("path '%s' does not exist"), path);
+               goto out;
+       } else
+               /*
+                * Beware: read_gitfile(), real_path() and mkpath()
+                * return static buffer
+                */
+               ref_git = xstrdup(ref_git_s);
+
+       repo = read_gitfile(ref_git);
+       if (!repo)
+               repo = read_gitfile(mkpath("%s/.git", ref_git));
+       if (repo) {
+               free(ref_git);
+               ref_git = xstrdup(repo);
+       }
+
+       if (!repo && is_directory(mkpath("%s/.git/objects", ref_git))) {
+               char *ref_git_git = mkpathdup("%s/.git", ref_git);
+               free(ref_git);
+               ref_git = ref_git_git;
+       } else if (!is_directory(mkpath("%s/objects", ref_git))) {
+               struct strbuf sb = STRBUF_INIT;
+               seen_error = 1;
+               if (get_common_dir(&sb, ref_git)) {
+                       strbuf_addf(err,
+                                   _("reference repository '%s' as a linked "
+                                     "checkout is not supported yet."),
+                                   path);
+                       goto out;
+               }
+
+               strbuf_addf(err, _("reference repository '%s' is not a "
+                                       "local repository."), path);
+               goto out;
+       }
+
+       if (!access(mkpath("%s/shallow", ref_git), F_OK)) {
+               strbuf_addf(err, _("reference repository '%s' is shallow"),
+                           path);
+               seen_error = 1;
+               goto out;
+       }
+
+       if (!access(mkpath("%s/info/grafts", ref_git), F_OK)) {
+               strbuf_addf(err,
+                           _("reference repository '%s' is grafted"),
+                           path);
+               seen_error = 1;
+               goto out;
+       }
+
+out:
+       if (seen_error) {
+               FREE_AND_NULL(ref_git);
+       }
+
+       return ref_git;
+}
+
+int foreach_alt_odb(alt_odb_fn fn, void *cb)
+{
+       struct alternate_object_database *ent;
+       int r = 0;
+
+       prepare_alt_odb(the_repository);
+       for (ent = the_repository->objects->alt_odb_list; ent; ent = ent->next) {
+               r = fn(ent, cb);
+               if (r)
+                       break;
+       }
+       return r;
+}
+
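A minimal usage sketch for the iterator above (the callback name and output are illustrative):

    static int show_alternate(struct alternate_object_database *alt, void *data)
    {
            printf("alternate object directory: %s\n", alt->path);
            return 0; /* a non-zero return would stop the iteration */
    }

    /* ... foreach_alt_odb(show_alternate, NULL); */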
+void prepare_alt_odb(struct repository *r)
+{
+       if (r->objects->alt_odb_tail)
+               return;
+
+       r->objects->alt_odb_tail = &r->objects->alt_odb_list;
+       link_alt_odb_entries(r, r->objects->alternate_db, PATH_SEP, NULL, 0);
+
+       read_info_alternates(r, r->objects->objectdir, 0);
+}
+
+/* Returns 1 if we have successfully freshened the file, 0 otherwise. */
+static int freshen_file(const char *fn)
+{
+       struct utimbuf t;
+       t.actime = t.modtime = time(NULL);
+       return !utime(fn, &t);
+}
+
+/*
+ * All of the check_and_freshen functions return 1 if the file exists and was
+ * freshened (if freshening was requested), 0 otherwise. If they return
+ * 0, you should not assume that it is safe to skip a write of the object (it
+ * either does not exist on disk, or has a stale mtime and may be subject to
+ * pruning).
+ */
+int check_and_freshen_file(const char *fn, int freshen)
+{
+       if (access(fn, F_OK))
+               return 0;
+       if (freshen && !freshen_file(fn))
+               return 0;
+       return 1;
+}
+
+static int check_and_freshen_local(const unsigned char *sha1, int freshen)
+{
+       static struct strbuf buf = STRBUF_INIT;
+
+       strbuf_reset(&buf);
+       sha1_file_name(the_repository, &buf, sha1);
+
+       return check_and_freshen_file(buf.buf, freshen);
+}
+
+static int check_and_freshen_nonlocal(const unsigned char *sha1, int freshen)
+{
+       struct alternate_object_database *alt;
+       prepare_alt_odb(the_repository);
+       for (alt = the_repository->objects->alt_odb_list; alt; alt = alt->next) {
+               const char *path = alt_sha1_path(alt, sha1);
+               if (check_and_freshen_file(path, freshen))
+                       return 1;
+       }
+       return 0;
+}
+
+static int check_and_freshen(const unsigned char *sha1, int freshen)
+{
+       return check_and_freshen_local(sha1, freshen) ||
+              check_and_freshen_nonlocal(sha1, freshen);
+}
+
+int has_loose_object_nonlocal(const unsigned char *sha1)
+{
+       return check_and_freshen_nonlocal(sha1, 0);
+}
+
+static int has_loose_object(const unsigned char *sha1)
+{
+       return check_and_freshen(sha1, 0);
+}
+
+static void mmap_limit_check(size_t length)
+{
+       static size_t limit = 0;
+       if (!limit) {
+               limit = git_env_ulong("GIT_MMAP_LIMIT", 0);
+               if (!limit)
+                       limit = SIZE_MAX;
+       }
+       if (length > limit)
+               die("attempting to mmap %"PRIuMAX" over limit %"PRIuMAX,
+                   (uintmax_t)length, (uintmax_t)limit);
+}
+
+void *xmmap_gently(void *start, size_t length,
+                 int prot, int flags, int fd, off_t offset)
+{
+       void *ret;
+
+       mmap_limit_check(length);
+       ret = mmap(start, length, prot, flags, fd, offset);
+       if (ret == MAP_FAILED) {
+               if (!length)
+                       return NULL;
+               release_pack_memory(length);
+               ret = mmap(start, length, prot, flags, fd, offset);
+       }
+       return ret;
+}
+
+void *xmmap(void *start, size_t length,
+       int prot, int flags, int fd, off_t offset)
+{
+       void *ret = xmmap_gently(start, length, prot, flags, fd, offset);
+       if (ret == MAP_FAILED)
+               die_errno("mmap failed");
+       return ret;
+}
+
+/*
+ * With in-core object data in "map", rehash it to make sure the
+ * object name actually matches "sha1", to detect object corruption.
+ * With "map" == NULL, try reading the object named by "sha1" using
+ * the streaming interface and rehash it to do the same.
+ */
+int check_object_signature(const struct object_id *oid, void *map,
+                          unsigned long size, const char *type)
+{
+       struct object_id real_oid;
+       enum object_type obj_type;
+       struct git_istream *st;
+       git_hash_ctx c;
+       char hdr[MAX_HEADER_LEN];
+       int hdrlen;
+
+       if (map) {
+               hash_object_file(map, size, type, &real_oid);
+               return oidcmp(oid, &real_oid) ? -1 : 0;
+       }
+
+       st = open_istream(oid, &obj_type, &size, NULL);
+       if (!st)
+               return -1;
+
+       /* Generate the header */
+       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(obj_type), size) + 1;
+
+       /* Sha1.. */
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, hdr, hdrlen);
+       for (;;) {
+               char buf[1024 * 16];
+               ssize_t readlen = read_istream(st, buf, sizeof(buf));
+
+               if (readlen < 0) {
+                       close_istream(st);
+                       return -1;
+               }
+               if (!readlen)
+                       break;
+               the_hash_algo->update_fn(&c, buf, readlen);
+       }
+       the_hash_algo->final_fn(real_oid.hash, &c);
+       close_istream(st);
+       return oidcmp(oid, &real_oid) ? -1 : 0;
+}
+
+int git_open_cloexec(const char *name, int flags)
+{
+       int fd;
+       static int o_cloexec = O_CLOEXEC;
+
+       fd = open(name, flags | o_cloexec);
+       if ((o_cloexec & O_CLOEXEC) && fd < 0 && errno == EINVAL) {
+               /* Try again w/o O_CLOEXEC: the kernel might not support it */
+               o_cloexec &= ~O_CLOEXEC;
+               fd = open(name, flags | o_cloexec);
+       }
+
+#if defined(F_GETFD) && defined(F_SETFD) && defined(FD_CLOEXEC)
+       {
+               static int fd_cloexec = FD_CLOEXEC;
+
+               if (!o_cloexec && 0 <= fd && fd_cloexec) {
+                       /* Opened w/o O_CLOEXEC?  try with fcntl(2) to add it */
+                       int flags = fcntl(fd, F_GETFD);
+                       if (fcntl(fd, F_SETFD, flags | fd_cloexec))
+                               fd_cloexec = 0;
+               }
+       }
+#endif
+       return fd;
+}
+
+/*
+ * Find "sha1" as a loose object in the local repository or in an alternate.
+ * Returns 0 on success, negative on failure.
+ *
+ * The "path" out-parameter will give the path of the object we found (if any).
+ * Note that it may point to static storage and is only valid until another
+ * call to sha1_file_name(), etc.
+ */
+static int stat_sha1_file(struct repository *r, const unsigned char *sha1,
+                         struct stat *st, const char **path)
+{
+       struct alternate_object_database *alt;
+       static struct strbuf buf = STRBUF_INIT;
+
+       strbuf_reset(&buf);
+       sha1_file_name(r, &buf, sha1);
+       *path = buf.buf;
+
+       if (!lstat(*path, st))
+               return 0;
+
+       prepare_alt_odb(r);
+       errno = ENOENT;
+       for (alt = r->objects->alt_odb_list; alt; alt = alt->next) {
+               *path = alt_sha1_path(alt, sha1);
+               if (!lstat(*path, st))
+                       return 0;
+       }
+
+       return -1;
+}
+
+/*
+ * Like stat_sha1_file(), but actually open the object and return the
+ * descriptor. See the caveats on the "path" parameter above.
+ */
+static int open_sha1_file(struct repository *r,
+                         const unsigned char *sha1, const char **path)
+{
+       int fd;
+       struct alternate_object_database *alt;
+       int most_interesting_errno;
+       static struct strbuf buf = STRBUF_INIT;
+
+       strbuf_reset(&buf);
+       sha1_file_name(r, &buf, sha1);
+       *path = buf.buf;
+
+       fd = git_open(*path);
+       if (fd >= 0)
+               return fd;
+       most_interesting_errno = errno;
+
+       prepare_alt_odb(r);
+       for (alt = r->objects->alt_odb_list; alt; alt = alt->next) {
+               *path = alt_sha1_path(alt, sha1);
+               fd = git_open(*path);
+               if (fd >= 0)
+                       return fd;
+               if (most_interesting_errno == ENOENT)
+                       most_interesting_errno = errno;
+       }
+       errno = most_interesting_errno;
+       return -1;
+}
+
+/*
+ * Map the loose object at "path" if it is not NULL, or the path found by
+ * searching for a loose object named "sha1".
+ */
+static void *map_sha1_file_1(struct repository *r, const char *path,
+                            const unsigned char *sha1, unsigned long *size)
+{
+       void *map;
+       int fd;
+
+       if (path)
+               fd = git_open(path);
+       else
+               fd = open_sha1_file(r, sha1, &path);
+       map = NULL;
+       if (fd >= 0) {
+               struct stat st;
+
+               if (!fstat(fd, &st)) {
+                       *size = xsize_t(st.st_size);
+                       if (!*size) {
+                               /* mmap() is forbidden on empty files */
+                               error("object file %s is empty", path);
+                               return NULL;
+                       }
+                       map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
+               }
+               close(fd);
+       }
+       return map;
+}
+
+void *map_sha1_file(struct repository *r,
+                   const unsigned char *sha1, unsigned long *size)
+{
+       return map_sha1_file_1(r, NULL, sha1, size);
+}
+
+static int unpack_sha1_short_header(git_zstream *stream,
+                                   unsigned char *map, unsigned long mapsize,
+                                   void *buffer, unsigned long bufsiz)
+{
+       /* Get the data stream */
+       memset(stream, 0, sizeof(*stream));
+       stream->next_in = map;
+       stream->avail_in = mapsize;
+       stream->next_out = buffer;
+       stream->avail_out = bufsiz;
+
+       git_inflate_init(stream);
+       return git_inflate(stream, 0);
+}
+
+int unpack_sha1_header(git_zstream *stream,
+                      unsigned char *map, unsigned long mapsize,
+                      void *buffer, unsigned long bufsiz)
+{
+       int status = unpack_sha1_short_header(stream, map, mapsize,
+                                             buffer, bufsiz);
+
+       if (status < Z_OK)
+               return status;
+
+       /* Make sure we have the terminating NUL */
+       if (!memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
+               return -1;
+       return 0;
+}
+
+static int unpack_sha1_header_to_strbuf(git_zstream *stream, unsigned char *map,
+                                       unsigned long mapsize, void *buffer,
+                                       unsigned long bufsiz, struct strbuf *header)
+{
+       int status;
+
+       status = unpack_sha1_short_header(stream, map, mapsize, buffer, bufsiz);
+       if (status < Z_OK)
+               return -1;
+
+       /*
+        * Check if entire header is unpacked in the first iteration.
+        */
+       if (memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
+               return 0;
+
+       /*
+        * buffer[0..bufsiz] was not large enough.  Copy the partial
+        * result out to header, and then append the result of further
+        * reading the stream.
+        */
+       strbuf_add(header, buffer, stream->next_out - (unsigned char *)buffer);
+       stream->next_out = buffer;
+       stream->avail_out = bufsiz;
+
+       do {
+               status = git_inflate(stream, 0);
+               strbuf_add(header, buffer, stream->next_out - (unsigned char *)buffer);
+               if (memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
+                       return 0;
+               stream->next_out = buffer;
+               stream->avail_out = bufsiz;
+       } while (status != Z_STREAM_END);
+       return -1;
+}
+
+static void *unpack_sha1_rest(git_zstream *stream, void *buffer, unsigned long size, const unsigned char *sha1)
+{
+       int bytes = strlen(buffer) + 1;
+       unsigned char *buf = xmallocz(size);
+       unsigned long n;
+       int status = Z_OK;
+
+       n = stream->total_out - bytes;
+       if (n > size)
+               n = size;
+       memcpy(buf, (char *) buffer + bytes, n);
+       bytes = n;
+       if (bytes <= size) {
+               /*
+                * The above condition must be (bytes <= size), not
+                * (bytes < size).  In other words, even though we
+                * expect no more output and set avail_out to zero,
+                * the input zlib stream may have bytes that express
+                * "this concludes the stream", and we *do* want to
+                * eat that input.
+                *
+                * Otherwise we would not be able to test that we
+                * consumed all the input to reach the expected size;
+                * we also want to check that zlib tells us that all
+                * went well with status == Z_STREAM_END at the end.
+                */
+               stream->next_out = buf + bytes;
+               stream->avail_out = size - bytes;
+               while (status == Z_OK)
+                       status = git_inflate(stream, Z_FINISH);
+       }
+       if (status == Z_STREAM_END && !stream->avail_in) {
+               git_inflate_end(stream);
+               return buf;
+       }
+
+       if (status < 0)
+               error("corrupt loose object '%s'", sha1_to_hex(sha1));
+       else if (stream->avail_in)
+               error("garbage at end of loose object '%s'",
+                     sha1_to_hex(sha1));
+       free(buf);
+       return NULL;
+}
+
+/*
+ * We used to just use "sscanf()", but that's actually far too
+ * permissive for what we want to check. So do a strict parse of
+ * the object header by hand.
+ */
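+/*
+ * A loose object header has the form "<type> SP <decimal size> NUL",
+ * e.g. "blob 16" followed by a NUL byte.
+ */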
+static int parse_sha1_header_extended(const char *hdr, struct object_info *oi,
+                              unsigned int flags)
+{
+       const char *type_buf = hdr;
+       unsigned long size;
+       int type, type_len = 0;
+
+       /*
+        * The type can be of any size but is followed by
+        * a space.
+        */
+       for (;;) {
+               char c = *hdr++;
+               if (!c)
+                       return -1;
+               if (c == ' ')
+                       break;
+               type_len++;
+       }
+
+       type = type_from_string_gently(type_buf, type_len, 1);
+       if (oi->type_name)
+               strbuf_add(oi->type_name, type_buf, type_len);
+       /*
+        * Set type to 0 if it is an unknown object and we are
+        * obtaining its type using the '--allow-unknown-type'
+        * option.
+        */
+       if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE) && (type < 0))
+               type = 0;
+       else if (type < 0)
+               die("invalid object type");
+       if (oi->typep)
+               *oi->typep = type;
+
+       /*
+        * The length must follow immediately, and be in canonical
+        * decimal format (i.e. "010" is not valid).
+        */
+       size = *hdr++ - '0';
+       if (size > 9)
+               return -1;
+       if (size) {
+               for (;;) {
+                       unsigned long c = *hdr - '0';
+                       if (c > 9)
+                               break;
+                       hdr++;
+                       size = size * 10 + c;
+               }
+       }
+
+       if (oi->sizep)
+               *oi->sizep = size;
+
+       /*
+        * The length must be followed by a zero byte
+        */
+       return *hdr ? -1 : type;
+}
+
+int parse_sha1_header(const char *hdr, unsigned long *sizep)
+{
+       struct object_info oi = OBJECT_INFO_INIT;
+
+       oi.sizep = sizep;
+       return parse_sha1_header_extended(hdr, &oi, 0);
+}
+
+static int sha1_loose_object_info(struct repository *r,
+                                 const unsigned char *sha1,
+                                 struct object_info *oi, int flags)
+{
+       int status = 0;
+       unsigned long mapsize;
+       void *map;
+       git_zstream stream;
+       char hdr[MAX_HEADER_LEN];
+       struct strbuf hdrbuf = STRBUF_INIT;
+       unsigned long size_scratch;
+
+       if (oi->delta_base_sha1)
+               hashclr(oi->delta_base_sha1);
+
+       /*
+        * If we don't care about type or size, then we don't
+        * need to look inside the object at all. Note that we
+        * do not optimize out the stat call, even if the
+        * caller doesn't care about the disk-size, since our
+        * return value implicitly indicates whether the
+        * object even exists.
+        */
+       if (!oi->typep && !oi->type_name && !oi->sizep && !oi->contentp) {
+               const char *path;
+               struct stat st;
+               if (stat_sha1_file(r, sha1, &st, &path) < 0)
+                       return -1;
+               if (oi->disk_sizep)
+                       *oi->disk_sizep = st.st_size;
+               return 0;
+       }
+
+       map = map_sha1_file(r, sha1, &mapsize);
+       if (!map)
+               return -1;
+
+       if (!oi->sizep)
+               oi->sizep = &size_scratch;
+
+       if (oi->disk_sizep)
+               *oi->disk_sizep = mapsize;
+       if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE)) {
+               if (unpack_sha1_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0)
+                       status = error("unable to unpack %s header with --allow-unknown-type",
+                                      sha1_to_hex(sha1));
+       } else if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
+               status = error("unable to unpack %s header",
+                              sha1_to_hex(sha1));
+       if (status < 0)
+               ; /* Do nothing */
+       else if (hdrbuf.len) {
+               if ((status = parse_sha1_header_extended(hdrbuf.buf, oi, flags)) < 0)
+                       status = error("unable to parse %s header with --allow-unknown-type",
+                                      sha1_to_hex(sha1));
+       } else if ((status = parse_sha1_header_extended(hdr, oi, flags)) < 0)
+               status = error("unable to parse %s header", sha1_to_hex(sha1));
+
+       if (status >= 0 && oi->contentp) {
+               *oi->contentp = unpack_sha1_rest(&stream, hdr,
+                                                *oi->sizep, sha1);
+               if (!*oi->contentp) {
+                       git_inflate_end(&stream);
+                       status = -1;
+               }
+       } else
+               git_inflate_end(&stream);
+
+       munmap(map, mapsize);
+       if (status && oi->typep)
+               *oi->typep = status;
+       if (oi->sizep == &size_scratch)
+               oi->sizep = NULL;
+       strbuf_release(&hdrbuf);
+       oi->whence = OI_LOOSE;
+       return (status < 0) ? status : 0;
+}
+
+int fetch_if_missing = 1;
+
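+/*
+ * Look up "oid" and fill in the requested fields of "oi".  The lookup
+ * order is: the in-core cached-object list, then packfiles, then
+ * loose objects; if nothing is found, packfiles are re-scanned, and
+ * in a partial clone the object may finally be fetched from the
+ * promisor remote before giving up.
+ */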
+int oid_object_info_extended(struct repository *r, const struct object_id *oid,
+                            struct object_info *oi, unsigned flags)
+{
+       static struct object_info blank_oi = OBJECT_INFO_INIT;
+       struct pack_entry e;
+       int rtype;
+       const struct object_id *real = oid;
+       int already_retried = 0;
+
+       if (flags & OBJECT_INFO_LOOKUP_REPLACE)
+               real = lookup_replace_object(r, oid);
+
+       if (is_null_oid(real))
+               return -1;
+
+       if (!oi)
+               oi = &blank_oi;
+
+       if (!(flags & OBJECT_INFO_SKIP_CACHED)) {
+               struct cached_object *co = find_cached_object(real->hash);
+               if (co) {
+                       if (oi->typep)
+                               *(oi->typep) = co->type;
+                       if (oi->sizep)
+                               *(oi->sizep) = co->size;
+                       if (oi->disk_sizep)
+                               *(oi->disk_sizep) = 0;
+                       if (oi->delta_base_sha1)
+                               hashclr(oi->delta_base_sha1);
+                       if (oi->type_name)
+                               strbuf_addstr(oi->type_name, type_name(co->type));
+                       if (oi->contentp)
+                               *oi->contentp = xmemdupz(co->buf, co->size);
+                       oi->whence = OI_CACHED;
+                       return 0;
+               }
+       }
+
+       while (1) {
+               if (find_pack_entry(r, real->hash, &e))
+                       break;
+
+               if (flags & OBJECT_INFO_IGNORE_LOOSE)
+                       return -1;
+
+               /* Most likely it's a loose object. */
+               if (!sha1_loose_object_info(r, real->hash, oi, flags))
+                       return 0;
+
+               /* Not a loose object; someone else may have just packed it. */
+               if (!(flags & OBJECT_INFO_QUICK)) {
+                       reprepare_packed_git(r);
+                       if (find_pack_entry(r, real->hash, &e))
+                               break;
+               }
+
+               /* Check if it is a missing object */
+               if (fetch_if_missing && repository_format_partial_clone &&
+                   !already_retried && r == the_repository) {
+                       /*
+                        * TODO Investigate having fetch_object() return
+                        * TODO error/success and stopping the music here.
+                        * TODO Pass a repository struct through fetch_object,
+                        * such that arbitrary repositories work.
+                        */
+                       fetch_object(repository_format_partial_clone, real->hash);
+                       already_retried = 1;
+                       continue;
+               }
+
+               return -1;
+       }
+
+       if (oi == &blank_oi)
+               /*
+                * We know that the caller doesn't actually need the
+                * information below, so return early.
+                */
+               return 0;
+       rtype = packed_object_info(r, e.p, e.offset, oi);
+       if (rtype < 0) {
+               mark_bad_packed_object(e.p, real->hash);
+               return oid_object_info_extended(r, real, oi, 0);
+       } else if (oi->whence == OI_PACKED) {
+               oi->u.packed.offset = e.offset;
+               oi->u.packed.pack = e.p;
+               oi->u.packed.is_delta = (rtype == OBJ_REF_DELTA ||
+                                        rtype == OBJ_OFS_DELTA);
+       }
+
+       return 0;
+}
+
+/* returns enum object_type or negative */
+int oid_object_info(struct repository *r,
+                   const struct object_id *oid,
+                   unsigned long *sizep)
+{
+       enum object_type type;
+       struct object_info oi = OBJECT_INFO_INIT;
+
+       oi.typep = &type;
+       oi.sizep = sizep;
+       if (oid_object_info_extended(r, oid, &oi,
+                                     OBJECT_INFO_LOOKUP_REPLACE) < 0)
+               return -1;
+       return type;
+}
+
+static void *read_object(const unsigned char *sha1, enum object_type *type,
+                        unsigned long *size)
+{
+       struct object_id oid;
+       struct object_info oi = OBJECT_INFO_INIT;
+       void *content;
+       oi.typep = type;
+       oi.sizep = size;
+       oi.contentp = &content;
+
+       hashcpy(oid.hash, sha1);
+
+       if (oid_object_info_extended(the_repository, &oid, &oi, 0) < 0)
+               return NULL;
+       return content;
+}
+
+int pretend_object_file(void *buf, unsigned long len, enum object_type type,
+                       struct object_id *oid)
+{
+       struct cached_object *co;
+
+       hash_object_file(buf, len, type_name(type), oid);
+       if (has_sha1_file(oid->hash) || find_cached_object(oid->hash))
+               return 0;
+       ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
+       co = &cached_objects[cached_object_nr++];
+       co->size = len;
+       co->type = type;
+       co->buf = xmalloc(len);
+       memcpy(co->buf, buf, len);
+       hashcpy(co->sha1, oid->hash);
+       return 0;
+}
+
+/*
+ * This function dies on corrupt objects; the callers who want to
+ * deal with them should arrange to call read_object() and give error
+ * messages themselves.
+ */
+void *read_object_file_extended(const struct object_id *oid,
+                               enum object_type *type,
+                               unsigned long *size,
+                               int lookup_replace)
+{
+       void *data;
+       const struct packed_git *p;
+       const char *path;
+       struct stat st;
+       const struct object_id *repl = lookup_replace ?
+               lookup_replace_object(the_repository, oid) : oid;
+
+       errno = 0;
+       data = read_object(repl->hash, type, size);
+       if (data)
+               return data;
+
+       if (errno && errno != ENOENT)
+               die_errno("failed to read object %s", oid_to_hex(oid));
+
+       /* die if we replaced an object with one that does not exist */
+       if (repl != oid)
+               die("replacement %s not found for %s",
+                   oid_to_hex(repl), oid_to_hex(oid));
+
+       if (!stat_sha1_file(the_repository, repl->hash, &st, &path))
+               die("loose object %s (stored in %s) is corrupt",
+                   oid_to_hex(repl), path);
+
+       if ((p = has_packed_and_bad(repl->hash)) != NULL)
+               die("packed object %s (stored in %s) is corrupt",
+                   oid_to_hex(repl), p->pack_name);
+
+       return NULL;
+}
+
+void *read_object_with_reference(const struct object_id *oid,
+                                const char *required_type_name,
+                                unsigned long *size,
+                                struct object_id *actual_oid_return)
+{
+       enum object_type type, required_type;
+       void *buffer;
+       unsigned long isize;
+       struct object_id actual_oid;
+
+       required_type = type_from_string(required_type_name);
+       oidcpy(&actual_oid, oid);
+       while (1) {
+               int ref_length = -1;
+               const char *ref_type = NULL;
+
+               buffer = read_object_file(&actual_oid, &type, &isize);
+               if (!buffer)
+                       return NULL;
+               if (type == required_type) {
+                       *size = isize;
+                       if (actual_oid_return)
+                               oidcpy(actual_oid_return, &actual_oid);
+                       return buffer;
+               }
+               /* Handle references */
+               else if (type == OBJ_COMMIT)
+                       ref_type = "tree ";
+               else if (type == OBJ_TAG)
+                       ref_type = "object ";
+               else {
+                       free(buffer);
+                       return NULL;
+               }
+               ref_length = strlen(ref_type);
+
+               if (ref_length + GIT_SHA1_HEXSZ > isize ||
+                   memcmp(buffer, ref_type, ref_length) ||
+                   get_oid_hex((char *) buffer + ref_length, &actual_oid)) {
+                       free(buffer);
+                       return NULL;
+               }
+               free(buffer);
+               /* Now we have the ID of the referred-to object in
+                * actual_oid.  Check again. */
+       }
+}
+
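+/*
+ * Compute the object name for "buf": hash the "<type> SP <size> NUL"
+ * header followed by the payload, and leave the formatted header in
+ * "hdr"/"hdrlen" for reuse when the object is actually written out.
+ */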
+static void write_object_file_prepare(const void *buf, unsigned long len,
+                                     const char *type, struct object_id *oid,
+                                     char *hdr, int *hdrlen)
+{
+       git_hash_ctx c;
+
+       /* Generate the header */
+       *hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1;
+
+       /* Sha1.. */
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, hdr, *hdrlen);
+       the_hash_algo->update_fn(&c, buf, len);
+       the_hash_algo->final_fn(oid->hash, &c);
+}
+
+/*
+ * Move the just written object into its final resting place.
+ */
+int finalize_object_file(const char *tmpfile, const char *filename)
+{
+       int ret = 0;
+
+       if (object_creation_mode == OBJECT_CREATION_USES_RENAMES)
+               goto try_rename;
+       else if (link(tmpfile, filename))
+               ret = errno;
+
+       /*
+        * Coda hack - coda doesn't like cross-directory links,
+        * so we fall back to a rename, which will mean that it
+        * won't be able to check collisions, but that's not a
+        * big deal.
+        *
+        * The same holds for FAT formatted media.
+        *
+        * When this succeeds, we just return.  We have nothing
+        * left to unlink.
+        */
+       if (ret && ret != EEXIST) {
+       try_rename:
+               if (!rename(tmpfile, filename))
+                       goto out;
+               ret = errno;
+       }
+       unlink_or_warn(tmpfile);
+       if (ret) {
+               if (ret != EEXIST) {
+                       return error_errno("unable to write sha1 filename %s", filename);
+               }
+               /* FIXME!!! Collision check here ? */
+       }
+
+out:
+       if (adjust_shared_perm(filename))
+               return error("unable to set permission to '%s'", filename);
+       return 0;
+}
+
+static int write_buffer(int fd, const void *buf, size_t len)
+{
+       if (write_in_full(fd, buf, len) < 0)
+               return error_errno("file write error");
+       return 0;
+}
+
+int hash_object_file(const void *buf, unsigned long len, const char *type,
+                    struct object_id *oid)
+{
+       char hdr[MAX_HEADER_LEN];
+       int hdrlen = sizeof(hdr);
+       write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
+       return 0;
+}
+
+/* Finalize a file on disk, and close it. */
+static void close_sha1_file(int fd)
+{
+       if (fsync_object_files)
+               fsync_or_die(fd, "sha1 file");
+       if (close(fd) != 0)
+               die_errno("error when closing sha1 file");
+}
+
+/* Size of directory component, including the ending '/' */
+static inline int directory_size(const char *filename)
+{
+       const char *s = strrchr(filename, '/');
+       if (!s)
+               return 0;
+       return s - filename + 1;
+}
+
+/*
+ * This creates a temporary file in the same directory as the final
+ * 'filename'
+ *
+ * We want to avoid cross-directory filename renames, because those
+ * can have problems on various filesystems (FAT, NFS, Coda).
+ */
+static int create_tmpfile(struct strbuf *tmp, const char *filename)
+{
+       int fd, dirlen = directory_size(filename);
+
+       strbuf_reset(tmp);
+       strbuf_add(tmp, filename, dirlen);
+       strbuf_addstr(tmp, "tmp_obj_XXXXXX");
+       fd = git_mkstemp_mode(tmp->buf, 0444);
+       if (fd < 0 && dirlen && errno == ENOENT) {
+               /*
+                * Make sure the directory exists; note that the contents
+                * of the buffer are undefined after mkstemp returns an
+                * error, so we have to rewrite the whole buffer from
+                * scratch.
+                */
+               strbuf_reset(tmp);
+               strbuf_add(tmp, filename, dirlen - 1);
+               if (mkdir(tmp->buf, 0777) && errno != EEXIST)
+                       return -1;
+               if (adjust_shared_perm(tmp->buf))
+                       return -1;
+
+               /* Try again */
+               strbuf_addstr(tmp, "/tmp_obj_XXXXXX");
+               fd = git_mkstemp_mode(tmp->buf, 0444);
+       }
+       return fd;
+}
+
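+/*
+ * Deflate the header and payload into a temporary file next to the
+ * final loose object path, double-checking along the way that the
+ * data still hashes to "oid", then move the result into place.
+ */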
+static int write_loose_object(const struct object_id *oid, char *hdr,
+                             int hdrlen, const void *buf, unsigned long len,
+                             time_t mtime)
+{
+       int fd, ret;
+       unsigned char compressed[4096];
+       git_zstream stream;
+       git_hash_ctx c;
+       struct object_id parano_oid;
+       static struct strbuf tmp_file = STRBUF_INIT;
+       static struct strbuf filename = STRBUF_INIT;
+
+       strbuf_reset(&filename);
+       sha1_file_name(the_repository, &filename, oid->hash);
+
+       fd = create_tmpfile(&tmp_file, filename.buf);
+       if (fd < 0) {
+               if (errno == EACCES)
+                       return error("insufficient permission for adding an object to repository database %s", get_object_directory());
+               else
+                       return error_errno("unable to create temporary file");
+       }
+
+       /* Set it up */
+       git_deflate_init(&stream, zlib_compression_level);
+       stream.next_out = compressed;
+       stream.avail_out = sizeof(compressed);
+       the_hash_algo->init_fn(&c);
+
+       /* First header.. */
+       stream.next_in = (unsigned char *)hdr;
+       stream.avail_in = hdrlen;
+       while (git_deflate(&stream, 0) == Z_OK)
+               ; /* nothing */
+       the_hash_algo->update_fn(&c, hdr, hdrlen);
+
+       /* Then the data itself.. */
+       stream.next_in = (void *)buf;
+       stream.avail_in = len;
+       do {
+               unsigned char *in0 = stream.next_in;
+               ret = git_deflate(&stream, Z_FINISH);
+               the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
+               if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
+                       die("unable to write sha1 file");
+               stream.next_out = compressed;
+               stream.avail_out = sizeof(compressed);
+       } while (ret == Z_OK);
+
+       if (ret != Z_STREAM_END)
+               die("unable to deflate new object %s (%d)", oid_to_hex(oid),
+                   ret);
+       ret = git_deflate_end_gently(&stream);
+       if (ret != Z_OK)
+               die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
+                   ret);
+       the_hash_algo->final_fn(parano_oid.hash, &c);
+       if (oidcmp(oid, &parano_oid) != 0)
+               die("confused by unstable object source data for %s",
+                   oid_to_hex(oid));
+
+       close_sha1_file(fd);
+
+       if (mtime) {
+               struct utimbuf utb;
+               utb.actime = mtime;
+               utb.modtime = mtime;
+               if (utime(tmp_file.buf, &utb) < 0)
+                       warning_errno("failed utime() on %s", tmp_file.buf);
+       }
+
+       return finalize_object_file(tmp_file.buf, filename.buf);
+}
+
+static int freshen_loose_object(const unsigned char *sha1)
+{
+       return check_and_freshen(sha1, 1);
+}
+
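+/*
+ * Freshening the mtime of a packfile protects every object stored in
+ * it from being pruned as "old"; the freshened bit avoids repeated
+ * utime() calls on the same pack.
+ */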
+static int freshen_packed_object(const unsigned char *sha1)
+{
+       struct pack_entry e;
+       if (!find_pack_entry(the_repository, sha1, &e))
+               return 0;
+       if (e.p->freshened)
+               return 1;
+       if (!freshen_file(e.p->pack_name))
+               return 0;
+       e.p->freshened = 1;
+       return 1;
+}
+
+int write_object_file(const void *buf, unsigned long len, const char *type,
+                     struct object_id *oid)
+{
+       char hdr[MAX_HEADER_LEN];
+       int hdrlen = sizeof(hdr);
+
+       /* Normally, if we already have the object in a pack, we do not
+        * bother writing it out into a loose .git/objects/??/?{38} file.
+        */
+       write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
+       if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
+               return 0;
+       return write_loose_object(oid, hdr, hdrlen, buf, len, 0);
+}
+
+int hash_object_file_literally(const void *buf, unsigned long len,
+                              const char *type, struct object_id *oid,
+                              unsigned flags)
+{
+       char *header;
+       int hdrlen, status = 0;
+
+       /* The type string, SP, the %lu-formatted length, plus NUL must fit this */
+       hdrlen = strlen(type) + MAX_HEADER_LEN;
+       header = xmalloc(hdrlen);
+       write_object_file_prepare(buf, len, type, oid, header, &hdrlen);
+
+       if (!(flags & HASH_WRITE_OBJECT))
+               goto cleanup;
+       if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
+               goto cleanup;
+       status = write_loose_object(oid, header, hdrlen, buf, len, 0);
+
+cleanup:
+       free(header);
+       return status;
+}
+
+int force_object_loose(const struct object_id *oid, time_t mtime)
+{
+       void *buf;
+       unsigned long len;
+       enum object_type type;
+       char hdr[MAX_HEADER_LEN];
+       int hdrlen;
+       int ret;
+
+       if (has_loose_object(oid->hash))
+               return 0;
+       buf = read_object(oid->hash, &type, &len);
+       if (!buf)
+               return error("cannot read sha1_file for %s", oid_to_hex(oid));
+       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
+       ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
+       free(buf);
+
+       return ret;
+}
+
+int has_sha1_file_with_flags(const unsigned char *sha1, int flags)
+{
+       struct object_id oid;
+       if (!startup_info->have_repository)
+               return 0;
+       hashcpy(oid.hash, sha1);
+       return oid_object_info_extended(the_repository, &oid, NULL,
+                                       flags | OBJECT_INFO_SKIP_CACHED) >= 0;
+}
+
+int has_object_file(const struct object_id *oid)
+{
+       return has_sha1_file(oid->hash);
+}
+
+int has_object_file_with_flags(const struct object_id *oid, int flags)
+{
+       return has_sha1_file_with_flags(oid->hash, flags);
+}
+
+static void check_tree(const void *buf, size_t size)
+{
+       struct tree_desc desc;
+       struct name_entry entry;
+
+       init_tree_desc(&desc, buf, size);
+       while (tree_entry(&desc, &entry))
+               /*
+                * Do nothing; tree_entry() will die() on malformed
+                * entries.
+                */
+               ;
+}
+
+static void check_commit(const void *buf, size_t size)
+{
+       struct commit c;
+       memset(&c, 0, sizeof(c));
+       if (parse_commit_buffer(&c, buf, size))
+               die("corrupt commit");
+}
+
+static void check_tag(const void *buf, size_t size)
+{
+       struct tag t;
+       memset(&t, 0, sizeof(t));
+       if (parse_tag_buffer(&t, buf, size))
+               die("corrupt tag");
+}
+
+static int index_mem(struct object_id *oid, void *buf, size_t size,
+                    enum object_type type,
+                    const char *path, unsigned flags)
+{
+       int ret, re_allocated = 0;
+       int write_object = flags & HASH_WRITE_OBJECT;
+
+       if (!type)
+               type = OBJ_BLOB;
+
+       /*
+        * Convert blobs to git internal format
+        */
+       if ((type == OBJ_BLOB) && path) {
+               struct strbuf nbuf = STRBUF_INIT;
+               if (convert_to_git(&the_index, path, buf, size, &nbuf,
+                                  get_conv_flags(flags))) {
+                       buf = strbuf_detach(&nbuf, &size);
+                       re_allocated = 1;
+               }
+       }
+       if (flags & HASH_FORMAT_CHECK) {
+               if (type == OBJ_TREE)
+                       check_tree(buf, size);
+               if (type == OBJ_COMMIT)
+                       check_commit(buf, size);
+               if (type == OBJ_TAG)
+                       check_tag(buf, size);
+       }
+
+       if (write_object)
+               ret = write_object_file(buf, size, type_name(type), oid);
+       else
+               ret = hash_object_file(buf, size, type_name(type), oid);
+       if (re_allocated)
+               free(buf);
+       return ret;
+}
+
+static int index_stream_convert_blob(struct object_id *oid, int fd,
+                                    const char *path, unsigned flags)
+{
+       int ret;
+       const int write_object = flags & HASH_WRITE_OBJECT;
+       struct strbuf sbuf = STRBUF_INIT;
+
+       assert(path);
+       assert(would_convert_to_git_filter_fd(path));
+
+       convert_to_git_filter_fd(&the_index, path, fd, &sbuf,
+                                get_conv_flags(flags));
+
+       if (write_object)
+               ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+                                       oid);
+       else
+               ret = hash_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+                                      oid);
+       strbuf_release(&sbuf);
+       return ret;
+}
+
+static int index_pipe(struct object_id *oid, int fd, enum object_type type,
+                     const char *path, unsigned flags)
+{
+       struct strbuf sbuf = STRBUF_INIT;
+       int ret;
+
+       if (strbuf_read(&sbuf, fd, 4096) >= 0)
+               ret = index_mem(oid, sbuf.buf, sbuf.len, type, path, flags);
+       else
+               ret = -1;
+       strbuf_release(&sbuf);
+       return ret;
+}
+
+#define SMALL_FILE_SIZE (32*1024)
+
+static int index_core(struct object_id *oid, int fd, size_t size,
+                     enum object_type type, const char *path,
+                     unsigned flags)
+{
+       int ret;
+
+       if (!size) {
+               ret = index_mem(oid, "", size, type, path, flags);
+       } else if (size <= SMALL_FILE_SIZE) {
+               char *buf = xmalloc(size);
+               ssize_t read_result = read_in_full(fd, buf, size);
+               if (read_result < 0)
+                       ret = error_errno("read error while indexing %s",
+                                         path ? path : "<unknown>");
+               else if (read_result != size)
+                       ret = error("short read while indexing %s",
+                                   path ? path : "<unknown>");
+               else
+                       ret = index_mem(oid, buf, size, type, path, flags);
+               free(buf);
+       } else {
+               void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+               ret = index_mem(oid, buf, size, type, path, flags);
+               munmap(buf, size);
+       }
+       return ret;
+}
+
+/*
+ * This creates one packfile per large blob unless bulk-checkin
+ * machinery is "plugged".
+ *
+ * This also bypasses the usual "convert-to-git" dance, and that is on
+ * purpose. We could write a streaming version of the converting
+ * functions and insert that before feeding the data to fast-import
+ * (or equivalent in-core API described above). However, that is
+ * somewhat complicated, as we do not know the size of the filter
+ * result, which we need to know beforehand when writing a git object.
+ * Since the primary motivation for trying to stream from the working
+ * tree file and to avoid mmaping it in core is to deal with large
+ * binary blobs, they generally do not want to get any conversion, and
+ * callers should avoid this code path when filters are requested.
+ */
+static int index_stream(struct object_id *oid, int fd, size_t size,
+                       enum object_type type, const char *path,
+                       unsigned flags)
+{
+       return index_bulk_checkin(oid, fd, size, type, path, flags);
+}
+
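+/*
+ * Hash (and optionally write) the contents read from "fd" as an
+ * object: blobs that need an fd-based filter go through the streaming
+ * conversion, non-regular files are slurped via the pipe path,
+ * non-blobs and small or convertible contents are handled in core,
+ * and everything else is streamed straight into a pack.
+ */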
+int index_fd(struct object_id *oid, int fd, struct stat *st,
+            enum object_type type, const char *path, unsigned flags)
+{
+       int ret;
+
+       /*
+        * Call xsize_t() only when needed to avoid potentially unnecessary
+        * die() for large files.
+        */
+       if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(path))
+               ret = index_stream_convert_blob(oid, fd, path, flags);
+       else if (!S_ISREG(st->st_mode))
+               ret = index_pipe(oid, fd, type, path, flags);
+       else if (st->st_size <= big_file_threshold || type != OBJ_BLOB ||
+                (path && would_convert_to_git(&the_index, path)))
+               ret = index_core(oid, fd, xsize_t(st->st_size), type, path,
+                                flags);
+       else
+               ret = index_stream(oid, fd, xsize_t(st->st_size), type, path,
+                                  flags);
+       close(fd);
+       return ret;
+}
+
+int index_path(struct object_id *oid, const char *path, struct stat *st, unsigned flags)
+{
+       int fd;
+       struct strbuf sb = STRBUF_INIT;
+       int rc = 0;
+
+       switch (st->st_mode & S_IFMT) {
+       case S_IFREG:
+               fd = open(path, O_RDONLY);
+               if (fd < 0)
+                       return error_errno("open(\"%s\")", path);
+               if (index_fd(oid, fd, st, OBJ_BLOB, path, flags) < 0)
+                       return error("%s: failed to insert into database",
+                                    path);
+               break;
+       case S_IFLNK:
+               if (strbuf_readlink(&sb, path, st->st_size))
+                       return error_errno("readlink(\"%s\")", path);
+               if (!(flags & HASH_WRITE_OBJECT))
+                       hash_object_file(sb.buf, sb.len, blob_type, oid);
+               else if (write_object_file(sb.buf, sb.len, blob_type, oid))
+                       rc = error("%s: failed to insert into database", path);
+               strbuf_release(&sb);
+               break;
+       case S_IFDIR:
+               return resolve_gitlink_ref(path, "HEAD", oid);
+       default:
+               return error("%s: unsupported file type", path);
+       }
+       return rc;
+}
+
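+/*
+ * Read and sanity-check a pack header: the 4-byte "PACK" signature,
+ * a 4-byte version and a 4-byte object count, all in network byte
+ * order.
+ */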
+int read_pack_header(int fd, struct pack_header *header)
+{
+       if (read_in_full(fd, header, sizeof(*header)) != sizeof(*header))
+               /* "eof before pack header was fully read" */
+               return PH_ERROR_EOF;
+
+       if (header->hdr_signature != htonl(PACK_SIGNATURE))
+               /* "protocol error (pack signature mismatch detected)" */
+               return PH_ERROR_PACK_SIGNATURE;
+       if (!pack_version_ok(header->hdr_version))
+               /* "protocol error (pack version unsupported)" */
+               return PH_ERROR_PROTOCOL;
+       return 0;
+}
+
+void assert_oid_type(const struct object_id *oid, enum object_type expect)
+{
+       enum object_type type = oid_object_info(the_repository, oid, NULL);
+       if (type < 0)
+               die("%s is not a valid object", oid_to_hex(oid));
+       if (type != expect)
+               die("%s is not a valid '%s' object", oid_to_hex(oid),
+                   type_name(expect));
+}
+
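+/*
+ * Loose objects live in objects/xx/<remaining 38 hex chars>, where
+ * "xx" is the first byte of the object name in hex.  Iterate over one
+ * such fan-out subdirectory, reporting well-formed object files via
+ * obj_cb and anything else via cruft_cb.
+ */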
+int for_each_file_in_obj_subdir(unsigned int subdir_nr,
+                               struct strbuf *path,
+                               each_loose_object_fn obj_cb,
+                               each_loose_cruft_fn cruft_cb,
+                               each_loose_subdir_fn subdir_cb,
+                               void *data)
+{
+       size_t origlen, baselen;
+       DIR *dir;
+       struct dirent *de;
+       int r = 0;
+       struct object_id oid;
+
+       if (subdir_nr > 0xff)
+               BUG("invalid loose object subdirectory: %x", subdir_nr);
+
+       origlen = path->len;
+       strbuf_complete(path, '/');
+       strbuf_addf(path, "%02x", subdir_nr);
+
+       dir = opendir(path->buf);
+       if (!dir) {
+               if (errno != ENOENT)
+                       r = error_errno("unable to open %s", path->buf);
+               strbuf_setlen(path, origlen);
+               return r;
+       }
+
+       oid.hash[0] = subdir_nr;
+       strbuf_addch(path, '/');
+       baselen = path->len;
+
+       while ((de = readdir(dir))) {
+               size_t namelen;
+               if (is_dot_or_dotdot(de->d_name))
+                       continue;
+
+               namelen = strlen(de->d_name);
+               strbuf_setlen(path, baselen);
+               strbuf_add(path, de->d_name, namelen);
+               if (namelen == GIT_SHA1_HEXSZ - 2 &&
+                   !hex_to_bytes(oid.hash + 1, de->d_name,
+                                 GIT_SHA1_RAWSZ - 1)) {
+                       if (obj_cb) {
+                               r = obj_cb(&oid, path->buf, data);
+                               if (r)
+                                       break;
+                       }
+                       continue;
+               }
+
+               if (cruft_cb) {
+                       r = cruft_cb(de->d_name, path->buf, data);
+                       if (r)
+                               break;
+               }
+       }
+       closedir(dir);
+
+       strbuf_setlen(path, baselen - 1);
+       if (!r && subdir_cb)
+               r = subdir_cb(subdir_nr, path->buf, data);
+
+       strbuf_setlen(path, origlen);
+
+       return r;
+}
+
+int for_each_loose_file_in_objdir_buf(struct strbuf *path,
+                           each_loose_object_fn obj_cb,
+                           each_loose_cruft_fn cruft_cb,
+                           each_loose_subdir_fn subdir_cb,
+                           void *data)
+{
+       int r = 0;
+       int i;
+
+       for (i = 0; i < 256; i++) {
+               r = for_each_file_in_obj_subdir(i, path, obj_cb, cruft_cb,
+                                               subdir_cb, data);
+               if (r)
+                       break;
+       }
+
+       return r;
+}
+
+int for_each_loose_file_in_objdir(const char *path,
+                                 each_loose_object_fn obj_cb,
+                                 each_loose_cruft_fn cruft_cb,
+                                 each_loose_subdir_fn subdir_cb,
+                                 void *data)
+{
+       struct strbuf buf = STRBUF_INIT;
+       int r;
+
+       strbuf_addstr(&buf, path);
+       r = for_each_loose_file_in_objdir_buf(&buf, obj_cb, cruft_cb,
+                                             subdir_cb, data);
+       strbuf_release(&buf);
+
+       return r;
+}
+
+struct loose_alt_odb_data {
+       each_loose_object_fn *cb;
+       void *data;
+};
+
+static int loose_from_alt_odb(struct alternate_object_database *alt,
+                             void *vdata)
+{
+       struct loose_alt_odb_data *data = vdata;
+       struct strbuf buf = STRBUF_INIT;
+       int r;
+
+       strbuf_addstr(&buf, alt->path);
+       r = for_each_loose_file_in_objdir_buf(&buf,
+                                             data->cb, NULL, NULL,
+                                             data->data);
+       strbuf_release(&buf);
+       return r;
+}
+
+int for_each_loose_object(each_loose_object_fn cb, void *data, unsigned flags)
+{
+       struct loose_alt_odb_data alt;
+       int r;
+
+       r = for_each_loose_file_in_objdir(get_object_directory(),
+                                         cb, NULL, NULL, data);
+       if (r)
+               return r;
+
+       if (flags & FOR_EACH_OBJECT_LOCAL_ONLY)
+               return 0;
+
+       alt.cb = cb;
+       alt.data = data;
+       return foreach_alt_odb(loose_from_alt_odb, &alt);
+}
+
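+/*
+ * Inflate the remainder of a loose object without keeping its
+ * contents in memory, and verify that the header plus contents hash
+ * to expected_sha1.
+ */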
+static int check_stream_sha1(git_zstream *stream,
+                            const char *hdr,
+                            unsigned long size,
+                            const char *path,
+                            const unsigned char *expected_sha1)
+{
+       git_hash_ctx c;
+       unsigned char real_sha1[GIT_MAX_RAWSZ];
+       unsigned char buf[4096];
+       unsigned long total_read;
+       int status = Z_OK;
+
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, hdr, stream->total_out);
+
+       /*
+        * We already read some bytes into hdr, but the ones up to the NUL
+        * do not count against the object's content size.
+        */
+       total_read = stream->total_out - strlen(hdr) - 1;
+
+       /*
+        * This size comparison must be "<=" to read the final zlib packets;
+        * see the comment in unpack_sha1_rest for details.
+        */
+       while (total_read <= size &&
+              (status == Z_OK || status == Z_BUF_ERROR)) {
+               stream->next_out = buf;
+               stream->avail_out = sizeof(buf);
+               if (size - total_read < stream->avail_out)
+                       stream->avail_out = size - total_read;
+               status = git_inflate(stream, Z_FINISH);
+               the_hash_algo->update_fn(&c, buf, stream->next_out - buf);
+               total_read += stream->next_out - buf;
+       }
+       git_inflate_end(stream);
+
+       if (status != Z_STREAM_END) {
+               error("corrupt loose object '%s'", sha1_to_hex(expected_sha1));
+               return -1;
+       }
+       if (stream->avail_in) {
+               error("garbage at end of loose object '%s'",
+                     sha1_to_hex(expected_sha1));
+               return -1;
+       }
+
+       the_hash_algo->final_fn(real_sha1, &c);
+       if (hashcmp(expected_sha1, real_sha1)) {
+               error("sha1 mismatch for %s (expected %s)", path,
+                     sha1_to_hex(expected_sha1));
+               return -1;
+       }
+
+       return 0;
+}
+
+int read_loose_object(const char *path,
+                     const struct object_id *expected_oid,
+                     enum object_type *type,
+                     unsigned long *size,
+                     void **contents)
+{
+       int ret = -1;
+       void *map = NULL;
+       unsigned long mapsize;
+       git_zstream stream;
+       char hdr[MAX_HEADER_LEN];
+
+       *contents = NULL;
+
+       map = map_sha1_file_1(the_repository, path, NULL, &mapsize);
+       if (!map) {
+               error_errno("unable to mmap %s", path);
+               goto out;
+       }
+
+       if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) {
+               error("unable to unpack header of %s", path);
+               goto out;
+       }
+
+       *type = parse_sha1_header(hdr, size);
+       if (*type < 0) {
+               error("unable to parse header of %s", path);
+               git_inflate_end(&stream);
+               goto out;
+       }
+
+       if (*type == OBJ_BLOB) {
+               if (check_stream_sha1(&stream, hdr, *size, path, expected_oid->hash) < 0)
+                       goto out;
+       } else {
+               *contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash);
+               if (!*contents) {
+                       error("unable to unpack contents of %s", path);
+                       git_inflate_end(&stream);
+                       goto out;
+               }
+               if (check_object_signature(expected_oid, *contents,
+                                        *size, type_name(*type))) {
+                       error("sha1 mismatch for %s (expected %s)", path,
+                             oid_to_hex(expected_oid));
+                       free(*contents);
+                       goto out;
+               }
+       }
+
+       ret = 0; /* everything checks out */
+
+out:
+       if (map)
+               munmap(map, mapsize);
+       return ret;
+}
diff --git a/sha1-name.c b/sha1-name.c
new file mode 100644 (file)
index 0000000..80030b1
--- /dev/null
@@ -0,0 +1,1734 @@
+#include "cache.h"
+#include "config.h"
+#include "tag.h"
+#include "commit.h"
+#include "tree.h"
+#include "blob.h"
+#include "tree-walk.h"
+#include "refs.h"
+#include "remote.h"
+#include "dir.h"
+#include "sha1-array.h"
+#include "packfile.h"
+#include "object-store.h"
+#include "repository.h"
+
+static int get_oid_oneline(const char *, struct object_id *, struct commit_list *);
+
+typedef int (*disambiguate_hint_fn)(const struct object_id *, void *);
+
+struct disambiguate_state {
+       int len; /* length of prefix in hex chars */
+       char hex_pfx[GIT_MAX_HEXSZ + 1];
+       struct object_id bin_pfx;
+
+       disambiguate_hint_fn fn;
+       void *cb_data;
+       struct object_id candidate;
+       unsigned candidate_exists:1;
+       unsigned candidate_checked:1;
+       unsigned candidate_ok:1;
+       unsigned disambiguate_fn_used:1;
+       unsigned ambiguous:1;
+       unsigned always_call_fn:1;
+};
+
+static void update_candidates(struct disambiguate_state *ds, const struct object_id *current)
+{
+       if (ds->always_call_fn) {
+               ds->ambiguous = ds->fn(current, ds->cb_data) ? 1 : 0;
+               return;
+       }
+       if (!ds->candidate_exists) {
+               /* this is the first candidate */
+               oidcpy(&ds->candidate, current);
+               ds->candidate_exists = 1;
+               return;
+       } else if (!oidcmp(&ds->candidate, current)) {
+               /* the same as what we already have seen */
+               return;
+       }
+
+       if (!ds->fn) {
+               /* cannot disambiguate between ds->candidate and current */
+               ds->ambiguous = 1;
+               return;
+       }
+
+       if (!ds->candidate_checked) {
+               ds->candidate_ok = ds->fn(&ds->candidate, ds->cb_data);
+               ds->disambiguate_fn_used = 1;
+               ds->candidate_checked = 1;
+       }
+
+       if (!ds->candidate_ok) {
+               /* discard the candidate; we know it does not satisfy fn */
+               oidcpy(&ds->candidate, current);
+               ds->candidate_checked = 0;
+               return;
+       }
+
+       /* if we reach this point, we know ds->candidate satisfies fn */
+       if (ds->fn(current, ds->cb_data)) {
+               /*
+                * if both current and candidate satisfy fn, we cannot
+                * disambiguate.
+                */
+               ds->candidate_ok = 0;
+               ds->ambiguous = 1;
+       }
+
+       /* otherwise, current can be discarded and candidate is still good */
+}
+
+static int append_loose_object(const struct object_id *oid, const char *path,
+                              void *data)
+{
+       oid_array_append(data, oid);
+       return 0;
+}
+
+static int match_sha(unsigned, const unsigned char *, const unsigned char *);
+
+static void find_short_object_filename(struct disambiguate_state *ds)
+{
+       int subdir_nr = ds->bin_pfx.hash[0];
+       struct alternate_object_database *alt;
+       static struct alternate_object_database *fakeent;
+
+       if (!fakeent) {
+               /*
+                * Create a "fake" alternate object database that
+                * points to our own object database, to make it
+                * easier to get a temporary working space in
+                * alt->name/alt->base while iterating over the
+                * object databases including our own.
+                */
+               fakeent = alloc_alt_odb(get_object_directory());
+       }
+       fakeent->next = the_repository->objects->alt_odb_list;
+
+       for (alt = fakeent; alt && !ds->ambiguous; alt = alt->next) {
+               int pos;
+
+               if (!alt->loose_objects_subdir_seen[subdir_nr]) {
+                       struct strbuf *buf = alt_scratch_buf(alt);
+                       for_each_file_in_obj_subdir(subdir_nr, buf,
+                                                   append_loose_object,
+                                                   NULL, NULL,
+                                                   &alt->loose_objects_cache);
+                       alt->loose_objects_subdir_seen[subdir_nr] = 1;
+               }
+
+               pos = oid_array_lookup(&alt->loose_objects_cache, &ds->bin_pfx);
+               if (pos < 0)
+                       pos = -1 - pos;
+               while (!ds->ambiguous && pos < alt->loose_objects_cache.nr) {
+                       const struct object_id *oid;
+                       oid = alt->loose_objects_cache.oid + pos;
+                       if (!match_sha(ds->len, ds->bin_pfx.hash, oid->hash))
+                               break;
+                       update_candidates(ds, oid);
+                       pos++;
+               }
+       }
+}
+
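+/*
+ * Compare the leading "len" hex digits' worth of two binary object
+ * names; when "len" is odd, only the high nibble of the final byte
+ * is compared.
+ */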
+static int match_sha(unsigned len, const unsigned char *a, const unsigned char *b)
+{
+       do {
+               if (*a != *b)
+                       return 0;
+               a++;
+               b++;
+               len -= 2;
+       } while (len > 1);
+       if (len)
+               if ((*a ^ *b) & 0xf0)
+                       return 0;
+       return 1;
+}
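
match_sha() above compares object names at hex-digit (nibble) granularity: whole bytes first, then only the high nibble of the last byte when the prefix length is odd. A minimal standalone sketch of the same idea (illustrative names, not part of this patch):

    #include <stdio.h>

    /* Compare the first "len" hex digits (nibbles) of two binary hashes. */
    static int prefix_match(unsigned len, const unsigned char *a, const unsigned char *b)
    {
        unsigned i;

        for (i = 0; i + 1 < len; i += 2, a++, b++)
            if (*a != *b)
                return 0;
        if (len & 1) /* odd length: only the high nibble of the last byte counts */
            return !((*a ^ *b) & 0xf0);
        return 1;
    }

    int main(void)
    {
        unsigned char x[] = { 0xab, 0xcd, 0xe0 };
        unsigned char y[] = { 0xab, 0xcd, 0xef };

        printf("%d %d\n", prefix_match(5, x, y), prefix_match(6, x, y)); /* prints "1 0" */
        return 0;
    }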
+
+static void unique_in_pack(struct packed_git *p,
+                          struct disambiguate_state *ds)
+{
+       uint32_t num, i, first = 0;
+       const struct object_id *current = NULL;
+
+       if (open_pack_index(p) || !p->num_objects)
+               return;
+
+       num = p->num_objects;
+       bsearch_pack(&ds->bin_pfx, p, &first);
+
+       /*
+        * At this point, "first" is the location of the lowest object
+        * with an object name that could match "bin_pfx".  See if we have
+        * 0, 1 or more objects that actually match.
+        */
+       for (i = first; i < num && !ds->ambiguous; i++) {
+               struct object_id oid;
+               current = nth_packed_object_oid(&oid, p, i);
+               if (!match_sha(ds->len, ds->bin_pfx.hash, current->hash))
+                       break;
+               update_candidates(ds, current);
+       }
+}
+
+static void find_short_packed_object(struct disambiguate_state *ds)
+{
+       struct packed_git *p;
+
+       for (p = get_packed_git(the_repository); p && !ds->ambiguous;
+            p = p->next)
+               unique_in_pack(p, ds);
+}
+
+#define SHORT_NAME_NOT_FOUND (-1)
+#define SHORT_NAME_AMBIGUOUS (-2)
+
+static int finish_object_disambiguation(struct disambiguate_state *ds,
+                                       struct object_id *oid)
+{
+       if (ds->ambiguous)
+               return SHORT_NAME_AMBIGUOUS;
+
+       if (!ds->candidate_exists)
+               return SHORT_NAME_NOT_FOUND;
+
+       if (!ds->candidate_checked)
+               /*
+                * If this is the only candidate, there is no point
+                * calling the disambiguation hint callback.
+                *
+                * On the other hand, if the current candidate
+                * replaced an earlier candidate that did _not_ pass
+                * the disambiguation hint callback, then we do have
+                * more than one object that matches the short name
+                * given, so we should make sure this one matches;
+                * otherwise, if we discovered this one and the one
+                * that we previously discarded in the reverse order,
+                * we would end up showing different results in the
+                * same repository!
+                */
+               ds->candidate_ok = (!ds->disambiguate_fn_used ||
+                                   ds->fn(&ds->candidate, ds->cb_data));
+
+       if (!ds->candidate_ok)
+               return SHORT_NAME_AMBIGUOUS;
+
+       oidcpy(oid, &ds->candidate);
+       return 0;
+}
+
+static int disambiguate_commit_only(const struct object_id *oid, void *cb_data_unused)
+{
+       int kind = oid_object_info(the_repository, oid, NULL);
+       return kind == OBJ_COMMIT;
+}
+
+static int disambiguate_committish_only(const struct object_id *oid, void *cb_data_unused)
+{
+       struct object *obj;
+       int kind;
+
+       kind = oid_object_info(the_repository, oid, NULL);
+       if (kind == OBJ_COMMIT)
+               return 1;
+       if (kind != OBJ_TAG)
+               return 0;
+
+       /* We need to do this the hard way... */
+       obj = deref_tag(parse_object(oid), NULL, 0);
+       if (obj && obj->type == OBJ_COMMIT)
+               return 1;
+       return 0;
+}
+
+static int disambiguate_tree_only(const struct object_id *oid, void *cb_data_unused)
+{
+       int kind = oid_object_info(the_repository, oid, NULL);
+       return kind == OBJ_TREE;
+}
+
+static int disambiguate_treeish_only(const struct object_id *oid, void *cb_data_unused)
+{
+       struct object *obj;
+       int kind;
+
+       kind = oid_object_info(the_repository, oid, NULL);
+       if (kind == OBJ_TREE || kind == OBJ_COMMIT)
+               return 1;
+       if (kind != OBJ_TAG)
+               return 0;
+
+       /* We need to do this the hard way... */
+       obj = deref_tag(parse_object(oid), NULL, 0);
+       if (obj && (obj->type == OBJ_TREE || obj->type == OBJ_COMMIT))
+               return 1;
+       return 0;
+}
+
+static int disambiguate_blob_only(const struct object_id *oid, void *cb_data_unused)
+{
+       int kind = oid_object_info(the_repository, oid, NULL);
+       return kind == OBJ_BLOB;
+}
+
+static disambiguate_hint_fn default_disambiguate_hint;
+
+int set_disambiguate_hint_config(const char *var, const char *value)
+{
+       static const struct {
+               const char *name;
+               disambiguate_hint_fn fn;
+       } hints[] = {
+               { "none", NULL },
+               { "commit", disambiguate_commit_only },
+               { "committish", disambiguate_committish_only },
+               { "tree", disambiguate_tree_only },
+               { "treeish", disambiguate_treeish_only },
+               { "blob", disambiguate_blob_only }
+       };
+       int i;
+
+       if (!value)
+               return config_error_nonbool(var);
+
+       for (i = 0; i < ARRAY_SIZE(hints); i++) {
+               if (!strcasecmp(value, hints[i].name)) {
+                       default_disambiguate_hint = hints[i].fn;
+                       return 0;
+               }
+       }
+
+       return error("unknown hint type for '%s': %s", var, value);
+}
+
+static int init_object_disambiguation(const char *name, int len,
+                                     struct disambiguate_state *ds)
+{
+       int i;
+
+       if (len < MINIMUM_ABBREV || len > GIT_SHA1_HEXSZ)
+               return -1;
+
+       memset(ds, 0, sizeof(*ds));
+
+       for (i = 0; i < len; i++) {
+               unsigned char c = name[i];
+               unsigned char val;
+               if (c >= '0' && c <= '9')
+                       val = c - '0';
+               else if (c >= 'a' && c <= 'f')
+                       val = c - 'a' + 10;
+               else if (c >= 'A' && c <= 'F') {
+                       val = c - 'A' + 10;
+                       c -= 'A' - 'a';
+               } else
+                       return -1;
+               ds->hex_pfx[i] = c;
+               if (!(i & 1))
+                       val <<= 4;
+               ds->bin_pfx.hash[i >> 1] |= val;
+       }
+
+       ds->len = len;
+       ds->hex_pfx[len] = '\0';
+       prepare_alt_odb(the_repository);
+       return 0;
+}
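
init_object_disambiguation() keeps the prefix twice: as a lower-cased hex string and packed into bytes, with hex digits at even positions filling the high nibble. A standalone sketch of just the packing step (illustrative, assuming a well-formed prefix length):

    #include <stdio.h>
    #include <string.h>

    /* Pack a hex prefix into bytes; returns -1 on a non-hex character. */
    static int pack_hex_prefix(const char *hex, size_t len, unsigned char *bin)
    {
        size_t i;

        memset(bin, 0, (len + 1) / 2);
        for (i = 0; i < len; i++) {
            char c = hex[i];
            unsigned char val;

            if (c >= '0' && c <= '9')
                val = c - '0';
            else if (c >= 'a' && c <= 'f')
                val = c - 'a' + 10;
            else if (c >= 'A' && c <= 'F')
                val = c - 'A' + 10;
            else
                return -1;
            if (!(i & 1))
                val <<= 4; /* even positions land in the high nibble */
            bin[i >> 1] |= val;
        }
        return 0;
    }

    int main(void)
    {
        unsigned char bin[3];

        if (!pack_hex_prefix("AbC1d", 5, bin))
            printf("%02x %02x %02x\n", bin[0], bin[1], bin[2]); /* prints "ab c1 d0" */
        return 0;
    }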
+
+static int show_ambiguous_object(const struct object_id *oid, void *data)
+{
+       const struct disambiguate_state *ds = data;
+       struct strbuf desc = STRBUF_INIT;
+       int type;
+
+       if (ds->fn && !ds->fn(oid, ds->cb_data))
+               return 0;
+
+       type = oid_object_info(the_repository, oid, NULL);
+       if (type == OBJ_COMMIT) {
+               struct commit *commit = lookup_commit(oid);
+               if (commit) {
+                       struct pretty_print_context pp = {0};
+                       pp.date_mode.type = DATE_SHORT;
+                       format_commit_message(commit, " %ad - %s", &desc, &pp);
+               }
+       } else if (type == OBJ_TAG) {
+               struct tag *tag = lookup_tag(oid);
+               if (!parse_tag(tag) && tag->tag)
+                       strbuf_addf(&desc, " %s", tag->tag);
+       }
+
+       advise("  %s %s%s",
+              find_unique_abbrev(oid, DEFAULT_ABBREV),
+              type_name(type) ? type_name(type) : "unknown type",
+              desc.buf);
+
+       strbuf_release(&desc);
+       return 0;
+}
+
+static int get_short_oid(const char *name, int len, struct object_id *oid,
+                         unsigned flags)
+{
+       int status;
+       struct disambiguate_state ds;
+       int quietly = !!(flags & GET_OID_QUIETLY);
+
+       if (init_object_disambiguation(name, len, &ds) < 0)
+               return -1;
+
+       if (HAS_MULTI_BITS(flags & GET_OID_DISAMBIGUATORS))
+               die("BUG: multiple get_short_oid disambiguator flags");
+
+       if (flags & GET_OID_COMMIT)
+               ds.fn = disambiguate_commit_only;
+       else if (flags & GET_OID_COMMITTISH)
+               ds.fn = disambiguate_committish_only;
+       else if (flags & GET_OID_TREE)
+               ds.fn = disambiguate_tree_only;
+       else if (flags & GET_OID_TREEISH)
+               ds.fn = disambiguate_treeish_only;
+       else if (flags & GET_OID_BLOB)
+               ds.fn = disambiguate_blob_only;
+       else
+               ds.fn = default_disambiguate_hint;
+
+       find_short_object_filename(&ds);
+       find_short_packed_object(&ds);
+       status = finish_object_disambiguation(&ds, oid);
+
+       if (!quietly && (status == SHORT_NAME_AMBIGUOUS)) {
+               error(_("short SHA1 %s is ambiguous"), ds.hex_pfx);
+
+               /*
+                * We may still have ambiguity if we simply saw a series of
+                * candidates that did not satisfy our hint function. In
+                * that case, we still want to show them, so disable the hint
+                * function entirely.
+                */
+               if (!ds.ambiguous)
+                       ds.fn = NULL;
+
+               advise(_("The candidates are:"));
+               for_each_abbrev(ds.hex_pfx, show_ambiguous_object, &ds);
+       }
+
+       return status;
+}
+
+static int collect_ambiguous(const struct object_id *oid, void *data)
+{
+       oid_array_append(data, oid);
+       return 0;
+}
+
+int for_each_abbrev(const char *prefix, each_abbrev_fn fn, void *cb_data)
+{
+       struct oid_array collect = OID_ARRAY_INIT;
+       struct disambiguate_state ds;
+       int ret;
+
+       if (init_object_disambiguation(prefix, strlen(prefix), &ds) < 0)
+               return -1;
+
+       ds.always_call_fn = 1;
+       ds.fn = collect_ambiguous;
+       ds.cb_data = &collect;
+       find_short_object_filename(&ds);
+       find_short_packed_object(&ds);
+
+       ret = oid_array_for_each_unique(&collect, fn, cb_data);
+       oid_array_clear(&collect);
+       return ret;
+}
+
+/*
+ * Return the slot of the most-significant bit set in "val". There are various
+ * ways to do this quickly with fls() or __builtin_clzl(), but speed is
+ * probably not a big deal here.
+ */
+static unsigned msb(unsigned long val)
+{
+       unsigned r = 0;
+       while (val >>= 1)
+               r++;
+       return r;
+}
+
+struct min_abbrev_data {
+       unsigned int init_len;
+       unsigned int cur_len;
+       char *hex;
+       const struct object_id *oid;
+};
+
+static inline char get_hex_char_from_oid(const struct object_id *oid,
+                                        unsigned int pos)
+{
+       static const char hex[] = "0123456789abcdef";
+
+       if ((pos & 1) == 0)
+               return hex[oid->hash[pos >> 1] >> 4];
+       else
+               return hex[oid->hash[pos >> 1] & 0xf];
+}
+
+static int extend_abbrev_len(const struct object_id *oid, void *cb_data)
+{
+       struct min_abbrev_data *mad = cb_data;
+
+       unsigned int i = mad->init_len;
+       while (mad->hex[i] && mad->hex[i] == get_hex_char_from_oid(oid, i))
+               i++;
+
+       if (i < GIT_MAX_RAWSZ && i >= mad->cur_len)
+               mad->cur_len = i + 1;
+
+       return 0;
+}
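
extend_abbrev_len() pushes the required length to one character past the longest prefix the target shares with a neighbouring object. The same idea over plain hex strings, as an illustrative sketch:

    #include <stdio.h>

    /* Length of the shortest prefix of "target" that is not also a prefix of "other". */
    static size_t unique_prefix_len(const char *target, const char *other)
    {
        size_t i = 0;

        while (target[i] && target[i] == other[i])
            i++;
        return i + 1; /* one character past the common prefix */
    }

    int main(void)
    {
        printf("%zu\n", unique_prefix_len("1234abcd", "1234a9ff")); /* prints "6" */
        return 0;
    }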
+
+static void find_abbrev_len_for_pack(struct packed_git *p,
+                                    struct min_abbrev_data *mad)
+{
+       int match = 0;
+       uint32_t num, first = 0;
+       struct object_id oid;
+       const struct object_id *mad_oid;
+
+       if (open_pack_index(p) || !p->num_objects)
+               return;
+
+       num = p->num_objects;
+       mad_oid = mad->oid;
+       match = bsearch_pack(mad_oid, p, &first);
+
+       /*
+        * first is now the position in the packfile where we would insert
+        * mad->oid if it does not exist (or the position of mad->oid if
+        * it does exist). Hence, we consider a maximum of two objects
+        * nearby for the abbreviation length.
+        */
+       mad->init_len = 0;
+       if (!match) {
+               if (nth_packed_object_oid(&oid, p, first))
+                       extend_abbrev_len(&oid, mad);
+       } else if (first < num - 1) {
+               if (nth_packed_object_oid(&oid, p, first + 1))
+                       extend_abbrev_len(&oid, mad);
+       }
+       if (first > 0) {
+               if (nth_packed_object_oid(&oid, p, first - 1))
+                       extend_abbrev_len(&oid, mad);
+       }
+       mad->init_len = mad->cur_len;
+}
+
+static void find_abbrev_len_packed(struct min_abbrev_data *mad)
+{
+       struct packed_git *p;
+
+       for (p = get_packed_git(the_repository); p; p = p->next)
+               find_abbrev_len_for_pack(p, mad);
+}
+
+int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len)
+{
+       struct disambiguate_state ds;
+       struct min_abbrev_data mad;
+       struct object_id oid_ret;
+       if (len < 0) {
+               unsigned long count = approximate_object_count();
+               /*
+                * Add one because the MSB only tells us the highest bit set,
+                * not including the value of all the _other_ bits (so "15"
+                * is only one off of 2^4, but the MSB is the 3rd bit).
+                */
+               len = msb(count) + 1;
+               /*
+                * We now know we have on the order of 2^len objects, which
+                * means we expect a collision at around 2^(len/2). But we also
+                * care about hex chars, not bits, and there are 4 bits per hex
+                * char. So altogether we need to divide by 2 and round up.
+                */
+               len = DIV_ROUND_UP(len, 2);
+               /*
+                * For very small repos, we stick with our regular fallback.
+                */
+               if (len < FALLBACK_DEFAULT_ABBREV)
+                       len = FALLBACK_DEFAULT_ABBREV;
+       }
+
+       oid_to_hex_r(hex, oid);
+       if (len == GIT_SHA1_HEXSZ || !len)
+               return GIT_SHA1_HEXSZ;
+
+       mad.init_len = len;
+       mad.cur_len = len;
+       mad.hex = hex;
+       mad.oid = oid;
+
+       find_abbrev_len_packed(&mad);
+
+       if (init_object_disambiguation(hex, mad.cur_len, &ds) < 0)
+               return -1;
+
+       ds.fn = extend_abbrev_len;
+       ds.always_call_fn = 1;
+       ds.cb_data = (void *)&mad;
+
+       find_short_object_filename(&ds);
+       (void)finish_object_disambiguation(&ds, &oid_ret);
+
+       hex[mad.cur_len] = 0;
+       return mad.cur_len;
+}
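
For a negative len the default width is derived from an object-count estimate: the number of bits needed to count the objects, halved (rounded up) to go from a birthday-bound number of bits to hex characters, and never below the regular fallback. A standalone sketch of that heuristic, assuming a fallback of 7 as in git:

    #include <stdio.h>

    #define FALLBACK_DEFAULT_ABBREV 7 /* assumption: git's usual fallback */

    static unsigned msb(unsigned long val)
    {
        unsigned r = 0;

        while (val >>= 1)
            r++;
        return r;
    }

    /* Estimate how many hex chars are needed to abbreviate among ~count objects. */
    static int default_abbrev_len(unsigned long count)
    {
        int len = msb(count) + 1;  /* bits needed to count the objects */

        len = (len + 1) / 2;       /* DIV_ROUND_UP(len, 2): bits -> hex chars */
        if (len < FALLBACK_DEFAULT_ABBREV)
            len = FALLBACK_DEFAULT_ABBREV;
        return len;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               default_abbrev_len(10000UL),       /* small repo    -> 7  */
               default_abbrev_len(10000000UL),    /* ~10M objects  -> 12 */
               default_abbrev_len(300000000UL));  /* ~300M objects -> 15 */
        return 0;
    }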
+
+const char *find_unique_abbrev(const struct object_id *oid, int len)
+{
+       static int bufno;
+       static char hexbuffer[4][GIT_MAX_HEXSZ + 1];
+       char *hex = hexbuffer[bufno];
+       bufno = (bufno + 1) % ARRAY_SIZE(hexbuffer);
+       find_unique_abbrev_r(hex, oid, len);
+       return hex;
+}
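
find_unique_abbrev() returns a pointer into a small ring of static buffers, so a handful of results can appear in one printf() call without the caller allocating anything; the trade-off is that older results get overwritten and the function is not thread-safe. A generic sketch of the idiom (illustrative only):

    #include <stdio.h>

    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

    /* Return a formatted string that stays valid for the next few calls. */
    static const char *fmt_num(int v)
    {
        static char bufs[4][32];
        static int bufno;
        char *out = bufs[bufno];

        bufno = (bufno + 1) % ARRAY_SIZE(bufs);
        snprintf(out, sizeof(bufs[0]), "#%d", v);
        return out;
    }

    int main(void)
    {
        /* Up to four results can safely coexist in the same statement. */
        printf("%s %s %s\n", fmt_num(1), fmt_num(2), fmt_num(3)); /* prints "#1 #2 #3" */
        return 0;
    }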
+
+static int ambiguous_path(const char *path, int len)
+{
+       int slash = 1;
+       int cnt;
+
+       for (cnt = 0; cnt < len; cnt++) {
+               switch (*path++) {
+               case '\0':
+                       break;
+               case '/':
+                       if (slash)
+                               break;
+                       slash = 1;
+                       continue;
+               case '.':
+                       continue;
+               default:
+                       slash = 0;
+                       continue;
+               }
+               break;
+       }
+       return slash;
+}
+
+static inline int at_mark(const char *string, int len,
+                         const char **suffix, int nr)
+{
+       int i;
+
+       for (i = 0; i < nr; i++) {
+               int suffix_len = strlen(suffix[i]);
+               if (suffix_len <= len
+                   && !strncasecmp(string, suffix[i], suffix_len))
+                       return suffix_len;
+       }
+       return 0;
+}
+
+static inline int upstream_mark(const char *string, int len)
+{
+       const char *suffix[] = { "@{upstream}", "@{u}" };
+       return at_mark(string, len, suffix, ARRAY_SIZE(suffix));
+}
+
+static inline int push_mark(const char *string, int len)
+{
+       const char *suffix[] = { "@{push}" };
+       return at_mark(string, len, suffix, ARRAY_SIZE(suffix));
+}
+
+static int get_oid_1(const char *name, int len, struct object_id *oid, unsigned lookup_flags);
+static int interpret_nth_prior_checkout(const char *name, int namelen, struct strbuf *buf);
+
+static int get_oid_basic(const char *str, int len, struct object_id *oid,
+                         unsigned int flags)
+{
+       static const char *warn_msg = "refname '%.*s' is ambiguous.";
+       static const char *object_name_msg = N_(
+       "Git normally never creates a ref that ends with 40 hex characters\n"
+       "because it will be ignored when you just specify 40-hex. These refs\n"
+       "may be created by mistake. For example,\n"
+       "\n"
+       "  git checkout -b $br $(git rev-parse ...)\n"
+       "\n"
+       "where \"$br\" is somehow empty and a 40-hex ref is created. Please\n"
+       "examine these refs and maybe delete them. Turn this message off by\n"
+       "running \"git config advice.objectNameWarning false\"");
+       struct object_id tmp_oid;
+       char *real_ref = NULL;
+       int refs_found = 0;
+       int at, reflog_len, nth_prior = 0;
+
+       if (len == GIT_SHA1_HEXSZ && !get_oid_hex(str, oid)) {
+               if (warn_ambiguous_refs && warn_on_object_refname_ambiguity) {
+                       refs_found = dwim_ref(str, len, &tmp_oid, &real_ref);
+                       if (refs_found > 0) {
+                               warning(warn_msg, len, str);
+                               if (advice_object_name_warning)
+                                       fprintf(stderr, "%s\n", _(object_name_msg));
+                       }
+                       free(real_ref);
+               }
+               return 0;
+       }
+
+       /* basic@{time or number or -number} format to query ref-log */
+       reflog_len = at = 0;
+       if (len && str[len-1] == '}') {
+               for (at = len-4; at >= 0; at--) {
+                       if (str[at] == '@' && str[at+1] == '{') {
+                               if (str[at+2] == '-') {
+                                       if (at != 0)
+                                               /* @{-N} not at start */
+                                               return -1;
+                                       nth_prior = 1;
+                                       continue;
+                               }
+                               if (!upstream_mark(str + at, len - at) &&
+                                   !push_mark(str + at, len - at)) {
+                                       reflog_len = (len-1) - (at+2);
+                                       len = at;
+                               }
+                               break;
+                       }
+               }
+       }
+
+       /* Accept only unambiguous ref paths. */
+       if (len && ambiguous_path(str, len))
+               return -1;
+
+       if (nth_prior) {
+               struct strbuf buf = STRBUF_INIT;
+               int detached;
+
+               if (interpret_nth_prior_checkout(str, len, &buf) > 0) {
+                       detached = (buf.len == GIT_SHA1_HEXSZ && !get_oid_hex(buf.buf, oid));
+                       strbuf_release(&buf);
+                       if (detached)
+                               return 0;
+               }
+       }
+
+       if (!len && reflog_len)
+               /* allow "@{...}" to mean the current branch reflog */
+               refs_found = dwim_ref("HEAD", 4, oid, &real_ref);
+       else if (reflog_len)
+               refs_found = dwim_log(str, len, oid, &real_ref);
+       else
+               refs_found = dwim_ref(str, len, oid, &real_ref);
+
+       if (!refs_found)
+               return -1;
+
+       if (warn_ambiguous_refs && !(flags & GET_OID_QUIETLY) &&
+           (refs_found > 1 ||
+            !get_short_oid(str, len, &tmp_oid, GET_OID_QUIETLY)))
+               warning(warn_msg, len, str);
+
+       if (reflog_len) {
+               int nth, i;
+               timestamp_t at_time;
+               timestamp_t co_time;
+               int co_tz, co_cnt;
+
+               /* Is it asking for N-th entry, or approxidate? */
+               for (i = nth = 0; 0 <= nth && i < reflog_len; i++) {
+                       char ch = str[at+2+i];
+                       if ('0' <= ch && ch <= '9')
+                               nth = nth * 10 + ch - '0';
+                       else
+                               nth = -1;
+               }
+               if (100000000 <= nth) {
+                       at_time = nth;
+                       nth = -1;
+               } else if (0 <= nth)
+                       at_time = 0;
+               else {
+                       int errors = 0;
+                       char *tmp = xstrndup(str + at + 2, reflog_len);
+                       at_time = approxidate_careful(tmp, &errors);
+                       free(tmp);
+                       if (errors) {
+                               free(real_ref);
+                               return -1;
+                       }
+               }
+               if (read_ref_at(real_ref, flags, at_time, nth, oid, NULL,
+                               &co_time, &co_tz, &co_cnt)) {
+                       if (!len) {
+                               if (starts_with(real_ref, "refs/heads/")) {
+                                       str = real_ref + 11;
+                                       len = strlen(real_ref + 11);
+                               } else {
+                                       /* detached HEAD */
+                                       str = "HEAD";
+                                       len = 4;
+                               }
+                       }
+                       if (at_time) {
+                               if (!(flags & GET_OID_QUIETLY)) {
+                                       warning("Log for '%.*s' only goes "
+                                               "back to %s.", len, str,
+                                               show_date(co_time, co_tz, DATE_MODE(RFC2822)));
+                               }
+                       } else {
+                               if (flags & GET_OID_QUIETLY) {
+                                       exit(128);
+                               }
+                               die("Log for '%.*s' only has %d entries.",
+                                   len, str, co_cnt);
+                       }
+               }
+       }
+
+       free(real_ref);
+       return 0;
+}
+
+static int get_parent(const char *name, int len,
+                     struct object_id *result, int idx)
+{
+       struct object_id oid;
+       int ret = get_oid_1(name, len, &oid, GET_OID_COMMITTISH);
+       struct commit *commit;
+       struct commit_list *p;
+
+       if (ret)
+               return ret;
+       commit = lookup_commit_reference(&oid);
+       if (parse_commit(commit))
+               return -1;
+       if (!idx) {
+               oidcpy(result, &commit->object.oid);
+               return 0;
+       }
+       p = commit->parents;
+       while (p) {
+               if (!--idx) {
+                       oidcpy(result, &p->item->object.oid);
+                       return 0;
+               }
+               p = p->next;
+       }
+       return -1;
+}
+
+static int get_nth_ancestor(const char *name, int len,
+                           struct object_id *result, int generation)
+{
+       struct object_id oid;
+       struct commit *commit;
+       int ret;
+
+       ret = get_oid_1(name, len, &oid, GET_OID_COMMITTISH);
+       if (ret)
+               return ret;
+       commit = lookup_commit_reference(&oid);
+       if (!commit)
+               return -1;
+
+       while (generation--) {
+               if (parse_commit(commit) || !commit->parents)
+                       return -1;
+               commit = commit->parents->item;
+       }
+       oidcpy(result, &commit->object.oid);
+       return 0;
+}
+
+struct object *peel_to_type(const char *name, int namelen,
+                           struct object *o, enum object_type expected_type)
+{
+       if (name && !namelen)
+               namelen = strlen(name);
+       while (1) {
+               if (!o || (!o->parsed && !parse_object(&o->oid)))
+                       return NULL;
+               if (expected_type == OBJ_ANY || o->type == expected_type)
+                       return o;
+               if (o->type == OBJ_TAG)
+                       o = ((struct tag*) o)->tagged;
+               else if (o->type == OBJ_COMMIT)
+                       o = &(get_commit_tree(((struct commit *)o))->object);
+               else {
+                       if (name)
+                               error("%.*s: expected %s type, but the object "
+                                     "dereferences to %s type",
+                                     namelen, name, type_name(expected_type),
+                                     type_name(o->type));
+                       return NULL;
+               }
+       }
+}
+
+static int peel_onion(const char *name, int len, struct object_id *oid,
+                     unsigned lookup_flags)
+{
+       struct object_id outer;
+       const char *sp;
+       unsigned int expected_type = 0;
+       struct object *o;
+
+       /*
+        * "ref^{type}" dereferences ref repeatedly until you cannot
+        * dereference anymore, or you get an object of given type,
+        * whichever comes first.  "ref^{}" means just dereference
+        * tags until you get a non-tag.  "ref^0" is a shorthand for
+        * "ref^{commit}".  "commit^{tree}" could be used to find the
+        * top-level tree of the given commit.
+        */
+       if (len < 4 || name[len-1] != '}')
+               return -1;
+
+       for (sp = name + len - 1; name <= sp; sp--) {
+               int ch = *sp;
+               if (ch == '{' && name < sp && sp[-1] == '^')
+                       break;
+       }
+       if (sp <= name)
+               return -1;
+
+       sp++; /* beginning of type name, or closing brace for empty */
+       if (starts_with(sp, "commit}"))
+               expected_type = OBJ_COMMIT;
+       else if (starts_with(sp, "tag}"))
+               expected_type = OBJ_TAG;
+       else if (starts_with(sp, "tree}"))
+               expected_type = OBJ_TREE;
+       else if (starts_with(sp, "blob}"))
+               expected_type = OBJ_BLOB;
+       else if (starts_with(sp, "object}"))
+               expected_type = OBJ_ANY;
+       else if (sp[0] == '}')
+               expected_type = OBJ_NONE;
+       else if (sp[0] == '/')
+               expected_type = OBJ_COMMIT;
+       else
+               return -1;
+
+       lookup_flags &= ~GET_OID_DISAMBIGUATORS;
+       if (expected_type == OBJ_COMMIT)
+               lookup_flags |= GET_OID_COMMITTISH;
+       else if (expected_type == OBJ_TREE)
+               lookup_flags |= GET_OID_TREEISH;
+
+       if (get_oid_1(name, sp - name - 2, &outer, lookup_flags))
+               return -1;
+
+       o = parse_object(&outer);
+       if (!o)
+               return -1;
+       if (!expected_type) {
+               o = deref_tag(o, name, sp - name - 2);
+               if (!o || (!o->parsed && !parse_object(&o->oid)))
+                       return -1;
+               oidcpy(oid, &o->oid);
+               return 0;
+       }
+
+       /*
+        * At this point, the syntax looks correct, so
+        * if we do not get the needed object, we should
+        * barf.
+        */
+       o = peel_to_type(name, len, o, expected_type);
+       if (!o)
+               return -1;
+
+       oidcpy(oid, &o->oid);
+       if (sp[0] == '/') {
+               /* "$commit^{/foo}" */
+               char *prefix;
+               int ret;
+               struct commit_list *list = NULL;
+
+               /*
+                * "$commit^{/}": some regex implementations may reject an
+                * empty pattern, but we do not need the regex anyway; the
+                * empty pattern always matches.
+                */
+               if (sp[1] == '}')
+                       return 0;
+
+               prefix = xstrndup(sp + 1, name + len - 1 - (sp + 1));
+               commit_list_insert((struct commit *)o, &list);
+               ret = get_oid_oneline(prefix, oid, list);
+               free(prefix);
+               return ret;
+       }
+       return 0;
+}
+
+static int get_describe_name(const char *name, int len, struct object_id *oid)
+{
+       const char *cp;
+       unsigned flags = GET_OID_QUIETLY | GET_OID_COMMIT;
+
+       for (cp = name + len - 1; name + 2 <= cp; cp--) {
+               char ch = *cp;
+               if (!isxdigit(ch)) {
+                       /* We must be looking at g in "SOMETHING-g"
+                        * for it to be describe output.
+                        */
+                       if (ch == 'g' && cp[-1] == '-') {
+                               cp++;
+                               len -= cp - name;
+                               return get_short_oid(cp, len, oid, flags);
+                       }
+               }
+       }
+       return -1;
+}
+
+static int get_oid_1(const char *name, int len, struct object_id *oid, unsigned lookup_flags)
+{
+       int ret, has_suffix;
+       const char *cp;
+
+       /*
+        * "name~3" is "name^^^", "name~" is "name~1", and "name^" is "name^1".
+        */
+       has_suffix = 0;
+       for (cp = name + len - 1; name <= cp; cp--) {
+               int ch = *cp;
+               if ('0' <= ch && ch <= '9')
+                       continue;
+               if (ch == '~' || ch == '^')
+                       has_suffix = ch;
+               break;
+       }
+
+       if (has_suffix) {
+               int num = 0;
+               int len1 = cp - name;
+               cp++;
+               while (cp < name + len)
+                       num = num * 10 + *cp++ - '0';
+               if (!num && len1 == len - 1)
+                       num = 1;
+               if (has_suffix == '^')
+                       return get_parent(name, len1, oid, num);
+               /* else if (has_suffix == '~') -- goes without saying */
+               return get_nth_ancestor(name, len1, oid, num);
+       }
+
+       ret = peel_onion(name, len, oid, lookup_flags);
+       if (!ret)
+               return 0;
+
+       ret = get_oid_basic(name, len, oid, lookup_flags);
+       if (!ret)
+               return 0;
+
+       /* It could be describe output that is "SOMETHING-gXXXX" */
+       ret = get_describe_name(name, len, oid);
+       if (!ret)
+               return 0;
+
+       return get_short_oid(name, len, oid, lookup_flags);
+}
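
get_oid_1() strips a trailing run of digits preceded by '^' or '~' before recursing on the base name, with a bare '^' or '~' counting as 1. A standalone sketch of that suffix scan (names here are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Split "name" into a base length, a suffix operator ('^', '~' or 0)
     * and a count; a bare "base^" or "base~" counts as 1.
     */
    static void split_rev_suffix(const char *name, size_t *base_len, char *op, int *num)
    {
        size_t len = strlen(name), i = len;

        while (i > 0 && name[i - 1] >= '0' && name[i - 1] <= '9')
            i--;
        if (i > 0 && (name[i - 1] == '^' || name[i - 1] == '~')) {
            *op = name[i - 1];
            *base_len = i - 1;
            *num = (i == len) ? 1 : atoi(name + i);
        } else {
            *op = 0;
            *base_len = len;
            *num = 0;
        }
    }

    int main(void)
    {
        const char *tests[] = { "HEAD~3", "v2.17.0^2", "master~", "deadbeef" };
        size_t i;

        for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
            size_t base_len;
            char op;
            int num;

            split_rev_suffix(tests[i], &base_len, &op, &num);
            printf("%.*s | op=%c num=%d\n",
                   (int)base_len, tests[i], op ? op : '-', num);
        }
        return 0;
    }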
+
+/*
+ * This interprets names like ':/Initial revision of "git"' by searching
+ * through history and returning the first commit whose message matches
+ * the given regular expression.
+ *
+ * For negative-matching, prefix the pattern-part with '!-', like: ':/!-WIP'.
+ *
+ * For a literal '!' character at the beginning of a pattern, you have to repeat
+ * that, like: ':/!!foo'
+ *
+ * For future extension, all other sequences beginning with ':/!' are reserved.
+ */
+
+/* Remember to update object flag allocation in object.h */
+#define ONELINE_SEEN (1u<<20)
+
+static int handle_one_ref(const char *path, const struct object_id *oid,
+                         int flag, void *cb_data)
+{
+       struct commit_list **list = cb_data;
+       struct object *object = parse_object(oid);
+       if (!object)
+               return 0;
+       if (object->type == OBJ_TAG) {
+               object = deref_tag(object, path, strlen(path));
+               if (!object)
+                       return 0;
+       }
+       if (object->type != OBJ_COMMIT)
+               return 0;
+       commit_list_insert((struct commit *)object, list);
+       return 0;
+}
+
+static int get_oid_oneline(const char *prefix, struct object_id *oid,
+                           struct commit_list *list)
+{
+       struct commit_list *backup = NULL, *l;
+       int found = 0;
+       int negative = 0;
+       regex_t regex;
+
+       if (prefix[0] == '!') {
+               prefix++;
+
+               if (prefix[0] == '-') {
+                       prefix++;
+                       negative = 1;
+               } else if (prefix[0] != '!') {
+                       return -1;
+               }
+       }
+
+       if (regcomp(&regex, prefix, REG_EXTENDED))
+               return -1;
+
+       for (l = list; l; l = l->next) {
+               l->item->object.flags |= ONELINE_SEEN;
+               commit_list_insert(l->item, &backup);
+       }
+       while (list) {
+               const char *p, *buf;
+               struct commit *commit;
+               int matches;
+
+               commit = pop_most_recent_commit(&list, ONELINE_SEEN);
+               if (!parse_object(&commit->object.oid))
+                       continue;
+               buf = get_commit_buffer(commit, NULL);
+               p = strstr(buf, "\n\n");
+               matches = negative ^ (p && !regexec(&regex, p + 2, 0, NULL, 0));
+               unuse_commit_buffer(commit, buf);
+
+               if (matches) {
+                       oidcpy(oid, &commit->object.oid);
+                       found = 1;
+                       break;
+               }
+       }
+       regfree(&regex);
+       free_commit_list(list);
+       for (l = backup; l; l = l->next)
+               clear_commit_marks(l->item, ONELINE_SEEN);
+       free_commit_list(backup);
+       return found ? 0 : -1;
+}
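
The matching step of ':/<pattern>' compiles a POSIX extended regex and runs it against the commit message body, i.e. everything after the first blank line, optionally inverting the result for the ':/!-' form. A self-contained sketch of that step, assuming a raw commit buffer as input:

    #include <regex.h>
    #include <stdio.h>
    #include <string.h>

    /* Return 1 if the message body (after the first blank line) matches, 0 otherwise. */
    static int message_matches(const char *raw, const char *pattern, int negate)
    {
        const char *body = strstr(raw, "\n\n");
        regex_t re;
        int hit;

        if (regcomp(&re, pattern, REG_EXTENDED))
            return 0;
        hit = body && !regexec(&re, body + 2, 0, NULL, 0);
        regfree(&re);
        return negate ^ hit;
    }

    int main(void)
    {
        const char *raw =
            "tree 1234\nauthor A U Thor <author@example.com> 0 +0000\n"
            "\n"
            "Initial revision of \"git\", the information manager from hell\n";

        printf("%d %d\n",
               message_matches(raw, "^Initial revision", 0), /* 1: matches */
               message_matches(raw, "WIP", 1));              /* 1: negated miss */
        return 0;
    }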
+
+struct grab_nth_branch_switch_cbdata {
+       int remaining;
+       struct strbuf buf;
+};
+
+static int grab_nth_branch_switch(struct object_id *ooid, struct object_id *noid,
+                                 const char *email, timestamp_t timestamp, int tz,
+                                 const char *message, void *cb_data)
+{
+       struct grab_nth_branch_switch_cbdata *cb = cb_data;
+       const char *match = NULL, *target = NULL;
+       size_t len;
+
+       if (skip_prefix(message, "checkout: moving from ", &match))
+               target = strstr(match, " to ");
+
+       if (!match || !target)
+               return 0;
+       if (--(cb->remaining) == 0) {
+               len = target - match;
+               strbuf_reset(&cb->buf);
+               strbuf_add(&cb->buf, match, len);
+               return 1; /* we are done */
+       }
+       return 0;
+}
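
Each @{-N} step is recovered from HEAD's reflog by looking at messages of the form "checkout: moving from <old> to <new>" and copying the text between the fixed prefix and the " to " marker. A standalone sketch of that extraction (illustrative only):

    #include <stdio.h>
    #include <string.h>

    /* Extract the "from" branch of a checkout reflog message; returns 0 on success. */
    static int parse_checkout_from(const char *msg, char *out, size_t outsz)
    {
        static const char prefix[] = "checkout: moving from ";
        const char *from, *to;
        size_t len;

        if (strncmp(msg, prefix, strlen(prefix)))
            return -1;
        from = msg + strlen(prefix);
        to = strstr(from, " to ");
        if (!to)
            return -1;
        len = (size_t)(to - from);
        if (len + 1 > outsz)
            return -1;
        memcpy(out, from, len);
        out[len] = '\0';
        return 0;
    }

    int main(void)
    {
        char branch[128];

        if (!parse_checkout_from("checkout: moving from topic to master", branch, sizeof(branch)))
            printf("previous branch: %s\n", branch); /* prints "previous branch: topic" */
        return 0;
    }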
+
+/*
+ * Parse @{-N} syntax, return the number of characters parsed
+ * if successful; otherwise signal an error with a negative value.
+ */
+static int interpret_nth_prior_checkout(const char *name, int namelen,
+                                       struct strbuf *buf)
+{
+       long nth;
+       int retval;
+       struct grab_nth_branch_switch_cbdata cb;
+       const char *brace;
+       char *num_end;
+
+       if (namelen < 4)
+               return -1;
+       if (name[0] != '@' || name[1] != '{' || name[2] != '-')
+               return -1;
+       brace = memchr(name, '}', namelen);
+       if (!brace)
+               return -1;
+       nth = strtol(name + 3, &num_end, 10);
+       if (num_end != brace)
+               return -1;
+       if (nth <= 0)
+               return -1;
+       cb.remaining = nth;
+       strbuf_init(&cb.buf, 20);
+
+       retval = 0;
+       if (0 < for_each_reflog_ent_reverse("HEAD", grab_nth_branch_switch, &cb)) {
+               strbuf_reset(buf);
+               strbuf_addbuf(buf, &cb.buf);
+               retval = brace - name + 1;
+       }
+
+       strbuf_release(&cb.buf);
+       return retval;
+}
+
+int get_oid_mb(const char *name, struct object_id *oid)
+{
+       struct commit *one, *two;
+       struct commit_list *mbs;
+       struct object_id oid_tmp;
+       const char *dots;
+       int st;
+
+       dots = strstr(name, "...");
+       if (!dots)
+               return get_oid(name, oid);
+       if (dots == name)
+               st = get_oid("HEAD", &oid_tmp);
+       else {
+               struct strbuf sb;
+               strbuf_init(&sb, dots - name);
+               strbuf_add(&sb, name, dots - name);
+               st = get_oid_committish(sb.buf, &oid_tmp);
+               strbuf_release(&sb);
+       }
+       if (st)
+               return st;
+       one = lookup_commit_reference_gently(&oid_tmp, 0);
+       if (!one)
+               return -1;
+
+       if (get_oid_committish(dots[3] ? (dots + 3) : "HEAD", &oid_tmp))
+               return -1;
+       two = lookup_commit_reference_gently(&oid_tmp, 0);
+       if (!two)
+               return -1;
+       mbs = get_merge_bases(one, two);
+       if (!mbs || mbs->next)
+               st = -1;
+       else {
+               st = 0;
+               oidcpy(oid, &mbs->item->object.oid);
+       }
+       free_commit_list(mbs);
+       return st;
+}
+
+/* parse @something syntax, when 'something' is not {.*} */
+static int interpret_empty_at(const char *name, int namelen, int len, struct strbuf *buf)
+{
+       const char *next;
+
+       if (len || name[1] == '{')
+               return -1;
+
+       /* make sure it's a single @, or @@{.*}, not @foo */
+       next = memchr(name + len + 1, '@', namelen - len - 1);
+       if (next && next[1] != '{')
+               return -1;
+       if (!next)
+               next = name + namelen;
+       if (next != name + 1)
+               return -1;
+
+       strbuf_reset(buf);
+       strbuf_add(buf, "HEAD", 4);
+       return 1;
+}
+
+static int reinterpret(const char *name, int namelen, int len,
+                      struct strbuf *buf, unsigned allowed)
+{
+       /* we have extra data, which might need further processing */
+       struct strbuf tmp = STRBUF_INIT;
+       int used = buf->len;
+       int ret;
+
+       strbuf_add(buf, name + len, namelen - len);
+       ret = interpret_branch_name(buf->buf, buf->len, &tmp, allowed);
+       /* that data was not interpreted, remove our cruft */
+       if (ret < 0) {
+               strbuf_setlen(buf, used);
+               return len;
+       }
+       strbuf_reset(buf);
+       strbuf_addbuf(buf, &tmp);
+       strbuf_release(&tmp);
+       /* tweak for size of {-N} versus expanded ref name */
+       return ret - used + len;
+}
+
+static void set_shortened_ref(struct strbuf *buf, const char *ref)
+{
+       char *s = shorten_unambiguous_ref(ref, 0);
+       strbuf_reset(buf);
+       strbuf_addstr(buf, s);
+       free(s);
+}
+
+static int branch_interpret_allowed(const char *refname, unsigned allowed)
+{
+       if (!allowed)
+               return 1;
+
+       if ((allowed & INTERPRET_BRANCH_LOCAL) &&
+           starts_with(refname, "refs/heads/"))
+               return 1;
+       if ((allowed & INTERPRET_BRANCH_REMOTE) &&
+           starts_with(refname, "refs/remotes/"))
+               return 1;
+
+       return 0;
+}
+
+static int interpret_branch_mark(const char *name, int namelen,
+                                int at, struct strbuf *buf,
+                                int (*get_mark)(const char *, int),
+                                const char *(*get_data)(struct branch *,
+                                                        struct strbuf *),
+                                unsigned allowed)
+{
+       int len;
+       struct branch *branch;
+       struct strbuf err = STRBUF_INIT;
+       const char *value;
+
+       len = get_mark(name + at, namelen - at);
+       if (!len)
+               return -1;
+
+       if (memchr(name, ':', at))
+               return -1;
+
+       if (at) {
+               char *name_str = xmemdupz(name, at);
+               branch = branch_get(name_str);
+               free(name_str);
+       } else
+               branch = branch_get(NULL);
+
+       value = get_data(branch, &err);
+       if (!value)
+               die("%s", err.buf);
+
+       if (!branch_interpret_allowed(value, allowed))
+               return -1;
+
+       set_shortened_ref(buf, value);
+       return len + at;
+}
+
+int interpret_branch_name(const char *name, int namelen, struct strbuf *buf,
+                         unsigned allowed)
+{
+       char *at;
+       const char *start;
+       int len;
+
+       if (!namelen)
+               namelen = strlen(name);
+
+       if (!allowed || (allowed & INTERPRET_BRANCH_LOCAL)) {
+               len = interpret_nth_prior_checkout(name, namelen, buf);
+               if (!len) {
+                       return len; /* syntax Ok, not enough switches */
+               } else if (len > 0) {
+                       if (len == namelen)
+                               return len; /* consumed all */
+                       else
+                               return reinterpret(name, namelen, len, buf, allowed);
+               }
+       }
+
+       for (start = name;
+            (at = memchr(start, '@', namelen - (start - name)));
+            start = at + 1) {
+
+               if (!allowed || (allowed & INTERPRET_BRANCH_HEAD)) {
+                       len = interpret_empty_at(name, namelen, at - name, buf);
+                       if (len > 0)
+                               return reinterpret(name, namelen, len, buf,
+                                                  allowed);
+               }
+
+               len = interpret_branch_mark(name, namelen, at - name, buf,
+                                           upstream_mark, branch_get_upstream,
+                                           allowed);
+               if (len > 0)
+                       return len;
+
+               len = interpret_branch_mark(name, namelen, at - name, buf,
+                                           push_mark, branch_get_push,
+                                           allowed);
+               if (len > 0)
+                       return len;
+       }
+
+       return -1;
+}
+
+void strbuf_branchname(struct strbuf *sb, const char *name, unsigned allowed)
+{
+       int len = strlen(name);
+       int used = interpret_branch_name(name, len, sb, allowed);
+
+       if (used < 0)
+               used = 0;
+       strbuf_add(sb, name + used, len - used);
+}
+
+int strbuf_check_branch_ref(struct strbuf *sb, const char *name)
+{
+       if (startup_info->have_repository)
+               strbuf_branchname(sb, name, INTERPRET_BRANCH_LOCAL);
+       else
+               strbuf_addstr(sb, name);
+
+       /*
+        * This splice must be done even if we end up rejecting the
+        * name; builtin/branch.c::copy_or_rename_branch() still wants
+        * to see what the name expanded to so that "branch -m" can be
+        * used as a tool to correct earlier mistakes.
+        */
+       strbuf_splice(sb, 0, 0, "refs/heads/", 11);
+
+       if (*name == '-' ||
+           !strcmp(sb->buf, "refs/heads/HEAD"))
+               return -1;
+
+       return check_refname_format(sb->buf, 0);
+}
+
+/*
+ * This is like "get_oid_basic()", except it allows "object ID expressions",
+ * notably "xyz^" for "parent of xyz"
+ */
+int get_oid(const char *name, struct object_id *oid)
+{
+       struct object_context unused;
+       return get_oid_with_context(name, 0, oid, &unused);
+}
+
+
+/*
+ * Many callers know that the user meant to name a commit-ish by
+ * the syntactic position where the object name appears.  Calling this
+ * function allows the machinery to disambiguate shorter-than-unique
+ * abbreviated object names between commit-ish and others.
+ *
+ * Note that this does NOT error out when the named object is not a
+ * commit-ish. It is merely to give a hint to the disambiguation
+ * machinery.
+ */
+int get_oid_committish(const char *name, struct object_id *oid)
+{
+       struct object_context unused;
+       return get_oid_with_context(name, GET_OID_COMMITTISH,
+                                   oid, &unused);
+}
+
+int get_oid_treeish(const char *name, struct object_id *oid)
+{
+       struct object_context unused;
+       return get_oid_with_context(name, GET_OID_TREEISH,
+                                   oid, &unused);
+}
+
+int get_oid_commit(const char *name, struct object_id *oid)
+{
+       struct object_context unused;
+       return get_oid_with_context(name, GET_OID_COMMIT,
+                                   oid, &unused);
+}
+
+int get_oid_tree(const char *name, struct object_id *oid)
+{
+       struct object_context unused;
+       return get_oid_with_context(name, GET_OID_TREE,
+                                   oid, &unused);
+}
+
+int get_oid_blob(const char *name, struct object_id *oid)
+{
+       struct object_context unused;
+       return get_oid_with_context(name, GET_OID_BLOB,
+                                   oid, &unused);
+}
+
+/* Must be called only when object_name:filename doesn't exist. */
+static void diagnose_invalid_oid_path(const char *prefix,
+                                     const char *filename,
+                                     const struct object_id *tree_oid,
+                                     const char *object_name,
+                                     int object_name_len)
+{
+       struct object_id oid;
+       unsigned mode;
+
+       if (!prefix)
+               prefix = "";
+
+       if (file_exists(filename))
+               die("Path '%s' exists on disk, but not in '%.*s'.",
+                   filename, object_name_len, object_name);
+       if (is_missing_file_error(errno)) {
+               char *fullname = xstrfmt("%s%s", prefix, filename);
+
+               if (!get_tree_entry(tree_oid, fullname, &oid, &mode)) {
+                       die("Path '%s' exists, but not '%s'.\n"
+                           "Did you mean '%.*s:%s' aka '%.*s:./%s'?",
+                           fullname,
+                           filename,
+                           object_name_len, object_name,
+                           fullname,
+                           object_name_len, object_name,
+                           filename);
+               }
+               die("Path '%s' does not exist in '%.*s'",
+                   filename, object_name_len, object_name);
+       }
+}
+
+/* Must be called only when :stage:filename doesn't exist. */
+static void diagnose_invalid_index_path(int stage,
+                                       const char *prefix,
+                                       const char *filename)
+{
+       const struct cache_entry *ce;
+       int pos;
+       unsigned namelen = strlen(filename);
+       struct strbuf fullname = STRBUF_INIT;
+
+       if (!prefix)
+               prefix = "";
+
+       /* Wrong stage number? */
+       pos = cache_name_pos(filename, namelen);
+       if (pos < 0)
+               pos = -pos - 1;
+       if (pos < active_nr) {
+               ce = active_cache[pos];
+               if (ce_namelen(ce) == namelen &&
+                   !memcmp(ce->name, filename, namelen))
+                       die("Path '%s' is in the index, but not at stage %d.\n"
+                           "Did you mean ':%d:%s'?",
+                           filename, stage,
+                           ce_stage(ce), filename);
+       }
+
+       /* Confusion between relative and absolute filenames? */
+       strbuf_addstr(&fullname, prefix);
+       strbuf_addstr(&fullname, filename);
+       pos = cache_name_pos(fullname.buf, fullname.len);
+       if (pos < 0)
+               pos = -pos - 1;
+       if (pos < active_nr) {
+               ce = active_cache[pos];
+               if (ce_namelen(ce) == fullname.len &&
+                   !memcmp(ce->name, fullname.buf, fullname.len))
+                       die("Path '%s' is in the index, but not '%s'.\n"
+                           "Did you mean ':%d:%s' aka ':%d:./%s'?",
+                           fullname.buf, filename,
+                           ce_stage(ce), fullname.buf,
+                           ce_stage(ce), filename);
+       }
+
+       if (file_exists(filename))
+               die("Path '%s' exists on disk, but not in the index.", filename);
+       if (is_missing_file_error(errno))
+               die("Path '%s' does not exist (neither on disk nor in the index).",
+                   filename);
+
+       strbuf_release(&fullname);
+}
+
+
+static char *resolve_relative_path(const char *rel)
+{
+       if (!starts_with(rel, "./") && !starts_with(rel, "../"))
+               return NULL;
+
+       if (!is_inside_work_tree())
+               die("relative path syntax can't be used outside working tree.");
+
+       /* die() inside prefix_path() if resolved path is outside worktree */
+       return prefix_path(startup_info->prefix,
+                          startup_info->prefix ? strlen(startup_info->prefix) : 0,
+                          rel);
+}
+
+static int get_oid_with_context_1(const char *name,
+                                 unsigned flags,
+                                 const char *prefix,
+                                 struct object_id *oid,
+                                 struct object_context *oc)
+{
+       int ret, bracket_depth;
+       int namelen = strlen(name);
+       const char *cp;
+       int only_to_die = flags & GET_OID_ONLY_TO_DIE;
+
+       if (only_to_die)
+               flags |= GET_OID_QUIETLY;
+
+       memset(oc, 0, sizeof(*oc));
+       oc->mode = S_IFINVALID;
+       strbuf_init(&oc->symlink_path, 0);
+       ret = get_oid_1(name, namelen, oid, flags);
+       if (!ret)
+               return ret;
+       /*
+        * sha1:path -> object name of path in the tree-ish sha1
+        * :path -> object name of absolute path in index
+        * :./path -> object name of path relative to cwd in index
+        * :[0-3]:path -> object name of path in index at stage
+        * :/foo -> recent commit matching foo
+        */
+       if (name[0] == ':') {
+               int stage = 0;
+               const struct cache_entry *ce;
+               char *new_path = NULL;
+               int pos;
+               if (!only_to_die && namelen > 2 && name[1] == '/') {
+                       struct commit_list *list = NULL;
+
+                       for_each_ref(handle_one_ref, &list);
+                       commit_list_sort_by_date(&list);
+                       return get_oid_oneline(name + 2, oid, list);
+               }
+               if (namelen < 3 ||
+                   name[2] != ':' ||
+                   name[1] < '0' || '3' < name[1])
+                       cp = name + 1;
+               else {
+                       stage = name[1] - '0';
+                       cp = name + 3;
+               }
+               new_path = resolve_relative_path(cp);
+               if (!new_path) {
+                       namelen = namelen - (cp - name);
+               } else {
+                       cp = new_path;
+                       namelen = strlen(cp);
+               }
+
+               if (flags & GET_OID_RECORD_PATH)
+                       oc->path = xstrdup(cp);
+
+               if (!active_cache)
+                       read_cache();
+               pos = cache_name_pos(cp, namelen);
+               if (pos < 0)
+                       pos = -pos - 1;
+               while (pos < active_nr) {
+                       ce = active_cache[pos];
+                       if (ce_namelen(ce) != namelen ||
+                           memcmp(ce->name, cp, namelen))
+                               break;
+                       if (ce_stage(ce) == stage) {
+                               oidcpy(oid, &ce->oid);
+                               oc->mode = ce->ce_mode;
+                               free(new_path);
+                               return 0;
+                       }
+                       pos++;
+               }
+               if (only_to_die && name[1] && name[1] != '/')
+                       diagnose_invalid_index_path(stage, prefix, cp);
+               free(new_path);
+               return -1;
+       }
+       for (cp = name, bracket_depth = 0; *cp; cp++) {
+               if (*cp == '{')
+                       bracket_depth++;
+               else if (bracket_depth && *cp == '}')
+                       bracket_depth--;
+               else if (!bracket_depth && *cp == ':')
+                       break;
+       }
+       if (*cp == ':') {
+               struct object_id tree_oid;
+               int len = cp - name;
+               unsigned sub_flags = flags;
+
+               sub_flags &= ~GET_OID_DISAMBIGUATORS;
+               sub_flags |= GET_OID_TREEISH;
+
+               if (!get_oid_1(name, len, &tree_oid, sub_flags)) {
+                       const char *filename = cp+1;
+                       char *new_filename = NULL;
+
+                       new_filename = resolve_relative_path(filename);
+                       if (new_filename)
+                               filename = new_filename;
+                       if (flags & GET_OID_FOLLOW_SYMLINKS) {
+                               ret = get_tree_entry_follow_symlinks(tree_oid.hash,
+                                       filename, oid->hash, &oc->symlink_path,
+                                       &oc->mode);
+                       } else {
+                               ret = get_tree_entry(&tree_oid, filename, oid,
+                                                    &oc->mode);
+                               if (ret && only_to_die) {
+                                       diagnose_invalid_oid_path(prefix,
+                                                                  filename,
+                                                                  &tree_oid,
+                                                                  name, len);
+                               }
+                       }
+                       hashcpy(oc->tree, tree_oid.hash);
+                       if (flags & GET_OID_RECORD_PATH)
+                               oc->path = xstrdup(filename);
+
+                       free(new_filename);
+                       return ret;
+               } else {
+                       if (only_to_die)
+                               die("Invalid object name '%.*s'.", len, name);
+               }
+       }
+       return ret;
+}
+
+/*
+ * Call this function when you know "name" given by the end user must
+ * name an object but it doesn't; the function _may_ die with a better
+ * diagnostic message than "no such object 'name'", e.g. "Path 'doc' does not
+ * exist in 'HEAD'" when given "HEAD:doc", or it may return, in which case
+ * you have a chance to diagnose the error further.
+ */
+void maybe_die_on_misspelt_object_name(const char *name, const char *prefix)
+{
+       struct object_context oc;
+       struct object_id oid;
+       get_oid_with_context_1(name, GET_OID_ONLY_TO_DIE, prefix, &oid, &oc);
+}
+
+int get_oid_with_context(const char *str, unsigned flags, struct object_id *oid, struct object_context *oc)
+{
+       if (flags & GET_OID_FOLLOW_SYMLINKS && flags & GET_OID_ONLY_TO_DIE)
+               die("BUG: incompatible flags for get_oid_with_context");
+       return get_oid_with_context_1(str, flags, NULL, oid, oc);
+}
diff --git a/sha1_file.c b/sha1_file.c
deleted file mode 100644 (file)
index cc0f43e..0000000
+++ /dev/null
@@ -1,2237 +0,0 @@
-/*
- * GIT - The information manager from hell
- *
- * Copyright (C) Linus Torvalds, 2005
- *
- * This handles basic git sha1 object files - packing, unpacking,
- * creation etc.
- */
-#include "cache.h"
-#include "config.h"
-#include "string-list.h"
-#include "lockfile.h"
-#include "delta.h"
-#include "pack.h"
-#include "blob.h"
-#include "commit.h"
-#include "run-command.h"
-#include "tag.h"
-#include "tree.h"
-#include "tree-walk.h"
-#include "refs.h"
-#include "pack-revindex.h"
-#include "sha1-lookup.h"
-#include "bulk-checkin.h"
-#include "streaming.h"
-#include "dir.h"
-#include "list.h"
-#include "mergesort.h"
-#include "quote.h"
-#include "packfile.h"
-#include "fetch-object.h"
-
-const unsigned char null_sha1[GIT_MAX_RAWSZ];
-const struct object_id null_oid;
-const struct object_id empty_tree_oid = {
-       EMPTY_TREE_SHA1_BIN_LITERAL
-};
-const struct object_id empty_blob_oid = {
-       EMPTY_BLOB_SHA1_BIN_LITERAL
-};
-
-static void git_hash_sha1_init(git_hash_ctx *ctx)
-{
-       git_SHA1_Init(&ctx->sha1);
-}
-
-static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len)
-{
-       git_SHA1_Update(&ctx->sha1, data, len);
-}
-
-static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx)
-{
-       git_SHA1_Final(hash, &ctx->sha1);
-}
-
-static void git_hash_unknown_init(git_hash_ctx *ctx)
-{
-       die("trying to init unknown hash");
-}
-
-static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
-{
-       die("trying to update unknown hash");
-}
-
-static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
-{
-       die("trying to finalize unknown hash");
-}
-
-const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
-       {
-               NULL,
-               0x00000000,
-               0,
-               0,
-               git_hash_unknown_init,
-               git_hash_unknown_update,
-               git_hash_unknown_final,
-               NULL,
-               NULL,
-       },
-       {
-               "sha-1",
-               /* "sha1", big-endian */
-               0x73686131,
-               GIT_SHA1_RAWSZ,
-               GIT_SHA1_HEXSZ,
-               git_hash_sha1_init,
-               git_hash_sha1_update,
-               git_hash_sha1_final,
-               &empty_tree_oid,
-               &empty_blob_oid,
-       },
-};
-
-/*
- * This is meant to hold a *small* number of objects that you would
- * want read_sha1_file() to be able to return, yet you do not want
- * to write them into the object store (e.g. a browse-only
- * application).
- */
-static struct cached_object {
-       unsigned char sha1[20];
-       enum object_type type;
-       void *buf;
-       unsigned long size;
-} *cached_objects;
-static int cached_object_nr, cached_object_alloc;
-
-static struct cached_object empty_tree = {
-       EMPTY_TREE_SHA1_BIN_LITERAL,
-       OBJ_TREE,
-       "",
-       0
-};
-
-static struct cached_object *find_cached_object(const unsigned char *sha1)
-{
-       int i;
-       struct cached_object *co = cached_objects;
-
-       for (i = 0; i < cached_object_nr; i++, co++) {
-               if (!hashcmp(co->sha1, sha1))
-                       return co;
-       }
-       if (!hashcmp(sha1, empty_tree.sha1))
-               return &empty_tree;
-       return NULL;
-}
-
-
-static int get_conv_flags(unsigned flags)
-{
-       if (flags & HASH_RENORMALIZE)
-               return CONV_EOL_RENORMALIZE;
-       else if (flags & HASH_WRITE_OBJECT)
-       return global_conv_flags_eol;
-       else
-               return 0;
-}
-
-
-int mkdir_in_gitdir(const char *path)
-{
-       if (mkdir(path, 0777)) {
-               int saved_errno = errno;
-               struct stat st;
-               struct strbuf sb = STRBUF_INIT;
-
-               if (errno != EEXIST)
-                       return -1;
-               /*
-                * Are we looking at a path in a symlinked worktree
-                * whose original repository does not yet have it?
-                * e.g. .git/rr-cache pointing at its original
-                * repository in which the user hasn't performed any
-                * conflict resolution yet?
-                */
-               if (lstat(path, &st) || !S_ISLNK(st.st_mode) ||
-                   strbuf_readlink(&sb, path, st.st_size) ||
-                   !is_absolute_path(sb.buf) ||
-                   mkdir(sb.buf, 0777)) {
-                       strbuf_release(&sb);
-                       errno = saved_errno;
-                       return -1;
-               }
-               strbuf_release(&sb);
-       }
-       return adjust_shared_perm(path);
-}
-
-enum scld_error safe_create_leading_directories(char *path)
-{
-       char *next_component = path + offset_1st_component(path);
-       enum scld_error ret = SCLD_OK;
-
-       while (ret == SCLD_OK && next_component) {
-               struct stat st;
-               char *slash = next_component, slash_character;
-
-               while (*slash && !is_dir_sep(*slash))
-                       slash++;
-
-               if (!*slash)
-                       break;
-
-               next_component = slash + 1;
-               while (is_dir_sep(*next_component))
-                       next_component++;
-               if (!*next_component)
-                       break;
-
-               slash_character = *slash;
-               *slash = '\0';
-               if (!stat(path, &st)) {
-                       /* path exists */
-                       if (!S_ISDIR(st.st_mode)) {
-                               errno = ENOTDIR;
-                               ret = SCLD_EXISTS;
-                       }
-               } else if (mkdir(path, 0777)) {
-                       if (errno == EEXIST &&
-                           !stat(path, &st) && S_ISDIR(st.st_mode))
-                               ; /* somebody created it since we checked */
-                       else if (errno == ENOENT)
-                               /*
-                                * Either mkdir() failed because
-                                * somebody just pruned the containing
-                                * directory, or stat() failed because
-                                * the file that was in our way was
-                                * just removed.  Either way, inform
-                                * the caller that it might be worth
-                                * trying again:
-                                */
-                               ret = SCLD_VANISHED;
-                       else
-                               ret = SCLD_FAILED;
-               } else if (adjust_shared_perm(path)) {
-                       ret = SCLD_PERMS;
-               }
-               *slash = slash_character;
-       }
-       return ret;
-}
-
-enum scld_error safe_create_leading_directories_const(const char *path)
-{
-       int save_errno;
-       /* path points to cache entries, so xstrdup before messing with it */
-       char *buf = xstrdup(path);
-       enum scld_error result = safe_create_leading_directories(buf);
-
-       save_errno = errno;
-       free(buf);
-       errno = save_errno;
-       return result;
-}
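A minimal illustrative use of the helper above, with a hypothetical
destination path and wrapper name:

static void create_leading_dirs_example(void)
{
	/* Hypothetical destination path, for illustration only. */
	if (safe_create_leading_directories_const("objects/info/commit-graph") != SCLD_OK)
		die_errno("unable to create leading directories");
}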
-
-int raceproof_create_file(const char *path, create_file_fn fn, void *cb)
-{
-       /*
-        * The number of times we will try to remove empty directories
-        * in the way of path. This is only 1 because if another
-        * process is racily creating directories that conflict with
-        * us, we don't want to fight against them.
-        */
-       int remove_directories_remaining = 1;
-
-       /*
-        * The number of times that we will try to create the
-        * directories containing path. We are willing to attempt this
-        * more than once, because another process could be trying to
-        * clean up empty directories at the same time as we are
-        * trying to create them.
-        */
-       int create_directories_remaining = 3;
-
-       /* A scratch copy of path, filled lazily if we need it: */
-       struct strbuf path_copy = STRBUF_INIT;
-
-       int ret, save_errno;
-
-       /* Sanity check: */
-       assert(*path);
-
-retry_fn:
-       ret = fn(path, cb);
-       save_errno = errno;
-       if (!ret)
-               goto out;
-
-       if (errno == EISDIR && remove_directories_remaining-- > 0) {
-               /*
-                * A directory is in the way. Maybe it is empty; try
-                * to remove it:
-                */
-               if (!path_copy.len)
-                       strbuf_addstr(&path_copy, path);
-
-               if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY))
-                       goto retry_fn;
-       } else if (errno == ENOENT && create_directories_remaining-- > 0) {
-               /*
-                * Maybe the containing directory didn't exist, or
-                * maybe it was just deleted by a process that is
-                * racing with us to clean up empty directories. Try
-                * to create it:
-                */
-               enum scld_error scld_result;
-
-               if (!path_copy.len)
-                       strbuf_addstr(&path_copy, path);
-
-               do {
-                       scld_result = safe_create_leading_directories(path_copy.buf);
-                       if (scld_result == SCLD_OK)
-                               goto retry_fn;
-               } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0);
-       }
-
-out:
-       strbuf_release(&path_copy);
-       errno = save_errno;
-       return ret;
-}
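A minimal sketch of a create_file_fn callback for the helper above; the
callback name and lock path are hypothetical. The callback must return
non-zero with errno set so raceproof_create_file() can tell whether a
retry might help:

static int create_empty_file(const char *path, void *cb)
{
	int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, 0666);
	if (fd < 0)
		return -1;	/* errno describes the failure for the caller */
	return close(fd);
}

static void raceproof_example(void)
{
	if (raceproof_create_file("refs/heads/topic.lock", create_empty_file, NULL))
		die_errno("unable to create lock file");
}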
-
-static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1)
-{
-       int i;
-       for (i = 0; i < 20; i++) {
-               static char hex[] = "0123456789abcdef";
-               unsigned int val = sha1[i];
-               strbuf_addch(buf, hex[val >> 4]);
-               strbuf_addch(buf, hex[val & 0xf]);
-               if (!i)
-                       strbuf_addch(buf, '/');
-       }
-}
-
-void sha1_file_name(struct strbuf *buf, const unsigned char *sha1)
-{
-       strbuf_addstr(buf, get_object_directory());
-       strbuf_addch(buf, '/');
-       fill_sha1_path(buf, sha1);
-}
-
-struct strbuf *alt_scratch_buf(struct alternate_object_database *alt)
-{
-       strbuf_setlen(&alt->scratch, alt->base_len);
-       return &alt->scratch;
-}
-
-static const char *alt_sha1_path(struct alternate_object_database *alt,
-                                const unsigned char *sha1)
-{
-       struct strbuf *buf = alt_scratch_buf(alt);
-       fill_sha1_path(buf, sha1);
-       return buf->buf;
-}
-
-struct alternate_object_database *alt_odb_list;
-static struct alternate_object_database **alt_odb_tail;
-
-/*
- * Return non-zero iff the path is usable as an alternate object database.
- */
-static int alt_odb_usable(struct strbuf *path, const char *normalized_objdir)
-{
-       struct alternate_object_database *alt;
-
-       /* Detect cases where alternate disappeared */
-       if (!is_directory(path->buf)) {
-               error("object directory %s does not exist; "
-                     "check .git/objects/info/alternates.",
-                     path->buf);
-               return 0;
-       }
-
-       /*
-        * Prevent the common mistake of listing the same
-        * thing twice, or object directory itself.
-        */
-       for (alt = alt_odb_list; alt; alt = alt->next) {
-               if (!fspathcmp(path->buf, alt->path))
-                       return 0;
-       }
-       if (!fspathcmp(path->buf, normalized_objdir))
-               return 0;
-
-       return 1;
-}
-
-/*
- * Prepare alternate object database registry.
- *
- * The variable alt_odb_list points at the list of struct
- * alternate_object_database.  The elements on this list come from
- * non-empty elements from colon separated ALTERNATE_DB_ENVIRONMENT
- * environment variable, and $GIT_OBJECT_DIRECTORY/info/alternates,
- * whose contents are similar to that environment variable but can be
- * LF separated.  Its base points at a statically allocated buffer that
- * contains "/the/directory/corresponding/to/.git/objects/...", while
- * its name points just after the slash at the end of ".git/objects/"
- * in the example above, and has enough space to hold 40-byte hex
- * SHA1, an extra slash for the first level indirection, and the
- * terminating NUL.
- */
-static void read_info_alternates(const char * relative_base, int depth);
-static int link_alt_odb_entry(const char *entry, const char *relative_base,
-       int depth, const char *normalized_objdir)
-{
-       struct alternate_object_database *ent;
-       struct strbuf pathbuf = STRBUF_INIT;
-
-       if (!is_absolute_path(entry) && relative_base) {
-               strbuf_realpath(&pathbuf, relative_base, 1);
-               strbuf_addch(&pathbuf, '/');
-       }
-       strbuf_addstr(&pathbuf, entry);
-
-       if (strbuf_normalize_path(&pathbuf) < 0 && relative_base) {
-               error("unable to normalize alternate object path: %s",
-                     pathbuf.buf);
-               strbuf_release(&pathbuf);
-               return -1;
-       }
-
-       /*
-        * The trailing slash after the directory name is given by
-        * this function at the end. Remove duplicates.
-        */
-       while (pathbuf.len && pathbuf.buf[pathbuf.len - 1] == '/')
-               strbuf_setlen(&pathbuf, pathbuf.len - 1);
-
-       if (!alt_odb_usable(&pathbuf, normalized_objdir)) {
-               strbuf_release(&pathbuf);
-               return -1;
-       }
-
-       ent = alloc_alt_odb(pathbuf.buf);
-
-       /* add the alternate entry */
-       *alt_odb_tail = ent;
-       alt_odb_tail = &(ent->next);
-       ent->next = NULL;
-
-       /* recursively add alternates */
-       read_info_alternates(pathbuf.buf, depth + 1);
-
-       strbuf_release(&pathbuf);
-       return 0;
-}
-
-static const char *parse_alt_odb_entry(const char *string,
-                                      int sep,
-                                      struct strbuf *out)
-{
-       const char *end;
-
-       strbuf_reset(out);
-
-       if (*string == '#') {
-               /* comment; consume up to next separator */
-               end = strchrnul(string, sep);
-       } else if (*string == '"' && !unquote_c_style(out, string, &end)) {
-               /*
-                * quoted path; unquote_c_style has copied the
-                * data for us and set "end". Broken quoting (e.g.,
-                * an entry that doesn't end with a quote) falls
-                * back to the unquoted case below.
-                */
-       } else {
-               /* normal, unquoted path */
-               end = strchrnul(string, sep);
-               strbuf_add(out, string, end - string);
-       }
-
-       if (*end)
-               end++;
-       return end;
-}
-
-static void link_alt_odb_entries(const char *alt, int sep,
-                                const char *relative_base, int depth)
-{
-       struct strbuf objdirbuf = STRBUF_INIT;
-       struct strbuf entry = STRBUF_INIT;
-
-       if (!alt || !*alt)
-               return;
-
-       if (depth > 5) {
-               error("%s: ignoring alternate object stores, nesting too deep.",
-                               relative_base);
-               return;
-       }
-
-       strbuf_add_absolute_path(&objdirbuf, get_object_directory());
-       if (strbuf_normalize_path(&objdirbuf) < 0)
-               die("unable to normalize object directory: %s",
-                   objdirbuf.buf);
-
-       while (*alt) {
-               alt = parse_alt_odb_entry(alt, sep, &entry);
-               if (!entry.len)
-                       continue;
-               link_alt_odb_entry(entry.buf, relative_base, depth, objdirbuf.buf);
-       }
-       strbuf_release(&entry);
-       strbuf_release(&objdirbuf);
-}
-
-static void read_info_alternates(const char * relative_base, int depth)
-{
-       char *path;
-       struct strbuf buf = STRBUF_INIT;
-
-       path = xstrfmt("%s/info/alternates", relative_base);
-       if (strbuf_read_file(&buf, path, 1024) < 0) {
-               warn_on_fopen_errors(path);
-               free(path);
-               return;
-       }
-
-       link_alt_odb_entries(buf.buf, '\n', relative_base, depth);
-       strbuf_release(&buf);
-       free(path);
-}
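For illustration, an objects/info/alternates file accepted by the parsing
above might look like the following (paths are hypothetical); '#' starts a
comment, entries may be C-style quoted, and relative entries are resolved
against the object directory the file lives under:

/*
 * Example objects/info/alternates contents (hypothetical paths):
 *
 *	# shared object store for every clone on this host
 *	/srv/git/shared.git/objects
 *	"/srv/git/path with spaces/objects"
 *	../../other.git/objects
 */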
-
-struct alternate_object_database *alloc_alt_odb(const char *dir)
-{
-       struct alternate_object_database *ent;
-
-       FLEX_ALLOC_STR(ent, path, dir);
-       strbuf_init(&ent->scratch, 0);
-       strbuf_addf(&ent->scratch, "%s/", dir);
-       ent->base_len = ent->scratch.len;
-
-       return ent;
-}
-
-void add_to_alternates_file(const char *reference)
-{
-       struct lock_file lock = LOCK_INIT;
-       char *alts = git_pathdup("objects/info/alternates");
-       FILE *in, *out;
-       int found = 0;
-
-       hold_lock_file_for_update(&lock, alts, LOCK_DIE_ON_ERROR);
-       out = fdopen_lock_file(&lock, "w");
-       if (!out)
-               die_errno("unable to fdopen alternates lockfile");
-
-       in = fopen(alts, "r");
-       if (in) {
-               struct strbuf line = STRBUF_INIT;
-
-               while (strbuf_getline(&line, in) != EOF) {
-                       if (!strcmp(reference, line.buf)) {
-                               found = 1;
-                               break;
-                       }
-                       fprintf_or_die(out, "%s\n", line.buf);
-               }
-
-               strbuf_release(&line);
-               fclose(in);
-       }
-       else if (errno != ENOENT)
-               die_errno("unable to read alternates file");
-
-       if (found) {
-               rollback_lock_file(&lock);
-       } else {
-               fprintf_or_die(out, "%s\n", reference);
-               if (commit_lock_file(&lock))
-                       die_errno("unable to move new alternates file into place");
-               if (alt_odb_tail)
-                       link_alt_odb_entries(reference, '\n', NULL, 0);
-       }
-       free(alts);
-}
-
-void add_to_alternates_memory(const char *reference)
-{
-       /*
-        * Make sure alternates are initialized, or else our entry may be
-        * overwritten when they are.
-        */
-       prepare_alt_odb();
-
-       link_alt_odb_entries(reference, '\n', NULL, 0);
-}
-
-/*
- * Compute the exact path an alternate is at and return it. In case of
- * error, NULL is returned and a human-readable error is added to `err`.
- * `path` may be relative and should point to $GITDIR.
- * `err` must not be null.
- */
-char *compute_alternate_path(const char *path, struct strbuf *err)
-{
-       char *ref_git = NULL;
-       const char *repo, *ref_git_s;
-       int seen_error = 0;
-
-       ref_git_s = real_path_if_valid(path);
-       if (!ref_git_s) {
-               seen_error = 1;
-               strbuf_addf(err, _("path '%s' does not exist"), path);
-               goto out;
-       } else
-               /*
-                * Beware: read_gitfile(), real_path() and mkpath()
-                * return static buffers
-                */
-               ref_git = xstrdup(ref_git_s);
-
-       repo = read_gitfile(ref_git);
-       if (!repo)
-               repo = read_gitfile(mkpath("%s/.git", ref_git));
-       if (repo) {
-               free(ref_git);
-               ref_git = xstrdup(repo);
-       }
-
-       if (!repo && is_directory(mkpath("%s/.git/objects", ref_git))) {
-               char *ref_git_git = mkpathdup("%s/.git", ref_git);
-               free(ref_git);
-               ref_git = ref_git_git;
-       } else if (!is_directory(mkpath("%s/objects", ref_git))) {
-               struct strbuf sb = STRBUF_INIT;
-               seen_error = 1;
-               if (get_common_dir(&sb, ref_git)) {
-                       strbuf_addf(err,
-                                   _("reference repository '%s' as a linked "
-                                     "checkout is not supported yet."),
-                                   path);
-                       goto out;
-               }
-
-               strbuf_addf(err, _("reference repository '%s' is not a "
-                                       "local repository."), path);
-               goto out;
-       }
-
-       if (!access(mkpath("%s/shallow", ref_git), F_OK)) {
-               strbuf_addf(err, _("reference repository '%s' is shallow"),
-                           path);
-               seen_error = 1;
-               goto out;
-       }
-
-       if (!access(mkpath("%s/info/grafts", ref_git), F_OK)) {
-               strbuf_addf(err,
-                           _("reference repository '%s' is grafted"),
-                           path);
-               seen_error = 1;
-               goto out;
-       }
-
-out:
-       if (seen_error) {
-               FREE_AND_NULL(ref_git);
-       }
-
-       return ref_git;
-}
-
-int foreach_alt_odb(alt_odb_fn fn, void *cb)
-{
-       struct alternate_object_database *ent;
-       int r = 0;
-
-       prepare_alt_odb();
-       for (ent = alt_odb_list; ent; ent = ent->next) {
-               r = fn(ent, cb);
-               if (r)
-                       break;
-       }
-       return r;
-}
-
-void prepare_alt_odb(void)
-{
-       const char *alt;
-
-       if (alt_odb_tail)
-               return;
-
-       alt = getenv(ALTERNATE_DB_ENVIRONMENT);
-
-       alt_odb_tail = &alt_odb_list;
-       link_alt_odb_entries(alt, PATH_SEP, NULL, 0);
-
-       read_info_alternates(get_object_directory(), 0);
-}
-
-/* Returns 1 if we have successfully freshened the file, 0 otherwise. */
-static int freshen_file(const char *fn)
-{
-       struct utimbuf t;
-       t.actime = t.modtime = time(NULL);
-       return !utime(fn, &t);
-}
-
-/*
- * All of the check_and_freshen functions return 1 if the file exists and was
- * freshened (if freshening was requested), 0 otherwise. If they return
- * 0, you should not assume that it is safe to skip a write of the object (it
- * either does not exist on disk, or has a stale mtime and may be subject to
- * pruning).
- */
-int check_and_freshen_file(const char *fn, int freshen)
-{
-       if (access(fn, F_OK))
-               return 0;
-       if (freshen && !freshen_file(fn))
-               return 0;
-       return 1;
-}
-
-static int check_and_freshen_local(const unsigned char *sha1, int freshen)
-{
-       static struct strbuf buf = STRBUF_INIT;
-
-       strbuf_reset(&buf);
-       sha1_file_name(&buf, sha1);
-
-       return check_and_freshen_file(buf.buf, freshen);
-}
-
-static int check_and_freshen_nonlocal(const unsigned char *sha1, int freshen)
-{
-       struct alternate_object_database *alt;
-       prepare_alt_odb();
-       for (alt = alt_odb_list; alt; alt = alt->next) {
-               const char *path = alt_sha1_path(alt, sha1);
-               if (check_and_freshen_file(path, freshen))
-                       return 1;
-       }
-       return 0;
-}
-
-static int check_and_freshen(const unsigned char *sha1, int freshen)
-{
-       return check_and_freshen_local(sha1, freshen) ||
-              check_and_freshen_nonlocal(sha1, freshen);
-}
-
-int has_loose_object_nonlocal(const unsigned char *sha1)
-{
-       return check_and_freshen_nonlocal(sha1, 0);
-}
-
-static int has_loose_object(const unsigned char *sha1)
-{
-       return check_and_freshen(sha1, 0);
-}
-
-static void mmap_limit_check(size_t length)
-{
-       static size_t limit = 0;
-       if (!limit) {
-               limit = git_env_ulong("GIT_MMAP_LIMIT", 0);
-               if (!limit)
-                       limit = SIZE_MAX;
-       }
-       if (length > limit)
-               die("attempting to mmap %"PRIuMAX" over limit %"PRIuMAX,
-                   (uintmax_t)length, (uintmax_t)limit);
-}
-
-void *xmmap_gently(void *start, size_t length,
-                 int prot, int flags, int fd, off_t offset)
-{
-       void *ret;
-
-       mmap_limit_check(length);
-       ret = mmap(start, length, prot, flags, fd, offset);
-       if (ret == MAP_FAILED) {
-               if (!length)
-                       return NULL;
-               release_pack_memory(length);
-               ret = mmap(start, length, prot, flags, fd, offset);
-       }
-       return ret;
-}
-
-void *xmmap(void *start, size_t length,
-       int prot, int flags, int fd, off_t offset)
-{
-       void *ret = xmmap_gently(start, length, prot, flags, fd, offset);
-       if (ret == MAP_FAILED)
-               die_errno("mmap failed");
-       return ret;
-}
-
-/*
- * With in-core object data in "map", rehash it to make sure the
- * object name actually matches "sha1" to detect object corruption.
- * With "map" == NULL, try reading the object named with "sha1" using
- * the streaming interface and rehash it to do the same.
- */
-int check_sha1_signature(const unsigned char *sha1, void *map,
-                        unsigned long size, const char *type)
-{
-       struct object_id real_oid;
-       enum object_type obj_type;
-       struct git_istream *st;
-       git_hash_ctx c;
-       char hdr[32];
-       int hdrlen;
-
-       if (map) {
-               hash_object_file(map, size, type, &real_oid);
-               return hashcmp(sha1, real_oid.hash) ? -1 : 0;
-       }
-
-       st = open_istream(sha1, &obj_type, &size, NULL);
-       if (!st)
-               return -1;
-
-       /* Generate the header */
-       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(obj_type), size) + 1;
-
-       /* Sha1.. */
-       the_hash_algo->init_fn(&c);
-       the_hash_algo->update_fn(&c, hdr, hdrlen);
-       for (;;) {
-               char buf[1024 * 16];
-               ssize_t readlen = read_istream(st, buf, sizeof(buf));
-
-               if (readlen < 0) {
-                       close_istream(st);
-                       return -1;
-               }
-               if (!readlen)
-                       break;
-               the_hash_algo->update_fn(&c, buf, readlen);
-       }
-       the_hash_algo->final_fn(real_oid.hash, &c);
-       close_istream(st);
-       return hashcmp(sha1, real_oid.hash) ? -1 : 0;
-}
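A minimal sketch of the in-core verification path described above, assuming
"oid" names an object that can be read; the function name is hypothetical:

static int verify_object_copy(const struct object_id *oid)
{
	enum object_type type;
	unsigned long size;
	void *buf = read_sha1_file(oid->hash, &type, &size);
	int bad;

	if (!buf)
		return error("unable to read %s", oid_to_hex(oid));
	bad = check_sha1_signature(oid->hash, buf, size, type_name(type));
	free(buf);
	return bad ? error("hash mismatch for %s", oid_to_hex(oid)) : 0;
}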
-
-int git_open_cloexec(const char *name, int flags)
-{
-       int fd;
-       static int o_cloexec = O_CLOEXEC;
-
-       fd = open(name, flags | o_cloexec);
-       if ((o_cloexec & O_CLOEXEC) && fd < 0 && errno == EINVAL) {
-               /* Try again w/o O_CLOEXEC: the kernel might not support it */
-               o_cloexec &= ~O_CLOEXEC;
-               fd = open(name, flags | o_cloexec);
-       }
-
-#if defined(F_GETFD) && defined(F_SETFD) && defined(FD_CLOEXEC)
-       {
-               static int fd_cloexec = FD_CLOEXEC;
-
-               if (!o_cloexec && 0 <= fd && fd_cloexec) {
-                       /* Opened w/o O_CLOEXEC?  try with fcntl(2) to add it */
-                       int flags = fcntl(fd, F_GETFD);
-                       if (fcntl(fd, F_SETFD, flags | fd_cloexec))
-                               fd_cloexec = 0;
-               }
-       }
-#endif
-       return fd;
-}
-
-/*
- * Find "sha1" as a loose object in the local repository or in an alternate.
- * Returns 0 on success, negative on failure.
- *
- * The "path" out-parameter will give the path of the object we found (if any).
- * Note that it may point to static storage and is only valid until another
- * call to sha1_file_name(), etc.
- */
-static int stat_sha1_file(const unsigned char *sha1, struct stat *st,
-                         const char **path)
-{
-       struct alternate_object_database *alt;
-       static struct strbuf buf = STRBUF_INIT;
-
-       strbuf_reset(&buf);
-       sha1_file_name(&buf, sha1);
-       *path = buf.buf;
-
-       if (!lstat(*path, st))
-               return 0;
-
-       prepare_alt_odb();
-       errno = ENOENT;
-       for (alt = alt_odb_list; alt; alt = alt->next) {
-               *path = alt_sha1_path(alt, sha1);
-               if (!lstat(*path, st))
-                       return 0;
-       }
-
-       return -1;
-}
-
-/*
- * Like stat_sha1_file(), but actually open the object and return the
- * descriptor. See the caveats on the "path" parameter above.
- */
-static int open_sha1_file(const unsigned char *sha1, const char **path)
-{
-       int fd;
-       struct alternate_object_database *alt;
-       int most_interesting_errno;
-       static struct strbuf buf = STRBUF_INIT;
-
-       strbuf_reset(&buf);
-       sha1_file_name(&buf, sha1);
-       *path = buf.buf;
-
-       fd = git_open(*path);
-       if (fd >= 0)
-               return fd;
-       most_interesting_errno = errno;
-
-       prepare_alt_odb();
-       for (alt = alt_odb_list; alt; alt = alt->next) {
-               *path = alt_sha1_path(alt, sha1);
-               fd = git_open(*path);
-               if (fd >= 0)
-                       return fd;
-               if (most_interesting_errno == ENOENT)
-                       most_interesting_errno = errno;
-       }
-       errno = most_interesting_errno;
-       return -1;
-}
-
-/*
- * Map the loose object at "path" if it is not NULL, or the path found by
- * searching for a loose object named "sha1".
- */
-static void *map_sha1_file_1(const char *path,
-                            const unsigned char *sha1,
-                            unsigned long *size)
-{
-       void *map;
-       int fd;
-
-       if (path)
-               fd = git_open(path);
-       else
-               fd = open_sha1_file(sha1, &path);
-       map = NULL;
-       if (fd >= 0) {
-               struct stat st;
-
-               if (!fstat(fd, &st)) {
-                       *size = xsize_t(st.st_size);
-                       if (!*size) {
-                               /* mmap() is forbidden on empty files */
-                               error("object file %s is empty", path);
-                               close(fd);
-                               return NULL;
-                       }
-                       map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
-               }
-               close(fd);
-       }
-       return map;
-}
-
-void *map_sha1_file(const unsigned char *sha1, unsigned long *size)
-{
-       return map_sha1_file_1(NULL, sha1, size);
-}
-
-static int unpack_sha1_short_header(git_zstream *stream,
-                                   unsigned char *map, unsigned long mapsize,
-                                   void *buffer, unsigned long bufsiz)
-{
-       /* Get the data stream */
-       memset(stream, 0, sizeof(*stream));
-       stream->next_in = map;
-       stream->avail_in = mapsize;
-       stream->next_out = buffer;
-       stream->avail_out = bufsiz;
-
-       git_inflate_init(stream);
-       return git_inflate(stream, 0);
-}
-
-int unpack_sha1_header(git_zstream *stream,
-                      unsigned char *map, unsigned long mapsize,
-                      void *buffer, unsigned long bufsiz)
-{
-       int status = unpack_sha1_short_header(stream, map, mapsize,
-                                             buffer, bufsiz);
-
-       if (status < Z_OK)
-               return status;
-
-       /* Make sure we have the terminating NUL */
-       if (!memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
-               return -1;
-       return 0;
-}
-
-static int unpack_sha1_header_to_strbuf(git_zstream *stream, unsigned char *map,
-                                       unsigned long mapsize, void *buffer,
-                                       unsigned long bufsiz, struct strbuf *header)
-{
-       int status;
-
-       status = unpack_sha1_short_header(stream, map, mapsize, buffer, bufsiz);
-       if (status < Z_OK)
-               return -1;
-
-       /*
-        * Check if entire header is unpacked in the first iteration.
-        */
-       if (memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
-               return 0;
-
-       /*
-        * buffer[0..bufsiz] was not large enough.  Copy the partial
-        * result out to header, and then append the result of further
-        * reading the stream.
-        */
-       strbuf_add(header, buffer, stream->next_out - (unsigned char *)buffer);
-       stream->next_out = buffer;
-       stream->avail_out = bufsiz;
-
-       do {
-               status = git_inflate(stream, 0);
-               strbuf_add(header, buffer, stream->next_out - (unsigned char *)buffer);
-               if (memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
-                       return 0;
-               stream->next_out = buffer;
-               stream->avail_out = bufsiz;
-       } while (status != Z_STREAM_END);
-       return -1;
-}
-
-static void *unpack_sha1_rest(git_zstream *stream, void *buffer, unsigned long size, const unsigned char *sha1)
-{
-       int bytes = strlen(buffer) + 1;
-       unsigned char *buf = xmallocz(size);
-       unsigned long n;
-       int status = Z_OK;
-
-       n = stream->total_out - bytes;
-       if (n > size)
-               n = size;
-       memcpy(buf, (char *) buffer + bytes, n);
-       bytes = n;
-       if (bytes <= size) {
-               /*
-                * The above condition must be (bytes <= size), not
-                * (bytes < size).  In other words, even though we
-                * expect no more output and set avail_out to zero,
-                * the input zlib stream may have bytes that express
-                * "this concludes the stream", and we *do* want to
-                * eat that input.
-                *
-                * Otherwise we would not be able to test that we
-                * consumed all the input to reach the expected size;
-                * we also want to check that zlib tells us that all
-                * went well with status == Z_STREAM_END at the end.
-                */
-               stream->next_out = buf + bytes;
-               stream->avail_out = size - bytes;
-               while (status == Z_OK)
-                       status = git_inflate(stream, Z_FINISH);
-       }
-       if (status == Z_STREAM_END && !stream->avail_in) {
-               git_inflate_end(stream);
-               return buf;
-       }
-
-       if (status < 0)
-               error("corrupt loose object '%s'", sha1_to_hex(sha1));
-       else if (stream->avail_in)
-               error("garbage at end of loose object '%s'",
-                     sha1_to_hex(sha1));
-       free(buf);
-       return NULL;
-}
-
-/*
- * We used to just use "sscanf()", but that's actually way
- * too permissive for what we want to check. So do an anal
- * object header parse by hand.
- */
-static int parse_sha1_header_extended(const char *hdr, struct object_info *oi,
-                              unsigned int flags)
-{
-       const char *type_buf = hdr;
-       unsigned long size;
-       int type, type_len = 0;
-
-       /*
-        * The type can be of any size but is followed by
-        * a space.
-        */
-       for (;;) {
-               char c = *hdr++;
-               if (!c)
-                       return -1;
-               if (c == ' ')
-                       break;
-               type_len++;
-       }
-
-       type = type_from_string_gently(type_buf, type_len, 1);
-       if (oi->type_name)
-               strbuf_add(oi->type_name, type_buf, type_len);
-       /*
-        * Set type to 0 if it's an unknown object and
-        * we're obtaining the type using the
-        * '--allow-unknown-type' option.
-        */
-       if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE) && (type < 0))
-               type = 0;
-       else if (type < 0)
-               die("invalid object type");
-       if (oi->typep)
-               *oi->typep = type;
-
-       /*
-        * The length must follow immediately, and be in canonical
-        * decimal format (ie "010" is not valid).
-        */
-       size = *hdr++ - '0';
-       if (size > 9)
-               return -1;
-       if (size) {
-               for (;;) {
-                       unsigned long c = *hdr - '0';
-                       if (c > 9)
-                               break;
-                       hdr++;
-                       size = size * 10 + c;
-               }
-       }
-
-       if (oi->sizep)
-               *oi->sizep = size;
-
-       /*
-        * The length must be followed by a zero byte
-        */
-       return *hdr ? -1 : type;
-}
-
-int parse_sha1_header(const char *hdr, unsigned long *sizep)
-{
-       struct object_info oi = OBJECT_INFO_INIT;
-
-       oi.sizep = sizep;
-       return parse_sha1_header_extended(hdr, &oi, 0);
-}
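A worked example of the header format parsed above: the decompressed loose
object data begins with "<type> <size>" followed by a NUL byte, so a 16-byte
blob starts with the bytes 'b' 'l' 'o' 'b' ' ' '1' '6' '\0'. A call such as
parse_sha1_header("blob 16", &size) therefore yields OBJ_BLOB with size set
to 16 (the string literal's terminating NUL supplies the required zero byte),
while a non-canonical size such as "blob 016" is rejected.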
-
-static int sha1_loose_object_info(const unsigned char *sha1,
-                                 struct object_info *oi,
-                                 int flags)
-{
-       int status = 0;
-       unsigned long mapsize;
-       void *map;
-       git_zstream stream;
-       char hdr[32];
-       struct strbuf hdrbuf = STRBUF_INIT;
-       unsigned long size_scratch;
-
-       if (oi->delta_base_sha1)
-               hashclr(oi->delta_base_sha1);
-
-       /*
-        * If we don't care about type or size, then we don't
-        * need to look inside the object at all. Note that we
-        * do not optimize out the stat call, even if the
-        * caller doesn't care about the disk-size, since our
-        * return value implicitly indicates whether the
-        * object even exists.
-        */
-       if (!oi->typep && !oi->type_name && !oi->sizep && !oi->contentp) {
-               const char *path;
-               struct stat st;
-               if (stat_sha1_file(sha1, &st, &path) < 0)
-                       return -1;
-               if (oi->disk_sizep)
-                       *oi->disk_sizep = st.st_size;
-               return 0;
-       }
-
-       map = map_sha1_file(sha1, &mapsize);
-       if (!map)
-               return -1;
-
-       if (!oi->sizep)
-               oi->sizep = &size_scratch;
-
-       if (oi->disk_sizep)
-               *oi->disk_sizep = mapsize;
-       if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE)) {
-               if (unpack_sha1_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0)
-                       status = error("unable to unpack %s header with --allow-unknown-type",
-                                      sha1_to_hex(sha1));
-       } else if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
-               status = error("unable to unpack %s header",
-                              sha1_to_hex(sha1));
-       if (status < 0)
-               ; /* Do nothing */
-       else if (hdrbuf.len) {
-               if ((status = parse_sha1_header_extended(hdrbuf.buf, oi, flags)) < 0)
-                       status = error("unable to parse %s header with --allow-unknown-type",
-                                      sha1_to_hex(sha1));
-       } else if ((status = parse_sha1_header_extended(hdr, oi, flags)) < 0)
-               status = error("unable to parse %s header", sha1_to_hex(sha1));
-
-       if (status >= 0 && oi->contentp) {
-               *oi->contentp = unpack_sha1_rest(&stream, hdr,
-                                                *oi->sizep, sha1);
-               if (!*oi->contentp) {
-                       git_inflate_end(&stream);
-                       status = -1;
-               }
-       } else
-               git_inflate_end(&stream);
-
-       munmap(map, mapsize);
-       if (status && oi->typep)
-               *oi->typep = status;
-       if (oi->sizep == &size_scratch)
-               oi->sizep = NULL;
-       strbuf_release(&hdrbuf);
-       oi->whence = OI_LOOSE;
-       return (status < 0) ? status : 0;
-}
-
-int fetch_if_missing = 1;
-
-int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, unsigned flags)
-{
-       static struct object_info blank_oi = OBJECT_INFO_INIT;
-       struct pack_entry e;
-       int rtype;
-       const unsigned char *real = (flags & OBJECT_INFO_LOOKUP_REPLACE) ?
-                                   lookup_replace_object(sha1) :
-                                   sha1;
-       int already_retried = 0;
-
-       if (is_null_sha1(real))
-               return -1;
-
-       if (!oi)
-               oi = &blank_oi;
-
-       if (!(flags & OBJECT_INFO_SKIP_CACHED)) {
-               struct cached_object *co = find_cached_object(real);
-               if (co) {
-                       if (oi->typep)
-                               *(oi->typep) = co->type;
-                       if (oi->sizep)
-                               *(oi->sizep) = co->size;
-                       if (oi->disk_sizep)
-                               *(oi->disk_sizep) = 0;
-                       if (oi->delta_base_sha1)
-                               hashclr(oi->delta_base_sha1);
-                       if (oi->type_name)
-                               strbuf_addstr(oi->type_name, type_name(co->type));
-                       if (oi->contentp)
-                               *oi->contentp = xmemdupz(co->buf, co->size);
-                       oi->whence = OI_CACHED;
-                       return 0;
-               }
-       }
-
-       while (1) {
-               if (find_pack_entry(real, &e))
-                       break;
-
-               /* Most likely it's a loose object. */
-               if (!sha1_loose_object_info(real, oi, flags))
-                       return 0;
-
-               /* Not a loose object; someone else may have just packed it. */
-               if (!(flags & OBJECT_INFO_QUICK)) {
-                       reprepare_packed_git();
-                       if (find_pack_entry(real, &e))
-                               break;
-               }
-
-               /* Check if it is a missing object */
-               if (fetch_if_missing && repository_format_partial_clone &&
-                   !already_retried) {
-                       /*
-                        * TODO Investigate having fetch_object() return
-                        * TODO error/success and stopping the music here.
-                        */
-                       fetch_object(repository_format_partial_clone, real);
-                       already_retried = 1;
-                       continue;
-               }
-
-               return -1;
-       }
-
-       if (oi == &blank_oi)
-               /*
-                * We know that the caller doesn't actually need the
-                * information below, so return early.
-                */
-               return 0;
-       rtype = packed_object_info(e.p, e.offset, oi);
-       if (rtype < 0) {
-               mark_bad_packed_object(e.p, real);
-               return sha1_object_info_extended(real, oi, 0);
-       } else if (oi->whence == OI_PACKED) {
-               oi->u.packed.offset = e.offset;
-               oi->u.packed.pack = e.p;
-               oi->u.packed.is_delta = (rtype == OBJ_REF_DELTA ||
-                                        rtype == OBJ_OFS_DELTA);
-       }
-
-       return 0;
-}
-
-/* returns enum object_type or negative */
-int sha1_object_info(const unsigned char *sha1, unsigned long *sizep)
-{
-       enum object_type type;
-       struct object_info oi = OBJECT_INFO_INIT;
-
-       oi.typep = &type;
-       oi.sizep = sizep;
-       if (sha1_object_info_extended(sha1, &oi,
-                                     OBJECT_INFO_LOOKUP_REPLACE) < 0)
-               return -1;
-       return type;
-}
-
-static void *read_object(const unsigned char *sha1, enum object_type *type,
-                        unsigned long *size)
-{
-       struct object_info oi = OBJECT_INFO_INIT;
-       void *content;
-       oi.typep = type;
-       oi.sizep = size;
-       oi.contentp = &content;
-
-       if (sha1_object_info_extended(sha1, &oi, 0) < 0)
-               return NULL;
-       return content;
-}
-
-int pretend_object_file(void *buf, unsigned long len, enum object_type type,
-                       struct object_id *oid)
-{
-       struct cached_object *co;
-
-       hash_object_file(buf, len, type_name(type), oid);
-       if (has_sha1_file(oid->hash) || find_cached_object(oid->hash))
-               return 0;
-       ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
-       co = &cached_objects[cached_object_nr++];
-       co->size = len;
-       co->type = type;
-       co->buf = xmalloc(len);
-       memcpy(co->buf, buf, len);
-       hashcpy(co->sha1, oid->hash);
-       return 0;
-}
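A minimal sketch of the browse-only use case that the cached-object comment
near the top of this file describes: register an in-memory blob so
read_sha1_file() can return it without writing anything to the object store
(the contents and function name are illustrative):

static void register_in_memory_blob(void)
{
	struct object_id oid;
	const char *msg = "hello, world\n";

	if (pretend_object_file((void *)msg, strlen(msg), OBJ_BLOB, &oid))
		die("unable to register in-memory object");
	/* A later read_sha1_file(oid.hash, &type, &size) finds this blob. */
}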
-
-/*
- * This function dies on corrupt objects; the callers who want to
- * deal with them should arrange to call read_object() and give error
- * messages themselves.
- */
-void *read_sha1_file_extended(const unsigned char *sha1,
-                             enum object_type *type,
-                             unsigned long *size,
-                             int lookup_replace)
-{
-       void *data;
-       const struct packed_git *p;
-       const char *path;
-       struct stat st;
-       const unsigned char *repl = lookup_replace ? lookup_replace_object(sha1)
-                                                  : sha1;
-
-       errno = 0;
-       data = read_object(repl, type, size);
-       if (data)
-               return data;
-
-       if (errno && errno != ENOENT)
-               die_errno("failed to read object %s", sha1_to_hex(sha1));
-
-       /* die if we replaced an object with one that does not exist */
-       if (repl != sha1)
-               die("replacement %s not found for %s",
-                   sha1_to_hex(repl), sha1_to_hex(sha1));
-
-       if (!stat_sha1_file(repl, &st, &path))
-               die("loose object %s (stored in %s) is corrupt",
-                   sha1_to_hex(repl), path);
-
-       if ((p = has_packed_and_bad(repl)) != NULL)
-               die("packed object %s (stored in %s) is corrupt",
-                   sha1_to_hex(repl), p->pack_name);
-
-       return NULL;
-}
-
-void *read_object_with_reference(const unsigned char *sha1,
-                                const char *required_type_name,
-                                unsigned long *size,
-                                unsigned char *actual_sha1_return)
-{
-       enum object_type type, required_type;
-       void *buffer;
-       unsigned long isize;
-       unsigned char actual_sha1[20];
-
-       required_type = type_from_string(required_type_name);
-       hashcpy(actual_sha1, sha1);
-       while (1) {
-               int ref_length = -1;
-               const char *ref_type = NULL;
-
-               buffer = read_sha1_file(actual_sha1, &type, &isize);
-               if (!buffer)
-                       return NULL;
-               if (type == required_type) {
-                       *size = isize;
-                       if (actual_sha1_return)
-                               hashcpy(actual_sha1_return, actual_sha1);
-                       return buffer;
-               }
-               /* Handle references */
-               else if (type == OBJ_COMMIT)
-                       ref_type = "tree ";
-               else if (type == OBJ_TAG)
-                       ref_type = "object ";
-               else {
-                       free(buffer);
-                       return NULL;
-               }
-               ref_length = strlen(ref_type);
-
-               if (ref_length + 40 > isize ||
-                   memcmp(buffer, ref_type, ref_length) ||
-                   get_sha1_hex((char *) buffer + ref_length, actual_sha1)) {
-                       free(buffer);
-                       return NULL;
-               }
-               free(buffer);
-               /* Now we have the ID of the referred-to object in
-                * actual_sha1.  Check again. */
-       }
-}
-
-static void write_object_file_prepare(const void *buf, unsigned long len,
-                                     const char *type, struct object_id *oid,
-                                     char *hdr, int *hdrlen)
-{
-       git_hash_ctx c;
-
-       /* Generate the header */
-       *hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1;
-
-       /* Sha1.. */
-       the_hash_algo->init_fn(&c);
-       the_hash_algo->update_fn(&c, hdr, *hdrlen);
-       the_hash_algo->update_fn(&c, buf, len);
-       the_hash_algo->final_fn(oid->hash, &c);
-}
-
-/*
- * Move the just written object into its final resting place.
- */
-int finalize_object_file(const char *tmpfile, const char *filename)
-{
-       int ret = 0;
-
-       if (object_creation_mode == OBJECT_CREATION_USES_RENAMES)
-               goto try_rename;
-       else if (link(tmpfile, filename))
-               ret = errno;
-
-       /*
-        * Coda hack - coda doesn't like cross-directory links,
-        * so we fall back to a rename, which will mean that it
-        * won't be able to check collisions, but that's not a
-        * big deal.
-        *
-        * The same holds for FAT formatted media.
-        *
-        * When this succeeds, we just return.  We have nothing
-        * left to unlink.
-        */
-       if (ret && ret != EEXIST) {
-       try_rename:
-               if (!rename(tmpfile, filename))
-                       goto out;
-               ret = errno;
-       }
-       unlink_or_warn(tmpfile);
-       if (ret) {
-               if (ret != EEXIST) {
-                       return error_errno("unable to write sha1 filename %s", filename);
-               }
-               /* FIXME!!! Collision check here ? */
-       }
-
-out:
-       if (adjust_shared_perm(filename))
-               return error("unable to set permission to '%s'", filename);
-       return 0;
-}
-
-static int write_buffer(int fd, const void *buf, size_t len)
-{
-       if (write_in_full(fd, buf, len) < 0)
-               return error_errno("file write error");
-       return 0;
-}
-
-int hash_object_file(const void *buf, unsigned long len, const char *type,
-                    struct object_id *oid)
-{
-       char hdr[32];
-       int hdrlen = sizeof(hdr);
-       write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
-       return 0;
-}
-
-/* Finalize a file on disk, and close it. */
-static void close_sha1_file(int fd)
-{
-       if (fsync_object_files)
-               fsync_or_die(fd, "sha1 file");
-       if (close(fd) != 0)
-               die_errno("error when closing sha1 file");
-}
-
-/* Size of directory component, including the ending '/' */
-static inline int directory_size(const char *filename)
-{
-       const char *s = strrchr(filename, '/');
-       if (!s)
-               return 0;
-       return s - filename + 1;
-}
-
-/*
- * This creates a temporary file in the same directory as the final
- * 'filename'
- *
- * We want to avoid cross-directory filename renames, because those
- * can have problems on various filesystems (FAT, NFS, Coda).
- */
-static int create_tmpfile(struct strbuf *tmp, const char *filename)
-{
-       int fd, dirlen = directory_size(filename);
-
-       strbuf_reset(tmp);
-       strbuf_add(tmp, filename, dirlen);
-       strbuf_addstr(tmp, "tmp_obj_XXXXXX");
-       fd = git_mkstemp_mode(tmp->buf, 0444);
-       if (fd < 0 && dirlen && errno == ENOENT) {
-               /*
-                * Make sure the directory exists; note that the contents
-                * of the buffer are undefined after mkstemp returns an
-                * error, so we have to rewrite the whole buffer from
-                * scratch.
-                */
-               strbuf_reset(tmp);
-               strbuf_add(tmp, filename, dirlen - 1);
-               if (mkdir(tmp->buf, 0777) && errno != EEXIST)
-                       return -1;
-               if (adjust_shared_perm(tmp->buf))
-                       return -1;
-
-               /* Try again */
-               strbuf_addstr(tmp, "/tmp_obj_XXXXXX");
-               fd = git_mkstemp_mode(tmp->buf, 0444);
-       }
-       return fd;
-}
-
-static int write_loose_object(const struct object_id *oid, char *hdr,
-                             int hdrlen, const void *buf, unsigned long len,
-                             time_t mtime)
-{
-       int fd, ret;
-       unsigned char compressed[4096];
-       git_zstream stream;
-       git_hash_ctx c;
-       struct object_id parano_oid;
-       static struct strbuf tmp_file = STRBUF_INIT;
-       static struct strbuf filename = STRBUF_INIT;
-
-       strbuf_reset(&filename);
-       sha1_file_name(&filename, oid->hash);
-
-       fd = create_tmpfile(&tmp_file, filename.buf);
-       if (fd < 0) {
-               if (errno == EACCES)
-                       return error("insufficient permission for adding an object to repository database %s", get_object_directory());
-               else
-                       return error_errno("unable to create temporary file");
-       }
-
-       /* Set it up */
-       git_deflate_init(&stream, zlib_compression_level);
-       stream.next_out = compressed;
-       stream.avail_out = sizeof(compressed);
-       the_hash_algo->init_fn(&c);
-
-       /* First header.. */
-       stream.next_in = (unsigned char *)hdr;
-       stream.avail_in = hdrlen;
-       while (git_deflate(&stream, 0) == Z_OK)
-               ; /* nothing */
-       the_hash_algo->update_fn(&c, hdr, hdrlen);
-
-       /* Then the data itself.. */
-       stream.next_in = (void *)buf;
-       stream.avail_in = len;
-       do {
-               unsigned char *in0 = stream.next_in;
-               ret = git_deflate(&stream, Z_FINISH);
-               the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
-               if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
-                       die("unable to write sha1 file");
-               stream.next_out = compressed;
-               stream.avail_out = sizeof(compressed);
-       } while (ret == Z_OK);
-
-       if (ret != Z_STREAM_END)
-               die("unable to deflate new object %s (%d)", oid_to_hex(oid),
-                   ret);
-       ret = git_deflate_end_gently(&stream);
-       if (ret != Z_OK)
-               die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
-                   ret);
-       the_hash_algo->final_fn(parano_oid.hash, &c);
-       if (oidcmp(oid, &parano_oid) != 0)
-               die("confused by unstable object source data for %s",
-                   oid_to_hex(oid));
-
-       close_sha1_file(fd);
-
-       if (mtime) {
-               struct utimbuf utb;
-               utb.actime = mtime;
-               utb.modtime = mtime;
-               if (utime(tmp_file.buf, &utb) < 0)
-                       warning_errno("failed utime() on %s", tmp_file.buf);
-       }
-
-       return finalize_object_file(tmp_file.buf, filename.buf);
-}
-
-static int freshen_loose_object(const unsigned char *sha1)
-{
-       return check_and_freshen(sha1, 1);
-}
-
-static int freshen_packed_object(const unsigned char *sha1)
-{
-       struct pack_entry e;
-       if (!find_pack_entry(sha1, &e))
-               return 0;
-       if (e.p->freshened)
-               return 1;
-       if (!freshen_file(e.p->pack_name))
-               return 0;
-       e.p->freshened = 1;
-       return 1;
-}
-
-int write_object_file(const void *buf, unsigned long len, const char *type,
-                     struct object_id *oid)
-{
-       char hdr[32];
-       int hdrlen = sizeof(hdr);
-
-       /* Normally if we have it in the pack then we do not bother writing
-        * it out into .git/objects/??/?{38} file.
-        */
-       write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
-       if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
-               return 0;
-       return write_loose_object(oid, hdr, hdrlen, buf, len, 0);
-}
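A minimal sketch of writing a small blob through the function above; the
contents are only an example:

static void write_blob_example(void)
{
	struct object_id oid;

	if (write_object_file("hello\n", 6, type_name(OBJ_BLOB), &oid))
		die("unable to write blob object");
}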
-
-int hash_object_file_literally(const void *buf, unsigned long len,
-                              const char *type, struct object_id *oid,
-                              unsigned flags)
-{
-       char *header;
-       int hdrlen, status = 0;
-
-       /* type string, SP, %lu of the length plus NUL must fit this */
-       hdrlen = strlen(type) + 32;
-       header = xmalloc(hdrlen);
-       write_object_file_prepare(buf, len, type, oid, header, &hdrlen);
-
-       if (!(flags & HASH_WRITE_OBJECT))
-               goto cleanup;
-       if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
-               goto cleanup;
-       status = write_loose_object(oid, header, hdrlen, buf, len, 0);
-
-cleanup:
-       free(header);
-       return status;
-}
-
-int force_object_loose(const struct object_id *oid, time_t mtime)
-{
-       void *buf;
-       unsigned long len;
-       enum object_type type;
-       char hdr[32];
-       int hdrlen;
-       int ret;
-
-       if (has_loose_object(oid->hash))
-               return 0;
-       buf = read_object(oid->hash, &type, &len);
-       if (!buf)
-               return error("cannot read sha1_file for %s", oid_to_hex(oid));
-       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
-       ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
-       free(buf);
-
-       return ret;
-}
-
-int has_sha1_file_with_flags(const unsigned char *sha1, int flags)
-{
-       if (!startup_info->have_repository)
-               return 0;
-       return sha1_object_info_extended(sha1, NULL,
-                                        flags | OBJECT_INFO_SKIP_CACHED) >= 0;
-}
-
-int has_object_file(const struct object_id *oid)
-{
-       return has_sha1_file(oid->hash);
-}
-
-int has_object_file_with_flags(const struct object_id *oid, int flags)
-{
-       return has_sha1_file_with_flags(oid->hash, flags);
-}
-
-static void check_tree(const void *buf, size_t size)
-{
-       struct tree_desc desc;
-       struct name_entry entry;
-
-       init_tree_desc(&desc, buf, size);
-       while (tree_entry(&desc, &entry))
-               /* do nothing
-                * tree_entry() will die() on malformed entries */
-               ;
-}
-
-static void check_commit(const void *buf, size_t size)
-{
-       struct commit c;
-       memset(&c, 0, sizeof(c));
-       if (parse_commit_buffer(&c, buf, size))
-               die("corrupt commit");
-}
-
-static void check_tag(const void *buf, size_t size)
-{
-       struct tag t;
-       memset(&t, 0, sizeof(t));
-       if (parse_tag_buffer(&t, buf, size))
-               die("corrupt tag");
-}
-
-static int index_mem(struct object_id *oid, void *buf, size_t size,
-                    enum object_type type,
-                    const char *path, unsigned flags)
-{
-       int ret, re_allocated = 0;
-       int write_object = flags & HASH_WRITE_OBJECT;
-
-       if (!type)
-               type = OBJ_BLOB;
-
-       /*
-        * Convert blobs to git internal format
-        */
-       if ((type == OBJ_BLOB) && path) {
-               struct strbuf nbuf = STRBUF_INIT;
-               if (convert_to_git(&the_index, path, buf, size, &nbuf,
-                                  get_conv_flags(flags))) {
-                       buf = strbuf_detach(&nbuf, &size);
-                       re_allocated = 1;
-               }
-       }
-       if (flags & HASH_FORMAT_CHECK) {
-               if (type == OBJ_TREE)
-                       check_tree(buf, size);
-               if (type == OBJ_COMMIT)
-                       check_commit(buf, size);
-               if (type == OBJ_TAG)
-                       check_tag(buf, size);
-       }
-
-       if (write_object)
-               ret = write_object_file(buf, size, type_name(type), oid);
-       else
-               ret = hash_object_file(buf, size, type_name(type), oid);
-       if (re_allocated)
-               free(buf);
-       return ret;
-}
-
-static int index_stream_convert_blob(struct object_id *oid, int fd,
-                                    const char *path, unsigned flags)
-{
-       int ret;
-       const int write_object = flags & HASH_WRITE_OBJECT;
-       struct strbuf sbuf = STRBUF_INIT;
-
-       assert(path);
-       assert(would_convert_to_git_filter_fd(path));
-
-       convert_to_git_filter_fd(&the_index, path, fd, &sbuf,
-                                get_conv_flags(flags));
-
-       if (write_object)
-               ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
-                                       oid);
-       else
-               ret = hash_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
-                                      oid);
-       strbuf_release(&sbuf);
-       return ret;
-}
-
-static int index_pipe(struct object_id *oid, int fd, enum object_type type,
-                     const char *path, unsigned flags)
-{
-       struct strbuf sbuf = STRBUF_INIT;
-       int ret;
-
-       if (strbuf_read(&sbuf, fd, 4096) >= 0)
-               ret = index_mem(oid, sbuf.buf, sbuf.len, type, path, flags);
-       else
-               ret = -1;
-       strbuf_release(&sbuf);
-       return ret;
-}
-
-#define SMALL_FILE_SIZE (32*1024)
-
-static int index_core(struct object_id *oid, int fd, size_t size,
-                     enum object_type type, const char *path,
-                     unsigned flags)
-{
-       int ret;
-
-       if (!size) {
-               ret = index_mem(oid, "", size, type, path, flags);
-       } else if (size <= SMALL_FILE_SIZE) {
-               char *buf = xmalloc(size);
-               ssize_t read_result = read_in_full(fd, buf, size);
-               if (read_result < 0)
-                       ret = error_errno("read error while indexing %s",
-                                         path ? path : "<unknown>");
-               else if (read_result != size)
-                       ret = error("short read while indexing %s",
-                                   path ? path : "<unknown>");
-               else
-                       ret = index_mem(oid, buf, size, type, path, flags);
-               free(buf);
-       } else {
-               void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
-               ret = index_mem(oid, buf, size, type, path, flags);
-               munmap(buf, size);
-       }
-       return ret;
-}
-
-/*
- * This creates one packfile per large blob unless bulk-checkin
- * machinery is "plugged".
- *
- * This also bypasses the usual "convert-to-git" dance, and that is on
- * purpose. We could write a streaming version of the converting
- * functions and insert that before feeding the data to fast-import
- * (or equivalent in-core API described above). However, that is
- * somewhat complicated, as we do not know the size of the filter
- * result, which we need to know beforehand when writing a git object.
- * Since the primary motivation for trying to stream from the working
- * tree file and to avoid mmaping it in core is to deal with large
- * binary blobs, which generally do not want any conversion, callers
- * should avoid this code path when filters are requested.
- */
-static int index_stream(struct object_id *oid, int fd, size_t size,
-                       enum object_type type, const char *path,
-                       unsigned flags)
-{
-       return index_bulk_checkin(oid->hash, fd, size, type, path, flags);
-}
-
-int index_fd(struct object_id *oid, int fd, struct stat *st,
-            enum object_type type, const char *path, unsigned flags)
-{
-       int ret;
-
-       /*
-        * Call xsize_t() only when needed to avoid potentially unnecessary
-        * die() for large files.
-        */
-       if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(path))
-               ret = index_stream_convert_blob(oid, fd, path, flags);
-       else if (!S_ISREG(st->st_mode))
-               ret = index_pipe(oid, fd, type, path, flags);
-       else if (st->st_size <= big_file_threshold || type != OBJ_BLOB ||
-                (path && would_convert_to_git(&the_index, path)))
-               ret = index_core(oid, fd, xsize_t(st->st_size), type, path,
-                                flags);
-       else
-               ret = index_stream(oid, fd, xsize_t(st->st_size), type, path,
-                                  flags);
-       close(fd);
-       return ret;
-}
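-
-/*
- * Editorial note: the dispatch above streams a blob straight into a
- * pack via index_stream() only when it is a regular file, larger than
- * core.bigFileThreshold (512 MiB by default), of blob type, and needs
- * no content conversion; everything else is read or mmapped into
- * memory first (index_pipe/index_core), and filtered content goes
- * through index_stream_convert_blob().
- */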
-
-int index_path(struct object_id *oid, const char *path, struct stat *st, unsigned flags)
-{
-       int fd;
-       struct strbuf sb = STRBUF_INIT;
-       int rc = 0;
-
-       switch (st->st_mode & S_IFMT) {
-       case S_IFREG:
-               fd = open(path, O_RDONLY);
-               if (fd < 0)
-                       return error_errno("open(\"%s\")", path);
-               if (index_fd(oid, fd, st, OBJ_BLOB, path, flags) < 0)
-                       return error("%s: failed to insert into database",
-                                    path);
-               break;
-       case S_IFLNK:
-               if (strbuf_readlink(&sb, path, st->st_size))
-                       return error_errno("readlink(\"%s\")", path);
-               if (!(flags & HASH_WRITE_OBJECT))
-                       hash_object_file(sb.buf, sb.len, blob_type, oid);
-               else if (write_object_file(sb.buf, sb.len, blob_type, oid))
-                       rc = error("%s: failed to insert into database", path);
-               strbuf_release(&sb);
-               break;
-       case S_IFDIR:
-               return resolve_gitlink_ref(path, "HEAD", oid);
-       default:
-               return error("%s: unsupported file type", path);
-       }
-       return rc;
-}
-
-int read_pack_header(int fd, struct pack_header *header)
-{
-       if (read_in_full(fd, header, sizeof(*header)) != sizeof(*header))
-               /* "eof before pack header was fully read" */
-               return PH_ERROR_EOF;
-
-       if (header->hdr_signature != htonl(PACK_SIGNATURE))
-               /* "protocol error (pack signature mismatch detected)" */
-               return PH_ERROR_PACK_SIGNATURE;
-       if (!pack_version_ok(header->hdr_version))
-               /* "protocol error (pack version unsupported)" */
-               return PH_ERROR_PROTOCOL;
-       return 0;
-}
-
-void assert_sha1_type(const unsigned char *sha1, enum object_type expect)
-{
-       enum object_type type = sha1_object_info(sha1, NULL);
-       if (type < 0)
-               die("%s is not a valid object", sha1_to_hex(sha1));
-       if (type != expect)
-               die("%s is not a valid '%s' object", sha1_to_hex(sha1),
-                   type_name(expect));
-}
-
-int for_each_file_in_obj_subdir(unsigned int subdir_nr,
-                               struct strbuf *path,
-                               each_loose_object_fn obj_cb,
-                               each_loose_cruft_fn cruft_cb,
-                               each_loose_subdir_fn subdir_cb,
-                               void *data)
-{
-       size_t origlen, baselen;
-       DIR *dir;
-       struct dirent *de;
-       int r = 0;
-       struct object_id oid;
-
-       if (subdir_nr > 0xff)
-               BUG("invalid loose object subdirectory: %x", subdir_nr);
-
-       origlen = path->len;
-       strbuf_complete(path, '/');
-       strbuf_addf(path, "%02x", subdir_nr);
-
-       dir = opendir(path->buf);
-       if (!dir) {
-               if (errno != ENOENT)
-                       r = error_errno("unable to open %s", path->buf);
-               strbuf_setlen(path, origlen);
-               return r;
-       }
-
-       oid.hash[0] = subdir_nr;
-       strbuf_addch(path, '/');
-       baselen = path->len;
-
-       while ((de = readdir(dir))) {
-               size_t namelen;
-               if (is_dot_or_dotdot(de->d_name))
-                       continue;
-
-               namelen = strlen(de->d_name);
-               strbuf_setlen(path, baselen);
-               strbuf_add(path, de->d_name, namelen);
-               if (namelen == GIT_SHA1_HEXSZ - 2 &&
-                   !hex_to_bytes(oid.hash + 1, de->d_name,
-                                 GIT_SHA1_RAWSZ - 1)) {
-                       if (obj_cb) {
-                               r = obj_cb(&oid, path->buf, data);
-                               if (r)
-                                       break;
-                       }
-                       continue;
-               }
-
-               if (cruft_cb) {
-                       r = cruft_cb(de->d_name, path->buf, data);
-                       if (r)
-                               break;
-               }
-       }
-       closedir(dir);
-
-       strbuf_setlen(path, baselen - 1);
-       if (!r && subdir_cb)
-               r = subdir_cb(subdir_nr, path->buf, data);
-
-       strbuf_setlen(path, origlen);
-
-       return r;
-}
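-
-/*
- * Editorial note: a loose object lives at .git/objects/xx/yyyy...,
- * where "xx" is the first byte of the object name in hex (the
- * subdir_nr above) and the remaining GIT_SHA1_HEXSZ - 2 = 38 hex
- * digits form the file name; directory entries of any other shape are
- * reported through cruft_cb instead of obj_cb.
- */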
-
-int for_each_loose_file_in_objdir_buf(struct strbuf *path,
-                           each_loose_object_fn obj_cb,
-                           each_loose_cruft_fn cruft_cb,
-                           each_loose_subdir_fn subdir_cb,
-                           void *data)
-{
-       int r = 0;
-       int i;
-
-       for (i = 0; i < 256; i++) {
-               r = for_each_file_in_obj_subdir(i, path, obj_cb, cruft_cb,
-                                               subdir_cb, data);
-               if (r)
-                       break;
-       }
-
-       return r;
-}
-
-int for_each_loose_file_in_objdir(const char *path,
-                                 each_loose_object_fn obj_cb,
-                                 each_loose_cruft_fn cruft_cb,
-                                 each_loose_subdir_fn subdir_cb,
-                                 void *data)
-{
-       struct strbuf buf = STRBUF_INIT;
-       int r;
-
-       strbuf_addstr(&buf, path);
-       r = for_each_loose_file_in_objdir_buf(&buf, obj_cb, cruft_cb,
-                                             subdir_cb, data);
-       strbuf_release(&buf);
-
-       return r;
-}
-
-struct loose_alt_odb_data {
-       each_loose_object_fn *cb;
-       void *data;
-};
-
-static int loose_from_alt_odb(struct alternate_object_database *alt,
-                             void *vdata)
-{
-       struct loose_alt_odb_data *data = vdata;
-       struct strbuf buf = STRBUF_INIT;
-       int r;
-
-       strbuf_addstr(&buf, alt->path);
-       r = for_each_loose_file_in_objdir_buf(&buf,
-                                             data->cb, NULL, NULL,
-                                             data->data);
-       strbuf_release(&buf);
-       return r;
-}
-
-int for_each_loose_object(each_loose_object_fn cb, void *data, unsigned flags)
-{
-       struct loose_alt_odb_data alt;
-       int r;
-
-       r = for_each_loose_file_in_objdir(get_object_directory(),
-                                         cb, NULL, NULL, data);
-       if (r)
-               return r;
-
-       if (flags & FOR_EACH_OBJECT_LOCAL_ONLY)
-               return 0;
-
-       alt.cb = cb;
-       alt.data = data;
-       return foreach_alt_odb(loose_from_alt_odb, &alt);
-}
-
-static int check_stream_sha1(git_zstream *stream,
-                            const char *hdr,
-                            unsigned long size,
-                            const char *path,
-                            const unsigned char *expected_sha1)
-{
-       git_hash_ctx c;
-       unsigned char real_sha1[GIT_MAX_RAWSZ];
-       unsigned char buf[4096];
-       unsigned long total_read;
-       int status = Z_OK;
-
-       the_hash_algo->init_fn(&c);
-       the_hash_algo->update_fn(&c, hdr, stream->total_out);
-
-       /*
-        * We already read some bytes into hdr, but the ones up to the NUL
-        * do not count against the object's content size.
-        */
-       total_read = stream->total_out - strlen(hdr) - 1;
-
-       /*
-        * This size comparison must be "<=" to read the final zlib packets;
-        * see the comment in unpack_sha1_rest for details.
-        */
-       while (total_read <= size &&
-              (status == Z_OK || status == Z_BUF_ERROR)) {
-               stream->next_out = buf;
-               stream->avail_out = sizeof(buf);
-               if (size - total_read < stream->avail_out)
-                       stream->avail_out = size - total_read;
-               status = git_inflate(stream, Z_FINISH);
-               the_hash_algo->update_fn(&c, buf, stream->next_out - buf);
-               total_read += stream->next_out - buf;
-       }
-       git_inflate_end(stream);
-
-       if (status != Z_STREAM_END) {
-               error("corrupt loose object '%s'", sha1_to_hex(expected_sha1));
-               return -1;
-       }
-       if (stream->avail_in) {
-               error("garbage at end of loose object '%s'",
-                     sha1_to_hex(expected_sha1));
-               return -1;
-       }
-
-       the_hash_algo->final_fn(real_sha1, &c);
-       if (hashcmp(expected_sha1, real_sha1)) {
-               error("sha1 mismatch for %s (expected %s)", path,
-                     sha1_to_hex(expected_sha1));
-               return -1;
-       }
-
-       return 0;
-}
-
-int read_loose_object(const char *path,
-                     const unsigned char *expected_sha1,
-                     enum object_type *type,
-                     unsigned long *size,
-                     void **contents)
-{
-       int ret = -1;
-       void *map = NULL;
-       unsigned long mapsize;
-       git_zstream stream;
-       char hdr[32];
-
-       *contents = NULL;
-
-       map = map_sha1_file_1(path, NULL, &mapsize);
-       if (!map) {
-               error_errno("unable to mmap %s", path);
-               goto out;
-       }
-
-       if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) {
-               error("unable to unpack header of %s", path);
-               goto out;
-       }
-
-       *type = parse_sha1_header(hdr, size);
-       if (*type < 0) {
-               error("unable to parse header of %s", path);
-               git_inflate_end(&stream);
-               goto out;
-       }
-
-       if (*type == OBJ_BLOB) {
-               if (check_stream_sha1(&stream, hdr, *size, path, expected_sha1) < 0)
-                       goto out;
-       } else {
-               *contents = unpack_sha1_rest(&stream, hdr, *size, expected_sha1);
-               if (!*contents) {
-                       error("unable to unpack contents of %s", path);
-                       git_inflate_end(&stream);
-                       goto out;
-               }
-               if (check_sha1_signature(expected_sha1, *contents,
-                                        *size, type_name(*type))) {
-                       error("sha1 mismatch for %s (expected %s)", path,
-                             sha1_to_hex(expected_sha1));
-                       free(*contents);
-                       goto out;
-               }
-       }
-
-       ret = 0; /* everything checks out */
-
-out:
-       if (map)
-               munmap(map, mapsize);
-       return ret;
-}
diff --git a/sha1_name.c b/sha1_name.c
deleted file mode 100644 (file)
index 735c1c0..0000000
+++ /dev/null
@@ -1,1767 +0,0 @@
-#include "cache.h"
-#include "config.h"
-#include "tag.h"
-#include "commit.h"
-#include "tree.h"
-#include "blob.h"
-#include "tree-walk.h"
-#include "refs.h"
-#include "remote.h"
-#include "dir.h"
-#include "sha1-array.h"
-#include "packfile.h"
-
-static int get_oid_oneline(const char *, struct object_id *, struct commit_list *);
-
-typedef int (*disambiguate_hint_fn)(const struct object_id *, void *);
-
-struct disambiguate_state {
-       int len; /* length of prefix in hex chars */
-       char hex_pfx[GIT_MAX_HEXSZ + 1];
-       struct object_id bin_pfx;
-
-       disambiguate_hint_fn fn;
-       void *cb_data;
-       struct object_id candidate;
-       unsigned candidate_exists:1;
-       unsigned candidate_checked:1;
-       unsigned candidate_ok:1;
-       unsigned disambiguate_fn_used:1;
-       unsigned ambiguous:1;
-       unsigned always_call_fn:1;
-};
-
-static void update_candidates(struct disambiguate_state *ds, const struct object_id *current)
-{
-       if (ds->always_call_fn) {
-               ds->ambiguous = ds->fn(current, ds->cb_data) ? 1 : 0;
-               return;
-       }
-       if (!ds->candidate_exists) {
-               /* this is the first candidate */
-               oidcpy(&ds->candidate, current);
-               ds->candidate_exists = 1;
-               return;
-       } else if (!oidcmp(&ds->candidate, current)) {
-               /* the same as what we already have seen */
-               return;
-       }
-
-       if (!ds->fn) {
-               /* cannot disambiguate between ds->candidate and current */
-               ds->ambiguous = 1;
-               return;
-       }
-
-       if (!ds->candidate_checked) {
-               ds->candidate_ok = ds->fn(&ds->candidate, ds->cb_data);
-               ds->disambiguate_fn_used = 1;
-               ds->candidate_checked = 1;
-       }
-
-       if (!ds->candidate_ok) {
-               /* discard the candidate; we know it does not satisfy fn */
-               oidcpy(&ds->candidate, current);
-               ds->candidate_checked = 0;
-               return;
-       }
-
-       /* if we reach this point, we know ds->candidate satisfies fn */
-       if (ds->fn(current, ds->cb_data)) {
-               /*
-                * if both current and candidate satisfy fn, we cannot
-                * disambiguate.
-                */
-               ds->candidate_ok = 0;
-               ds->ambiguous = 1;
-       }
-
-       /* otherwise, current can be discarded and candidate is still good */
-}
-
-static int append_loose_object(const struct object_id *oid, const char *path,
-                              void *data)
-{
-       oid_array_append(data, oid);
-       return 0;
-}
-
-static int match_sha(unsigned, const unsigned char *, const unsigned char *);
-
-static void find_short_object_filename(struct disambiguate_state *ds)
-{
-       int subdir_nr = ds->bin_pfx.hash[0];
-       struct alternate_object_database *alt;
-       static struct alternate_object_database *fakeent;
-
-       if (!fakeent) {
-               /*
-                * Create a "fake" alternate object database that
-                * points to our own object database, to make it
-                * easier to get a temporary working space in
-                * alt->name/alt->base while iterating over the
-                * object databases including our own.
-                */
-               fakeent = alloc_alt_odb(get_object_directory());
-       }
-       fakeent->next = alt_odb_list;
-
-       for (alt = fakeent; alt && !ds->ambiguous; alt = alt->next) {
-               int pos;
-
-               if (!alt->loose_objects_subdir_seen[subdir_nr]) {
-                       struct strbuf *buf = alt_scratch_buf(alt);
-                       for_each_file_in_obj_subdir(subdir_nr, buf,
-                                                   append_loose_object,
-                                                   NULL, NULL,
-                                                   &alt->loose_objects_cache);
-                       alt->loose_objects_subdir_seen[subdir_nr] = 1;
-               }
-
-               pos = oid_array_lookup(&alt->loose_objects_cache, &ds->bin_pfx);
-               if (pos < 0)
-                       pos = -1 - pos;
-               while (!ds->ambiguous && pos < alt->loose_objects_cache.nr) {
-                       const struct object_id *oid;
-                       oid = alt->loose_objects_cache.oid + pos;
-                       if (!match_sha(ds->len, ds->bin_pfx.hash, oid->hash))
-                               break;
-                       update_candidates(ds, oid);
-                       pos++;
-               }
-       }
-}
-
-static int match_sha(unsigned len, const unsigned char *a, const unsigned char *b)
-{
-       do {
-               if (*a != *b)
-                       return 0;
-               a++;
-               b++;
-               len -= 2;
-       } while (len > 1);
-       if (len)
-               if ((*a ^ *b) & 0xf0)
-                       return 0;
-       return 1;
-}
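-
-/*
- * Editorial example: match_sha() compares "len" hex digits that were
- * already packed into bytes.  For the 3-digit prefix "abc", len == 3:
- * the first byte (0xab) must match exactly, and only the high nibble
- * of the second byte is compared, which is what the final
- * "(*a ^ *b) & 0xf0" check does.
- */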
-
-static void unique_in_pack(struct packed_git *p,
-                          struct disambiguate_state *ds)
-{
-       uint32_t num, last, i, first = 0;
-       const struct object_id *current = NULL;
-
-       if (open_pack_index(p) || !p->num_objects)
-               return;
-
-       num = p->num_objects;
-       last = num;
-       while (first < last) {
-               uint32_t mid = first + (last - first) / 2;
-               const unsigned char *current;
-               int cmp;
-
-               current = nth_packed_object_sha1(p, mid);
-               cmp = hashcmp(ds->bin_pfx.hash, current);
-               if (!cmp) {
-                       first = mid;
-                       break;
-               }
-               if (cmp > 0) {
-                       first = mid+1;
-                       continue;
-               }
-               last = mid;
-       }
-
-       /*
-        * At this point, "first" is the location of the lowest object
-        * with an object name that could match "bin_pfx".  See if we have
-        * 0, 1 or more objects that actually match.
-        */
-       for (i = first; i < num && !ds->ambiguous; i++) {
-               struct object_id oid;
-               current = nth_packed_object_oid(&oid, p, i);
-               if (!match_sha(ds->len, ds->bin_pfx.hash, current->hash))
-                       break;
-               update_candidates(ds, current);
-       }
-}
-
-static void find_short_packed_object(struct disambiguate_state *ds)
-{
-       struct packed_git *p;
-
-       prepare_packed_git();
-       for (p = packed_git; p && !ds->ambiguous; p = p->next)
-               unique_in_pack(p, ds);
-}
-
-#define SHORT_NAME_NOT_FOUND (-1)
-#define SHORT_NAME_AMBIGUOUS (-2)
-
-static int finish_object_disambiguation(struct disambiguate_state *ds,
-                                       struct object_id *oid)
-{
-       if (ds->ambiguous)
-               return SHORT_NAME_AMBIGUOUS;
-
-       if (!ds->candidate_exists)
-               return SHORT_NAME_NOT_FOUND;
-
-       if (!ds->candidate_checked)
-               /*
-                * If this is the only candidate, there is no point
-                * calling the disambiguation hint callback.
-                *
-                * On the other hand, if the current candidate
-                * replaced an earlier candidate that did _not_ pass
-                * the disambiguation hint callback, then we do have
-                * more than one object that matches the short name
-                * given, so we should make sure this one matches;
-                * otherwise, if we discovered this one and the one
-                * that we previously discarded in the reverse order,
-                * we would end up showing different results in the
-                * same repository!
-                */
-               ds->candidate_ok = (!ds->disambiguate_fn_used ||
-                                   ds->fn(&ds->candidate, ds->cb_data));
-
-       if (!ds->candidate_ok)
-               return SHORT_NAME_AMBIGUOUS;
-
-       oidcpy(oid, &ds->candidate);
-       return 0;
-}
-
-static int disambiguate_commit_only(const struct object_id *oid, void *cb_data_unused)
-{
-       int kind = sha1_object_info(oid->hash, NULL);
-       return kind == OBJ_COMMIT;
-}
-
-static int disambiguate_committish_only(const struct object_id *oid, void *cb_data_unused)
-{
-       struct object *obj;
-       int kind;
-
-       kind = sha1_object_info(oid->hash, NULL);
-       if (kind == OBJ_COMMIT)
-               return 1;
-       if (kind != OBJ_TAG)
-               return 0;
-
-       /* We need to do this the hard way... */
-       obj = deref_tag(parse_object(oid), NULL, 0);
-       if (obj && obj->type == OBJ_COMMIT)
-               return 1;
-       return 0;
-}
-
-static int disambiguate_tree_only(const struct object_id *oid, void *cb_data_unused)
-{
-       int kind = sha1_object_info(oid->hash, NULL);
-       return kind == OBJ_TREE;
-}
-
-static int disambiguate_treeish_only(const struct object_id *oid, void *cb_data_unused)
-{
-       struct object *obj;
-       int kind;
-
-       kind = sha1_object_info(oid->hash, NULL);
-       if (kind == OBJ_TREE || kind == OBJ_COMMIT)
-               return 1;
-       if (kind != OBJ_TAG)
-               return 0;
-
-       /* We need to do this the hard way... */
-       obj = deref_tag(parse_object(oid), NULL, 0);
-       if (obj && (obj->type == OBJ_TREE || obj->type == OBJ_COMMIT))
-               return 1;
-       return 0;
-}
-
-static int disambiguate_blob_only(const struct object_id *oid, void *cb_data_unused)
-{
-       int kind = sha1_object_info(oid->hash, NULL);
-       return kind == OBJ_BLOB;
-}
-
-static disambiguate_hint_fn default_disambiguate_hint;
-
-int set_disambiguate_hint_config(const char *var, const char *value)
-{
-       static const struct {
-               const char *name;
-               disambiguate_hint_fn fn;
-       } hints[] = {
-               { "none", NULL },
-               { "commit", disambiguate_commit_only },
-               { "committish", disambiguate_committish_only },
-               { "tree", disambiguate_tree_only },
-               { "treeish", disambiguate_treeish_only },
-               { "blob", disambiguate_blob_only }
-       };
-       int i;
-
-       if (!value)
-               return config_error_nonbool(var);
-
-       for (i = 0; i < ARRAY_SIZE(hints); i++) {
-               if (!strcasecmp(value, hints[i].name)) {
-                       default_disambiguate_hint = hints[i].fn;
-                       return 0;
-               }
-       }
-
-       return error("unknown hint type for '%s': %s", var, value);
-}
-
-static int init_object_disambiguation(const char *name, int len,
-                                     struct disambiguate_state *ds)
-{
-       int i;
-
-       if (len < MINIMUM_ABBREV || len > GIT_SHA1_HEXSZ)
-               return -1;
-
-       memset(ds, 0, sizeof(*ds));
-
-       for (i = 0; i < len; i++) {
-               unsigned char c = name[i];
-               unsigned char val;
-               if (c >= '0' && c <= '9')
-                       val = c - '0';
-               else if (c >= 'a' && c <= 'f')
-                       val = c - 'a' + 10;
-               else if (c >= 'A' && c <= 'F') {
-                       val = c - 'A' + 10;
-                       c -= 'A' - 'a';
-               }
-               else
-                       return -1;
-               ds->hex_pfx[i] = c;
-               if (!(i & 1))
-                       val <<= 4;
-               ds->bin_pfx.hash[i >> 1] |= val;
-       }
-
-       ds->len = len;
-       ds->hex_pfx[len] = '\0';
-       prepare_alt_odb();
-       return 0;
-}
-
-static int show_ambiguous_object(const struct object_id *oid, void *data)
-{
-       const struct disambiguate_state *ds = data;
-       struct strbuf desc = STRBUF_INIT;
-       int type;
-
-       if (ds->fn && !ds->fn(oid, ds->cb_data))
-               return 0;
-
-       type = sha1_object_info(oid->hash, NULL);
-       if (type == OBJ_COMMIT) {
-               struct commit *commit = lookup_commit(oid);
-               if (commit) {
-                       struct pretty_print_context pp = {0};
-                       pp.date_mode.type = DATE_SHORT;
-                       format_commit_message(commit, " %ad - %s", &desc, &pp);
-               }
-       } else if (type == OBJ_TAG) {
-               struct tag *tag = lookup_tag(oid);
-               if (!parse_tag(tag) && tag->tag)
-                       strbuf_addf(&desc, " %s", tag->tag);
-       }
-
-       advise("  %s %s%s",
-              find_unique_abbrev(oid->hash, DEFAULT_ABBREV),
-              type_name(type) ? type_name(type) : "unknown type",
-              desc.buf);
-
-       strbuf_release(&desc);
-       return 0;
-}
-
-static int get_short_oid(const char *name, int len, struct object_id *oid,
-                         unsigned flags)
-{
-       int status;
-       struct disambiguate_state ds;
-       int quietly = !!(flags & GET_OID_QUIETLY);
-
-       if (init_object_disambiguation(name, len, &ds) < 0)
-               return -1;
-
-       if (HAS_MULTI_BITS(flags & GET_OID_DISAMBIGUATORS))
-               die("BUG: multiple get_short_oid disambiguator flags");
-
-       if (flags & GET_OID_COMMIT)
-               ds.fn = disambiguate_commit_only;
-       else if (flags & GET_OID_COMMITTISH)
-               ds.fn = disambiguate_committish_only;
-       else if (flags & GET_OID_TREE)
-               ds.fn = disambiguate_tree_only;
-       else if (flags & GET_OID_TREEISH)
-               ds.fn = disambiguate_treeish_only;
-       else if (flags & GET_OID_BLOB)
-               ds.fn = disambiguate_blob_only;
-       else
-               ds.fn = default_disambiguate_hint;
-
-       find_short_object_filename(&ds);
-       find_short_packed_object(&ds);
-       status = finish_object_disambiguation(&ds, oid);
-
-       if (!quietly && (status == SHORT_NAME_AMBIGUOUS)) {
-               error(_("short SHA1 %s is ambiguous"), ds.hex_pfx);
-
-               /*
-                * We may still have ambiguity if we simply saw a series of
-                * candidates that did not satisfy our hint function. In
-                * that case, we still want to show them, so disable the hint
-                * function entirely.
-                */
-               if (!ds.ambiguous)
-                       ds.fn = NULL;
-
-               advise(_("The candidates are:"));
-               for_each_abbrev(ds.hex_pfx, show_ambiguous_object, &ds);
-       }
-
-       return status;
-}
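-
-/*
- * Editorial sketch of the resulting output, with made-up object names
- * (advise() adds the "hint:" prefix):
- *
- *     error: short SHA1 1234 is ambiguous
- *     hint: The candidates are:
- *     hint:   1234abcd commit 2018-05-23 - some subject line
- *     hint:   1234ef90 tag v2.18.0-rc0
- *     hint:   12345678 blob
- */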
-
-static int collect_ambiguous(const struct object_id *oid, void *data)
-{
-       oid_array_append(data, oid);
-       return 0;
-}
-
-int for_each_abbrev(const char *prefix, each_abbrev_fn fn, void *cb_data)
-{
-       struct oid_array collect = OID_ARRAY_INIT;
-       struct disambiguate_state ds;
-       int ret;
-
-       if (init_object_disambiguation(prefix, strlen(prefix), &ds) < 0)
-               return -1;
-
-       ds.always_call_fn = 1;
-       ds.fn = collect_ambiguous;
-       ds.cb_data = &collect;
-       find_short_object_filename(&ds);
-       find_short_packed_object(&ds);
-
-       ret = oid_array_for_each_unique(&collect, fn, cb_data);
-       oid_array_clear(&collect);
-       return ret;
-}
-
-/*
- * Return the slot of the most-significant bit set in "val". There are various
- * ways to do this quickly with fls() or __builtin_clzl(), but speed is
- * probably not a big deal here.
- */
-static unsigned msb(unsigned long val)
-{
-       unsigned r = 0;
-       while (val >>= 1)
-               r++;
-       return r;
-}
-
-struct min_abbrev_data {
-       unsigned int init_len;
-       unsigned int cur_len;
-       char *hex;
-       const unsigned char *hash;
-};
-
-static inline char get_hex_char_from_oid(const struct object_id *oid,
-                                        unsigned int pos)
-{
-       static const char hex[] = "0123456789abcdef";
-
-       if ((pos & 1) == 0)
-               return hex[oid->hash[pos >> 1] >> 4];
-       else
-               return hex[oid->hash[pos >> 1] & 0xf];
-}
-
-static int extend_abbrev_len(const struct object_id *oid, void *cb_data)
-{
-       struct min_abbrev_data *mad = cb_data;
-
-       unsigned int i = mad->init_len;
-       while (mad->hex[i] && mad->hex[i] == get_hex_char_from_oid(oid, i))
-               i++;
-
-       if (i < GIT_MAX_RAWSZ && i >= mad->cur_len)
-               mad->cur_len = i + 1;
-
-       return 0;
-}
-
-static void find_abbrev_len_for_pack(struct packed_git *p,
-                                    struct min_abbrev_data *mad)
-{
-       int match = 0;
-       uint32_t num, last, first = 0;
-       struct object_id oid;
-
-       if (open_pack_index(p) || !p->num_objects)
-               return;
-
-       num = p->num_objects;
-       last = num;
-       while (first < last) {
-               uint32_t mid = first + (last - first) / 2;
-               const unsigned char *current;
-               int cmp;
-
-               current = nth_packed_object_sha1(p, mid);
-               cmp = hashcmp(mad->hash, current);
-               if (!cmp) {
-                       match = 1;
-                       first = mid;
-                       break;
-               }
-               if (cmp > 0) {
-                       first = mid + 1;
-                       continue;
-               }
-               last = mid;
-       }
-
-       /*
-        * first is now the position in the packfile where we would insert
-        * mad->hash if it does not exist (or the position of mad->hash if
-        * it does exist). Hence, we consider a maximum of two objects
-        * nearby for the abbreviation length.
-        */
-       mad->init_len = 0;
-       if (!match) {
-               if (nth_packed_object_oid(&oid, p, first))
-                       extend_abbrev_len(&oid, mad);
-       } else if (first < num - 1) {
-               if (nth_packed_object_oid(&oid, p, first + 1))
-                       extend_abbrev_len(&oid, mad);
-       }
-       if (first > 0) {
-               if (nth_packed_object_oid(&oid, p, first - 1))
-                       extend_abbrev_len(&oid, mad);
-       }
-       mad->init_len = mad->cur_len;
-}
-
-static void find_abbrev_len_packed(struct min_abbrev_data *mad)
-{
-       struct packed_git *p;
-
-       prepare_packed_git();
-       for (p = packed_git; p; p = p->next)
-               find_abbrev_len_for_pack(p, mad);
-}
-
-int find_unique_abbrev_r(char *hex, const unsigned char *sha1, int len)
-{
-       struct disambiguate_state ds;
-       struct min_abbrev_data mad;
-       struct object_id oid_ret;
-       if (len < 0) {
-               unsigned long count = approximate_object_count();
-               /*
-                * Add one because the MSB only tells us the highest bit set,
-                * not including the value of all the _other_ bits (so "15"
-                * is only one off of 2^4, but the MSB is the 3rd bit).
-                */
-               len = msb(count) + 1;
-               /*
-                * We now know we have on the order of 2^len objects, which
-                * expects a collision at 2^(len/2). But we also care about hex
-                * chars, not bits, and there are 4 bits per hex. So all
-                * together we need to divide by 2 and round up.
-                */
-               len = DIV_ROUND_UP(len, 2);
-               /*
-                * For very small repos, we stick with our regular fallback.
-                */
-               if (len < FALLBACK_DEFAULT_ABBREV)
-                       len = FALLBACK_DEFAULT_ABBREV;
-       }
-
-       sha1_to_hex_r(hex, sha1);
-       if (len == GIT_SHA1_HEXSZ || !len)
-               return GIT_SHA1_HEXSZ;
-
-       mad.init_len = len;
-       mad.cur_len = len;
-       mad.hex = hex;
-       mad.hash = sha1;
-
-       find_abbrev_len_packed(&mad);
-
-       if (init_object_disambiguation(hex, mad.cur_len, &ds) < 0)
-               return -1;
-
-       ds.fn = extend_abbrev_len;
-       ds.always_call_fn = 1;
-       ds.cb_data = (void *)&mad;
-
-       find_short_object_filename(&ds);
-       (void)finish_object_disambiguation(&ds, &oid_ret);
-
-       hex[mad.cur_len] = 0;
-       return mad.cur_len;
-}
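-
-/*
- * Editorial worked example (not part of the original file): with an
- * approximate_object_count() of roughly 500,000 objects,
- * msb(500000) == 18, so len starts at 18 + 1 = 19 bits, and
- * DIV_ROUND_UP(19, 2) yields a default abbreviation of 10 hex
- * characters.  Repositories small enough that this falls below
- * FALLBACK_DEFAULT_ABBREV (7) keep the 7-character fallback instead.
- */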
-
-const char *find_unique_abbrev(const unsigned char *sha1, int len)
-{
-       static int bufno;
-       static char hexbuffer[4][GIT_MAX_HEXSZ + 1];
-       char *hex = hexbuffer[bufno];
-       bufno = (bufno + 1) % ARRAY_SIZE(hexbuffer);
-       find_unique_abbrev_r(hex, sha1, len);
-       return hex;
-}
-
-static int ambiguous_path(const char *path, int len)
-{
-       int slash = 1;
-       int cnt;
-
-       for (cnt = 0; cnt < len; cnt++) {
-               switch (*path++) {
-               case '\0':
-                       break;
-               case '/':
-                       if (slash)
-                               break;
-                       slash = 1;
-                       continue;
-               case '.':
-                       continue;
-               default:
-                       slash = 0;
-                       continue;
-               }
-               break;
-       }
-       return slash;
-}
-
-static inline int at_mark(const char *string, int len,
-                         const char **suffix, int nr)
-{
-       int i;
-
-       for (i = 0; i < nr; i++) {
-               int suffix_len = strlen(suffix[i]);
-               if (suffix_len <= len
-                   && !strncasecmp(string, suffix[i], suffix_len))
-                       return suffix_len;
-       }
-       return 0;
-}
-
-static inline int upstream_mark(const char *string, int len)
-{
-       const char *suffix[] = { "@{upstream}", "@{u}" };
-       return at_mark(string, len, suffix, ARRAY_SIZE(suffix));
-}
-
-static inline int push_mark(const char *string, int len)
-{
-       const char *suffix[] = { "@{push}" };
-       return at_mark(string, len, suffix, ARRAY_SIZE(suffix));
-}
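-
-/*
- * Editorial examples: "master@{upstream}" (or the short form
- * "master@{u}") names the branch that "master" is configured to merge
- * from, and "master@{push}" names the ref that "git push" would
- * update; at_mark() matches these suffixes case-insensitively.
- */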
-
-static int get_oid_1(const char *name, int len, struct object_id *oid, unsigned lookup_flags);
-static int interpret_nth_prior_checkout(const char *name, int namelen, struct strbuf *buf);
-
-static int get_oid_basic(const char *str, int len, struct object_id *oid,
-                         unsigned int flags)
-{
-       static const char *warn_msg = "refname '%.*s' is ambiguous.";
-       static const char *object_name_msg = N_(
-       "Git normally never creates a ref that ends with 40 hex characters\n"
-       "because it will be ignored when you just specify 40-hex. These refs\n"
-       "may be created by mistake. For example,\n"
-       "\n"
-       "  git checkout -b $br $(git rev-parse ...)\n"
-       "\n"
-       "where \"$br\" is somehow empty and a 40-hex ref is created. Please\n"
-       "examine these refs and maybe delete them. Turn this message off by\n"
-       "running \"git config advice.objectNameWarning false\"");
-       struct object_id tmp_oid;
-       char *real_ref = NULL;
-       int refs_found = 0;
-       int at, reflog_len, nth_prior = 0;
-
-       if (len == GIT_SHA1_HEXSZ && !get_oid_hex(str, oid)) {
-               if (warn_ambiguous_refs && warn_on_object_refname_ambiguity) {
-                       refs_found = dwim_ref(str, len, &tmp_oid, &real_ref);
-                       if (refs_found > 0) {
-                               warning(warn_msg, len, str);
-                               if (advice_object_name_warning)
-                                       fprintf(stderr, "%s\n", _(object_name_msg));
-                       }
-                       free(real_ref);
-               }
-               return 0;
-       }
-
-       /* basic@{time or number or -number} format to query ref-log */
-       reflog_len = at = 0;
-       if (len && str[len-1] == '}') {
-               for (at = len-4; at >= 0; at--) {
-                       if (str[at] == '@' && str[at+1] == '{') {
-                               if (str[at+2] == '-') {
-                                       if (at != 0)
-                                               /* @{-N} not at start */
-                                               return -1;
-                                       nth_prior = 1;
-                                       continue;
-                               }
-                               if (!upstream_mark(str + at, len - at) &&
-                                   !push_mark(str + at, len - at)) {
-                                       reflog_len = (len-1) - (at+2);
-                                       len = at;
-                               }
-                               break;
-                       }
-               }
-       }
-
-       /* Accept only unambiguous ref paths. */
-       if (len && ambiguous_path(str, len))
-               return -1;
-
-       if (nth_prior) {
-               struct strbuf buf = STRBUF_INIT;
-               int detached;
-
-               if (interpret_nth_prior_checkout(str, len, &buf) > 0) {
-                       detached = (buf.len == GIT_SHA1_HEXSZ && !get_oid_hex(buf.buf, oid));
-                       strbuf_release(&buf);
-                       if (detached)
-                               return 0;
-               }
-       }
-
-       if (!len && reflog_len)
-               /* allow "@{...}" to mean the current branch reflog */
-               refs_found = dwim_ref("HEAD", 4, oid, &real_ref);
-       else if (reflog_len)
-               refs_found = dwim_log(str, len, oid, &real_ref);
-       else
-               refs_found = dwim_ref(str, len, oid, &real_ref);
-
-       if (!refs_found)
-               return -1;
-
-       if (warn_ambiguous_refs && !(flags & GET_OID_QUIETLY) &&
-           (refs_found > 1 ||
-            !get_short_oid(str, len, &tmp_oid, GET_OID_QUIETLY)))
-               warning(warn_msg, len, str);
-
-       if (reflog_len) {
-               int nth, i;
-               timestamp_t at_time;
-               timestamp_t co_time;
-               int co_tz, co_cnt;
-
-               /* Is it asking for N-th entry, or approxidate? */
-               for (i = nth = 0; 0 <= nth && i < reflog_len; i++) {
-                       char ch = str[at+2+i];
-                       if ('0' <= ch && ch <= '9')
-                               nth = nth * 10 + ch - '0';
-                       else
-                               nth = -1;
-               }
-               if (100000000 <= nth) {
-                       at_time = nth;
-                       nth = -1;
-               } else if (0 <= nth)
-                       at_time = 0;
-               else {
-                       int errors = 0;
-                       char *tmp = xstrndup(str + at + 2, reflog_len);
-                       at_time = approxidate_careful(tmp, &errors);
-                       free(tmp);
-                       if (errors) {
-                               free(real_ref);
-                               return -1;
-                       }
-               }
-               if (read_ref_at(real_ref, flags, at_time, nth, oid, NULL,
-                               &co_time, &co_tz, &co_cnt)) {
-                       if (!len) {
-                               if (starts_with(real_ref, "refs/heads/")) {
-                                       str = real_ref + 11;
-                                       len = strlen(real_ref + 11);
-                               } else {
-                                       /* detached HEAD */
-                                       str = "HEAD";
-                                       len = 4;
-                               }
-                       }
-                       if (at_time) {
-                               if (!(flags & GET_OID_QUIETLY)) {
-                                       warning("Log for '%.*s' only goes "
-                                               "back to %s.", len, str,
-                                               show_date(co_time, co_tz, DATE_MODE(RFC2822)));
-                               }
-                       } else {
-                               if (flags & GET_OID_QUIETLY) {
-                                       exit(128);
-                               }
-                               die("Log for '%.*s' only has %d entries.",
-                                   len, str, co_cnt);
-                       }
-               }
-       }
-
-       free(real_ref);
-       return 0;
-}
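-
-/*
- * Editorial examples of the reflog syntax parsed above:
- *
- *     master@{2}          - where "master" pointed two reflog entries ago
- *     master@{yesterday}  - where "master" pointed at that approxidate
- *     @{1}                - one entry back in the current branch's reflog
- */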
-
-static int get_parent(const char *name, int len,
-                     struct object_id *result, int idx)
-{
-       struct object_id oid;
-       int ret = get_oid_1(name, len, &oid, GET_OID_COMMITTISH);
-       struct commit *commit;
-       struct commit_list *p;
-
-       if (ret)
-               return ret;
-       commit = lookup_commit_reference(&oid);
-       if (parse_commit(commit))
-               return -1;
-       if (!idx) {
-               oidcpy(result, &commit->object.oid);
-               return 0;
-       }
-       p = commit->parents;
-       while (p) {
-               if (!--idx) {
-                       oidcpy(result, &p->item->object.oid);
-                       return 0;
-               }
-               p = p->next;
-       }
-       return -1;
-}
-
-static int get_nth_ancestor(const char *name, int len,
-                           struct object_id *result, int generation)
-{
-       struct object_id oid;
-       struct commit *commit;
-       int ret;
-
-       ret = get_oid_1(name, len, &oid, GET_OID_COMMITTISH);
-       if (ret)
-               return ret;
-       commit = lookup_commit_reference(&oid);
-       if (!commit)
-               return -1;
-
-       while (generation--) {
-               if (parse_commit(commit) || !commit->parents)
-                       return -1;
-               commit = commit->parents->item;
-       }
-       oidcpy(result, &commit->object.oid);
-       return 0;
-}
-
-struct object *peel_to_type(const char *name, int namelen,
-                           struct object *o, enum object_type expected_type)
-{
-       if (name && !namelen)
-               namelen = strlen(name);
-       while (1) {
-               if (!o || (!o->parsed && !parse_object(&o->oid)))
-                       return NULL;
-               if (expected_type == OBJ_ANY || o->type == expected_type)
-                       return o;
-               if (o->type == OBJ_TAG)
-                       o = ((struct tag*) o)->tagged;
-               else if (o->type == OBJ_COMMIT)
-                       o = &(((struct commit *) o)->tree->object);
-               else {
-                       if (name)
-                               error("%.*s: expected %s type, but the object "
-                                     "dereferences to %s type",
-                                     namelen, name, type_name(expected_type),
-                                     type_name(o->type));
-                       return NULL;
-               }
-       }
-}
-
-static int peel_onion(const char *name, int len, struct object_id *oid,
-                     unsigned lookup_flags)
-{
-       struct object_id outer;
-       const char *sp;
-       unsigned int expected_type = 0;
-       struct object *o;
-
-       /*
-        * "ref^{type}" dereferences ref repeatedly until you cannot
-        * dereference anymore, or you get an object of the given type,
-        * whichever comes first.  "ref^{}" means just dereference
-        * tags until you get a non-tag.  "ref^0" is a shorthand for
-        * "ref^{commit}".  "commit^{tree}" could be used to find the
-        * top-level tree of the given commit.
-        */
-       if (len < 4 || name[len-1] != '}')
-               return -1;
-
-       for (sp = name + len - 1; name <= sp; sp--) {
-               int ch = *sp;
-               if (ch == '{' && name < sp && sp[-1] == '^')
-                       break;
-       }
-       if (sp <= name)
-               return -1;
-
-       sp++; /* beginning of type name, or closing brace for empty */
-       if (starts_with(sp, "commit}"))
-               expected_type = OBJ_COMMIT;
-       else if (starts_with(sp, "tag}"))
-               expected_type = OBJ_TAG;
-       else if (starts_with(sp, "tree}"))
-               expected_type = OBJ_TREE;
-       else if (starts_with(sp, "blob}"))
-               expected_type = OBJ_BLOB;
-       else if (starts_with(sp, "object}"))
-               expected_type = OBJ_ANY;
-       else if (sp[0] == '}')
-               expected_type = OBJ_NONE;
-       else if (sp[0] == '/')
-               expected_type = OBJ_COMMIT;
-       else
-               return -1;
-
-       lookup_flags &= ~GET_OID_DISAMBIGUATORS;
-       if (expected_type == OBJ_COMMIT)
-               lookup_flags |= GET_OID_COMMITTISH;
-       else if (expected_type == OBJ_TREE)
-               lookup_flags |= GET_OID_TREEISH;
-
-       if (get_oid_1(name, sp - name - 2, &outer, lookup_flags))
-               return -1;
-
-       o = parse_object(&outer);
-       if (!o)
-               return -1;
-       if (!expected_type) {
-               o = deref_tag(o, name, sp - name - 2);
-               if (!o || (!o->parsed && !parse_object(&o->oid)))
-                       return -1;
-               oidcpy(oid, &o->oid);
-               return 0;
-       }
-
-       /*
-        * At this point, the syntax looks correct, so
-        * if we do not get the needed object, we should
-        * barf.
-        */
-       o = peel_to_type(name, len, o, expected_type);
-       if (!o)
-               return -1;
-
-       oidcpy(oid, &o->oid);
-       if (sp[0] == '/') {
-               /* "$commit^{/foo}" */
-               char *prefix;
-               int ret;
-               struct commit_list *list = NULL;
-
-               /*
-                * $commit^{/}. Some regex implementations may reject an
-                * empty pattern. We don't need a regex anyway; the empty
-                * pattern always matches.
-                */
-               if (sp[1] == '}')
-                       return 0;
-
-               prefix = xstrndup(sp + 1, name + len - 1 - (sp + 1));
-               commit_list_insert((struct commit *)o, &list);
-               ret = get_oid_oneline(prefix, oid, list);
-               free(prefix);
-               return ret;
-       }
-       return 0;
-}
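-
-/*
- * Editorial examples of the syntax handled above (see gitrevisions(7)):
- *
- *     v2.18.0^{commit}  - peel the tag down to a commit
- *     HEAD^{tree}       - the top-level tree of the current commit
- *     HEAD^{}           - dereference tags until a non-tag object
- *     HEAD^{/fix bug}   - youngest reachable commit whose message
- *                         matches "fix bug" (via get_oid_oneline())
- */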
-
-static int get_describe_name(const char *name, int len, struct object_id *oid)
-{
-       const char *cp;
-       unsigned flags = GET_OID_QUIETLY | GET_OID_COMMIT;
-
-       for (cp = name + len - 1; name + 2 <= cp; cp--) {
-               char ch = *cp;
-               if (!isxdigit(ch)) {
-                       /* We must be looking at g in "SOMETHING-g"
-                        * for it to be describe output.
-                        */
-                       if (ch == 'g' && cp[-1] == '-') {
-                               cp++;
-                               len -= cp - name;
-                               return get_short_oid(cp, len, oid, flags);
-                       }
-               }
-       }
-       return -1;
-}
-
-static int get_oid_1(const char *name, int len, struct object_id *oid, unsigned lookup_flags)
-{
-       int ret, has_suffix;
-       const char *cp;
-
-       /*
-        * "name~3" is "name^^^", "name~" is "name~1", and "name^" is "name^1".
-        */
-       has_suffix = 0;
-       for (cp = name + len - 1; name <= cp; cp--) {
-               int ch = *cp;
-               if ('0' <= ch && ch <= '9')
-                       continue;
-               if (ch == '~' || ch == '^')
-                       has_suffix = ch;
-               break;
-       }
-
-       if (has_suffix) {
-               int num = 0;
-               int len1 = cp - name;
-               cp++;
-               while (cp < name + len)
-                       num = num * 10 + *cp++ - '0';
-               if (!num && len1 == len - 1)
-                       num = 1;
-               if (has_suffix == '^')
-                       return get_parent(name, len1, oid, num);
-               /* else if (has_suffix == '~') -- goes without saying */
-               return get_nth_ancestor(name, len1, oid, num);
-       }
-
-       ret = peel_onion(name, len, oid, lookup_flags);
-       if (!ret)
-               return 0;
-
-       ret = get_oid_basic(name, len, oid, lookup_flags);
-       if (!ret)
-               return 0;
-
-       /* It could be describe output that is "SOMETHING-gXXXX" */
-       ret = get_describe_name(name, len, oid);
-       if (!ret)
-               return 0;
-
-       return get_short_oid(name, len, oid, lookup_flags);
-}
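-
-/*
- * Editorial examples of the suffix handling above: "HEAD~3" is the
- * third-generation ancestor following first parents (the same as
- * "HEAD^^^"), "HEAD^2" is the second parent of a merge, and a bare
- * "HEAD~" or "HEAD^" is taken as "~1" / "^1".
- */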
-
-/*
- * This interprets names like ':/Initial revision of "git"' by searching
- * through history and returning the first commit whose message matches
- * the given regular expression.
- *
- * For negative-matching, prefix the pattern-part with '!-', like: ':/!-WIP'.
- *
- * For a literal '!' character at the beginning of a pattern, you have to repeat
- * that, like: ':/!!foo'
- *
- * For future extension, all other sequences beginning with ':/!' are reserved.
- */
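-
-/*
- * Editorial examples: ":/Initial revision" names the youngest commit
- * reachable from any ref whose message matches the pattern, ":/!-WIP"
- * the youngest one whose message does NOT match "WIP", and ":/!!foo"
- * matches a message containing a literal "!foo".
- */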
-
-/* Remember to update object flag allocation in object.h */
-#define ONELINE_SEEN (1u<<20)
-
-static int handle_one_ref(const char *path, const struct object_id *oid,
-                         int flag, void *cb_data)
-{
-       struct commit_list **list = cb_data;
-       struct object *object = parse_object(oid);
-       if (!object)
-               return 0;
-       if (object->type == OBJ_TAG) {
-               object = deref_tag(object, path, strlen(path));
-               if (!object)
-                       return 0;
-       }
-       if (object->type != OBJ_COMMIT)
-               return 0;
-       commit_list_insert((struct commit *)object, list);
-       return 0;
-}
-
-static int get_oid_oneline(const char *prefix, struct object_id *oid,
-                           struct commit_list *list)
-{
-       struct commit_list *backup = NULL, *l;
-       int found = 0;
-       int negative = 0;
-       regex_t regex;
-
-       if (prefix[0] == '!') {
-               prefix++;
-
-               if (prefix[0] == '-') {
-                       prefix++;
-                       negative = 1;
-               } else if (prefix[0] != '!') {
-                       return -1;
-               }
-       }
-
-       if (regcomp(&regex, prefix, REG_EXTENDED))
-               return -1;
-
-       for (l = list; l; l = l->next) {
-               l->item->object.flags |= ONELINE_SEEN;
-               commit_list_insert(l->item, &backup);
-       }
-       while (list) {
-               const char *p, *buf;
-               struct commit *commit;
-               int matches;
-
-               commit = pop_most_recent_commit(&list, ONELINE_SEEN);
-               if (!parse_object(&commit->object.oid))
-                       continue;
-               buf = get_commit_buffer(commit, NULL);
-               p = strstr(buf, "\n\n");
-               matches = negative ^ (p && !regexec(&regex, p + 2, 0, NULL, 0));
-               unuse_commit_buffer(commit, buf);
-
-               if (matches) {
-                       oidcpy(oid, &commit->object.oid);
-                       found = 1;
-                       break;
-               }
-       }
-       regfree(&regex);
-       free_commit_list(list);
-       for (l = backup; l; l = l->next)
-               clear_commit_marks(l->item, ONELINE_SEEN);
-       free_commit_list(backup);
-       return found ? 0 : -1;
-}
-
-struct grab_nth_branch_switch_cbdata {
-       int remaining;
-       struct strbuf buf;
-};
-
-static int grab_nth_branch_switch(struct object_id *ooid, struct object_id *noid,
-                                 const char *email, timestamp_t timestamp, int tz,
-                                 const char *message, void *cb_data)
-{
-       struct grab_nth_branch_switch_cbdata *cb = cb_data;
-       const char *match = NULL, *target = NULL;
-       size_t len;
-
-       if (skip_prefix(message, "checkout: moving from ", &match))
-               target = strstr(match, " to ");
-
-       if (!match || !target)
-               return 0;
-       if (--(cb->remaining) == 0) {
-               len = target - match;
-               strbuf_reset(&cb->buf);
-               strbuf_add(&cb->buf, match, len);
-               return 1; /* we are done */
-       }
-       return 0;
-}
-
-/*
- * Parse @{-N} syntax, return the number of characters parsed
- * if successful; otherwise signal an error with negative value.
- */
-static int interpret_nth_prior_checkout(const char *name, int namelen,
-                                       struct strbuf *buf)
-{
-       long nth;
-       int retval;
-       struct grab_nth_branch_switch_cbdata cb;
-       const char *brace;
-       char *num_end;
-
-       if (namelen < 4)
-               return -1;
-       if (name[0] != '@' || name[1] != '{' || name[2] != '-')
-               return -1;
-       brace = memchr(name, '}', namelen);
-       if (!brace)
-               return -1;
-       nth = strtol(name + 3, &num_end, 10);
-       if (num_end != brace)
-               return -1;
-       if (nth <= 0)
-               return -1;
-       cb.remaining = nth;
-       strbuf_init(&cb.buf, 20);
-
-       retval = 0;
-       if (0 < for_each_reflog_ent_reverse("HEAD", grab_nth_branch_switch, &cb)) {
-               strbuf_reset(buf);
-               strbuf_addbuf(buf, &cb.buf);
-               retval = brace - name + 1;
-       }
-
-       strbuf_release(&cb.buf);
-       return retval;
-}
-
-int get_oid_mb(const char *name, struct object_id *oid)
-{
-       struct commit *one, *two;
-       struct commit_list *mbs;
-       struct object_id oid_tmp;
-       const char *dots;
-       int st;
-
-       dots = strstr(name, "...");
-       if (!dots)
-               return get_oid(name, oid);
-       if (dots == name)
-               st = get_oid("HEAD", &oid_tmp);
-       else {
-               struct strbuf sb;
-               strbuf_init(&sb, dots - name);
-               strbuf_add(&sb, name, dots - name);
-               st = get_oid_committish(sb.buf, &oid_tmp);
-               strbuf_release(&sb);
-       }
-       if (st)
-               return st;
-       one = lookup_commit_reference_gently(&oid_tmp, 0);
-       if (!one)
-               return -1;
-
-       if (get_oid_committish(dots[3] ? (dots + 3) : "HEAD", &oid_tmp))
-               return -1;
-       two = lookup_commit_reference_gently(&oid_tmp, 0);
-       if (!two)
-               return -1;
-       mbs = get_merge_bases(one, two);
-       if (!mbs || mbs->next)
-               st = -1;
-       else {
-               st = 0;
-               oidcpy(oid, &mbs->item->object.oid);
-       }
-       free_commit_list(mbs);
-       return st;
-}
-
-/* parse @something syntax, when 'something' is not {.*} */
-static int interpret_empty_at(const char *name, int namelen, int len, struct strbuf *buf)
-{
-       const char *next;
-
-       if (len || name[1] == '{')
-               return -1;
-
-       /* make sure it's a single @, or @@{.*}, not @foo */
-       next = memchr(name + len + 1, '@', namelen - len - 1);
-       if (next && next[1] != '{')
-               return -1;
-       if (!next)
-               next = name + namelen;
-       if (next != name + 1)
-               return -1;
-
-       strbuf_reset(buf);
-       strbuf_add(buf, "HEAD", 4);
-       return 1;
-}
-
-static int reinterpret(const char *name, int namelen, int len,
-                      struct strbuf *buf, unsigned allowed)
-{
-       /* we have extra data, which might need further processing */
-       struct strbuf tmp = STRBUF_INIT;
-       int used = buf->len;
-       int ret;
-
-       strbuf_add(buf, name + len, namelen - len);
-       ret = interpret_branch_name(buf->buf, buf->len, &tmp, allowed);
-       /* that data was not interpreted, remove our cruft */
-       if (ret < 0) {
-               strbuf_setlen(buf, used);
-               return len;
-       }
-       strbuf_reset(buf);
-       strbuf_addbuf(buf, &tmp);
-       strbuf_release(&tmp);
-       /* tweak for size of {-N} versus expanded ref name */
-       return ret - used + len;
-}
-
-static void set_shortened_ref(struct strbuf *buf, const char *ref)
-{
-       char *s = shorten_unambiguous_ref(ref, 0);
-       strbuf_reset(buf);
-       strbuf_addstr(buf, s);
-       free(s);
-}
-
-static int branch_interpret_allowed(const char *refname, unsigned allowed)
-{
-       if (!allowed)
-               return 1;
-
-       if ((allowed & INTERPRET_BRANCH_LOCAL) &&
-           starts_with(refname, "refs/heads/"))
-               return 1;
-       if ((allowed & INTERPRET_BRANCH_REMOTE) &&
-           starts_with(refname, "refs/remotes/"))
-               return 1;
-
-       return 0;
-}
-
-static int interpret_branch_mark(const char *name, int namelen,
-                                int at, struct strbuf *buf,
-                                int (*get_mark)(const char *, int),
-                                const char *(*get_data)(struct branch *,
-                                                        struct strbuf *),
-                                unsigned allowed)
-{
-       int len;
-       struct branch *branch;
-       struct strbuf err = STRBUF_INIT;
-       const char *value;
-
-       len = get_mark(name + at, namelen - at);
-       if (!len)
-               return -1;
-
-       if (memchr(name, ':', at))
-               return -1;
-
-       if (at) {
-               char *name_str = xmemdupz(name, at);
-               branch = branch_get(name_str);
-               free(name_str);
-       } else
-               branch = branch_get(NULL);
-
-       value = get_data(branch, &err);
-       if (!value)
-               die("%s", err.buf);
-
-       if (!branch_interpret_allowed(value, allowed))
-               return -1;
-
-       set_shortened_ref(buf, value);
-       return len + at;
-}
-
-int interpret_branch_name(const char *name, int namelen, struct strbuf *buf,
-                         unsigned allowed)
-{
-       char *at;
-       const char *start;
-       int len;
-
-       if (!namelen)
-               namelen = strlen(name);
-
-       if (!allowed || (allowed & INTERPRET_BRANCH_LOCAL)) {
-               len = interpret_nth_prior_checkout(name, namelen, buf);
-               if (!len) {
-                       return len; /* syntax Ok, not enough switches */
-               } else if (len > 0) {
-                       if (len == namelen)
-                               return len; /* consumed all */
-                       else
-                               return reinterpret(name, namelen, len, buf, allowed);
-               }
-       }
-
-       for (start = name;
-            (at = memchr(start, '@', namelen - (start - name)));
-            start = at + 1) {
-
-               if (!allowed || (allowed & INTERPRET_BRANCH_HEAD)) {
-                       len = interpret_empty_at(name, namelen, at - name, buf);
-                       if (len > 0)
-                               return reinterpret(name, namelen, len, buf,
-                                                  allowed);
-               }
-
-               len = interpret_branch_mark(name, namelen, at - name, buf,
-                                           upstream_mark, branch_get_upstream,
-                                           allowed);
-               if (len > 0)
-                       return len;
-
-               len = interpret_branch_mark(name, namelen, at - name, buf,
-                                           push_mark, branch_get_push,
-                                           allowed);
-               if (len > 0)
-                       return len;
-       }
-
-       return -1;
-}
-
-void strbuf_branchname(struct strbuf *sb, const char *name, unsigned allowed)
-{
-       int len = strlen(name);
-       int used = interpret_branch_name(name, len, sb, allowed);
-
-       if (used < 0)
-               used = 0;
-       strbuf_add(sb, name + used, len - used);
-}
-
-int strbuf_check_branch_ref(struct strbuf *sb, const char *name)
-{
-       if (startup_info->have_repository)
-               strbuf_branchname(sb, name, INTERPRET_BRANCH_LOCAL);
-       else
-               strbuf_addstr(sb, name);
-
-       /*
-        * This splice must be done even if we end up rejecting the
-        * name; builtin/branch.c::copy_or_rename_branch() still wants
-        * to see what the name expanded to so that "branch -m" can be
-        * used as a tool to correct earlier mistakes.
-        */
-       strbuf_splice(sb, 0, 0, "refs/heads/", 11);
-
-       if (*name == '-' ||
-           !strcmp(sb->buf, "refs/heads/HEAD"))
-               return -1;
-
-       return check_refname_format(sb->buf, 0);
-}
-
-/*
- * This is like "get_oid_basic()", except it allows "object ID expressions",
- * notably "xyz^" for "parent of xyz"
- */
-int get_oid(const char *name, struct object_id *oid)
-{
-       struct object_context unused;
-       return get_oid_with_context(name, 0, oid, &unused);
-}
-
-
-/*
- * Many callers know that the user meant to name a commit-ish by
- * syntactical positions where the object name appears.  Calling this
- * function allows the machinery to disambiguate shorter-than-unique
- * abbreviated object names between commit-ish and others.
- *
- * Note that this does NOT error out when the named object is not a
- * commit-ish. It is merely to give a hint to the disambiguation
- * machinery.
- */
-int get_oid_committish(const char *name, struct object_id *oid)
-{
-       struct object_context unused;
-       return get_oid_with_context(name, GET_OID_COMMITTISH,
-                                   oid, &unused);
-}
-
-int get_oid_treeish(const char *name, struct object_id *oid)
-{
-       struct object_context unused;
-       return get_oid_with_context(name, GET_OID_TREEISH,
-                                   oid, &unused);
-}
-
-int get_oid_commit(const char *name, struct object_id *oid)
-{
-       struct object_context unused;
-       return get_oid_with_context(name, GET_OID_COMMIT,
-                                   oid, &unused);
-}
-
-int get_oid_tree(const char *name, struct object_id *oid)
-{
-       struct object_context unused;
-       return get_oid_with_context(name, GET_OID_TREE,
-                                   oid, &unused);
-}
-
-int get_oid_blob(const char *name, struct object_id *oid)
-{
-       struct object_context unused;
-       return get_oid_with_context(name, GET_OID_BLOB,
-                                   oid, &unused);
-}
-
-/* Must be called only when object_name:filename doesn't exist. */
-static void diagnose_invalid_oid_path(const char *prefix,
-                                     const char *filename,
-                                     const struct object_id *tree_oid,
-                                     const char *object_name,
-                                     int object_name_len)
-{
-       struct object_id oid;
-       unsigned mode;
-
-       if (!prefix)
-               prefix = "";
-
-       if (file_exists(filename))
-               die("Path '%s' exists on disk, but not in '%.*s'.",
-                   filename, object_name_len, object_name);
-       if (is_missing_file_error(errno)) {
-               char *fullname = xstrfmt("%s%s", prefix, filename);
-
-               if (!get_tree_entry(tree_oid->hash, fullname,
-                                   oid.hash, &mode)) {
-                       die("Path '%s' exists, but not '%s'.\n"
-                           "Did you mean '%.*s:%s' aka '%.*s:./%s'?",
-                           fullname,
-                           filename,
-                           object_name_len, object_name,
-                           fullname,
-                           object_name_len, object_name,
-                           filename);
-               }
-               die("Path '%s' does not exist in '%.*s'",
-                   filename, object_name_len, object_name);
-       }
-}
-
-/* Must be called only when :stage:filename doesn't exist. */
-static void diagnose_invalid_index_path(int stage,
-                                       const char *prefix,
-                                       const char *filename)
-{
-       const struct cache_entry *ce;
-       int pos;
-       unsigned namelen = strlen(filename);
-       struct strbuf fullname = STRBUF_INIT;
-
-       if (!prefix)
-               prefix = "";
-
-       /* Wrong stage number? */
-       pos = cache_name_pos(filename, namelen);
-       if (pos < 0)
-               pos = -pos - 1;
-       if (pos < active_nr) {
-               ce = active_cache[pos];
-               if (ce_namelen(ce) == namelen &&
-                   !memcmp(ce->name, filename, namelen))
-                       die("Path '%s' is in the index, but not at stage %d.\n"
-                           "Did you mean ':%d:%s'?",
-                           filename, stage,
-                           ce_stage(ce), filename);
-       }
-
-       /* Confusion between relative and absolute filenames? */
-       strbuf_addstr(&fullname, prefix);
-       strbuf_addstr(&fullname, filename);
-       pos = cache_name_pos(fullname.buf, fullname.len);
-       if (pos < 0)
-               pos = -pos - 1;
-       if (pos < active_nr) {
-               ce = active_cache[pos];
-               if (ce_namelen(ce) == fullname.len &&
-                   !memcmp(ce->name, fullname.buf, fullname.len))
-                       die("Path '%s' is in the index, but not '%s'.\n"
-                           "Did you mean ':%d:%s' aka ':%d:./%s'?",
-                           fullname.buf, filename,
-                           ce_stage(ce), fullname.buf,
-                           ce_stage(ce), filename);
-       }
-
-       if (file_exists(filename))
-               die("Path '%s' exists on disk, but not in the index.", filename);
-       if (is_missing_file_error(errno))
-               die("Path '%s' does not exist (neither on disk nor in the index).",
-                   filename);
-
-       strbuf_release(&fullname);
-}
-
-
-static char *resolve_relative_path(const char *rel)
-{
-       if (!starts_with(rel, "./") && !starts_with(rel, "../"))
-               return NULL;
-
-       if (!is_inside_work_tree())
-               die("relative path syntax can't be used outside working tree.");
-
-       /* die() inside prefix_path() if resolved path is outside worktree */
-       return prefix_path(startup_info->prefix,
-                          startup_info->prefix ? strlen(startup_info->prefix) : 0,
-                          rel);
-}
-
-static int get_oid_with_context_1(const char *name,
-                                 unsigned flags,
-                                 const char *prefix,
-                                 struct object_id *oid,
-                                 struct object_context *oc)
-{
-       int ret, bracket_depth;
-       int namelen = strlen(name);
-       const char *cp;
-       int only_to_die = flags & GET_OID_ONLY_TO_DIE;
-
-       if (only_to_die)
-               flags |= GET_OID_QUIETLY;
-
-       memset(oc, 0, sizeof(*oc));
-       oc->mode = S_IFINVALID;
-       strbuf_init(&oc->symlink_path, 0);
-       ret = get_oid_1(name, namelen, oid, flags);
-       if (!ret)
-               return ret;
-       /*
-        * sha1:path --> object name of path in ent sha1
-        * :path -> object name of absolute path in index
-        * :./path -> object name of path relative to cwd in index
-        * :[0-3]:path -> object name of path in index at stage
-        * :/foo -> recent commit matching foo
-        */
-       if (name[0] == ':') {
-               int stage = 0;
-               const struct cache_entry *ce;
-               char *new_path = NULL;
-               int pos;
-               if (!only_to_die && namelen > 2 && name[1] == '/') {
-                       struct commit_list *list = NULL;
-
-                       for_each_ref(handle_one_ref, &list);
-                       commit_list_sort_by_date(&list);
-                       return get_oid_oneline(name + 2, oid, list);
-               }
-               if (namelen < 3 ||
-                   name[2] != ':' ||
-                   name[1] < '0' || '3' < name[1])
-                       cp = name + 1;
-               else {
-                       stage = name[1] - '0';
-                       cp = name + 3;
-               }
-               new_path = resolve_relative_path(cp);
-               if (!new_path) {
-                       namelen = namelen - (cp - name);
-               } else {
-                       cp = new_path;
-                       namelen = strlen(cp);
-               }
-
-               if (flags & GET_OID_RECORD_PATH)
-                       oc->path = xstrdup(cp);
-
-               if (!active_cache)
-                       read_cache();
-               pos = cache_name_pos(cp, namelen);
-               if (pos < 0)
-                       pos = -pos - 1;
-               while (pos < active_nr) {
-                       ce = active_cache[pos];
-                       if (ce_namelen(ce) != namelen ||
-                           memcmp(ce->name, cp, namelen))
-                               break;
-                       if (ce_stage(ce) == stage) {
-                               oidcpy(oid, &ce->oid);
-                               oc->mode = ce->ce_mode;
-                               free(new_path);
-                               return 0;
-                       }
-                       pos++;
-               }
-               if (only_to_die && name[1] && name[1] != '/')
-                       diagnose_invalid_index_path(stage, prefix, cp);
-               free(new_path);
-               return -1;
-       }
-       for (cp = name, bracket_depth = 0; *cp; cp++) {
-               if (*cp == '{')
-                       bracket_depth++;
-               else if (bracket_depth && *cp == '}')
-                       bracket_depth--;
-               else if (!bracket_depth && *cp == ':')
-                       break;
-       }
-       if (*cp == ':') {
-               struct object_id tree_oid;
-               int len = cp - name;
-               unsigned sub_flags = flags;
-
-               sub_flags &= ~GET_OID_DISAMBIGUATORS;
-               sub_flags |= GET_OID_TREEISH;
-
-               if (!get_oid_1(name, len, &tree_oid, sub_flags)) {
-                       const char *filename = cp+1;
-                       char *new_filename = NULL;
-
-                       new_filename = resolve_relative_path(filename);
-                       if (new_filename)
-                               filename = new_filename;
-                       if (flags & GET_OID_FOLLOW_SYMLINKS) {
-                               ret = get_tree_entry_follow_symlinks(tree_oid.hash,
-                                       filename, oid->hash, &oc->symlink_path,
-                                       &oc->mode);
-                       } else {
-                               ret = get_tree_entry(tree_oid.hash, filename,
-                                                    oid->hash, &oc->mode);
-                               if (ret && only_to_die) {
-                                       diagnose_invalid_oid_path(prefix,
-                                                                  filename,
-                                                                  &tree_oid,
-                                                                  name, len);
-                               }
-                       }
-                       hashcpy(oc->tree, tree_oid.hash);
-                       if (flags & GET_OID_RECORD_PATH)
-                               oc->path = xstrdup(filename);
-
-                       free(new_filename);
-                       return ret;
-               } else {
-                       if (only_to_die)
-                               die("Invalid object name '%.*s'.", len, name);
-               }
-       }
-       return ret;
-}
-
-/*
- * Call this function when you know "name" given by the end user must
- * name an object but it doesn't; the function _may_ die with a better
- * diagnostic message than "no such object 'name'", e.g. "Path 'doc' does not
- * exist in 'HEAD'" when given "HEAD:doc", or it may return in which case
- * you have a chance to diagnose the error further.
- */
-void maybe_die_on_misspelt_object_name(const char *name, const char *prefix)
-{
-       struct object_context oc;
-       struct object_id oid;
-       get_oid_with_context_1(name, GET_OID_ONLY_TO_DIE, prefix, &oid, &oc);
-}
-
-int get_oid_with_context(const char *str, unsigned flags, struct object_id *oid, struct object_context *oc)
-{
-       if (flags & GET_OID_FOLLOW_SYMLINKS && flags & GET_OID_ONLY_TO_DIE)
-               die("BUG: incompatible flags for get_sha1_with_context");
-       return get_oid_with_context_1(str, flags, NULL, oid, oc);
-}
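The comment block near the top of the removed file above documents the ':/<pattern>' revision syntax, including the '!-' negative-match form and the '!!' escape for a literal '!'. A minimal caller sketch, assuming only the get_oid() entry point shown in the removed block; the wrapper name newest_non_wip() is hypothetical and not part of the patch:

#include "cache.h"

/*
 * Sketch: resolve a recent commit (reachable from some ref) whose log
 * message does NOT match "WIP", using the ':/!-' form described above.
 * Returns 0 on success, negative on failure, as get_oid() does.
 */
static int newest_non_wip(struct object_id *oid)
{
	return get_oid(":/!-WIP", oid);
}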
diff --git a/shell.c b/shell.c
index 234b2d4f16fe79e9260c8409bd4d7c964fe27e72..0200d10796c43d6ea1249c314ee158f2b57a481f 100644 (file)
--- a/shell.c
+++ b/shell.c
@@ -1,6 +1,6 @@
 #include "cache.h"
 #include "quote.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
 #include "strbuf.h"
 #include "run-command.h"
 
index 6d7f943e4384fa90001fbb73cc5b63e2c0cfa639..325bf0e974ab9bd8c2c5b5483fbb619ac425531b 100644 (file)
@@ -13,7 +13,7 @@
  * the remote died unexpectedly.  A flush() concludes the stream.
  */
 
-#define PREFIX "remote: "
+#define DISPLAY_PREFIX "remote: "
 
 #define ANSI_SUFFIX "\033[K"
 #define DUMB_SUFFIX "        "
@@ -49,7 +49,7 @@ int recv_sideband(const char *me, int in_stream, int out)
                switch (band) {
                case 3:
                        strbuf_addf(&outbuf, "%s%s%s", outbuf.len ? "\n" : "",
-                                   PREFIX, buf + 1);
+                                   DISPLAY_PREFIX, buf + 1);
                        retval = SIDEBAND_REMOTE_ERROR;
                        break;
                case 2:
@@ -67,7 +67,7 @@ int recv_sideband(const char *me, int in_stream, int out)
                                int linelen = brk - b;
 
                                if (!outbuf.len)
-                                       strbuf_addstr(&outbuf, PREFIX);
+                                       strbuf_addstr(&outbuf, DISPLAY_PREFIX);
                                if (linelen > 0) {
                                        strbuf_addf(&outbuf, "%.*s%s%c",
                                                    linelen, b, suffix, *brk);
@@ -81,8 +81,8 @@ int recv_sideband(const char *me, int in_stream, int out)
                        }
 
                        if (*b)
-                               strbuf_addf(&outbuf, "%s%s",
-                                           outbuf.len ? "" : PREFIX, b);
+                               strbuf_addf(&outbuf, "%s%s", outbuf.len ?
+                                           "" : DISPLAY_PREFIX, b);
                        break;
                case 1:
                        write_or_die(out, buf + 1, len);
index 0759590b3e56de0fab6ff34f25206fb4cdeca4c5..75d0c2d89f351bac46e7c5038aaebe8023ca81cb 100644 (file)
--- a/strbuf.c
+++ b/strbuf.c
@@ -1,5 +1,6 @@
 #include "cache.h"
 #include "refs.h"
+#include "string-list.h"
 #include "utf8.h"
 
 int starts_with(const char *str, const char *prefix)
@@ -11,6 +12,15 @@ int starts_with(const char *str, const char *prefix)
                        return 0;
 }
 
+int istarts_with(const char *str, const char *prefix)
+{
+       for (; ; str++, prefix++)
+               if (!*prefix)
+                       return 1;
+               else if (tolower(*str) != tolower(*prefix))
+                       return 0;
+}
+
 int skip_to_optional_arg_default(const char *str, const char *prefix,
                                 const char **arg, const char *def)
 {
@@ -171,6 +181,21 @@ struct strbuf **strbuf_split_buf(const char *str, size_t slen,
        return ret;
 }
 
+void strbuf_add_separated_string_list(struct strbuf *str,
+                                     const char *sep,
+                                     struct string_list *slist)
+{
+       struct string_list_item *item;
+       int sep_needed = 0;
+
+       for_each_string_list_item(item, slist) {
+               if (sep_needed)
+                       strbuf_addstr(str, sep);
+               strbuf_addstr(str, item->string);
+               sep_needed = 1;
+       }
+}
+
 void strbuf_list_free(struct strbuf **sbs)
 {
        struct strbuf **s = sbs;
@@ -793,7 +818,18 @@ char *xstrdup_tolower(const char *string)
        result = xmallocz(len);
        for (i = 0; i < len; i++)
                result[i] = tolower(string[i]);
-       result[i] = '\0';
+       return result;
+}
+
+char *xstrdup_toupper(const char *string)
+{
+       char *result;
+       size_t len, i;
+
+       len = strlen(string);
+       result = xmallocz(len);
+       for (i = 0; i < len; i++)
+               result[i] = toupper(string[i]);
        return result;
 }
 
@@ -881,12 +917,12 @@ void strbuf_addftime(struct strbuf *sb, const char *fmt, const struct tm *tm,
        strbuf_setlen(sb, sb->len + len);
 }
 
-void strbuf_add_unique_abbrev(struct strbuf *sb, const unsigned char *sha1,
+void strbuf_add_unique_abbrev(struct strbuf *sb, const struct object_id *oid,
                              int abbrev_len)
 {
        int r;
        strbuf_grow(sb, GIT_SHA1_HEXSZ + 1);
-       r = find_unique_abbrev_r(sb->buf + sb->len, sha1, abbrev_len);
+       r = find_unique_abbrev_r(sb->buf + sb->len, oid, abbrev_len);
        strbuf_setlen(sb, sb->len + r);
 }
 
index e6cae5f4398c8eb4c4a53c7db226a920e32bfde9..60a35aef165ca1a1bdc20b005751ea70e3c49ee0 100644 (file)
--- a/strbuf.h
+++ b/strbuf.h
@@ -1,6 +1,8 @@
 #ifndef STRBUF_H
 #define STRBUF_H
 
+struct string_list;
+
 /**
  * strbuf's are meant to be used with all the usual C string and memory
  * APIs. Given that the length of the buffer is known, it's often better to
@@ -70,6 +72,12 @@ struct strbuf {
 extern char strbuf_slopbuf[];
 #define STRBUF_INIT  { .alloc = 0, .len = 0, .buf = strbuf_slopbuf }
 
+/*
+ * Predeclare this here, since cache.h includes this file before it defines the
+ * struct.
+ */
+struct object_id;
+
 /**
  * Life Cycle Functions
  * --------------------
@@ -531,6 +539,20 @@ static inline struct strbuf **strbuf_split(const struct strbuf *sb,
        return strbuf_split_max(sb, terminator, 0);
 }
 
+/*
+ * Adds all strings of a string list to the strbuf, separated by the given
+ * separator.  For example, if sep is
+ *   ', '
+ * and slist contains
+ *   ['element1', 'element2', ..., 'elementN'],
+ * then write:
+ *   'element1, element2, ..., elementN'
+ * to str.  If only one element, just write "element1" to str.
+ */
+extern void strbuf_add_separated_string_list(struct strbuf *str,
+                                            const char *sep,
+                                            struct string_list *slist);
+
 /**
  * Free a NULL-terminated list of strbufs (for example, the return
  * values of the strbuf_split*() functions).
@@ -542,7 +564,7 @@ extern void strbuf_list_free(struct strbuf **);
  * the strbuf `sb`.
  */
 extern void strbuf_add_unique_abbrev(struct strbuf *sb,
-                                    const unsigned char *sha1,
+                                    const struct object_id *oid,
                                     int abbrev_len);
 
 /**
@@ -610,6 +632,7 @@ __attribute__((format (printf,2,3)))
 extern int fprintf_ln(FILE *fp, const char *fmt, ...);
 
 char *xstrdup_tolower(const char *);
+char *xstrdup_toupper(const char *);
 
 /**
  * Create a newly allocated string using printf format. You can do this easily
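The comment added above describes the new strbuf_add_separated_string_list() helper, which joins a string_list into a strbuf with a caller-supplied separator. A minimal usage sketch, assuming the existing string-list API; demo_join() is a hypothetical wrapper, not code from the patch:

#include "cache.h"
#include "string-list.h"

/* Sketch: join two items with ", "; out.buf ends up as "element1, element2". */
static void demo_join(void)
{
	struct string_list items = STRING_LIST_INIT_DUP;
	struct strbuf out = STRBUF_INIT;

	string_list_append(&items, "element1");
	string_list_append(&items, "element2");

	strbuf_add_separated_string_list(&out, ", ", &items);

	strbuf_release(&out);
	string_list_clear(&items, 0);
}

The same header hunk also switches strbuf_add_unique_abbrev() from a raw hash pointer to a struct object_id pointer, matching the updated callers visible in the submodule diff further down.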
index 5892b50bd89c3c66bdb541ca0100f0671834a542..d1e6b2dce6877cb1407ac9d38e65d8b2bae25daa 100644 (file)
@@ -3,6 +3,9 @@
  */
 #include "cache.h"
 #include "streaming.h"
+#include "repository.h"
+#include "object-store.h"
+#include "replace-object.h"
 #include "packfile.h"
 
 enum input_source {
@@ -14,7 +17,7 @@ enum input_source {
 
 typedef int (*open_istream_fn)(struct git_istream *,
                               struct object_info *,
-                              const unsigned char *,
+                              const struct object_id *,
                               enum object_type *);
 typedef int (*close_istream_fn)(struct git_istream *);
 typedef ssize_t (*read_istream_fn)(struct git_istream *, char *, size_t);
@@ -27,7 +30,7 @@ struct stream_vtbl {
 #define open_method_decl(name) \
        int open_istream_ ##name \
        (struct git_istream *st, struct object_info *oi, \
-        const unsigned char *sha1, \
+        const struct object_id *oid, \
         enum object_type *type)
 
 #define close_method_decl(name) \
@@ -105,7 +108,7 @@ ssize_t read_istream(struct git_istream *st, void *buf, size_t sz)
        return st->vtbl->read(st, buf, sz);
 }
 
-static enum input_source istream_source(const unsigned char *sha1,
+static enum input_source istream_source(const struct object_id *oid,
                                        enum object_type *type,
                                        struct object_info *oi)
 {
@@ -114,7 +117,7 @@ static enum input_source istream_source(const unsigned char *sha1,
 
        oi->typep = type;
        oi->sizep = &size;
-       status = sha1_object_info_extended(sha1, oi, 0);
+       status = oid_object_info_extended(the_repository, oid, oi, 0);
        if (status < 0)
                return stream_error;
 
@@ -130,14 +133,14 @@ static enum input_source istream_source(const unsigned char *sha1,
        }
 }
 
-struct git_istream *open_istream(const unsigned char *sha1,
+struct git_istream *open_istream(const struct object_id *oid,
                                 enum object_type *type,
                                 unsigned long *size,
                                 struct stream_filter *filter)
 {
        struct git_istream *st;
        struct object_info oi = OBJECT_INFO_INIT;
-       const unsigned char *real = lookup_replace_object(sha1);
+       const struct object_id *real = lookup_replace_object(the_repository, oid);
        enum input_source src = istream_source(real, type, &oi);
 
        if (src < 0)
@@ -335,7 +338,8 @@ static struct stream_vtbl loose_vtbl = {
 
 static open_method_decl(loose)
 {
-       st->u.loose.mapped = map_sha1_file(sha1, &st->u.loose.mapsize);
+       st->u.loose.mapped = map_sha1_file(the_repository,
+                                          oid->hash, &st->u.loose.mapsize);
        if (!st->u.loose.mapped)
                return -1;
        if ((unpack_sha1_header(&st->z,
@@ -486,7 +490,7 @@ static struct stream_vtbl incore_vtbl = {
 
 static open_method_decl(incore)
 {
-       st->u.incore.buf = read_sha1_file_extended(sha1, type, &st->size, 0);
+       st->u.incore.buf = read_object_file_extended(oid, type, &st->size, 0);
        st->u.incore.read_ptr = 0;
        st->vtbl = &incore_vtbl;
 
@@ -507,7 +511,7 @@ int stream_blob_to_fd(int fd, const struct object_id *oid, struct stream_filter
        ssize_t kept = 0;
        int result = -1;
 
-       st = open_istream(oid->hash, &type, &sz, filter);
+       st = open_istream(oid, &type, &sz, filter);
        if (!st) {
                if (filter)
                        free_stream_filter(filter);
index 73c1d156b352898c9b5661a3e480f579b80c5a00..32f46267710b4e88cd0fc90e1a5a6f6388361c61 100644 (file)
@@ -8,7 +8,7 @@
 /* opaque */
 struct git_istream;
 
-extern struct git_istream *open_istream(const unsigned char *, enum object_type *, unsigned long *, struct stream_filter *);
+extern struct git_istream *open_istream(const struct object_id *, enum object_type *, unsigned long *, struct stream_filter *);
 extern int close_istream(struct git_istream *);
 extern ssize_t read_istream(struct git_istream *, void *, size_t);
 
index 602ba8ca8b8455df9b34e2990397c838d542569f..d87c3ff63a3cbc5bc0964499f84b7b4cebf04494 100644 (file)
@@ -520,7 +520,7 @@ static const struct submodule *config_from(struct submodule_cache *cache,
        if (submodule)
                goto out;
 
-       config = read_sha1_file(oid.hash, &type, &config_size);
+       config = read_object_file(&oid, &type, &config_size);
        if (!config || type != OBJ_BLOB)
                goto out;
 
@@ -619,31 +619,24 @@ static void gitmodules_read_check(struct repository *repo)
                repo_read_gitmodules(repo);
 }
 
-const struct submodule *submodule_from_name(const struct object_id *treeish_name,
+const struct submodule *submodule_from_name(struct repository *r,
+                                           const struct object_id *treeish_name,
                const char *name)
 {
-       gitmodules_read_check(the_repository);
-       return config_from(the_repository->submodule_cache, treeish_name, name, lookup_name);
+       gitmodules_read_check(r);
+       return config_from(r->submodule_cache, treeish_name, name, lookup_name);
 }
 
-const struct submodule *submodule_from_path(const struct object_id *treeish_name,
+const struct submodule *submodule_from_path(struct repository *r,
+                                           const struct object_id *treeish_name,
                const char *path)
 {
-       gitmodules_read_check(the_repository);
-       return config_from(the_repository->submodule_cache, treeish_name, path, lookup_path);
+       gitmodules_read_check(r);
+       return config_from(r->submodule_cache, treeish_name, path, lookup_path);
 }
 
-const struct submodule *submodule_from_cache(struct repository *repo,
-                                            const struct object_id *treeish_name,
-                                            const char *key)
+void submodule_free(struct repository *r)
 {
-       gitmodules_read_check(repo);
-       return config_from(repo->submodule_cache, treeish_name,
-                          key, lookup_path);
-}
-
-void submodule_free(void)
-{
-       if (the_repository->submodule_cache)
-               submodule_cache_clear(the_repository->submodule_cache);
+       if (r->submodule_cache)
+               submodule_cache_clear(r->submodule_cache);
 }
index a5503a5d177e90e009be9240bfddd68c9ead475b..6f686184e86cc0004410aae3b771de4d3b488a13 100644 (file)
@@ -39,13 +39,12 @@ extern int parse_update_recurse_submodules_arg(const char *opt, const char *arg)
 extern int parse_push_recurse_submodules_arg(const char *opt, const char *arg);
 extern void repo_read_gitmodules(struct repository *repo);
 extern void gitmodules_config_oid(const struct object_id *commit_oid);
-extern const struct submodule *submodule_from_name(
-               const struct object_id *commit_or_tree, const char *name);
-extern const struct submodule *submodule_from_path(
-               const struct object_id *commit_or_tree, const char *path);
-extern const struct submodule *submodule_from_cache(struct repository *repo,
-                                                   const struct object_id *treeish_name,
-                                                   const char *key);
-extern void submodule_free(void);
+const struct submodule *submodule_from_name(struct repository *r,
+                                           const struct object_id *commit_or_tree,
+                                           const char *name);
+const struct submodule *submodule_from_path(struct repository *r,
+                                           const struct object_id *commit_or_tree,
+                                           const char *path);
+void submodule_free(struct repository *r);
 
 #endif /* SUBMODULE_CONFIG_H */
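The prototypes above show the submodule-config lookups gaining an explicit struct repository argument, with the old submodule_from_cache() variant dropped. A minimal caller sketch following the pattern used throughout the submodule.c hunks below; submodule_url_for_path() is a hypothetical wrapper:

#include "cache.h"
#include "repository.h"
#include "submodule-config.h"

/* Sketch: look up a path's .gitmodules entry in the current repository. */
static const char *submodule_url_for_path(const char *path)
{
	const struct submodule *sub;

	/* &null_oid asks for the working-tree/index .gitmodules, as in the callers below */
	sub = submodule_from_path(the_repository, &null_oid, path);
	return sub ? sub->url : NULL;
}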
index 12a2503fda7df9060d858bd572ea106e3141b083..8fd8e5d178d4d6a8d7cf4fb465d96bb323fd02ee 100644 (file)
@@ -21,6 +21,7 @@
 #include "remote.h"
 #include "worktree.h"
 #include "parse-options.h"
+#include "object-store.h"
 
 static int config_update_recurse_submodules = RECURSE_SUBMODULES_OFF;
 static struct string_list changed_submodule_names = STRING_LIST_INIT_DUP;
@@ -95,7 +96,7 @@ int update_path_in_gitmodules(const char *oldpath, const char *newpath)
        if (is_gitmodules_unmerged(&the_index))
                die(_("Cannot change unmerged .gitmodules, resolve merge conflicts first"));
 
-       submodule = submodule_from_path(&null_oid, oldpath);
+       submodule = submodule_from_path(the_repository, &null_oid, oldpath);
        if (!submodule || !submodule->name) {
                warning(_("Could not find section in .gitmodules where path=%s"), oldpath);
                return -1;
@@ -129,7 +130,7 @@ int remove_path_from_gitmodules(const char *path)
        if (is_gitmodules_unmerged(&the_index))
                die(_("Cannot change unmerged .gitmodules, resolve merge conflicts first"));
 
-       submodule = submodule_from_path(&null_oid, path);
+       submodule = submodule_from_path(the_repository, &null_oid, path);
        if (!submodule || !submodule->name) {
                warning(_("Could not find section in .gitmodules where path=%s"), path);
                return -1;
@@ -173,7 +174,8 @@ static int add_submodule_odb(const char *path)
 void set_diffopt_flags_from_submodule_config(struct diff_options *diffopt,
                                             const char *path)
 {
-       const struct submodule *submodule = submodule_from_path(&null_oid, path);
+       const struct submodule *submodule = submodule_from_path(the_repository,
+                                                               &null_oid, path);
        if (submodule) {
                const char *ignore;
                char *key;
@@ -229,7 +231,7 @@ int is_submodule_active(struct repository *repo, const char *path)
        const struct string_list *sl;
        const struct submodule *module;
 
-       module = submodule_from_cache(repo, &null_oid, path);
+       module = submodule_from_path(repo, &null_oid, path);
 
        /* early return if there isn't a path->module mapping */
        if (!module)
@@ -540,9 +542,9 @@ static void show_submodule_header(struct diff_options *o, const char *path,
 
 output_header:
        strbuf_addf(&sb, "Submodule %s ", path);
-       strbuf_add_unique_abbrev(&sb, one->hash, DEFAULT_ABBREV);
+       strbuf_add_unique_abbrev(&sb, one, DEFAULT_ABBREV);
        strbuf_addstr(&sb, (fast_backward || fast_forward) ? ".." : "...");
-       strbuf_add_unique_abbrev(&sb, two->hash, DEFAULT_ABBREV);
+       strbuf_add_unique_abbrev(&sb, two, DEFAULT_ABBREV);
        if (message)
                strbuf_addf(&sb, " %s\n", message);
        else
@@ -673,7 +675,7 @@ const struct submodule *submodule_from_ce(const struct cache_entry *ce)
        if (!should_update_submodules())
                return NULL;
 
-       return submodule_from_path(&null_oid, ce->name);
+       return submodule_from_path(the_repository, &null_oid, ce->name);
 }
 
 static struct oid_array *submodule_commits(struct string_list *submodules,
@@ -730,13 +732,14 @@ static void collect_changed_submodules_cb(struct diff_queue_struct *q,
                if (!S_ISGITLINK(p->two->mode))
                        continue;
 
-               submodule = submodule_from_path(commit_oid, p->two->path);
+               submodule = submodule_from_path(the_repository,
+                                               commit_oid, p->two->path);
                if (submodule)
                        name = submodule->name;
                else {
                        name = default_name_or_path(p->two->path);
                        /* make sure name does not collide with existing one */
-                       submodule = submodule_from_name(commit_oid, name);
+                       submodule = submodule_from_name(the_repository, commit_oid, name);
                        if (submodule) {
                                warning("Submodule in commit %s at path: "
                                        "'%s' collides with a submodule named "
@@ -817,7 +820,7 @@ static int check_has_commit(const struct object_id *oid, void *data)
 {
        struct has_commit_data *cb = data;
 
-       enum object_type type = sha1_object_info(oid->hash, NULL);
+       enum object_type type = oid_object_info(the_repository, oid, NULL);
 
        switch (type) {
        case OBJ_COMMIT:
@@ -944,7 +947,7 @@ int find_unpushed_submodules(struct oid_array *commits,
                const struct submodule *submodule;
                const char *path = NULL;
 
-               submodule = submodule_from_name(&null_oid, name->string);
+               submodule = submodule_from_name(the_repository, &null_oid, name->string);
                if (submodule)
                        path = submodule->path;
                else
@@ -1112,7 +1115,7 @@ static void calculate_changed_submodule_paths(void)
        const struct string_list_item *name;
 
        /* No need to check if there are no submodules configured */
-       if (!submodule_from_path(NULL, NULL))
+       if (!submodule_from_path(the_repository, NULL, NULL))
                return;
 
        argv_array_push(&argv, "--"); /* argv[0] program name */
@@ -1133,7 +1136,7 @@ static void calculate_changed_submodule_paths(void)
                const struct submodule *submodule;
                const char *path = NULL;
 
-               submodule = submodule_from_name(&null_oid, name->string);
+               submodule = submodule_from_name(the_repository, &null_oid, name->string);
                if (submodule)
                        path = submodule->path;
                else
@@ -1161,7 +1164,7 @@ int submodule_touches_in_range(struct object_id *excl_oid,
        int ret;
 
        /* No need to check if there are no submodules configured */
-       if (!submodule_from_path(NULL, NULL))
+       if (!submodule_from_path(the_repository, NULL, NULL))
                return 0;
 
        argv_array_push(&args, "--"); /* args[0] program name */
@@ -1233,7 +1236,7 @@ static int get_next_submodule(struct child_process *cp,
                if (!S_ISGITLINK(ce->ce_mode))
                        continue;
 
-               submodule = submodule_from_cache(spf->r, &null_oid, ce->name);
+               submodule = submodule_from_path(spf->r, &null_oid, ce->name);
                if (!submodule) {
                        const char *name = default_name_or_path(ce->name);
                        if (name) {
@@ -1603,7 +1606,7 @@ int submodule_move_head(const char *path,
        if (old_head && !is_submodule_populated_gently(path, error_code_ptr))
                return 0;
 
-       sub = submodule_from_path(&null_oid, path);
+       sub = submodule_from_path(the_repository, &null_oid, path);
 
        if (!sub)
                die("BUG: could not get submodule information for '%s'", path);
@@ -1622,7 +1625,7 @@ int submodule_move_head(const char *path,
                } else {
                        char *gitdir = xstrfmt("%s/modules/%s",
                                    get_git_common_dir(), sub->name);
-                       connect_work_tree_and_git_dir(path, gitdir);
+                       connect_work_tree_and_git_dir(path, gitdir, 0);
                        free(gitdir);
 
                        /* make sure the index is clean as well */
@@ -1632,7 +1635,7 @@ int submodule_move_head(const char *path,
                if (old_head && (flags & SUBMODULE_MOVE_HEAD_FORCE)) {
                        char *gitdir = xstrfmt("%s/modules/%s",
                                    get_git_common_dir(), sub->name);
-                       connect_work_tree_and_git_dir(path, gitdir);
+                       connect_work_tree_and_git_dir(path, gitdir, 1);
                        free(gitdir);
                }
        }
@@ -1885,7 +1888,7 @@ static void relocate_single_git_dir_into_superproject(const char *prefix,
 
        real_old_git_dir = real_pathdup(old_git_dir, 1);
 
-       sub = submodule_from_path(&null_oid, path);
+       sub = submodule_from_path(the_repository, &null_oid, path);
        if (!sub)
                die(_("could not lookup name for submodule '%s'"), path);
 
@@ -1941,11 +1944,11 @@ void absorb_git_dir_into_superproject(const char *prefix,
                * superproject did not rewrite the git file links yet,
                * fix it now.
                */
-               sub = submodule_from_path(&null_oid, path);
+               sub = submodule_from_path(the_repository, &null_oid, path);
                if (!sub)
                        die(_("could not lookup name for submodule '%s'"), path);
                connect_work_tree_and_git_dir(path,
-                       git_path("modules/%s", sub->name));
+                       git_path("modules/%s", sub->name), 0);
        } else {
                /* Is it already absorbed into the superprojects git dir? */
                char *real_sub_git_dir = real_pathdup(sub_git_dir, 1);
@@ -2087,7 +2090,7 @@ int submodule_to_gitdir(struct strbuf *buf, const char *submodule)
                strbuf_addstr(buf, git_dir);
        }
        if (!is_git_directory(buf->buf)) {
-               sub = submodule_from_path(&null_oid, submodule);
+               sub = submodule_from_path(the_repository, &null_oid, submodule);
                if (!sub) {
                        ret = -1;
                        goto cleanup;
index 9589f131273d4f04605c8dbf7dcce05aaea606ad..e5526f6aaab93f85d279e89fc33b8e2e8740c32a 100644 (file)
@@ -105,7 +105,6 @@ extern int push_unpushed_submodules(struct oid_array *commits,
                                    const char **refspec, int refspec_nr,
                                    const struct string_list *push_options,
                                    int dry_run);
-extern void connect_work_tree_and_git_dir(const char *work_tree, const char *git_dir);
 /*
  * Given a submodule path (as in the index), return the repository
  * path of that submodule in 'buf'. Return -1 on error or when the
index 24ddebfabf97be1251452fe5b6bba847597431fd..8373a27fea38b1ec111e80d203bf61f1143b7861 100644 (file)
--- a/t/README
+++ b/t/README
@@ -293,6 +293,28 @@ and know what setup is needed for it.  Or when you want to run
 everything up to a certain test.
 
 
+Running tests with special setups
+---------------------------------
+
+The whole test suite could be run to test some special features
+that cannot be easily covered by a few specific test cases. These
+could be enabled by running the test suite with correct GIT_TEST_
+environment set.
+
+GIT_TEST_SPLIT_INDEX=<boolean> forces split-index mode on the whole
+test suite. Accept any boolean values that are accepted by git-config.
+
+GIT_TEST_FULL_IN_PACK_ARRAY=<boolean> exercises the uncommon
+pack-objects code path where there are more than 1024 packs even if
+the actual number of packs in repository is below this limit. Accept
+any boolean values that are accepted by git-config.
+
+GIT_TEST_OE_SIZE=<n> exercises the uncommon pack-objects code path
+where we do not cache object size in memory and read it from existing
+packs on demand. This normally only happens when the object size is
+over 2GB. This variable forces the code path on any object larger than
+<n> bytes.
+
 Naming Tests
 ------------
 
index e760256406fa9c2fe0f9b2cde0ffb97ff11c6cab..aa22af48c2a6f48a81a6306a1850f5b5256fa319 100644 (file)
@@ -5,32 +5,43 @@
  *
  * The mtime can be changed to an absolute value:
  *
- *     test-chmtime =<seconds> file...
+ *     test-tool chmtime =<seconds> file...
  *
  * Relative to the current time as returned by time(3):
  *
- *     test-chmtime =+<seconds> (or =-<seconds>) file...
+ *     test-tool chmtime =+<seconds> (or =-<seconds>) file...
  *
  * Or relative to the current mtime of the file:
  *
- *     test-chmtime <seconds> file...
- *     test-chmtime +<seconds> (or -<seconds>) file...
+ *     test-tool chmtime <seconds> file...
+ *     test-tool chmtime +<seconds> (or -<seconds>) file...
  *
  * Examples:
  *
- * To just print the mtime use --verbose and set the file mtime offset to 0:
+ * To print the mtime and the file name use --verbose and set
+ * the file mtime offset to 0:
  *
- *     test-chmtime -v +0 file
+ *     test-tool chmtime -v +0 file
+ *
+ * To print only the mtime use --get:
+ *
+ *     test-tool chmtime --get file
  *
  * To set the mtime to current time:
  *
- *     test-chmtime =+0 file
+ *     test-tool chmtime =+0 file
+ *
+ * To set the file mtime offset to +1 and print the new value:
+ *
+ *     test-tool chmtime --get +1 file
  *
  */
+#include "test-tool.h"
 #include "git-compat-util.h"
 #include <utime.h>
 
-static const char usage_str[] = "-v|--verbose (+|=|=+|=-|-)<seconds> <file>...";
+static const char usage_str[] =
+       "(-v|--verbose|-g|--get) (+|=|=+|=-|-)<seconds> <file>...";
 
 static int timespec_arg(const char *arg, long int *set_time, int *set_eq)
 {
@@ -46,7 +57,6 @@ static int timespec_arg(const char *arg, long int *set_time, int *set_eq)
        }
        *set_time = strtol(timespec, &test, 10);
        if (*test) {
-               fprintf(stderr, "Not a base-10 integer: %s\n", arg + 1);
                return 0;
        }
        if ((*set_eq && *set_time < 0) || *set_eq == 2) {
@@ -56,9 +66,10 @@ static int timespec_arg(const char *arg, long int *set_time, int *set_eq)
        return 1;
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__chmtime(int argc, const char **argv)
 {
        static int verbose;
+       static int get;
 
        int i = 1;
        /* no mtime change by default */
@@ -68,18 +79,34 @@ int cmd_main(int argc, const char **argv)
        if (argc < 3)
                goto usage;
 
-       if (strcmp(argv[i], "--verbose") == 0 || strcmp(argv[i], "-v") == 0) {
+       if (strcmp(argv[i], "--get") == 0 || strcmp(argv[i], "-g") == 0) {
+               get = 1;
+               ++i;
+       } else if (strcmp(argv[i], "--verbose") == 0 || strcmp(argv[i], "-v") == 0) {
                verbose = 1;
                ++i;
        }
-       if (timespec_arg(argv[i], &set_time, &set_eq))
+
+       if (i == argc) {
+               goto usage;
+       }
+
+       if (timespec_arg(argv[i], &set_time, &set_eq)) {
                ++i;
-       else
+       } else {
+               if (get == 0) {
+                       fprintf(stderr, "Not a base-10 integer: %s\n", argv[i] + 1);
+                       goto usage;
+               }
+       }
+
+       if (i == argc)
                goto usage;
 
        for (; i < argc; i++) {
                struct stat sb;
                struct utimbuf utb;
+               uintmax_t mtime;
 
                if (stat(argv[i], &sb) < 0) {
                        fprintf(stderr, "Failed to stat %s: %s\n",
@@ -99,8 +126,10 @@ int cmd_main(int argc, const char **argv)
                utb.actime = sb.st_atime;
                utb.modtime = set_eq ? set_time : sb.st_mtime + set_time;
 
-               if (verbose) {
-                       uintmax_t mtime = utb.modtime < 0 ? 0: utb.modtime;
+               mtime = utb.modtime < 0 ? 0: utb.modtime;
+               if (get) {
+                       printf("%"PRIuMAX"\n", mtime);
+               } else if (verbose) {
                        printf("%"PRIuMAX"\t%s\n", mtime, argv[i]);
                }
 
index 1a7b8bd3d650fe1111c77115d2c68644b6ac9edb..214003d5b2f9bbe978d5aa4d9c9ab3f9b8a990da 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "config.h"
 #include "string-list.h"
@@ -32,7 +33,7 @@
  * Examples:
  *
  * To print the value with highest priority for key "foo.bAr Baz.rock":
- *     test-config get_value "foo.bAr Baz.rock"
+ *     test-tool config get_value "foo.bAr Baz.rock"
  *
  */
 
@@ -77,7 +78,7 @@ static int early_config_cb(const char *var, const char *value, void *vdata)
        return 0;
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__config(int argc, const char **argv)
 {
        int i, val;
        const char *v;
index bb72c47df570d9c07ae4567fd6d31ea49fe49656..92c4c2313e78a305dd0da1e7a853e58e325d2f84 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 
 static int rc;
@@ -28,7 +29,7 @@ static int is_in(const char *s, int ch)
 #define LOWER "abcdefghijklmnopqrstuvwxyz"
 #define UPPER "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
 
-int cmd_main(int argc, const char **argv)
+int cmd__ctype(int argc, const char **argv)
 {
        TEST_CLASS(isdigit, DIGIT);
        TEST_CLASS(isspace, " \n\r\t");
index ac8368797073adfdf203f60d93df55a988443dbd..a0837371aba17956331ddcdb04847c6c9edeefd2 100644 (file)
@@ -1,13 +1,14 @@
+#include "test-tool.h"
 #include "cache.h"
 
 static const char *usage_msg = "\n"
-"  test-date relative [time_t]...\n"
-"  test-date show:<format> [time_t]...\n"
-"  test-date parse [date]...\n"
-"  test-date approxidate [date]...\n"
-"  test-date timestamp [date]...\n"
-"  test-date is64bit\n"
-"  test-date time_t-is64bit\n";
+"  test-tool date relative [time_t]...\n"
+"  test-tool date show:<format> [time_t]...\n"
+"  test-tool date parse [date]...\n"
+"  test-tool date approxidate [date]...\n"
+"  test-tool date timestamp [date]...\n"
+"  test-tool date is64bit\n"
+"  test-tool date time_t-is64bit\n";
 
 static void show_relative_dates(const char **argv, struct timeval *now)
 {
@@ -81,7 +82,7 @@ static void parse_approx_timestamp(const char **argv, struct timeval *now)
        }
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__date(int argc, const char **argv)
 {
        struct timeval now;
        const char *x;
index 591730adc4f3940940fdb4691da0afb81648e353..34c7259248760ff8bdea495ac5effd5de2b04ae3 100644 (file)
@@ -8,14 +8,15 @@
  * published by the Free Software Foundation.
  */
 
+#include "test-tool.h"
 #include "git-compat-util.h"
 #include "delta.h"
 #include "cache.h"
 
 static const char usage_str[] =
-       "test-delta (-d|-p) <from_file> <data_file> <out_file>";
+       "test-tool delta (-d|-p) <from_file> <data_file> <out_file>";
 
-int cmd_main(int argc, const char **argv)
+int cmd__delta(int argc, const char **argv)
 {
        int fd;
        struct stat st;
index bd1a857d5224a1a8d3c4a0d8e0ce54476d70372e..d6bcfddf13352b92d6be4d9d5b8e7d7f05cc06ee 100644 (file)
@@ -1,6 +1,8 @@
+#include "test-tool.h"
 #include "git-compat-util.h"
 
 #if defined(GIT_WINDOWS_NATIVE)
+#include "lazyload.h"
 
 static int cmd_sync(void)
 {
@@ -81,8 +83,7 @@ static int cmd_dropcaches(void)
 {
        HANDLE hProcess = GetCurrentProcess();
        HANDLE hToken;
-       HMODULE ntdll;
-       DWORD(WINAPI *NtSetSystemInformation)(INT, PVOID, ULONG);
+       DECLARE_PROC_ADDR(ntdll.dll, DWORD, NtSetSystemInformation, INT, PVOID, ULONG);
        SYSTEM_MEMORY_LIST_COMMAND command;
        int status;
 
@@ -94,14 +95,8 @@ static int cmd_dropcaches(void)
 
        CloseHandle(hToken);
 
-       ntdll = LoadLibrary("ntdll.dll");
-       if (!ntdll)
-               return error("Can't load ntdll.dll, wrong Windows version?");
-
-       NtSetSystemInformation =
-               (DWORD(WINAPI *)(INT, PVOID, ULONG))GetProcAddress(ntdll, "NtSetSystemInformation");
-       if (!NtSetSystemInformation)
-               return error("Can't get function addresses, wrong Windows version?");
+       if (!INIT_PROC_ADDR(NtSetSystemInformation))
+               return error("Could not find NtSetSystemInformation() function");
 
        command = MemoryPurgeStandbyList;
        status = NtSetSystemInformation(
@@ -114,8 +109,6 @@ static int cmd_dropcaches(void)
        else if (status != STATUS_SUCCESS)
                error("Unable to execute the memory list command %d", status);
 
-       FreeLibrary(ntdll);
-
        return status;
 }
 
@@ -157,7 +150,7 @@ static int cmd_dropcaches(void)
 
 #endif
 
-int cmd_main(int argc, const char **argv)
+int cmd__drop_caches(int argc, const char **argv)
 {
        cmd_sync();
        return cmd_dropcaches();
index ebf3aab22d6c197b99b0203dbe9881ccf417be86..98a4891f1dc936a486075703de319affdacb1c78 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "tree.h"
 #include "cache-tree.h"
@@ -54,7 +55,7 @@ static int dump_cache_tree(struct cache_tree *it,
        return errs;
 }
 
-int cmd_main(int ac, const char **av)
+int cmd__dump_cache_tree(int ac, const char **av)
 {
        struct index_state istate;
        struct cache_tree *another = cache_tree();
index e44430b699db732252afa6fcb06686ec89b5811d..4e2fdb5e30d1ae30b1f75b7e3ca636afa4c0e097 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "split-index.h"
 #include "ewah/ewok.h"
@@ -7,7 +8,7 @@ static void show_bit(size_t pos, void *data)
        printf(" %d", (int)pos);
 }
 
-int cmd_main(int ac, const char **av)
+int cmd__dump_split_index(int ac, const char **av)
 {
        struct split_index *si;
        int i;
index 90dc97a9d0444bc2dd8beaa1bfdd970f4280efb7..081115bf8eb7cb2e27c39f632af0e40001b5b6e9 100644 (file)
@@ -1,8 +1,9 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "object.h"
 #include "decorate.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__example_decorate(int argc, const char **argv)
 {
        struct decoration n;
        struct object_id one_oid = { {1} };
index 8d11d22d98649900b6d558cc174e2af1dbff9948..99b8dc1e2d9cdc3a0e0f464fdeee0f94733d00d1 100644 (file)
@@ -4,9 +4,10 @@
  * Copyright (C) 2007 by Nicolas Pitre, licensed under the GPL version 2.
  */
 
+#include "test-tool.h"
 #include "git-compat-util.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__genrandom(int argc, const char **argv)
 {
        unsigned long count, next = 0;
        unsigned char *c;
index 9ae9281c071254019ccca3486b3a6762d9c0085f..23d2b172fe708f711a15613e906637cd948324ef 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "git-compat-util.h"
 #include "hashmap.h"
 #include "strbuf.h"
@@ -77,7 +78,7 @@ static unsigned int hash(unsigned int method, unsigned int i, const char *key)
 
 /*
  * Test performance of hashmap.[ch]
- * Usage: time echo "perfhashmap method rounds" | test-hashmap
+ * Usage: time echo "perfhashmap method rounds" | test-tool hashmap
  */
 static void perf_hashmap(unsigned int method, unsigned int rounds)
 {
@@ -144,7 +145,7 @@ static void perf_hashmap(unsigned int method, unsigned int rounds)
  *
  * perfhashmap method rounds -> test hashmap.[ch] performance
  */
-int cmd_main(int argc, const char **argv)
+int cmd__hashmap(int argc, const char **argv)
 {
        struct strbuf line = STRBUF_INIT;
        struct hashmap map;
index f569f6b7eff87227f82dbe6390fd31fb970a5fca..fcd10968cc10bdab8db146b8ba65080431e5ed24 100644 (file)
@@ -1,6 +1,7 @@
+#include "test-tool.h"
 #include "cache.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__index_version(int argc, const char **argv)
 {
        struct cache_header hdr;
        int version;
index 297fb01d61eded5d83d02175199efcd217376738..b99a37080d935fa27df4e372e0e8cddd9fd08b3c 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "parse-options.h"
 
@@ -184,14 +185,14 @@ static void analyze_run(void)
        }
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__lazy_init_name_hash(int argc, const char **argv)
 {
        const char *usage[] = {
-               "test-lazy-init-name-hash -d (-s | -m)",
-               "test-lazy-init-name-hash -p [-c c]",
-               "test-lazy-init-name-hash -a a [--step s] [-c c]",
-               "test-lazy-init-name-hash (-s | -m) [-c c]",
-               "test-lazy-init-name-hash -s -m [-c c]",
+               "test-tool lazy-init-name-hash -d (-s | -m)",
+               "test-tool lazy-init-name-hash -p [-c c]",
+               "test-tool lazy-init-name-hash -a a [--step s] [-c c]",
+               "test-tool lazy-init-name-hash (-s | -m) [-c c]",
+               "test-tool lazy-init-name-hash -s -m [-c c]",
                NULL
        };
        struct option options[] = {
index 356d8edef1d25524abaae0f0fcb3bf072d229c30..96857f26ac8540cf22e74aed72bbd30bc8147f00 100644 (file)
@@ -1,7 +1,8 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "tree.h"
 
-int cmd_main(int ac, const char **av)
+int cmd__match_trees(int ac, const char **av)
 {
        struct object_id hash1, hash2, shifted;
        struct tree *one, *two;
index 335cf6b6264cdaf9563736fbcfa40e7a3006a432..c5cffaa4b73ff52b2f166231ceb4a77b24ee8cef 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "mergesort.h"
 
@@ -22,7 +23,7 @@ static int compare_strings(const void *a, const void *b)
        return strcmp(x->text, y->text);
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__mergesort(int argc, const char **argv)
 {
        struct line *line, *p = NULL, *lines = NULL;
        struct strbuf sb = STRBUF_INIT;
index 89d9b2f7bee05ff5c9fde31ba6798651ccee2947..229068894029338c52f5d0b9828c442658be67e5 100644 (file)
@@ -1,9 +1,10 @@
 /*
  * test-mktemp.c: code to exercise the creation of temporary files
  */
+#include "test-tool.h"
 #include "git-compat-util.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__mktemp(int argc, const char **argv)
 {
        if (argc != 2)
                usage("Expected 1 parameter defining the temporary file template");
index 06c09c6b886f843b2d3c6fdb8e00730afc7fe40b..8cb0d53840f3dc60d583fa1b72f95572d2148d11 100644 (file)
@@ -1,7 +1,8 @@
+#include "test-tool.h"
 #include "git-compat-util.h"
 #include "thread-utils.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__online_cpus(int argc, const char **argv)
 {
        printf("%d\n", online_cpus());
        return 0;
index 2b3c5092a199835ea2e84339b473a9e29778178d..e115d44ac26e1d4fed48a5fd82632e5a93a487e6 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "string-list.h"
 
@@ -170,7 +171,7 @@ static struct test_data dirname_data[] = {
        { NULL,              NULL     }
 };
 
-int cmd_main(int argc, const char **argv)
+int cmd__path_utils(int argc, const char **argv)
 {
        if (argc == 3 && !strcmp(argv[1], "normalize_path_copy")) {
                char *buf = xmallocz(strlen(argv[2]));
diff --git a/t/helper/test-pkt-line.c b/t/helper/test-pkt-line.c
new file mode 100644 (file)
index 0000000..0f19e53
--- /dev/null
@@ -0,0 +1,64 @@
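+/*
+ * test-pkt-line: a small helper that packs arbitrary input lines into
+ * pkt-line format ("pack") or parses pkt-line input back into readable
+ * lines ("unpack"), intended for the pkt-line/protocol tests.
+ */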
+#include "pkt-line.h"
+
+static void pack_line(const char *line)
+{
+       if (!strcmp(line, "0000") || !strcmp(line, "0000\n"))
+               packet_flush(1);
+       else if (!strcmp(line, "0001") || !strcmp(line, "0001\n"))
+               packet_delim(1);
+       else
+               packet_write_fmt(1, "%s", line);
+}
+
+static void pack(int argc, const char **argv)
+{
+       if (argc) { /* read from argv */
+               int i;
+               for (i = 0; i < argc; i++)
+                       pack_line(argv[i]);
+       } else { /* read from stdin */
+               char line[LARGE_PACKET_MAX];
+               while (fgets(line, sizeof(line), stdin)) {
+                       pack_line(line);
+               }
+       }
+}
+
+static void unpack(void)
+{
+       struct packet_reader reader;
+       packet_reader_init(&reader, 0, NULL, 0,
+                          PACKET_READ_GENTLE_ON_EOF |
+                          PACKET_READ_CHOMP_NEWLINE);
+
+       while (packet_reader_read(&reader) != PACKET_READ_EOF) {
+               switch (reader.status) {
+               case PACKET_READ_EOF:
+                       break;
+               case PACKET_READ_NORMAL:
+                       printf("%s\n", reader.line);
+                       break;
+               case PACKET_READ_FLUSH:
+                       printf("0000\n");
+                       break;
+               case PACKET_READ_DELIM:
+                       printf("0001\n");
+                       break;
+               }
+       }
+}
+
+int cmd_main(int argc, const char **argv)
+{
+       if (argc < 2)
+               die("too few arguments");
+
+       if (!strcmp(argv[1], "pack"))
+               pack(argc - 2, argv + 2);
+       else if (!strcmp(argv[1], "unpack"))
+               unpack();
+       else
+               die("invalid argument '%s'", argv[1]);
+
+       return 0;
+}
index ae58fff35972a09c08a47d2bc0abb67c96ba20eb..9807b649b14c0002bee6d10b200c27bb89a54812 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "prio-queue.h"
 
@@ -16,7 +17,7 @@ static void show(int *v)
        free(v);
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__prio_queue(int argc, const char **argv)
 {
        struct prio_queue pq = { intcmp };
 
index 48255eef31a1e83d9a2bbb01670ba089a11c91f8..d674c88ba092d60366a14eaeccc9f4bc6f32660c 100644 (file)
@@ -1,6 +1,7 @@
+#include "test-tool.h"
 #include "cache.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__read_cache(int argc, const char **argv)
 {
        int i, cnt = 1;
        if (argc == 2)
index 7120634b04733bb8abe1f0622f0e1e9c8280b643..e9e0541276c50d1739b6d39f2c01ba8ecb782adc 100644 (file)
@@ -1,6 +1,9 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "refs.h"
 #include "worktree.h"
+#include "object-store.h"
+#include "repository.h"
 
 static const char *notnull(const char *arg, const char *name)
 {
@@ -21,7 +24,7 @@ static const char **get_store(const char **argv, struct ref_store **refs)
        if (!argv[0]) {
                die("ref store required");
        } else if (!strcmp(argv[0], "main")) {
-               *refs = get_main_ref_store();
+               *refs = get_main_ref_store(the_repository);
        } else if (skip_prefix(argv[0], "submodule:", &gitdir)) {
                struct strbuf sb = STRBUF_INIT;
                int ret;
@@ -274,7 +277,7 @@ static struct command commands[] = {
        { NULL, NULL }
 };
 
-int cmd_main(int argc, const char **argv)
+int cmd__ref_store(int argc, const char **argv)
 {
        struct ref_store *refs;
        const char *func;
index b5ea8a97c54e1737d91dec894c1cc02e1baf64e5..10284cc56fa9f69703aa69f242edef113d6a40de 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "git-compat-util.h"
 #include "gettext.h"
 
@@ -36,7 +37,7 @@ static int test_regex_bug(void)
        return 0;
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__regex(int argc, const char **argv)
 {
        const char *pat;
        const char *str;
@@ -47,8 +48,8 @@ int cmd_main(int argc, const char **argv)
        if (argc == 2 && !strcmp(argv[1], "--bug"))
                return test_regex_bug();
        else if (argc < 3)
-               usage("test-regex --bug\n"
-                     "test-regex <pattern> <string> [<options>]");
+               usage("test-tool regex --bug\n"
+                     "test-tool regex <pattern> <string> [<options>]");
 
        argv++;
        pat = *argv++;
index b8e6fe1d007449d30dd30ccd4319b26f151bbf23..4f8bc758213c47906d3dc1a4d4e02df2737c508d 100644 (file)
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 
+#include "test-tool.h"
 #include "cache.h"
 #include "commit.h"
 #include "diff.h"
@@ -45,7 +46,7 @@ static int run_revision_walk(void)
        return got_revision;
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__revision_walking(int argc, const char **argv)
 {
        if (argc < 2)
                return 1;
index 153342e44dd11ae357cc299a9214f4c365614a5e..2cc93bb69c522d99491cd8a9e02e211b2c3df807 100644 (file)
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 
+       printf "\0\0\376\377\0\0\0a\0\0\0b\0\0\0c" >bebom.utf32be.raw &&
+       printf "\377\376\0\0a\0\0\0b\0\0\0c\0\0\0" >lebom.utf32le.raw &&
 #include "run-command.h"
 #include "argv-array.h"
@@ -49,7 +50,7 @@ static int task_finished(int result,
        return 1;
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__run_command(int argc, const char **argv)
 {
        struct child_process proc = CHILD_PROCESS_INIT;
        int jobs;
index d2a63bea4346fb76d38ba43508ee6e60599e41a9..d26d3e7c8b1a407e2cf40ce20750495f500d5170 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "lockfile.h"
 #include "tree.h"
@@ -5,7 +6,7 @@
 
 static struct lock_file index_lock;
 
-int cmd_main(int ac, const char **av)
+int cmd__scrap_cache_tree(int ac, const char **av)
 {
        setup_git_directory();
        hold_locked_index(&index_lock, LOCK_DIE_ON_ERROR);
index edfd52d82aeca0bb9ba2c5b2ce18bc39d27e5a34..ad5e69f9d3b0e03442f0d23b3b559bbfc163ee7b 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "sha1-array.h"
 
@@ -7,7 +8,7 @@ static int print_oid(const struct object_id *oid, void *data)
        return 0;
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__sha1_array(int argc, const char **argv)
 {
        struct oid_array array = OID_ARRAY_INIT;
        struct strbuf line = STRBUF_INIT;
index a1c13f54eca0db7d11a5df134d565171d70b8cce..1ba0675c75f0d2dab281d054b577272cd45c39f9 100644 (file)
@@ -1,6 +1,7 @@
+#include "test-tool.h"
 #include "cache.h"
 
-int cmd_main(int ac, const char **av)
+int cmd__sha1(int ac, const char **av)
 {
        git_SHA_CTX ctx;
        unsigned char sha1[20];
index 750b95a0a1c39b4d761b7f2f861f6df4c85ecb65..84594885c703887c3b09aceb51583b9895ac3b45 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 dd if=/dev/zero bs=1048576 count=100 2>/dev/null |
-/usr/bin/time t/helper/test-sha1 >/dev/null
+/usr/bin/time t/helper/test-tool sha1 >/dev/null
 
 while read expect cnt pfx
 do
@@ -11,7 +11,7 @@ do
                        test -z "$pfx" || echo "$pfx"
                        dd if=/dev/zero bs=1048576 count=$cnt 2>/dev/null |
                        perl -pe 'y/\000/g/'
-               } | ./t/helper/test-sha1 $cnt
+               } | ./t/helper/test-tool sha1 $cnt
        )
        if test "$expect" = "$actual"
        then
index b71edbd4429184b59b4bd1355d5cfb53970a1876..77ac5bc33f8eb635f78d8ba590c23bbbe4f29636 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "sigchain.h"
 
@@ -13,7 +14,7 @@ X(two)
 X(three)
 #undef X
 
-int cmd_main(int argc, const char **argv) {
+int cmd__sigchain(int argc, const char **argv) {
        sigchain_push(SIGTERM, one);
        sigchain_push(SIGTERM, two);
        sigchain_push(SIGTERM, three);
index e159c9a127f6854541ab497c8fd4b6efb9d4ea39..44e4a6d143e24cbd05b5d94404a5f5d4e732c880 100644 (file)
@@ -1,6 +1,7 @@
+#include "test-tool.h"
 #include "cache.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__strcmp_offset(int argc, const char **argv)
 {
        int result;
        size_t offset;
index 829ec3d7d2f58b9538bb3bbab47213eddb9e62ec..2123dda85bf10033dcbf0d801028b3705e73a507 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "string-list.h"
 
@@ -41,7 +42,7 @@ static int prefix_cb(struct string_list_item *item, void *cb_data)
        return starts_with(item->string, prefix);
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__string_list(int argc, const char **argv)
 {
        if (argc == 5 && !strcmp(argv[1], "split")) {
                struct string_list list = STRING_LIST_INIT_DUP;
index f23db3b19a9911b554ca8eaf567cd0370d42af6e..e2692746dfdb0e6a5c3b1c748124059bcbdd5fae 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "config.h"
 #include "submodule-config.h"
@@ -10,7 +11,7 @@ static void die_usage(int argc, const char **argv, const char *msg)
        exit(1);
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__submodule_config(int argc, const char **argv)
 {
        const char **arg = argv;
        int my_argc = argc;
@@ -48,9 +49,11 @@ int cmd_main(int argc, const char **argv)
                        die_usage(argc, argv, "Commit not found.");
 
                if (lookup_name) {
-                       submodule = submodule_from_name(&commit_oid, path_or_name);
+                       submodule = submodule_from_name(the_repository,
+                                                       &commit_oid, path_or_name);
                } else
-                       submodule = submodule_from_path(&commit_oid, path_or_name);
+                       submodule = submodule_from_path(the_repository,
+                                                       &commit_oid, path_or_name);
                if (!submodule)
                        die_usage(argc, argv, "Submodule not found.");
 
@@ -64,7 +67,7 @@ int cmd_main(int argc, const char **argv)
                arg += 2;
        }
 
-       submodule_free();
+       submodule_free(the_repository);
 
        return 0;
 }
index 30c5765bfc3590421c21bc2350eed882752de3a0..92b69de635296d32d38d1f7d7589d5f8b6fbc296 100644 (file)
@@ -1,7 +1,8 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "run-command.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__subprocess(int argc, const char **argv)
 {
        struct child_process cp = CHILD_PROCESS_INIT;
        int nogit = 0;
diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c
new file mode 100644 (file)
index 0000000..87066ce
--- /dev/null
@@ -0,0 +1,62 @@
+#include "git-compat-util.h"
+#include "test-tool.h"
+
+struct test_cmd {
+       const char *name;
+       int (*fn)(int argc, const char **argv);
+};
+
+static struct test_cmd cmds[] = {
+       { "chmtime", cmd__chmtime },
+       { "config", cmd__config },
+       { "ctype", cmd__ctype },
+       { "date", cmd__date },
+       { "delta", cmd__delta },
+       { "drop-caches", cmd__drop_caches },
+       { "dump-cache-tree", cmd__dump_cache_tree },
+       { "dump-split-index", cmd__dump_split_index },
+       { "example-decorate", cmd__example_decorate },
+       { "genrandom", cmd__genrandom },
+       { "hashmap", cmd__hashmap },
+       { "index-version", cmd__index_version },
+       { "lazy-init-name-hash", cmd__lazy_init_name_hash },
+       { "match-trees", cmd__match_trees },
+       { "mergesort", cmd__mergesort },
+       { "mktemp", cmd__mktemp },
+       { "online-cpus", cmd__online_cpus },
+       { "path-utils", cmd__path_utils },
+       { "prio-queue", cmd__prio_queue },
+       { "read-cache", cmd__read_cache },
+       { "ref-store", cmd__ref_store },
+       { "regex", cmd__regex },
+       { "revision-walking", cmd__revision_walking },
+       { "run-command", cmd__run_command },
+       { "scrap-cache-tree", cmd__scrap_cache_tree },
+       { "sha1-array", cmd__sha1_array },
+       { "sha1", cmd__sha1 },
+       { "sigchain", cmd__sigchain },
+       { "strcmp-offset", cmd__strcmp_offset },
+       { "string-list", cmd__string_list },
+       { "submodule-config", cmd__submodule_config },
+       { "subprocess", cmd__subprocess },
+       { "urlmatch-normalization", cmd__urlmatch_normalization },
+       { "wildmatch", cmd__wildmatch },
+       { "write-cache", cmd__write_cache },
+};
+
+int cmd_main(int argc, const char **argv)
+{
+       int i;
+
+       if (argc < 2)
+               die("I need a test name!");
+
+       for (i = 0; i < ARRAY_SIZE(cmds); i++) {
+               if (!strcmp(cmds[i].name, argv[1])) {
+                       argv++;
+                       argc--;
+                       return cmds[i].fn(argc, argv);
+               }
+       }
+       die("There is no test named '%s'", argv[1]);
+}
diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h
new file mode 100644 (file)
index 0000000..7116ddf
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef __TEST_TOOL_H__
+#define __TEST_TOOL_H__
+
+int cmd__chmtime(int argc, const char **argv);
+int cmd__config(int argc, const char **argv);
+int cmd__ctype(int argc, const char **argv);
+int cmd__date(int argc, const char **argv);
+int cmd__delta(int argc, const char **argv);
+int cmd__drop_caches(int argc, const char **argv);
+int cmd__dump_cache_tree(int argc, const char **argv);
+int cmd__dump_split_index(int argc, const char **argv);
+int cmd__example_decorate(int argc, const char **argv);
+int cmd__genrandom(int argc, const char **argv);
+int cmd__hashmap(int argc, const char **argv);
+int cmd__index_version(int argc, const char **argv);
+int cmd__lazy_init_name_hash(int argc, const char **argv);
+int cmd__match_trees(int argc, const char **argv);
+int cmd__mergesort(int argc, const char **argv);
+int cmd__mktemp(int argc, const char **argv);
+int cmd__online_cpus(int argc, const char **argv);
+int cmd__path_utils(int argc, const char **argv);
+int cmd__prio_queue(int argc, const char **argv);
+int cmd__read_cache(int argc, const char **argv);
+int cmd__ref_store(int argc, const char **argv);
+int cmd__regex(int argc, const char **argv);
+int cmd__revision_walking(int argc, const char **argv);
+int cmd__run_command(int argc, const char **argv);
+int cmd__scrap_cache_tree(int argc, const char **argv);
+int cmd__sha1_array(int argc, const char **argv);
+int cmd__sha1(int argc, const char **argv);
+int cmd__sigchain(int argc, const char **argv);
+int cmd__strcmp_offset(int argc, const char **argv);
+int cmd__string_list(int argc, const char **argv);
+int cmd__submodule_config(int argc, const char **argv);
+int cmd__subprocess(int argc, const char **argv);
+int cmd__urlmatch_normalization(int argc, const char **argv);
+int cmd__wildmatch(int argc, const char **argv);
+int cmd__write_cache(int argc, const char **argv);
+
+#endif
index 49b6e836be257c0689601bf17138439cff0d61a0..8f4d67e646953c5b094b55ec8112f6ba3b9b9024 100644 (file)
@@ -1,9 +1,10 @@
+#include "test-tool.h"
 #include "git-compat-util.h"
 #include "urlmatch.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__urlmatch_normalization(int argc, const char **argv)
 {
-       const char usage[] = "test-urlmatch-normalization [-p | -l] <url1> | <url1> <url2>";
+       const char usage[] = "test-tool urlmatch-normalization [-p | -l] <url1> | <url1> <url2>";
        char *url1, *url2;
        int opt_p = 0, opt_l = 0;
 
index 66d33dfcfd1a0b3fa6b94a029aab9117a2511224..2c103d1824cfc7f035aea4a6453c9706f9b4491e 100644 (file)
@@ -1,6 +1,7 @@
+#include "test-tool.h"
 #include "cache.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__wildmatch(int argc, const char **argv)
 {
        int i;
        for (i = 2; i < argc; i++) {
index b7ee0396692b6143e852af16695dd5800c0044be..017dc303800d9ba385a9836d85f81445c6b7e8b7 100644 (file)
@@ -1,9 +1,10 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "lockfile.h"
 
 static struct lock_file index_lock;
 
-int cmd_main(int argc, const char **argv)
+int cmd__write_cache(int argc, const char **argv)
 {
        int i, cnt = 1, lockfd;
        if (argc == 2)
index 54fd5a6ca02757f77004d0f8babfcc6d616162c0..c27599474cf2f272b53e2e76997e5e38af0fe647 100644 (file)
@@ -39,7 +39,7 @@ native_path () {
        then
                path=$(cygpath --windows "$path")
        else
-               path=$(test-path-utils real_path "$path")
+               path=$(test-tool path-utils real_path "$path")
        fi &&
        echo "$path"
 }
index 4c1f81f1678d83ab8b9ee7704d400bdd47ea70f7..a8130f9119d629462efb6b52f91890c0352e4e85 100644 (file)
@@ -49,7 +49,7 @@ rawsvnrepo="$svnrepo"
 svnrepo="file://$svnrepo"
 
 poke() {
-       test-chmtime +1 "$1"
+       test-tool chmtime +1 "$1"
 }
 
 # We need this, because we should pass empty configuration directory to
index 75098465716512a3373e64d51a5cdf848e9f1101..501078249dc7d5d5b27dd6b75a22086767a0eddf 100644 (file)
@@ -85,7 +85,7 @@ pack_obj () {
 
 # Compute and append pack trailer to "$1"
 pack_trailer () {
-       test-sha1 -b <"$1" >trailer.tmp &&
+       test-tool sha1 -b <"$1" >trailer.tmp &&
        cat trailer.tmp >>"$1" &&
        rm -f trailer.tmp
 }
index 821cf1498b78bbb5b43b2bda32bbcc88d580c31b..bc865160e7e3370f9462beda9d8b3866e3c2111b 100755 (executable)
@@ -4,6 +4,7 @@
 use strict;
 use warnings;
 use JSON;
+use Getopt::Long;
 use Git;
 
 sub get_times {
@@ -36,34 +37,34 @@ sub format_times {
        return $out;
 }
 
+sub usage {
+       print <<EOT;
+./aggregate.perl [options] [--] [<dir_or_rev>...] [--] [<test_script>...] >
+
+  Options:
+    --codespeed          * Format output for Codespeed
+    --reponame    <str>  * Send given reponame to codespeed
+    --sort-by     <str>  * Sort output (only the "regression" criterion is supported)
+    --subsection  <str>  * Use results from given subsection
+
+EOT
+       exit(1);
+}
+
 my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests,
-    $codespeed, $subsection, $reponame);
+    $codespeed, $sortby, $subsection, $reponame);
+
+Getopt::Long::Configure qw/ require_order /;
+
+my $rc = GetOptions("codespeed"     => \$codespeed,
+                   "reponame=s"    => \$reponame,
+                   "sort-by=s"     => \$sortby,
+                   "subsection=s"  => \$subsection);
+usage() unless $rc;
+
 while (scalar @ARGV) {
        my $arg = $ARGV[0];
        my $dir;
-       if ($arg eq "--codespeed") {
-               $codespeed = 1;
-               shift @ARGV;
-               next;
-       }
-       if ($arg eq "--subsection") {
-               shift @ARGV;
-               $subsection = $ARGV[0];
-               shift @ARGV;
-               if (! $subsection) {
-                       die "empty subsection";
-               }
-               next;
-       }
-       if ($arg eq "--reponame") {
-               shift @ARGV;
-               $reponame = $ARGV[0];
-               shift @ARGV;
-               if (! $reponame) {
-                       die "empty reponame";
-               }
-               next;
-       }
        last if -f $arg or $arg eq "--";
        if (! -d $arg) {
                my $rev = Git::command_oneline(qw(rev-parse --verify), $arg);
@@ -147,6 +148,11 @@ sub have_slash {
        return 0;
 }
 
+sub display_dir {
+       my ($d) = @_;
+       return exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d};
+}
+
 sub print_default_results {
        my %descrs;
        my $descrlen = 4; # "Test"
@@ -168,8 +174,7 @@ sub print_default_results {
        my %times;
        my @colwidth = ((0)x@dirs);
        for my $i (0..$#dirs) {
-               my $d = $dirs[$i];
-               my $w = length (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d});
+               my $w = length display_dir($dirs[$i]);
                $colwidth[$i] = $w if $w > $colwidth[$i];
        }
        for my $t (@subtests) {
@@ -188,8 +193,7 @@ sub print_default_results {
 
        printf "%-${descrlen}s", "Test";
        for my $i (0..$#dirs) {
-               my $d = $dirs[$i];
-               printf "   %-$colwidth[$i]s", (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d});
+               printf "   %-$colwidth[$i]s", display_dir($dirs[$i]);
        }
        print "\n";
        print "-"x$totalwidth, "\n";
@@ -206,6 +210,50 @@ sub print_default_results {
        }
 }
 
+sub print_sorted_results {
+       my ($sortby) = @_;
+
+       if ($sortby ne "regression") {
+               print "Only 'regression' is supported as a '--sort-by' argument\n";
+               usage();
+       }
+
+       my @evolutions;
+       for my $t (@subtests) {
+               my ($prevr, $prevu, $prevs, $prevrev);
+               for my $i (0..$#dirs) {
+                       my $d = $dirs[$i];
+                       my ($r, $u, $s) = get_times("$resultsdir/$prefixes{$d}$t.times");
+                       if ($i > 0 and defined $r and defined $prevr and $prevr > 0) {
+                               my $percent = 100.0 * ($r - $prevr) / $prevr;
+                               push @evolutions, { "percent"  => $percent,
+                                                   "test"     => $t,
+                                                   "prevrev"  => $prevrev,
+                                                   "rev"      => $d,
+                                                   "prevr"    => $prevr,
+                                                   "r"        => $r,
+                                                   "prevu"    => $prevu,
+                                                   "u"        => $u,
+                                                   "prevs"    => $prevs,
+                                                   "s"        => $s};
+                       }
+                       ($prevr, $prevu, $prevs, $prevrev) = ($r, $u, $s, $d);
+               }
+       }
+
+       my @sorted_evolutions = sort { $b->{percent} <=> $a->{percent} } @evolutions;
+
+       for my $e (@sorted_evolutions) {
+               printf "%+.1f%%", $e->{percent};
+               print " " . $e->{test};
+               print " " . format_times($e->{prevr}, $e->{prevu}, $e->{prevs});
+               print " " . format_times($e->{r}, $e->{u}, $e->{s});
+               print " " . display_dir($e->{prevrev});
+               print " " . display_dir($e->{rev});
+               print "\n";
+       }
+}
+
 sub print_codespeed_results {
        my ($subsection) = @_;
 
@@ -260,6 +308,8 @@ sub print_codespeed_results {
 
 if ($codespeed) {
        print_codespeed_results($subsection);
+} elsif (defined $sortby) {
+       print_sorted_results($sortby);
 } else {
        print_default_results();
 }
diff --git a/t/perf/bisect_regression b/t/perf/bisect_regression
new file mode 100755 (executable)
index 0000000..a94d995
--- /dev/null
@@ -0,0 +1,73 @@
+#!/bin/sh
+
+# Read a line coming from `./aggregate.perl --sort-by regression ...`
+# and automatically bisect to find the commit responsible for the
+# performance regression.
+#
+# Lines from `./aggregate.perl --sort-by regression ...` look like:
+#
+# +100.0% p7821-grep-engines-fixed.1 0.04(0.10+0.03) 0.08(0.11+0.08) v2.14.3 v2.15.1
+# +33.3% p7820-grep-engines.1 0.03(0.08+0.02) 0.04(0.08+0.02) v2.14.3 v2.15.1
+#
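+# A hypothetical invocation (the revisions and test script below are purely
+# illustrative) pipes the worst regression straight into this script:
+#
+#     ./aggregate.perl --sort-by regression v2.14.3 v2.15.1 -- p7820-grep-engines.sh |
+#     head -n 1 |
+#     ./bisect_regression
+#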
+
+die () {
+       echo >&2 "error: $*"
+       exit 1
+}
+
+while [ $# -gt 0 ]; do
+       arg="$1"
+       case "$arg" in
+       --help)
+               echo "usage: $0 [--config file] [--subsection subsection]"
+               exit 0
+               ;;
+       --config)
+               shift
+               GIT_PERF_CONFIG_FILE=$(cd "$(dirname "$1")"; pwd)/$(basename "$1")
+               export GIT_PERF_CONFIG_FILE
+               shift ;;
+       --subsection)
+               shift
+               GIT_PERF_SUBSECTION="$1"
+               export GIT_PERF_SUBSECTION
+               shift ;;
+       --*)
+               die "unrecognised option: '$arg'" ;;
+       *)
+               die "unknown argument '$arg'"
+               ;;
+       esac
+done
+
+read -r regression subtest oldtime newtime oldrev newrev
+
+test_script=$(echo "$subtest" | sed -e 's/\(.*\)\.[0-9]*$/\1.sh/')
+test_number=$(echo "$subtest" | sed -e 's/.*\.\([0-9]*\)$/\1/')
+
+# oldtime and newtime are decimal numbers, not integers
+
+oldtime=$(echo "$oldtime" | sed -e 's/^\([0-9]\+\.[0-9]\+\).*$/\1/')
+newtime=$(echo "$newtime" | sed -e 's/^\([0-9]\+\.[0-9]\+\).*$/\1/')
+
+test $(echo "$newtime" "$oldtime" | awk '{ print ($1 > $2) }') = 1 ||
+       die "New time '$newtime' should be greater than old time '$oldtime'"
+
+tmpdir=$(mktemp -d -t bisect_regression_XXXXXX) || die "Failed to create temp directory"
+echo "$oldtime" >"$tmpdir/oldtime" || die "Failed to write to '$tmpdir/oldtime'"
+echo "$newtime" >"$tmpdir/newtime" || die "Failed to write to '$tmpdir/newtime'"
+
+# Bisecting must be performed from the top level directory (even with --no-checkout)
+(
+       toplevel_dir=$(git rev-parse --show-toplevel) || die "Failed to find top level directory"
+       cd "$toplevel_dir" || die "Failed to cd into top level directory '$toplevel_dir'"
+
+       git bisect start --no-checkout "$newrev" "$oldrev" || die "Failed to start bisecting"
+
+       git bisect run t/perf/bisect_run_script "$test_script" "$test_number" "$tmpdir"
+       res="$?"
+
+       git bisect reset
+
+       exit "$res"
+)
diff --git a/t/perf/bisect_run_script b/t/perf/bisect_run_script
new file mode 100755 (executable)
index 0000000..3ebaf15
--- /dev/null
@@ -0,0 +1,53 @@
+#!/bin/sh
+
+script="$1"
+test_number="$2"
+info_dir="$3"
+
+# This aborts the bisection immediately
+die () {
+       echo >&2 "error: $*"
+       exit 255
+}
+
+bisect_head=$(git rev-parse --verify BISECT_HEAD) || die "Failed to find BISECT_HEAD ref"
+
+script_number=$(echo "$script" | sed -e "s/^p\([0-9]*\).*\$/\1/") || die "Failed to get script number for '$script'"
+
+oldtime=$(cat "$info_dir/oldtime") || die "Failed to access '$info_dir/oldtime'"
+newtime=$(cat "$info_dir/newtime") || die "Failed to access '$info_dir/newtime'"
+
+cd t/perf || die "Failed to cd into 't/perf'"
+
+result_file="$info_dir/perf_${script_number}_${bisect_head}_results.txt"
+
+GIT_PERF_DIRS_OR_REVS="$bisect_head"
+export GIT_PERF_DIRS_OR_REVS
+
+# Don't use codespeed
+GIT_PERF_CODESPEED_OUTPUT=
+GIT_PERF_SEND_TO_CODESPEED=
+export GIT_PERF_CODESPEED_OUTPUT
+export GIT_PERF_SEND_TO_CODESPEED
+
+./run "$script" >"$result_file" 2>&1 || die "Failed to run perf test '$script'"
+
+rtime=$(sed -n "s/^$script_number\.$test_number:.*\([0-9]\+\.[0-9]\+\)(.*).*\$/\1/p" "$result_file")
+
+echo "newtime: $newtime"
+echo "rtime: $rtime"
+echo "oldtime: $oldtime"
+
+# Compare ($newtime - $rtime) with ($rtime - $oldtime)
+# Times are decimal numbers, not integers
+
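+# (awk is used for the comparison because plain shell arithmetic only
+# handles integers, not floating point values)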
+if test $(echo "$newtime" "$rtime" "$oldtime" | awk '{ print ($1 - $2 > $2 - $3) }') = 1
+then
+       # Current commit is considered "good/old"
+       echo "$rtime" >"$info_dir/oldtime"
+       exit 0
+else
+       # Current commit is considered "bad/new"
+       echo "$rtime" >"$info_dir/newtime"
+       exit 1
+fi
index 9180ae9343a03824653e087aaa766b9d33e50a94..cdd105a5945239850a12479ff1138a5428e3b6d6 100755 (executable)
@@ -8,7 +8,7 @@ test_perf_default_repo
 
 count=1000
 test_perf "read_cache/discard_cache $count times" "
-       test-read-cache $count
+       test-tool read-cache $count
 "
 
 test_done
index 8de5a98cfc6c513bb35cd8355f3e831ab3a8007d..1afc08fe7f1990cdf7143f0dfd3cfaa66628ca73 100755 (executable)
@@ -7,8 +7,8 @@ test_perf_large_repo
 test_checkout_worktree
 
 test_expect_success 'verify both methods build the same hashmaps' '
-       test-lazy-init-name-hash --dump --single >out.single &&
-       if test-lazy-init-name-hash --dump --multi >out.multi
+       test-tool lazy-init-name-hash --dump --single >out.single &&
+       if test-tool lazy-init-name-hash --dump --multi >out.multi
        then
                test_set_prereq REPO_BIG_ENOUGH_FOR_MULTI &&
                sort <out.single >sorted.single &&
@@ -46,11 +46,11 @@ test_expect_success 'calibrate' '
 '
 
 test_perf "single-threaded, $desc" "
-       test-lazy-init-name-hash --single --count=$count
+       test-tool lazy-init-name-hash --single --count=$count
 "
 
 test_perf REPO_BIG_ENOUGH_FOR_MULTI "multi-threaded, $desc" "
-       test-lazy-init-name-hash --multi --count=$count
+       test-tool lazy-init-name-hash --multi --count=$count
 "
 
 test_done
index 261fe92fd93aaf65101dfe4d1b87741453985cbe..09595264f09fa4b1dc28b3e5d2a3d6482d998bcc 100755 (executable)
@@ -23,7 +23,7 @@ test_expect_success "setup repo" '
 
 count=3
 test_perf "write_locked_index $count times ($nr_files files)" "
-       test-write-cache $count
+       test-tool write-cache $count
 "
 
 test_done
index 7c9a35a646a6406e5c284a65748863a93098c2af..6e924f5fa3f900b4b82a55f282e810572a62f20b 100755 (executable)
@@ -16,7 +16,7 @@ test_perf 'sort(1)' '
 '
 
 test_perf 'string_list_sort()' '
-       test-string-list sort <unsorted >actual
+       test-tool string-list sort <unsorted >actual
 '
 
 test_expect_success 'string_list_sort() sorts like sort(1)' '
index 65e145c02d9eafc5a43d56c1fb30fef7be63d704..def7ecdbc786c6472e5b7909a97650a1034cecb2 100755 (executable)
@@ -118,7 +118,7 @@ test_expect_success "setup for fsmonitor" '
 '
 
 if test -n "$GIT_PERF_7519_DROP_CACHE"; then
-       test-drop-caches
+       test-tool drop-caches
 fi
 
 test_perf "status (fsmonitor=$INTEGRATION_SCRIPT)" '
@@ -126,7 +126,7 @@ test_perf "status (fsmonitor=$INTEGRATION_SCRIPT)" '
 '
 
 if test -n "$GIT_PERF_7519_DROP_CACHE"; then
-       test-drop-caches
+       test-tool drop-caches
 fi
 
 test_perf "status -uno (fsmonitor=$INTEGRATION_SCRIPT)" '
@@ -134,7 +134,7 @@ test_perf "status -uno (fsmonitor=$INTEGRATION_SCRIPT)" '
 '
 
 if test -n "$GIT_PERF_7519_DROP_CACHE"; then
-       test-drop-caches
+       test-tool drop-caches
 fi
 
 test_perf "status -uall (fsmonitor=$INTEGRATION_SCRIPT)" '
@@ -148,7 +148,7 @@ test_expect_success "setup without fsmonitor" '
 '
 
 if test -n "$GIT_PERF_7519_DROP_CACHE"; then
-       test-drop-caches
+       test-tool drop-caches
 fi
 
 test_perf "status (fsmonitor=$INTEGRATION_SCRIPT)" '
@@ -156,7 +156,7 @@ test_perf "status (fsmonitor=$INTEGRATION_SCRIPT)" '
 '
 
 if test -n "$GIT_PERF_7519_DROP_CACHE"; then
-       test-drop-caches
+       test-tool drop-caches
 fi
 
 test_perf "status -uno (fsmonitor=$INTEGRATION_SCRIPT)" '
@@ -164,7 +164,7 @@ test_perf "status -uno (fsmonitor=$INTEGRATION_SCRIPT)" '
 '
 
 if test -n "$GIT_PERF_7519_DROP_CACHE"; then
-       test-drop-caches
+       test-tool drop-caches
 fi
 
 test_perf "status -uall (fsmonitor=$INTEGRATION_SCRIPT)" '
index 213da5d6b9437b7db7f3e5b824afea1df465d934..9aaa733c770fb97157e273b6c6d1efd38f9a630a 100755 (executable)
@@ -1,21 +1,34 @@
 #!/bin/sh
 
-case "$1" in
+die () {
+       echo >&2 "error: $*"
+       exit 1
+}
+
+while [ $# -gt 0 ]; do
+       arg="$1"
+       case "$arg" in
+       --)
+               break ;;
        --help)
-               echo "usage: $0 [--config file] [other_git_tree...] [--] [test_scripts]"
-               exit 0
-               ;;
+               echo "usage: $0 [--config file] [--subsection subsec] [other_git_tree...] [--] [test_scripts]"
+               exit 0 ;;
        --config)
                shift
                GIT_PERF_CONFIG_FILE=$(cd "$(dirname "$1")"; pwd)/$(basename "$1")
                export GIT_PERF_CONFIG_FILE
                shift ;;
-esac
-
-die () {
-       echo >&2 "error: $*"
-       exit 1
-}
+       --subsection)
+               shift
+               GIT_PERF_SUBSECTION="$1"
+               export GIT_PERF_SUBSECTION
+               shift ;;
+       --*)
+               die "unrecognised option: '$arg'" ;;
+       *)
+               break ;;
+       esac
+done
 
 run_one_dir () {
        if test $# -eq 0; then
@@ -172,9 +185,32 @@ get_subsections "perf" >test-results/run_subsections.names
 
 if test $(wc -l <test-results/run_subsections.names) -eq 0
 then
+       if test -n "$GIT_PERF_SUBSECTION"
+       then
+               if test -n "$GIT_PERF_CONFIG_FILE"
+               then
+                       die "no subsections are defined in config file '$GIT_PERF_CONFIG_FILE'"
+               else
+                       die "subsection '$GIT_PERF_SUBSECTION' defined without a config file"
+               fi
+       fi
        (
                run_subsection "$@"
        )
+elif test -n "$GIT_PERF_SUBSECTION"
+then
+       egrep "^$GIT_PERF_SUBSECTION\$" test-results/run_subsections.names >/dev/null ||
+               die "subsection '$GIT_PERF_SUBSECTION' not found in '$GIT_PERF_CONFIG_FILE'"
+
+       egrep "^$GIT_PERF_SUBSECTION\$" test-results/run_subsections.names | while read -r subsec
+       do
+               (
+                       GIT_PERF_SUBSECTION="$subsec"
+                       export GIT_PERF_SUBSECTION
+                       echo "======== Run for subsection '$GIT_PERF_SUBSECTION' ========"
+                       run_subsection "$@"
+               )
+       done
 else
        while read -r subsec
        do
index 46042f1f1338f628d5256f0e932a4037e98b34ab..4c214bd11c48859f0c4e64dbbe2d783e744eb1d1 100755 (executable)
@@ -10,7 +10,7 @@ one
 EOF
 
 test_expect_success 'sigchain works' '
-       { test-sigchain >actual; ret=$?; } &&
+       { test-tool sigchain >actual; ret=$?; } &&
        {
                # Signal death by raise() on Windows acts like exit(3),
                # regardless of the signal number. So we must allow that
@@ -24,7 +24,7 @@ test_expect_success 'sigchain works' '
 test_expect_success !MINGW 'signals are propagated using shell convention' '
        # we use exec here to avoid any sub-shell interpretation
        # of the exit code
-       git config alias.sigterm "!exec test-sigchain" &&
+       git config alias.sigterm "!exec test-tool sigchain" &&
        test_expect_code 143 git sigterm
 '
 
@@ -36,7 +36,7 @@ large_git () {
 }
 
 test_expect_success 'create blob' '
-       test-genrandom foo 16384 >file &&
+       test-tool genrandom foo 16384 >file &&
        git add file
 '
 
index 7ac9466d5055e02179467fa9e41004bbc89df6dc..64ff86df8eb02aa635af1bad0b6d1f31578f222a 100755 (executable)
@@ -10,7 +10,7 @@ check_relative() {
        t=$(($TEST_DATE_NOW - $1))
        echo "$t -> $2" >expect
        test_expect_${3:-success} "relative date ($2)" "
-       test-date relative $t >actual &&
+       test-tool date relative $t >actual &&
        test_i18ncmp expect actual
        "
 }
@@ -35,7 +35,7 @@ check_show () {
        zone=$5
        test_expect_success $prereqs "show date ($format:$time)" '
                echo "$time -> $expect" >expect &&
-               TZ=${zone:-$TZ} test-date show:"$format" "$time" >actual &&
+               TZ=${zone:-$TZ} test-tool date show:"$format" "$time" >actual &&
                test_cmp expect actual
        '
 }
@@ -71,7 +71,7 @@ check_show iso-local "$FUTURE" "2152-06-19 22:24:56 +0000" TIME_IS_64BIT,TIME_T_
 check_parse() {
        echo "$1 -> $2" >expect
        test_expect_${4:-success} "parse date ($1${3:+ TZ=$3})" "
-       TZ=${3:-$TZ} test-date parse '$1' >actual &&
+       TZ=${3:-$TZ} test-tool date parse '$1' >actual &&
        test_cmp expect actual
        "
 }
@@ -92,7 +92,7 @@ check_parse '2008-02-14 20:30:45' '2008-02-14 20:30:45 -0500' EST5
 check_approxidate() {
        echo "$1 -> $2 +0000" >expect
        test_expect_${3:-success} "parse approxidate ($1)" "
-       test-date approxidate '$1' >actual &&
+       test-tool date approxidate '$1' >actual &&
        test_cmp expect actual
        "
 }
index 94045c3fad18d94e0e83acf757594cede066134b..e56dfce6680298c133bc2b2de12d9492fda66b7a 100755 (executable)
@@ -17,7 +17,7 @@ cat >expect <<'EOF'
 10
 EOF
 test_expect_success 'basic ordering' '
-       test-prio-queue 2 6 3 10 9 5 7 4 5 8 1 dump >actual &&
+       test-tool prio-queue 2 6 3 10 9 5 7 4 5 8 1 dump >actual &&
        test_cmp expect actual
 '
 
@@ -30,7 +30,7 @@ cat >expect <<'EOF'
 6
 EOF
 test_expect_success 'mixed put and get' '
-       test-prio-queue 6 2 4 get 5 3 get get 1 dump >actual &&
+       test-tool prio-queue 6 2 4 get 5 3 get get 1 dump >actual &&
        test_cmp expect actual
 '
 
@@ -43,7 +43,7 @@ NULL
 NULL
 EOF
 test_expect_success 'notice empty queue' '
-       test-prio-queue 1 2 get get get 1 2 get get get >actual &&
+       test-tool prio-queue 1 2 get get get 1 2 get get get >actual &&
        test_cmp expect actual
 '
 
index 9c217d948c14dfd75f73e27a0911fd194eb11bce..3f1f505e8937f391666a1b7e6d9b972a5f146974 100755 (executable)
@@ -4,7 +4,7 @@ test_description='test hashmap and string hash functions'
 . ./test-lib.sh
 
 test_hashmap() {
-       echo "$1" | test-hashmap $3 > actual &&
+       echo "$1" | test-tool hashmap $3 > actual &&
        echo "$2" > expect &&
        test_cmp expect actual
 }
@@ -232,7 +232,7 @@ test_expect_success 'grow / shrink' '
        echo value40 >> expect &&
        echo size >> in &&
        echo 64 39 >> expect &&
-       cat in | test-hashmap > out &&
+       cat in | test-tool hashmap > out &&
        test_cmp expect out
 
 '
index 6d655cb161b2ed39eda97bb4acb54d1cf74a826d..419f31a8f7d4cb20041f054d25ddb25c7d43e3fb 100755 (executable)
@@ -11,7 +11,7 @@ then
 fi
 
 test_expect_success 'test-sha1 detects shattered pdf' '
-       test_must_fail test-sha1 <"$TEST_DATA/shattered-1.pdf" 2>err &&
+       test_must_fail test-tool sha1 <"$TEST_DATA/shattered-1.pdf" 2>err &&
        test_i18ngrep collision err &&
        grep 38762cf7f55934b34d179ae6a4c80cadccbb7f0a err
 '
index 46f8e583c37da7d03d715ea5cb1a4ee5bbe0ca28..9479a4aaabc1a4187fc702f864642ac493508bb9 100755 (executable)
@@ -19,7 +19,7 @@ write_script rot13-filter.pl "$PERL_PATH" \
 generate_random_characters () {
        LEN=$1
        NAME=$2
-       test-genrandom some-seed $LEN |
+       test-tool genrandom some-seed $LEN |
                perl -pe "s/./chr((ord($&) % 26) + ord('a'))/sge" >"$TEST_ROOT/$NAME"
 }
 
@@ -267,7 +267,7 @@ test_expect_success 'filtering large input to small output should use little mem
 '
 
 test_expect_success 'filter that does not read is fine' '
-       test-genrandom foo $((128 * 1024 + 1)) >big &&
+       test-tool genrandom foo $((128 * 1024 + 1)) >big &&
        echo "big filter=epipe" >.gitattributes &&
        test_config filter.epipe.clean "echo xyzzy" &&
        git add big &&
diff --git a/t/t0028-working-tree-encoding.sh b/t/t0028-working-tree-encoding.sh
new file mode 100755 (executable)
index 0000000..12b8eb9
--- /dev/null
@@ -0,0 +1,245 @@
+#!/bin/sh
+
+test_description='working-tree-encoding conversion via gitattributes'
+
+. ./test-lib.sh
+
+GIT_TRACE_WORKING_TREE_ENCODING=1 && export GIT_TRACE_WORKING_TREE_ENCODING
+
+test_expect_success 'setup test files' '
+       git config core.eol lf &&
+
+       text="hallo there!\ncan you read me?" &&
+       echo "*.utf16 text working-tree-encoding=utf-16" >.gitattributes &&
+       printf "$text" >test.utf8.raw &&
+       printf "$text" | iconv -f UTF-8 -t UTF-16 >test.utf16.raw &&
+       printf "$text" | iconv -f UTF-8 -t UTF-32 >test.utf32.raw &&
+
+       # Line ending tests
+       printf "one\ntwo\nthree\n" >lf.utf8.raw &&
+       printf "one\r\ntwo\r\nthree\r\n" >crlf.utf8.raw &&
+
+       # BOM tests
+       printf "\0a\0b\0c"                         >nobom.utf16be.raw &&
+       printf "a\0b\0c\0"                         >nobom.utf16le.raw &&
+       printf "\376\777\0a\0b\0c"                 >bebom.utf16be.raw &&
+       printf "\777\376a\0b\0c\0"                 >lebom.utf16le.raw &&
+       printf "\0\0\0a\0\0\0b\0\0\0c"             >nobom.utf32be.raw &&
+       printf "a\0\0\0b\0\0\0c\0\0\0"             >nobom.utf32le.raw &&
+       printf "\0\0\376\777\0\0\0a\0\0\0b\0\0\0c" >bebom.utf32be.raw &&
+       printf "\777\376\0\0a\0\0\0b\0\0\0c\0\0\0" >lebom.utf32le.raw &&
+
+       # Add only UTF-16 file, we will add the UTF-32 file later
+       cp test.utf16.raw test.utf16 &&
+       cp test.utf32.raw test.utf32 &&
+       git add .gitattributes test.utf16 &&
+       git commit -m initial
+'
+
+test_expect_success 'ensure UTF-8 is stored in Git' '
+       test_when_finished "rm -f test.utf16.git" &&
+
+       git cat-file -p :test.utf16 >test.utf16.git &&
+       test_cmp_bin test.utf8.raw test.utf16.git
+'
+
+test_expect_success 're-encode to UTF-16 on checkout' '
+       test_when_finished "rm -f test.utf16.raw" &&
+
+       rm test.utf16 &&
+       git checkout test.utf16 &&
+       test_cmp_bin test.utf16.raw test.utf16
+'
+
+test_expect_success 'check $GIT_DIR/info/attributes support' '
+       test_when_finished "rm -f test.utf32.git" &&
+       test_when_finished "git reset --hard HEAD" &&
+
+       echo "*.utf32 text working-tree-encoding=utf-32" >.git/info/attributes &&
+       git add test.utf32 &&
+
+       git cat-file -p :test.utf32 >test.utf32.git &&
+       test_cmp_bin test.utf8.raw test.utf32.git
+'
+
+for i in 16 32
+do
+       test_expect_success "check prohibited UTF-${i} BOM" '
+               test_when_finished "git reset --hard HEAD" &&
+
+               echo "*.utf${i}be text working-tree-encoding=utf-${i}be" >>.gitattributes &&
+               echo "*.utf${i}le text working-tree-encoding=utf-${i}LE" >>.gitattributes &&
+
+               # Here we add UTF-16 (resp. UTF-32) files with a BOM (big/little-endian)
+               # but we tell Git to treat it as UTF-16BE/UTF-16LE (resp. UTF-32).
+               # In these cases the BOM is prohibited.
+               cp bebom.utf${i}be.raw bebom.utf${i}be &&
+               test_must_fail git add bebom.utf${i}be 2>err.out &&
+               test_i18ngrep "fatal: BOM is prohibited .* utf-${i}be" err.out &&
+               test_i18ngrep "use UTF-${i} as working-tree-encoding" err.out &&
+
+               cp lebom.utf${i}le.raw lebom.utf${i}be &&
+               test_must_fail git add lebom.utf${i}be 2>err.out &&
+               test_i18ngrep "fatal: BOM is prohibited .* utf-${i}be" err.out &&
+               test_i18ngrep "use UTF-${i} as working-tree-encoding" err.out &&
+
+               cp bebom.utf${i}be.raw bebom.utf${i}le &&
+               test_must_fail git add bebom.utf${i}le 2>err.out &&
+               test_i18ngrep "fatal: BOM is prohibited .* utf-${i}LE" err.out &&
+               test_i18ngrep "use UTF-${i} as working-tree-encoding" err.out &&
+
+               cp lebom.utf${i}le.raw lebom.utf${i}le &&
+               test_must_fail git add lebom.utf${i}le 2>err.out &&
+               test_i18ngrep "fatal: BOM is prohibited .* utf-${i}LE" err.out &&
+               test_i18ngrep "use UTF-${i} as working-tree-encoding" err.out
+       '
+
+       test_expect_success "check required UTF-${i} BOM" '
+               test_when_finished "git reset --hard HEAD" &&
+
+               echo "*.utf${i} text working-tree-encoding=utf-${i}" >>.gitattributes &&
+
+               cp nobom.utf${i}be.raw nobom.utf${i} &&
+               test_must_fail git add nobom.utf${i} 2>err.out &&
+               test_i18ngrep "fatal: BOM is required .* utf-${i}" err.out &&
+               test_i18ngrep "use UTF-${i}BE or UTF-${i}LE" err.out &&
+
+               cp nobom.utf${i}le.raw nobom.utf${i} &&
+               test_must_fail git add nobom.utf${i} 2>err.out &&
+               test_i18ngrep "fatal: BOM is required .* utf-${i}" err.out &&
+               test_i18ngrep "use UTF-${i}BE or UTF-${i}LE" err.out
+       '
+
+       test_expect_success "eol conversion for UTF-${i} encoded files on checkout" '
+               test_when_finished "rm -f crlf.utf${i}.raw lf.utf${i}.raw" &&
+               test_when_finished "git reset --hard HEAD^" &&
+
+               cat lf.utf8.raw | iconv -f UTF-8 -t UTF-${i} >lf.utf${i}.raw &&
+               cat crlf.utf8.raw | iconv -f UTF-8 -t UTF-${i} >crlf.utf${i}.raw &&
+               cp crlf.utf${i}.raw eol.utf${i} &&
+
+               cat >expectIndexLF <<-EOF &&
+                       i/lf    w/-text attr/text               eol.utf${i}
+               EOF
+
+               git add eol.utf${i} &&
+               git commit -m eol &&
+
+               # UTF-${i} with CRLF (Windows line endings)
+               rm eol.utf${i} &&
+               git -c core.eol=crlf checkout eol.utf${i} &&
+               test_cmp_bin crlf.utf${i}.raw eol.utf${i} &&
+
+               # Although the file has CRLF in the working tree,
+               # ensure LF in the index
+               git ls-files --eol eol.utf${i} >actual &&
+               test_cmp expectIndexLF actual &&
+
+               # UTF-${i} with LF (Unix line endings)
+               rm eol.utf${i} &&
+               git -c core.eol=lf checkout eol.utf${i} &&
+               test_cmp_bin lf.utf${i}.raw eol.utf${i} &&
+
+               # The file has LF in the working tree; ensure LF in the index
+               git ls-files --eol eol.utf${i} >actual &&
+               test_cmp expectIndexLF actual
+       '
+done
+
+test_expect_success 'check unsupported encodings' '
+       test_when_finished "git reset --hard HEAD" &&
+
+       echo "*.set text working-tree-encoding" >.gitattributes &&
+       printf "set" >t.set &&
+       test_must_fail git add t.set 2>err.out &&
+       test_i18ngrep "true/false are no valid working-tree-encodings" err.out &&
+
+       echo "*.unset text -working-tree-encoding" >.gitattributes &&
+       printf "unset" >t.unset &&
+       git add t.unset &&
+
+       echo "*.empty text working-tree-encoding=" >.gitattributes &&
+       printf "empty" >t.empty &&
+       git add t.empty &&
+
+       echo "*.garbage text working-tree-encoding=garbage" >.gitattributes &&
+       printf "garbage" >t.garbage &&
+       test_must_fail git add t.garbage 2>err.out &&
+       test_i18ngrep "failed to encode" err.out
+'
+
+test_expect_success 'error if encoding round trip is not the same during refresh' '
+       BEFORE_STATE=$(git rev-parse HEAD) &&
+       test_when_finished "git reset --hard $BEFORE_STATE" &&
+
+       # Add and commit a UTF-16 file but skip the "working-tree-encoding"
+       # filter. Consequently, the in-repo representation is UTF-16 and not
+       # UTF-8. This simulates a Git version that has no working tree encoding
+       # support.
+       echo "*.utf16le text working-tree-encoding=utf-16le" >.gitattributes &&
+       echo "hallo" >nonsense.utf16le &&
+       TEST_HASH=$(git hash-object --no-filters -w nonsense.utf16le) &&
+       git update-index --add --cacheinfo 100644 $TEST_HASH nonsense.utf16le &&
+       COMMIT=$(git commit-tree -p $(git rev-parse HEAD) -m "plain commit" $(git write-tree)) &&
+       git update-ref refs/heads/master $COMMIT &&
+
+       test_must_fail git checkout HEAD^ 2>err.out &&
+       test_i18ngrep "error: .* overwritten by checkout:" err.out
+'
+
+test_expect_success 'error if encoding garbage is already in Git' '
+       BEFORE_STATE=$(git rev-parse HEAD) &&
+       test_when_finished "git reset --hard $BEFORE_STATE" &&
+
+       # Skip the UTF-16 filter for the added file
+       # This simulates a Git version that has no checkoutEncoding support
+       cp nobom.utf16be.raw nonsense.utf16 &&
+       TEST_HASH=$(git hash-object --no-filters -w nonsense.utf16) &&
+       git update-index --add --cacheinfo 100644 $TEST_HASH nonsense.utf16 &&
+       COMMIT=$(git commit-tree -p $(git rev-parse HEAD) -m "plain commit" $(git write-tree)) &&
+       git update-ref refs/heads/master $COMMIT &&
+
+       git diff 2>err.out &&
+       test_i18ngrep "error: BOM is required" err.out
+'
+
+test_expect_success 'check roundtrip encoding' '
+       test_when_finished "rm -f roundtrip.shift roundtrip.utf16" &&
+       test_when_finished "git reset --hard HEAD" &&
+
+       text="hallo there!\nroundtrip test here!" &&
+       printf "$text" | iconv -f UTF-8 -t SHIFT-JIS >roundtrip.shift &&
+       printf "$text" | iconv -f UTF-8 -t UTF-16 >roundtrip.utf16 &&
+       echo "*.shift text working-tree-encoding=SHIFT-JIS" >>.gitattributes &&
+
+       # SHIFT-JIS encoded files are round-trip checked by default...
+       GIT_TRACE=1 git add .gitattributes roundtrip.shift 2>&1 |
+               grep "Checking roundtrip encoding for SHIFT-JIS" &&
+       git reset &&
+
+       # ... unless we overwrite the Git config!
+       ! GIT_TRACE=1 git -c core.checkRoundtripEncoding=garbage \
+               add .gitattributes roundtrip.shift 2>&1 |
+               grep "Checking roundtrip encoding for SHIFT-JIS" &&
+       git reset &&
+
+       # UTF-16 encoded files should not be round-trip checked by default...
+       ! GIT_TRACE=1 git add roundtrip.utf16 2>&1 |
+               grep "Checking roundtrip encoding for UTF-16" &&
+       git reset &&
+
+       # ... unless we tell Git to check it!
+       GIT_TRACE=1 git -c core.checkRoundtripEncoding="UTF-16, UTF-32" \
+               add roundtrip.utf16 2>&1 |
+               grep "Checking roundtrip encoding for utf-16" &&
+       git reset &&
+
+       # ... unless we tell Git to check it!
+       # (here we also check that the casing of the encoding is irrelevant)
+       GIT_TRACE=1 git -c core.checkRoundtripEncoding="UTF-32, utf-16" \
+               add roundtrip.utf16 2>&1 |
+               grep "Checking roundtrip encoding for utf-16" &&
+       git reset
+'
+
+test_done
index 0c2fc81d7b0fa401db58c41cd2fed4469e80b058..04d474c84fd69121c686f5ca5adc40ce081f0e9d 100755 (executable)
@@ -291,7 +291,7 @@ test_expect_success 'OPT_CALLBACK() and OPT_BIT() work' '
 test_expect_success 'OPT_CALLBACK() and callback errors work' '
        test_must_fail test-parse-options --no-length >output 2>output.err &&
        test_i18ncmp expect output &&
-       test_i18ncmp expect.err output.err
+       test_must_be_empty output.err
 '
 
 cat >expect <<\EOF
diff --git a/t/t0041-usage.sh b/t/t0041-usage.sh
new file mode 100755 (executable)
index 0000000..5b927b7
--- /dev/null
@@ -0,0 +1,107 @@
+#!/bin/sh
+
+test_description='Test command behavior when given invalid argument values'
+
+. ./test-lib.sh
+
+test_expect_success 'setup ' '
+       test_commit "v1.0"
+'
+
+test_expect_success 'tag --contains <existent_tag>' '
+       git tag --contains "v1.0" >actual 2>actual.err &&
+       grep "v1.0" actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'tag --contains <inexistent_tag>' '
+       test_must_fail git tag --contains "notag" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'tag --no-contains <existent_tag>' '
+       git tag --no-contains "v1.0" >actual 2>actual.err  &&
+       test_line_count = 0 actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'tag --no-contains <inexistent_tag>' '
+       test_must_fail git tag --no-contains "notag" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'tag usage error' '
+       test_must_fail git tag --noopt >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "usage" actual.err
+'
+
+test_expect_success 'branch --contains <existent_commit>' '
+       git branch --contains "master" >actual 2>actual.err &&
+       test_i18ngrep "master" actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'branch --contains <inexistent_commit>' '
+       test_must_fail git branch --contains "nocommit" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'branch --no-contains <existent_commit>' '
+       git branch --no-contains "master" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'branch --no-contains <inexistent_commit>' '
+       test_must_fail git branch --no-contains "nocommit" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'branch usage error' '
+       test_must_fail git branch --noopt >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "usage" actual.err
+'
+
+test_expect_success 'for-each-ref --contains <existent_object>' '
+       git for-each-ref --contains "master" >actual 2>actual.err &&
+       test_line_count = 2 actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'for-each-ref --contains <inexistent_object>' '
+       test_must_fail git for-each-ref --contains "noobject" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'for-each-ref --no-contains <existent_object>' '
+       git for-each-ref --no-contains "master" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'for-each-ref --no-contains <inexistent_object>' '
+       test_must_fail git for-each-ref --no-contains "noobject" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'for-each-ref usage error' '
+       test_must_fail git for-each-ref --noopt >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "usage" actual.err
+'
+
+test_done
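
The convention tested throughout this new script is that an invalid argument value must produce an error message on stderr without the usage text, while an unknown option must produce the usage text. A rough interactive equivalent, with placeholder names, and message wording that may vary between versions and locales:

    # Invalid value: an error, but no usage block.
    git tag --contains definitely-not-a-tag 2>err
    grep "error" err
    ! grep "usage" err

    # Unknown option: the usage block is shown instead.
    git tag --noopt 2>err
    grep "usage" err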
index 7ea2bb515bd80cc026a18dbfdf4a66cb77d27f20..f46e3c4995509f62f954231f53321f1d24756eff 100755 (executable)
@@ -8,15 +8,15 @@ test_description='Test various path utilities'
 . ./test-lib.sh
 
 norm_path() {
-       expected=$(test-path-utils print_path "$2")
+       expected=$(test-tool path-utils print_path "$2")
        test_expect_success $3 "normalize path: $1 => $2" \
-       "test \"\$(test-path-utils normalize_path_copy '$1')\" = '$expected'"
+       "test \"\$(test-tool path-utils normalize_path_copy '$1')\" = '$expected'"
 }
 
 relative_path() {
-       expected=$(test-path-utils print_path "$3")
+       expected=$(test-tool path-utils print_path "$3")
        test_expect_success $4 "relative path: $1 $2 => $3" \
-       "test \"\$(test-path-utils relative_path '$1' '$2')\" = '$expected'"
+       "test \"\$(test-tool path-utils relative_path '$1' '$2')\" = '$expected'"
 }
 
 test_submodule_relative_url() {
@@ -37,7 +37,7 @@ test_git_path() {
 # On Windows, we are using MSYS's bash, which mangles the paths.
 # Absolute paths are anchored at the MSYS installation directory,
 # which means that the path / accounts for this many characters:
-rootoff=$(test-path-utils normalize_path_copy / | wc -c)
+rootoff=$(test-tool path-utils normalize_path_copy / | wc -c)
 # Account for the trailing LF:
 if test $rootoff = 2; then
        rootoff=        # we are on Unix
@@ -46,7 +46,7 @@ else
        # In MSYS2, the root directory "/" is translated into a Windows
        # directory *with* trailing slash. Let's test for that and adjust
        # our expected longest ancestor length accordingly.
-       case "$(test-path-utils print_path /)" in
+       case "$(test-tool path-utils print_path /)" in
        */) rootslash=1;;
        *) rootslash=0;;
        esac
@@ -61,7 +61,7 @@ ancestor() {
                expected=$(($expected+$rootoff))
        fi
        test_expect_success "longest ancestor: $1 $2 => $expected" \
-       "actual=\$(test-path-utils longest_ancestor_length '$1' '$2') &&
+       "actual=\$(test-tool path-utils longest_ancestor_length '$1' '$2') &&
         test \"\$actual\" = '$expected'"
 }
 
@@ -77,8 +77,8 @@ case $(uname -s) in
        ;;
 esac
 
-test_expect_success basename 'test-path-utils basename'
-test_expect_success dirname 'test-path-utils dirname'
+test_expect_success basename 'test-tool path-utils basename'
+test_expect_success dirname 'test-tool path-utils dirname'
 
 norm_path "" ""
 norm_path . ""
@@ -157,48 +157,48 @@ ancestor /foo/bar /foo:/bar 4
 ancestor /foo/bar /bar -1
 
 test_expect_success 'strip_path_suffix' '
-       test c:/msysgit = $(test-path-utils strip_path_suffix \
+       test c:/msysgit = $(test-tool path-utils strip_path_suffix \
                c:/msysgit/libexec//git-core libexec/git-core)
 '
 
 test_expect_success 'absolute path rejects the empty string' '
-       test_must_fail test-path-utils absolute_path ""
+       test_must_fail test-tool path-utils absolute_path ""
 '
 
 test_expect_success 'real path rejects the empty string' '
-       test_must_fail test-path-utils real_path ""
+       test_must_fail test-tool path-utils real_path ""
 '
 
 test_expect_success POSIX 'real path works on absolute paths 1' '
        nopath="hopefully-absent-path" &&
-       test "/" = "$(test-path-utils real_path "/")" &&
-       test "/$nopath" = "$(test-path-utils real_path "/$nopath")"
+       test "/" = "$(test-tool path-utils real_path "/")" &&
+       test "/$nopath" = "$(test-tool path-utils real_path "/$nopath")"
 '
 
 test_expect_success 'real path works on absolute paths 2' '
        nopath="hopefully-absent-path" &&
        # Find an existing top-level directory for the remaining tests:
        d=$(pwd -P | sed -e "s|^\([^/]*/[^/]*\)/.*|\1|") &&
-       test "$d" = "$(test-path-utils real_path "$d")" &&
-       test "$d/$nopath" = "$(test-path-utils real_path "$d/$nopath")"
+       test "$d" = "$(test-tool path-utils real_path "$d")" &&
+       test "$d/$nopath" = "$(test-tool path-utils real_path "$d/$nopath")"
 '
 
 test_expect_success POSIX 'real path removes extra leading slashes' '
        nopath="hopefully-absent-path" &&
-       test "/" = "$(test-path-utils real_path "///")" &&
-       test "/$nopath" = "$(test-path-utils real_path "///$nopath")" &&
+       test "/" = "$(test-tool path-utils real_path "///")" &&
+       test "/$nopath" = "$(test-tool path-utils real_path "///$nopath")" &&
        # Find an existing top-level directory for the remaining tests:
        d=$(pwd -P | sed -e "s|^\([^/]*/[^/]*\)/.*|\1|") &&
-       test "$d" = "$(test-path-utils real_path "//$d")" &&
-       test "$d/$nopath" = "$(test-path-utils real_path "//$d/$nopath")"
+       test "$d" = "$(test-tool path-utils real_path "//$d")" &&
+       test "$d/$nopath" = "$(test-tool path-utils real_path "//$d/$nopath")"
 '
 
 test_expect_success 'real path removes other extra slashes' '
        nopath="hopefully-absent-path" &&
        # Find an existing top-level directory for the remaining tests:
        d=$(pwd -P | sed -e "s|^\([^/]*/[^/]*\)/.*|\1|") &&
-       test "$d" = "$(test-path-utils real_path "$d///")" &&
-       test "$d/$nopath" = "$(test-path-utils real_path "$d///$nopath")"
+       test "$d" = "$(test-tool path-utils real_path "$d///")" &&
+       test "$d/$nopath" = "$(test-tool path-utils real_path "$d///$nopath")"
 '
 
 test_expect_success SYMLINKS 'real path works on symlinks' '
@@ -209,35 +209,35 @@ test_expect_success SYMLINKS 'real path works on symlinks' '
        mkdir third &&
        dir="$(cd .git; pwd -P)" &&
        dir2=third/../second/other/.git &&
-       test "$dir" = "$(test-path-utils real_path $dir2)" &&
+       test "$dir" = "$(test-tool path-utils real_path $dir2)" &&
        file="$dir"/index &&
-       test "$file" = "$(test-path-utils real_path $dir2/index)" &&
+       test "$file" = "$(test-tool path-utils real_path $dir2/index)" &&
        basename=blub &&
-       test "$dir/$basename" = "$(cd .git && test-path-utils real_path "$basename")" &&
+       test "$dir/$basename" = "$(cd .git && test-tool path-utils real_path "$basename")" &&
        ln -s ../first/file .git/syml &&
        sym="$(cd first; pwd -P)"/file &&
-       test "$sym" = "$(test-path-utils real_path "$dir2/syml")"
+       test "$sym" = "$(test-tool path-utils real_path "$dir2/syml")"
 '
 
 test_expect_success SYMLINKS 'prefix_path works with absolute paths to work tree symlinks' '
        ln -s target symlink &&
-       test "$(test-path-utils prefix_path prefix "$(pwd)/symlink")" = "symlink"
+       test "$(test-tool path-utils prefix_path prefix "$(pwd)/symlink")" = "symlink"
 '
 
 test_expect_success 'prefix_path works with only absolute path to work tree' '
        echo "" >expected &&
-       test-path-utils prefix_path prefix "$(pwd)" >actual &&
+       test-tool path-utils prefix_path prefix "$(pwd)" >actual &&
        test_cmp expected actual
 '
 
 test_expect_success 'prefix_path rejects absolute path to dir with same beginning as work tree' '
-       test_must_fail test-path-utils prefix_path prefix "$(pwd)a"
+       test_must_fail test-tool path-utils prefix_path prefix "$(pwd)a"
 '
 
 test_expect_success SYMLINKS 'prefix_path works with absolute path to a symlink to work tree having same beginning as work tree' '
        git init repo &&
        ln -s repo repolink &&
-       test "a" = "$(cd repo && test-path-utils prefix_path prefix "$(pwd)/../repolink/a")"
+       test "a" = "$(cd repo && test-tool path-utils prefix_path prefix "$(pwd)/../repolink/a")"
 '
 
 relative_path /foo/a/b/c/      /foo/a/b/       c/
index 24c92b6cd7b1c54eb6541a81abd7e5812b3b99b0..c887ed5b45e824d281343196c8781cbb6e85abed 100755 (executable)
@@ -14,13 +14,13 @@ EOF
 >empty
 
 test_expect_success 'start_command reports ENOENT' '
-       test-run-command start-command-ENOENT ./does-not-exist
+       test-tool run-command start-command-ENOENT ./does-not-exist
 '
 
 test_expect_success 'run_command can run a command' '
        cat hello-script >hello.sh &&
        chmod +x hello.sh &&
-       test-run-command run-command ./hello.sh >actual 2>err &&
+       test-tool run-command run-command ./hello.sh >actual 2>err &&
 
        test_cmp hello-script actual &&
        test_cmp empty err
@@ -31,7 +31,7 @@ test_expect_success !MINGW 'run_command can run a script without a #! line' '
        cat hello-script
        EOF
        chmod +x hello &&
-       test-run-command run-command ./hello >actual 2>err &&
+       test-tool run-command run-command ./hello >actual 2>err &&
 
        test_cmp hello-script actual &&
        test_cmp empty err
@@ -45,7 +45,7 @@ test_expect_success 'run_command does not try to execute a directory' '
        EOF
 
        PATH=$PWD/bin1:$PWD/bin2:$PATH \
-               test-run-command run-command greet >actual 2>err &&
+               test-tool run-command run-command greet >actual 2>err &&
        test_cmp bin2/greet actual &&
        test_cmp empty err
 '
@@ -62,7 +62,7 @@ test_expect_success POSIXPERM 'run_command passes over non-executable file' '
        EOF
 
        PATH=$PWD/bin1:$PWD/bin2:$PATH \
-               test-run-command run-command greet >actual 2>err &&
+               test-tool run-command run-command greet >actual 2>err &&
        test_cmp bin2/greet actual &&
        test_cmp empty err
 '
@@ -70,7 +70,7 @@ test_expect_success POSIXPERM 'run_command passes over non-executable file' '
 test_expect_success POSIXPERM 'run_command reports EACCES' '
        cat hello-script >hello.sh &&
        chmod -x hello.sh &&
-       test_must_fail test-run-command run-command ./hello.sh 2>err &&
+       test_must_fail test-tool run-command run-command ./hello.sh 2>err &&
 
        grep "fatal: cannot exec.*hello.sh" err
 '
@@ -104,17 +104,17 @@ World
 EOF
 
 test_expect_success 'run_command runs in parallel with more jobs available than tasks' '
-       test-run-command run-command-parallel 5 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+       test-tool run-command run-command-parallel 5 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
        test_cmp expect actual
 '
 
 test_expect_success 'run_command runs in parallel with as many jobs as tasks' '
-       test-run-command run-command-parallel 4 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+       test-tool run-command run-command-parallel 4 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
        test_cmp expect actual
 '
 
 test_expect_success 'run_command runs in parallel with more tasks than jobs available' '
-       test-run-command run-command-parallel 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+       test-tool run-command run-command-parallel 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
        test_cmp expect actual
 '
 
@@ -128,7 +128,7 @@ asking for a quick stop
 EOF
 
 test_expect_success 'run_command is asked to abort gracefully' '
-       test-run-command run-command-abort 3 false 2>actual &&
+       test-tool run-command run-command-abort 3 false 2>actual &&
        test_cmp expect actual
 '
 
@@ -137,15 +137,15 @@ no further jobs available
 EOF
 
 test_expect_success 'run_command outputs ' '
-       test-run-command run-command-no-jobs 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+       test-tool run-command run-command-no-jobs 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
        test_cmp expect actual
 '
 
 test_trace () {
        expect="$1"
        shift
-       GIT_TRACE=1 test-run-command "$@" run-command true 2>&1 >/dev/null | \
-               sed 's/.* run_command: //' >actual &&
+       GIT_TRACE=1 test-tool run-command "$@" run-command true 2>&1 >/dev/null | \
+               sed -e 's/.* run_command: //' -e '/trace: .*/d' >actual &&
        echo "$expect true" >expect &&
        test_cmp expect actual
 }
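
The test_trace helper depends on GIT_TRACE=1 making run_command log the command it is about to execute; the second sed expression added here strips the additional "trace:" lines that test-tool itself emits, so only the run_command line is compared. A standalone sketch of the same pipeline, assuming a built test-tool on PATH as in the test environment:

    # Print only the run_command trace line for a trivial command.
    GIT_TRACE=1 test-tool run-command run-command true 2>&1 >/dev/null |
        sed -e 's/.* run_command: //' -e '/trace: .*/d'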
index 113c728e676679f8e8e8eb0fce433a84bbee8050..8e215867b8c197dedef32c569e1d1f632d22b631 100755 (executable)
@@ -26,7 +26,7 @@ test_expect_success 'setup' '
 '
 
 test_expect_success 'revision walking can be done twice' '
-       test-revision-walking run-twice >run_twice_actual &&
+       test-tool revision-walking run-twice >run_twice_actual &&
        test_cmp run_twice_expected run_twice_actual
 '
 
index dbfc05ebdc3990bf4ea5b0163afb4c6a9e698fa7..c6ee9f66b11d55f312ee673106ddf295c39bb50d 100755 (executable)
@@ -10,9 +10,9 @@ test_description='Test string list functionality'
 test_split () {
        cat >expected &&
        test_expect_success "split $1 at $2, max $3" "
-               test-string-list split '$1' '$2' '$3' >actual &&
+               test-tool string-list split '$1' '$2' '$3' >actual &&
                test_cmp expected actual &&
-               test-string-list split_in_place '$1' '$2' '$3' >actual &&
+               test-tool string-list split_in_place '$1' '$2' '$3' >actual &&
                test_cmp expected actual
        "
 }
@@ -61,31 +61,31 @@ test_split ":" ":" "-1" <<EOF
 EOF
 
 test_expect_success "test filter_string_list" '
-       test "x-" = "x$(test-string-list filter - y)" &&
-       test "x-" = "x$(test-string-list filter no y)" &&
-       test yes = "$(test-string-list filter yes y)" &&
-       test yes = "$(test-string-list filter no:yes y)" &&
-       test yes = "$(test-string-list filter yes:no y)" &&
-       test y1:y2 = "$(test-string-list filter y1:y2 y)" &&
-       test y2:y1 = "$(test-string-list filter y2:y1 y)" &&
-       test "x-" = "x$(test-string-list filter x1:x2 y)"
+       test "x-" = "x$(test-tool string-list filter - y)" &&
+       test "x-" = "x$(test-tool string-list filter no y)" &&
+       test yes = "$(test-tool string-list filter yes y)" &&
+       test yes = "$(test-tool string-list filter no:yes y)" &&
+       test yes = "$(test-tool string-list filter yes:no y)" &&
+       test y1:y2 = "$(test-tool string-list filter y1:y2 y)" &&
+       test y2:y1 = "$(test-tool string-list filter y2:y1 y)" &&
+       test "x-" = "x$(test-tool string-list filter x1:x2 y)"
 '
 
 test_expect_success "test remove_duplicates" '
-       test "x-" = "x$(test-string-list remove_duplicates -)" &&
-       test "x" = "x$(test-string-list remove_duplicates "")" &&
-       test a = "$(test-string-list remove_duplicates a)" &&
-       test a = "$(test-string-list remove_duplicates a:a)" &&
-       test a = "$(test-string-list remove_duplicates a:a:a:a:a)" &&
-       test a:b = "$(test-string-list remove_duplicates a:b)" &&
-       test a:b = "$(test-string-list remove_duplicates a:a:b)" &&
-       test a:b = "$(test-string-list remove_duplicates a:b:b)" &&
-       test a:b:c = "$(test-string-list remove_duplicates a:b:c)" &&
-       test a:b:c = "$(test-string-list remove_duplicates a:a:b:c)" &&
-       test a:b:c = "$(test-string-list remove_duplicates a:b:b:c)" &&
-       test a:b:c = "$(test-string-list remove_duplicates a:b:c:c)" &&
-       test a:b:c = "$(test-string-list remove_duplicates a:a:b:b:c:c)" &&
-       test a:b:c = "$(test-string-list remove_duplicates a:a:a:b:b:b:c:c:c)"
+       test "x-" = "x$(test-tool string-list remove_duplicates -)" &&
+       test "x" = "x$(test-tool string-list remove_duplicates "")" &&
+       test a = "$(test-tool string-list remove_duplicates a)" &&
+       test a = "$(test-tool string-list remove_duplicates a:a)" &&
+       test a = "$(test-tool string-list remove_duplicates a:a:a:a:a)" &&
+       test a:b = "$(test-tool string-list remove_duplicates a:b)" &&
+       test a:b = "$(test-tool string-list remove_duplicates a:a:b)" &&
+       test a:b = "$(test-tool string-list remove_duplicates a:b:b)" &&
+       test a:b:c = "$(test-tool string-list remove_duplicates a:b:c)" &&
+       test a:b:c = "$(test-tool string-list remove_duplicates a:a:b:c)" &&
+       test a:b:c = "$(test-tool string-list remove_duplicates a:b:b:c)" &&
+       test a:b:c = "$(test-tool string-list remove_duplicates a:b:c:c)" &&
+       test a:b:c = "$(test-tool string-list remove_duplicates a:a:b:b:c:c)" &&
+       test a:b:c = "$(test-tool string-list remove_duplicates a:a:a:b:b:b:c:c:c)"
 '
 
 test_done
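
As the assertions above show, test-tool string-list takes and prints colon-separated lists, with "-" standing for an empty result. The same calls can be run directly, assuming test-tool is on PATH as in the test environment:

    test-tool string-list filter no:yes y            # prints "yes"
    test-tool string-list remove_duplicates a:a:b    # prints "a:b"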
index 50b31ffe756658dfa8ce435381ad7f48fee91462..67484502a007e3fed09fe381898bee52d21d07bb 100755 (executable)
@@ -18,7 +18,7 @@ test_expect_success 'ordered enumeration' '
        {
                echo20 append 88 44 aa 55 &&
                echo for_each_unique
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        test_cmp expect actual
 '
 
@@ -28,7 +28,7 @@ test_expect_success 'ordered enumeration with duplicate suppression' '
                echo20 append 88 44 aa 55 &&
                echo20 append 88 44 aa 55 &&
                echo for_each_unique
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        test_cmp expect actual
 '
 
@@ -36,7 +36,7 @@ test_expect_success 'lookup' '
        {
                echo20 append 88 44 aa 55 &&
                echo20 lookup 55
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -eq 1
 '
@@ -45,7 +45,7 @@ test_expect_success 'lookup non-existing entry' '
        {
                echo20 append 88 44 aa 55 &&
                echo20 lookup 33
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -lt 0
 '
@@ -55,7 +55,7 @@ test_expect_success 'lookup with duplicates' '
                echo20 append 88 44 aa 55 &&
                echo20 append 88 44 aa 55 &&
                echo20 lookup 55
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -ge 2 &&
        test "$n" -le 3
@@ -66,7 +66,7 @@ test_expect_success 'lookup non-existing entry with duplicates' '
                echo20 append 88 44 aa 55 &&
                echo20 append 88 44 aa 55 &&
                echo20 lookup 66
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -lt 0
 '
@@ -76,7 +76,7 @@ test_expect_success 'lookup with almost duplicate values' '
                echo "append 5555555555555555555555555555555555555555" &&
                echo "append 555555555555555555555555555555555555555f" &&
                echo20 lookup 55
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -eq 0
 '
@@ -85,7 +85,7 @@ test_expect_success 'lookup with single duplicate value' '
        {
                echo20 append 55 55 &&
                echo20 lookup 55
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -ge 0 &&
        test "$n" -le 1
index 7d6d21425f483c412bb6d42ea551aaf5eaef4aa0..91fa639c4a74ed69ed69ff7360728b2f1ab08a66 100755 (executable)
@@ -8,7 +8,7 @@ while read s1 s2 expect
 do
        test_expect_success "strcmp_offset($s1, $s2)" '
                echo "$expect" >expect &&
-               test-strcmp-offset "$s1" "$s2" >actual &&
+               test-tool strcmp-offset "$s1" "$s2" >actual &&
                test_cmp expect actual
        '
 done <<-EOF
index 991ed2a48dbf15fb4cb794a587ad900071391e11..23fbe6434abd3f05057d4916f70e5d5b30115b68 100755 (executable)
@@ -9,11 +9,11 @@ Verify wrappers and compatibility functions.
 . ./test-lib.sh
 
 test_expect_success 'character classes (isspace, isalpha etc.)' '
-       test-ctype
+       test-tool ctype
 '
 
 test_expect_success 'mktemp to nonexistent directory prints filename' '
-       test_must_fail test-mktemp doesnotexist/testXXXXXX 2>err &&
+       test_must_fail test-tool mktemp doesnotexist/testXXXXXX 2>err &&
        grep "doesnotexist/test" err
 '
 
@@ -21,7 +21,7 @@ test_expect_success POSIXPERM,SANITY 'mktemp to unwritable directory prints file
        mkdir cannotwrite &&
        chmod -w cannotwrite &&
        test_when_finished "chmod +w cannotwrite" &&
-       test_must_fail test-mktemp cannotwrite/testXXXXXX 2>err &&
+       test_must_fail test-tool mktemp cannotwrite/testXXXXXX 2>err &&
        grep "cannotwrite/test" err
 '
 
@@ -31,7 +31,7 @@ test_expect_success 'git_mkstemps_mode does not fail if fd 0 is not open' '
 
 test_expect_success 'check for a bug in the regex routines' '
        # if this test fails, re-build git with NO_REGEX=1
-       test-regex --bug
+       test-tool regex --bug
 '
 
 test_done
index adfd4f0b5eea1c0c438647d3d6567479acc5d495..4ae0995cd9140d3030da7d975c74ff3d9e55b039 100755 (executable)
@@ -8,13 +8,13 @@ cache-tree extension.
  . ./test-lib.sh
 
 cmp_cache_tree () {
-       test-dump-cache-tree | sed -e '/#(ref)/d' >actual &&
+       test-tool dump-cache-tree | sed -e '/#(ref)/d' >actual &&
        sed "s/$_x40/SHA/" <actual >filtered &&
        test_cmp "$1" filtered
 }
 
 # We don't bother with actually checking the SHA1:
-# test-dump-cache-tree already verifies that all existing data is
+# test-tool dump-cache-tree already verifies that all existing data is
 # correct.
 generate_expected_cache_tree_rec () {
        dir="$1${1:+/}" &&
@@ -47,7 +47,7 @@ test_cache_tree () {
 
 test_invalid_cache_tree () {
        printf "invalid                                  %s ()\n" "" "$@" >expect &&
-       test-dump-cache-tree |
+       test-tool dump-cache-tree |
        sed -n -e "s/[0-9]* subtrees//" -e '/#(ref)/d' -e '/^invalid /p' >actual &&
        test_cmp expect actual
 }
@@ -115,14 +115,14 @@ test_expect_success 'update-index invalidates cache-tree' '
 '
 
 test_expect_success 'write-tree establishes cache-tree' '
-       test-scrap-cache-tree &&
+       test-tool scrap-cache-tree &&
        git write-tree &&
        test_cache_tree
 '
 
-test_expect_success 'test-scrap-cache-tree works' '
+test_expect_success 'test-tool scrap-cache-tree works' '
        git read-tree HEAD &&
-       test-scrap-cache-tree &&
+       test-tool scrap-cache-tree &&
        test_no_cache_tree
 '
 
@@ -170,7 +170,7 @@ test_expect_success 'commit in child dir has cache-tree' '
 '
 
 test_expect_success 'reset --hard gives cache-tree' '
-       test-scrap-cache-tree &&
+       test-tool scrap-cache-tree &&
        git reset --hard &&
        test_cache_tree
 '
@@ -246,9 +246,9 @@ test_expect_success 'switching trees does not invalidate shared index' '
        git update-index --split-index &&
        >split &&
        git add split &&
-       test-dump-split-index .git/index | grep -v ^own >before &&
+       test-tool dump-split-index .git/index | grep -v ^own >before &&
        git commit -m "as-is" &&
-       test-dump-split-index .git/index | grep -v ^own >after &&
+       test-tool dump-split-index .git/index | grep -v ^own >after &&
        test_cmp before after
 '
 
index 410d5768ca11b1d7e322c135cf18f60f41685b68..f99529d83853e2c468b1c948f3ec4408a58fb919 100755 (executable)
@@ -9,172 +9,172 @@ tu="$TEST_DIRECTORY/t0110/url"
 # Note that only file: URLs should be allowed without a host
 
 test_expect_success 'url scheme' '
-       ! test-urlmatch-normalization "" &&
-       ! test-urlmatch-normalization "_" &&
-       ! test-urlmatch-normalization "scheme" &&
-       ! test-urlmatch-normalization "scheme:" &&
-       ! test-urlmatch-normalization "scheme:/" &&
-       ! test-urlmatch-normalization "scheme://" &&
-       ! test-urlmatch-normalization "file" &&
-       ! test-urlmatch-normalization "file:" &&
-       ! test-urlmatch-normalization "file:/" &&
-       test-urlmatch-normalization "file://" &&
-       ! test-urlmatch-normalization "://acme.co" &&
-       ! test-urlmatch-normalization "x_test://acme.co" &&
-       ! test-urlmatch-normalization "-test://acme.co" &&
-       ! test-urlmatch-normalization "0test://acme.co" &&
-       ! test-urlmatch-normalization "+test://acme.co" &&
-       ! test-urlmatch-normalization ".test://acme.co" &&
-       ! test-urlmatch-normalization "schem%6e://" &&
-       test-urlmatch-normalization "x-Test+v1.0://acme.co" &&
-       test "$(test-urlmatch-normalization -p "AbCdeF://x.Y")" = "abcdef://x.y/"
+       ! test-tool urlmatch-normalization "" &&
+       ! test-tool urlmatch-normalization "_" &&
+       ! test-tool urlmatch-normalization "scheme" &&
+       ! test-tool urlmatch-normalization "scheme:" &&
+       ! test-tool urlmatch-normalization "scheme:/" &&
+       ! test-tool urlmatch-normalization "scheme://" &&
+       ! test-tool urlmatch-normalization "file" &&
+       ! test-tool urlmatch-normalization "file:" &&
+       ! test-tool urlmatch-normalization "file:/" &&
+       test-tool urlmatch-normalization "file://" &&
+       ! test-tool urlmatch-normalization "://acme.co" &&
+       ! test-tool urlmatch-normalization "x_test://acme.co" &&
+       ! test-tool urlmatch-normalization "-test://acme.co" &&
+       ! test-tool urlmatch-normalization "0test://acme.co" &&
+       ! test-tool urlmatch-normalization "+test://acme.co" &&
+       ! test-tool urlmatch-normalization ".test://acme.co" &&
+       ! test-tool urlmatch-normalization "schem%6e://" &&
+       test-tool urlmatch-normalization "x-Test+v1.0://acme.co" &&
+       test "$(test-tool urlmatch-normalization -p "AbCdeF://x.Y")" = "abcdef://x.y/"
 '
 
 test_expect_success 'url authority' '
-       ! test-urlmatch-normalization "scheme://user:pass@" &&
-       ! test-urlmatch-normalization "scheme://?" &&
-       ! test-urlmatch-normalization "scheme://#" &&
-       ! test-urlmatch-normalization "scheme:///" &&
-       ! test-urlmatch-normalization "scheme://:" &&
-       ! test-urlmatch-normalization "scheme://:555" &&
-       test-urlmatch-normalization "file://user:pass@" &&
-       test-urlmatch-normalization "file://?" &&
-       test-urlmatch-normalization "file://#" &&
-       test-urlmatch-normalization "file:///" &&
-       test-urlmatch-normalization "file://:" &&
-       ! test-urlmatch-normalization "file://:555" &&
-       test-urlmatch-normalization "scheme://user:pass@host" &&
-       test-urlmatch-normalization "scheme://@host" &&
-       test-urlmatch-normalization "scheme://%00@host" &&
-       ! test-urlmatch-normalization "scheme://%%@host" &&
-       ! test-urlmatch-normalization "scheme://host_" &&
-       test-urlmatch-normalization "scheme://user:pass@host/" &&
-       test-urlmatch-normalization "scheme://@host/" &&
-       test-urlmatch-normalization "scheme://host/" &&
-       test-urlmatch-normalization "scheme://host?x" &&
-       test-urlmatch-normalization "scheme://host#x" &&
-       test-urlmatch-normalization "scheme://host/@" &&
-       test-urlmatch-normalization "scheme://host?@x" &&
-       test-urlmatch-normalization "scheme://host#@x" &&
-       test-urlmatch-normalization "scheme://[::1]" &&
-       test-urlmatch-normalization "scheme://[::1]/" &&
-       ! test-urlmatch-normalization "scheme://hos%41/" &&
-       test-urlmatch-normalization "scheme://[invalid....:/" &&
-       test-urlmatch-normalization "scheme://invalid....:]/" &&
-       ! test-urlmatch-normalization "scheme://invalid....:[/" &&
-       ! test-urlmatch-normalization "scheme://invalid....:["
+       ! test-tool urlmatch-normalization "scheme://user:pass@" &&
+       ! test-tool urlmatch-normalization "scheme://?" &&
+       ! test-tool urlmatch-normalization "scheme://#" &&
+       ! test-tool urlmatch-normalization "scheme:///" &&
+       ! test-tool urlmatch-normalization "scheme://:" &&
+       ! test-tool urlmatch-normalization "scheme://:555" &&
+       test-tool urlmatch-normalization "file://user:pass@" &&
+       test-tool urlmatch-normalization "file://?" &&
+       test-tool urlmatch-normalization "file://#" &&
+       test-tool urlmatch-normalization "file:///" &&
+       test-tool urlmatch-normalization "file://:" &&
+       ! test-tool urlmatch-normalization "file://:555" &&
+       test-tool urlmatch-normalization "scheme://user:pass@host" &&
+       test-tool urlmatch-normalization "scheme://@host" &&
+       test-tool urlmatch-normalization "scheme://%00@host" &&
+       ! test-tool urlmatch-normalization "scheme://%%@host" &&
+       ! test-tool urlmatch-normalization "scheme://host_" &&
+       test-tool urlmatch-normalization "scheme://user:pass@host/" &&
+       test-tool urlmatch-normalization "scheme://@host/" &&
+       test-tool urlmatch-normalization "scheme://host/" &&
+       test-tool urlmatch-normalization "scheme://host?x" &&
+       test-tool urlmatch-normalization "scheme://host#x" &&
+       test-tool urlmatch-normalization "scheme://host/@" &&
+       test-tool urlmatch-normalization "scheme://host?@x" &&
+       test-tool urlmatch-normalization "scheme://host#@x" &&
+       test-tool urlmatch-normalization "scheme://[::1]" &&
+       test-tool urlmatch-normalization "scheme://[::1]/" &&
+       ! test-tool urlmatch-normalization "scheme://hos%41/" &&
+       test-tool urlmatch-normalization "scheme://[invalid....:/" &&
+       test-tool urlmatch-normalization "scheme://invalid....:]/" &&
+       ! test-tool urlmatch-normalization "scheme://invalid....:[/" &&
+       ! test-tool urlmatch-normalization "scheme://invalid....:["
 '
 
 test_expect_success 'url port checks' '
-       test-urlmatch-normalization "xyz://q@some.host:" &&
-       test-urlmatch-normalization "xyz://q@some.host:456/" &&
-       ! test-urlmatch-normalization "xyz://q@some.host:0" &&
-       ! test-urlmatch-normalization "xyz://q@some.host:0000000" &&
-       test-urlmatch-normalization "xyz://q@some.host:0000001?" &&
-       test-urlmatch-normalization "xyz://q@some.host:065535#" &&
-       test-urlmatch-normalization "xyz://q@some.host:65535" &&
-       ! test-urlmatch-normalization "xyz://q@some.host:65536" &&
-       ! test-urlmatch-normalization "xyz://q@some.host:99999" &&
-       ! test-urlmatch-normalization "xyz://q@some.host:100000" &&
-       ! test-urlmatch-normalization "xyz://q@some.host:100001" &&
-       test-urlmatch-normalization "http://q@some.host:80" &&
-       test-urlmatch-normalization "https://q@some.host:443" &&
-       test-urlmatch-normalization "http://q@some.host:80/" &&
-       test-urlmatch-normalization "https://q@some.host:443?" &&
-       ! test-urlmatch-normalization "http://q@:8008" &&
-       ! test-urlmatch-normalization "http://:8080" &&
-       ! test-urlmatch-normalization "http://:" &&
-       test-urlmatch-normalization "xyz://q@some.host:456/" &&
-       test-urlmatch-normalization "xyz://[::1]:456/" &&
-       test-urlmatch-normalization "xyz://[::1]:/" &&
-       ! test-urlmatch-normalization "xyz://[::1]:000/" &&
-       ! test-urlmatch-normalization "xyz://[::1]:0%300/" &&
-       ! test-urlmatch-normalization "xyz://[::1]:0x80/" &&
-       ! test-urlmatch-normalization "xyz://[::1]:4294967297/" &&
-       ! test-urlmatch-normalization "xyz://[::1]:030f/"
+       test-tool urlmatch-normalization "xyz://q@some.host:" &&
+       test-tool urlmatch-normalization "xyz://q@some.host:456/" &&
+       ! test-tool urlmatch-normalization "xyz://q@some.host:0" &&
+       ! test-tool urlmatch-normalization "xyz://q@some.host:0000000" &&
+       test-tool urlmatch-normalization "xyz://q@some.host:0000001?" &&
+       test-tool urlmatch-normalization "xyz://q@some.host:065535#" &&
+       test-tool urlmatch-normalization "xyz://q@some.host:65535" &&
+       ! test-tool urlmatch-normalization "xyz://q@some.host:65536" &&
+       ! test-tool urlmatch-normalization "xyz://q@some.host:99999" &&
+       ! test-tool urlmatch-normalization "xyz://q@some.host:100000" &&
+       ! test-tool urlmatch-normalization "xyz://q@some.host:100001" &&
+       test-tool urlmatch-normalization "http://q@some.host:80" &&
+       test-tool urlmatch-normalization "https://q@some.host:443" &&
+       test-tool urlmatch-normalization "http://q@some.host:80/" &&
+       test-tool urlmatch-normalization "https://q@some.host:443?" &&
+       ! test-tool urlmatch-normalization "http://q@:8008" &&
+       ! test-tool urlmatch-normalization "http://:8080" &&
+       ! test-tool urlmatch-normalization "http://:" &&
+       test-tool urlmatch-normalization "xyz://q@some.host:456/" &&
+       test-tool urlmatch-normalization "xyz://[::1]:456/" &&
+       test-tool urlmatch-normalization "xyz://[::1]:/" &&
+       ! test-tool urlmatch-normalization "xyz://[::1]:000/" &&
+       ! test-tool urlmatch-normalization "xyz://[::1]:0%300/" &&
+       ! test-tool urlmatch-normalization "xyz://[::1]:0x80/" &&
+       ! test-tool urlmatch-normalization "xyz://[::1]:4294967297/" &&
+       ! test-tool urlmatch-normalization "xyz://[::1]:030f/"
 '
 
 test_expect_success 'url port normalization' '
-       test "$(test-urlmatch-normalization -p "http://x:800")" = "http://x:800/" &&
-       test "$(test-urlmatch-normalization -p "http://x:0800")" = "http://x:800/" &&
-       test "$(test-urlmatch-normalization -p "http://x:00000800")" = "http://x:800/" &&
-       test "$(test-urlmatch-normalization -p "http://x:065535")" = "http://x:65535/" &&
-       test "$(test-urlmatch-normalization -p "http://x:1")" = "http://x:1/" &&
-       test "$(test-urlmatch-normalization -p "http://x:80")" = "http://x/" &&
-       test "$(test-urlmatch-normalization -p "http://x:080")" = "http://x/" &&
-       test "$(test-urlmatch-normalization -p "http://x:000000080")" = "http://x/" &&
-       test "$(test-urlmatch-normalization -p "https://x:443")" = "https://x/" &&
-       test "$(test-urlmatch-normalization -p "https://x:0443")" = "https://x/" &&
-       test "$(test-urlmatch-normalization -p "https://x:000000443")" = "https://x/"
+       test "$(test-tool urlmatch-normalization -p "http://x:800")" = "http://x:800/" &&
+       test "$(test-tool urlmatch-normalization -p "http://x:0800")" = "http://x:800/" &&
+       test "$(test-tool urlmatch-normalization -p "http://x:00000800")" = "http://x:800/" &&
+       test "$(test-tool urlmatch-normalization -p "http://x:065535")" = "http://x:65535/" &&
+       test "$(test-tool urlmatch-normalization -p "http://x:1")" = "http://x:1/" &&
+       test "$(test-tool urlmatch-normalization -p "http://x:80")" = "http://x/" &&
+       test "$(test-tool urlmatch-normalization -p "http://x:080")" = "http://x/" &&
+       test "$(test-tool urlmatch-normalization -p "http://x:000000080")" = "http://x/" &&
+       test "$(test-tool urlmatch-normalization -p "https://x:443")" = "https://x/" &&
+       test "$(test-tool urlmatch-normalization -p "https://x:0443")" = "https://x/" &&
+       test "$(test-tool urlmatch-normalization -p "https://x:000000443")" = "https://x/"
 '
 
 test_expect_success 'url general escapes' '
-       ! test-urlmatch-normalization "http://x.y?%fg" &&
-       test "$(test-urlmatch-normalization -p "X://W/%7e%41^%3a")" = "x://w/~A%5E%3A" &&
-       test "$(test-urlmatch-normalization -p "X://W/:/?#[]@")" = "x://w/:/?#[]@" &&
-       test "$(test-urlmatch-normalization -p "X://W/$&()*+,;=")" = "x://w/$&()*+,;=" &&
-       test "$(test-urlmatch-normalization -p "X://W/'\''")" = "x://w/'\''" &&
-       test "$(test-urlmatch-normalization -p "X://W?'\!'")" = "x://w/?'\!'"
+       ! test-tool urlmatch-normalization "http://x.y?%fg" &&
+       test "$(test-tool urlmatch-normalization -p "X://W/%7e%41^%3a")" = "x://w/~A%5E%3A" &&
+       test "$(test-tool urlmatch-normalization -p "X://W/:/?#[]@")" = "x://w/:/?#[]@" &&
+       test "$(test-tool urlmatch-normalization -p "X://W/$&()*+,;=")" = "x://w/$&()*+,;=" &&
+       test "$(test-tool urlmatch-normalization -p "X://W/'\''")" = "x://w/'\''" &&
+       test "$(test-tool urlmatch-normalization -p "X://W?'\!'")" = "x://w/?'\!'"
 '
 
 test_expect_success !MINGW 'url high-bit escapes' '
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-1")")" = "x://q/%01%02%03%04%05%06%07%08%0E%0F%10%11%12" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-2")")" = "x://q/%13%14%15%16%17%18%19%1B%1C%1D%1E%1F%7F" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-3")")" = "x://q/%80%81%82%83%84%85%86%87%88%89%8A%8B%8C%8D%8E%8F" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-4")")" = "x://q/%90%91%92%93%94%95%96%97%98%99%9A%9B%9C%9D%9E%9F" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-5")")" = "x://q/%A0%A1%A2%A3%A4%A5%A6%A7%A8%A9%AA%AB%AC%AD%AE%AF" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-6")")" = "x://q/%B0%B1%B2%B3%B4%B5%B6%B7%B8%B9%BA%BB%BC%BD%BE%BF" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-7")")" = "x://q/%C0%C1%C2%C3%C4%C5%C6%C7%C8%C9%CA%CB%CC%CD%CE%CF" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-8")")" = "x://q/%D0%D1%D2%D3%D4%D5%D6%D7%D8%D9%DA%DB%DC%DD%DE%DF" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-9")")" = "x://q/%E0%E1%E2%E3%E4%E5%E6%E7%E8%E9%EA%EB%EC%ED%EE%EF" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-10")")" = "x://q/%F0%F1%F2%F3%F4%F5%F6%F7%F8%F9%FA%FB%FC%FD%FE%FF"
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-1")")" = "x://q/%01%02%03%04%05%06%07%08%0E%0F%10%11%12" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-2")")" = "x://q/%13%14%15%16%17%18%19%1B%1C%1D%1E%1F%7F" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-3")")" = "x://q/%80%81%82%83%84%85%86%87%88%89%8A%8B%8C%8D%8E%8F" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-4")")" = "x://q/%90%91%92%93%94%95%96%97%98%99%9A%9B%9C%9D%9E%9F" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-5")")" = "x://q/%A0%A1%A2%A3%A4%A5%A6%A7%A8%A9%AA%AB%AC%AD%AE%AF" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-6")")" = "x://q/%B0%B1%B2%B3%B4%B5%B6%B7%B8%B9%BA%BB%BC%BD%BE%BF" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-7")")" = "x://q/%C0%C1%C2%C3%C4%C5%C6%C7%C8%C9%CA%CB%CC%CD%CE%CF" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-8")")" = "x://q/%D0%D1%D2%D3%D4%D5%D6%D7%D8%D9%DA%DB%DC%DD%DE%DF" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-9")")" = "x://q/%E0%E1%E2%E3%E4%E5%E6%E7%E8%E9%EA%EB%EC%ED%EE%EF" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-10")")" = "x://q/%F0%F1%F2%F3%F4%F5%F6%F7%F8%F9%FA%FB%FC%FD%FE%FF"
 '
 
 test_expect_success 'url utf-8 escapes' '
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-11")")" = "x://q/%C2%80%DF%BF%E0%A0%80%EF%BF%BD%F0%90%80%80%F0%AF%BF%BD"
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-11")")" = "x://q/%C2%80%DF%BF%E0%A0%80%EF%BF%BD%F0%90%80%80%F0%AF%BF%BD"
 '
 
 test_expect_success 'url username/password escapes' '
-       test "$(test-urlmatch-normalization -p "x://%41%62(^):%70+d@foo")" = "x://Ab(%5E):p+d@foo/"
+       test "$(test-tool urlmatch-normalization -p "x://%41%62(^):%70+d@foo")" = "x://Ab(%5E):p+d@foo/"
 '
 
 test_expect_success 'url normalized lengths' '
-       test "$(test-urlmatch-normalization -l "Http://%4d%65:%4d^%70@The.Host")" = 25 &&
-       test "$(test-urlmatch-normalization -l "http://%41:%42@x.y/%61/")" = 17 &&
-       test "$(test-urlmatch-normalization -l "http://@x.y/^")" = 15
+       test "$(test-tool urlmatch-normalization -l "Http://%4d%65:%4d^%70@The.Host")" = 25 &&
+       test "$(test-tool urlmatch-normalization -l "http://%41:%42@x.y/%61/")" = 17 &&
+       test "$(test-tool urlmatch-normalization -l "http://@x.y/^")" = 15
 '
 
 test_expect_success 'url . and .. segments' '
-       test "$(test-urlmatch-normalization -p "x://y/.")" = "x://y/" &&
-       test "$(test-urlmatch-normalization -p "x://y/./")" = "x://y/" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/.")" = "x://y/a" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/./")" = "x://y/a/" &&
-       test "$(test-urlmatch-normalization -p "x://y/.?")" = "x://y/?" &&
-       test "$(test-urlmatch-normalization -p "x://y/./?")" = "x://y/?" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/.?")" = "x://y/a?" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/./?")" = "x://y/a/?" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/./b/.././../c")" = "x://y/c" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/./b/../.././c/")" = "x://y/c/" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/./b/.././../c/././.././.")" = "x://y/" &&
-       ! test-urlmatch-normalization "x://y/a/./b/.././../c/././.././.." &&
-       test "$(test-urlmatch-normalization -p "x://y/a/./?/././..")" = "x://y/a/?/././.." &&
-       test "$(test-urlmatch-normalization -p "x://y/%2e/")" = "x://y/" &&
-       test "$(test-urlmatch-normalization -p "x://y/%2E/")" = "x://y/" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/%2e./")" = "x://y/" &&
-       test "$(test-urlmatch-normalization -p "x://y/b/.%2E/")" = "x://y/" &&
-       test "$(test-urlmatch-normalization -p "x://y/c/%2e%2E/")" = "x://y/"
+       test "$(test-tool urlmatch-normalization -p "x://y/.")" = "x://y/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/./")" = "x://y/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/.")" = "x://y/a" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/./")" = "x://y/a/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/.?")" = "x://y/?" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/./?")" = "x://y/?" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/.?")" = "x://y/a?" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/./?")" = "x://y/a/?" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/./b/.././../c")" = "x://y/c" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/./b/../.././c/")" = "x://y/c/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/./b/.././../c/././.././.")" = "x://y/" &&
+       ! test-tool urlmatch-normalization "x://y/a/./b/.././../c/././.././.." &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/./?/././..")" = "x://y/a/?/././.." &&
+       test "$(test-tool urlmatch-normalization -p "x://y/%2e/")" = "x://y/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/%2E/")" = "x://y/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/%2e./")" = "x://y/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/b/.%2E/")" = "x://y/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/c/%2e%2E/")" = "x://y/"
 '
 
 # http://@foo specifies an empty user name but does not specify a password
 # http://foo  specifies neither a user name nor a password
 # So they should not be equivalent
 test_expect_success 'url equivalents' '
-       test-urlmatch-normalization "httP://x" "Http://X/" &&
-       test-urlmatch-normalization "Http://%4d%65:%4d^%70@The.Host" "hTTP://Me:%4D^p@the.HOST:80/" &&
-       ! test-urlmatch-normalization "https://@x.y/^" "httpS://x.y:443/^" &&
-       test-urlmatch-normalization "https://@x.y/^" "httpS://@x.y:0443/^" &&
-       test-urlmatch-normalization "https://@x.y/^/../abc" "httpS://@x.y:0443/abc" &&
-       test-urlmatch-normalization "https://@x.y/^/.." "httpS://@x.y:0443/"
+       test-tool urlmatch-normalization "httP://x" "Http://X/" &&
+       test-tool urlmatch-normalization "Http://%4d%65:%4d^%70@The.Host" "hTTP://Me:%4D^p@the.HOST:80/" &&
+       ! test-tool urlmatch-normalization "https://@x.y/^" "httpS://x.y:443/^" &&
+       test-tool urlmatch-normalization "https://@x.y/^" "httpS://@x.y:0443/^" &&
+       test-tool urlmatch-normalization "https://@x.y/^/../abc" "httpS://@x.y:0443/abc" &&
+       test-tool urlmatch-normalization "https://@x.y/^/.." "httpS://@x.y:0443/"
 '
 
 test_done
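
With -p the tool prints the normalized form of a single URL, with -l its normalized length, and with two URLs it succeeds only when both normalize to the same string. A few calls lifted from the assertions above, assuming test-tool is on PATH:

    test-tool urlmatch-normalization -p "AbCdeF://x.Y"    # abcdef://x.y/
    test-tool urlmatch-normalization -p "http://x:080"    # http://x/
    test-tool urlmatch-normalization "httP://x" "Http://X/" && echo equivalent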
index b19f3326946203409fe0e428b9fc73d34134d756..2ac3b940c611db08fd48e5782db13d2f8f1d48ec 100755 (executable)
@@ -282,7 +282,7 @@ test_expect_success "--batch-check with multiple sha1s gives correct format" '
 '
 
 test_expect_success 'setup blobs which are likely to delta' '
-       test-genrandom foo 10240 >foo &&
+       test-tool genrandom foo 10240 >foo &&
        { cat foo; echo plus; } >foo-plus &&
        git add foo foo-plus &&
        git commit -m foo &&
index c167f606ca7b8c1628ac8d00507d50f032278730..0c6f48f3024c81de765a8acb489e2d5e3ec42a56 100755 (executable)
@@ -15,8 +15,11 @@ test_description='sparse checkout tests
 . "$TEST_DIRECTORY"/lib-read-tree.sh
 
 test_expect_success 'setup' '
+       test_commit init &&
+       echo modified >>init.t &&
+
        cat >expected <<-EOF &&
-       100644 77f0ba1734ed79d12881f81b36ee134de6a3327b 0       init.t
+       100644 $(git hash-object init.t) 0      init.t
        100644 $EMPTY_BLOB 0    sub/added
        100644 $EMPTY_BLOB 0    sub/addedtoo
        100644 $EMPTY_BLOB 0    subsub/added
@@ -28,8 +31,6 @@ test_expect_success 'setup' '
        H subsub/added
        EOF
 
-       test_commit init &&
-       echo modified >>init.t &&
        mkdir sub subsub &&
        touch sub/added sub/addedtoo subsub/added &&
        git add init.t sub/added sub/addedtoo subsub/added &&
index 6fd264cff0d6de1961656c2cd1193d8ce37e9a1f..f9eb143f43420b0e2f2b864b4f83f78de7886a7b 100755 (executable)
@@ -103,9 +103,9 @@ test_expect_success 'packsize limit' '
                # mid1 and mid2 will fit within 256k limit but
                # appending mid3 will bust the limit and will
                # result in a separate packfile.
-               test-genrandom "a" $(( 66 * 1024 )) >mid1 &&
-               test-genrandom "b" $(( 80 * 1024 )) >mid2 &&
-               test-genrandom "c" $(( 128 * 1024 )) >mid3 &&
+               test-tool genrandom "a" $(( 66 * 1024 )) >mid1 &&
+               test-tool genrandom "b" $(( 80 * 1024 )) >mid2 &&
+               test-tool genrandom "c" $(( 128 * 1024 )) >mid3 &&
                git add mid1 mid2 mid3 &&
 
                count=0
diff --git a/t/t1300-config.sh b/t/t1300-config.sh
new file mode 100755 (executable)
index 0000000..03c2237
--- /dev/null
@@ -0,0 +1,1803 @@
+#!/bin/sh
+#
+# Copyright (c) 2005 Johannes Schindelin
+#
+
+test_description='Test git config in different settings'
+
+. ./test-lib.sh
+
+test_expect_success 'clear default config' '
+       rm -f .git/config
+'
+
+cat > expect << EOF
+[core]
+       penguin = little blue
+EOF
+test_expect_success 'initial' '
+       git config core.penguin "little blue" &&
+       test_cmp expect .git/config
+'
+
+cat > expect << EOF
+[core]
+       penguin = little blue
+       Movie = BadPhysics
+EOF
+test_expect_success 'mixed case' '
+       git config Core.Movie BadPhysics &&
+       test_cmp expect .git/config
+'
+
+cat > expect << EOF
+[core]
+       penguin = little blue
+       Movie = BadPhysics
+[Cores]
+       WhatEver = Second
+EOF
+test_expect_success 'similar section' '
+       git config Cores.WhatEver Second &&
+       test_cmp expect .git/config
+'
+
+cat > expect << EOF
+[core]
+       penguin = little blue
+       Movie = BadPhysics
+       UPPERCASE = true
+[Cores]
+       WhatEver = Second
+EOF
+test_expect_success 'uppercase section' '
+       git config CORE.UPPERCASE true &&
+       test_cmp expect .git/config
+'
+
+test_expect_success 'replace with non-match' '
+       git config core.penguin kingpin !blue
+'
+
+test_expect_success 'replace with non-match (actually matching)' '
+       git config core.penguin "very blue" !kingpin
+'
+
+cat > expect << EOF
+[core]
+       penguin = very blue
+       Movie = BadPhysics
+       UPPERCASE = true
+       penguin = kingpin
+[Cores]
+       WhatEver = Second
+EOF
+
+test_expect_success 'non-match result' 'test_cmp expect .git/config'
+
+test_expect_success 'find mixed-case key by canonical name' '
+       echo Second >expect &&
+       git config cores.whatever >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'find mixed-case key by non-canonical name' '
+       echo Second >expect &&
+       git config CoReS.WhAtEvEr >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'subsections are not canonicalized by git-config' '
+       cat >>.git/config <<-\EOF &&
+       [section.SubSection]
+       key = one
+       [section "SubSection"]
+       key = two
+       EOF
+       echo one >expect &&
+       git config section.subsection.key >actual &&
+       test_cmp expect actual &&
+       echo two >expect &&
+       git config section.SubSection.key >actual &&
+       test_cmp expect actual
+'
+
+cat > .git/config <<\EOF
+[alpha]
+bar = foo
+[beta]
+baz = multiple \
+lines
+foo = bar
+EOF
+
+test_expect_success 'unset with cont. lines' '
+       git config --unset beta.baz
+'
+
+cat > expect <<\EOF
+[alpha]
+bar = foo
+[beta]
+foo = bar
+EOF
+
+test_expect_success 'unset with cont. lines is correct' 'test_cmp expect .git/config'
+
+cat > .git/config << EOF
+[beta] ; silly comment # another comment
+noIndent= sillyValue ; 'nother silly comment
+
+# empty line
+               ; comment
+               haha   ="beta" # last silly comment
+haha = hello
+       haha = bello
+[nextSection] noNewline = ouch
+EOF
+
+cp .git/config .git/config2
+
+test_expect_success 'multiple unset' '
+       git config --unset-all beta.haha
+'
+
+cat > expect << EOF
+[beta] ; silly comment # another comment
+noIndent= sillyValue ; 'nother silly comment
+
+# empty line
+               ; comment
+[nextSection] noNewline = ouch
+EOF
+
+test_expect_success 'multiple unset is correct' '
+       test_cmp expect .git/config
+'
+
+cp .git/config2 .git/config
+
+test_expect_success '--replace-all missing value' '
+       test_must_fail git config --replace-all beta.haha &&
+       test_cmp .git/config2 .git/config
+'
+
+rm .git/config2
+
+test_expect_success '--replace-all' '
+       git config --replace-all beta.haha gamma
+'
+
+cat > expect << EOF
+[beta] ; silly comment # another comment
+noIndent= sillyValue ; 'nother silly comment
+
+# empty line
+               ; comment
+       haha = gamma
+[nextSection] noNewline = ouch
+EOF
+
+test_expect_success 'all replaced' '
+       test_cmp expect .git/config
+'
+
+cat > expect << EOF
+[beta] ; silly comment # another comment
+noIndent= sillyValue ; 'nother silly comment
+
+# empty line
+               ; comment
+       haha = alpha
+[nextSection] noNewline = ouch
+EOF
+test_expect_success 'really mean test' '
+       git config beta.haha alpha &&
+       test_cmp expect .git/config
+'
+
+cat > expect << EOF
+[beta] ; silly comment # another comment
+noIndent= sillyValue ; 'nother silly comment
+
+# empty line
+               ; comment
+       haha = alpha
+[nextSection]
+       nonewline = wow
+EOF
+test_expect_success 'really really mean test' '
+       git config nextsection.nonewline wow &&
+       test_cmp expect .git/config
+'
+
+test_expect_success 'get value' '
+       echo alpha >expect &&
+       git config beta.haha >actual &&
+       test_cmp expect actual
+'
+
+cat > expect << EOF
+[beta] ; silly comment # another comment
+noIndent= sillyValue ; 'nother silly comment
+
+# empty line
+               ; comment
+[nextSection]
+       nonewline = wow
+EOF
+test_expect_success 'unset' '
+       git config --unset beta.haha &&
+       test_cmp expect .git/config
+'
+
+cat > expect << EOF
+[beta] ; silly comment # another comment
+noIndent= sillyValue ; 'nother silly comment
+
+# empty line
+               ; comment
+[nextSection]
+       nonewline = wow
+       NoNewLine = wow2 for me
+EOF
+test_expect_success 'multivar' '
+       git config nextsection.NoNewLine "wow2 for me" "for me$" &&
+       test_cmp expect .git/config
+'
+
+test_expect_success 'non-match' '
+       git config --get nextsection.nonewline !for
+'
+
+test_expect_success 'non-match value' '
+       echo wow >expect &&
+       git config --get nextsection.nonewline !for >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'multi-valued get returns final one' '
+       echo "wow2 for me" >expect &&
+       git config --get nextsection.nonewline >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'multi-valued get-all returns all' '
+       cat >expect <<-\EOF &&
+       wow
+       wow2 for me
+       EOF
+       git config --get-all nextsection.nonewline >actual &&
+       test_cmp expect actual
+'
+
+cat > expect << EOF
+[beta] ; silly comment # another comment
+noIndent= sillyValue ; 'nother silly comment
+
+# empty line
+               ; comment
+[nextSection]
+       nonewline = wow3
+       NoNewLine = wow2 for me
+EOF
+test_expect_success 'multivar replace' '
+       git config nextsection.nonewline "wow3" "wow$" &&
+       test_cmp expect .git/config
+'
+
+test_expect_success 'ambiguous unset' '
+       test_must_fail git config --unset nextsection.nonewline
+'
+
+test_expect_success 'invalid unset' '
+       test_must_fail git config --unset somesection.nonewline
+'
+
+cat > expect << EOF
+[beta] ; silly comment # another comment
+noIndent= sillyValue ; 'nother silly comment
+
+# empty line
+               ; comment
+[nextSection]
+       NoNewLine = wow2 for me
+EOF
+
+test_expect_success 'multivar unset' '
+       git config --unset nextsection.nonewline "wow3$" &&
+       test_cmp expect .git/config
+'
+
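The extra argument in these set and unset calls is a value pattern: "git config name value pattern" rewrites the existing entry whose value matches the pattern (use --replace-all when several match), and "--unset name pattern" removes only matching entries. A small sketch against a scratch file (demo.cfg is a placeholder, not part of the test):

    git config --file demo.cfg --add sect.key wow
    git config --file demo.cfg --add sect.key "wow2 for me"
    git config --file demo.cfg sect.key wow3 "wow$"    # rewrites only the "wow" entry
    git config --file demo.cfg --unset sect.key "wow3$"
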
+test_expect_success 'invalid key' 'test_must_fail git config inval.2key blabla'
+
+test_expect_success 'correct key' 'git config 123456.a123 987'
+
+test_expect_success 'hierarchical section' '
+       git config Version.1.2.3eX.Alpha beta
+'
+
+cat > expect << EOF
+[beta] ; silly comment # another comment
+noIndent= sillyValue ; 'nother silly comment
+
+# empty line
+               ; comment
+[nextSection]
+       NoNewLine = wow2 for me
+[123456]
+       a123 = 987
+[Version "1.2.3eX"]
+       Alpha = beta
+EOF
+
+test_expect_success 'hierarchical section value' '
+       test_cmp expect .git/config
+'
+
+cat > expect << EOF
+beta.noindent=sillyValue
+nextsection.nonewline=wow2 for me
+123456.a123=987
+version.1.2.3eX.alpha=beta
+EOF
+
+test_expect_success 'working --list' '
+       git config --list > output &&
+       test_cmp expect output
+'
+cat > expect << EOF
+EOF
+
+test_expect_success '--list without repo produces empty output' '
+       git --git-dir=nonexistent config --list >output &&
+       test_cmp expect output
+'
+
+cat > expect << EOF
+beta.noindent
+nextsection.nonewline
+123456.a123
+version.1.2.3eX.alpha
+EOF
+
+test_expect_success '--name-only --list' '
+       git config --name-only --list >output &&
+       test_cmp expect output
+'
+
+cat > expect << EOF
+beta.noindent sillyValue
+nextsection.nonewline wow2 for me
+EOF
+
+test_expect_success '--get-regexp' '
+       git config --get-regexp in >output &&
+       test_cmp expect output
+'
+
+cat > expect << EOF
+beta.noindent
+nextsection.nonewline
+EOF
+
+test_expect_success '--name-only --get-regexp' '
+       git config --name-only --get-regexp in >output &&
+       test_cmp expect output
+'
+
+cat > expect << EOF
+wow2 for me
+wow4 for you
+EOF
+
+test_expect_success '--add' '
+       git config --add nextsection.nonewline "wow4 for you" &&
+       git config --get-all nextsection.nonewline > output &&
+       test_cmp expect output
+'
+
+cat > .git/config << EOF
+[novalue]
+       variable
+[emptyvalue]
+       variable =
+EOF
+
+test_expect_success 'get variable with no value' '
+       git config --get novalue.variable ^$
+'
+
+test_expect_success 'get variable with empty value' '
+       git config --get emptyvalue.variable ^$
+'
+
+echo novalue.variable > expect
+
+test_expect_success 'get-regexp variable with no value' '
+       git config --get-regexp novalue > output &&
+       test_cmp expect output
+'
+
+echo 'novalue.variable true' > expect
+
+test_expect_success 'get-regexp --bool variable with no value' '
+       git config --bool --get-regexp novalue > output &&
+       test_cmp expect output
+'
+
+echo 'emptyvalue.variable ' > expect
+
+test_expect_success 'get-regexp variable with empty value' '
+       git config --get-regexp emptyvalue > output &&
+       test_cmp expect output
+'
+
+echo true > expect
+
+test_expect_success 'get bool variable with no value' '
+       git config --bool novalue.variable > output &&
+       test_cmp expect output
+'
+
+echo false > expect
+
+test_expect_success 'get bool variable with empty value' '
+       git config --bool emptyvalue.variable > output &&
+       test_cmp expect output
+'
+
+test_expect_success 'no arguments, but no crash' '
+       test_must_fail git config >output 2>&1 &&
+       test_i18ngrep usage output
+'
+
+cat > .git/config << EOF
+[a.b]
+       c = d
+EOF
+
+cat > expect << EOF
+[a.b]
+       c = d
+[a]
+       x = y
+EOF
+
+test_expect_success 'new section is partial match of another' '
+       git config a.x y &&
+       test_cmp expect .git/config
+'
+
+cat > expect << EOF
+[a.b]
+       c = d
+[a]
+       x = y
+       b = c
+[b]
+       x = y
+EOF
+
+test_expect_success 'new variable inserts into proper section' '
+       git config b.x y &&
+       git config a.b c &&
+       test_cmp expect .git/config
+'
+
+test_expect_success 'alternative --file (non-existing file should fail)' '
+       test_must_fail git config --file non-existing-config -l
+'
+
+cat > other-config << EOF
+[ein]
+       bahn = strasse
+EOF
+
+cat > expect << EOF
+ein.bahn=strasse
+EOF
+
+test_expect_success 'alternative GIT_CONFIG' '
+       GIT_CONFIG=other-config git config --list >output &&
+       test_cmp expect output
+'
+
+test_expect_success 'alternative GIT_CONFIG (--file)' '
+       git config --file other-config --list >output &&
+       test_cmp expect output
+'
+
+test_expect_success 'alternative GIT_CONFIG (--file=-)' '
+       git config --file - --list <other-config >output &&
+       test_cmp expect output
+'
+
+test_expect_success 'setting a value in stdin is an error' '
+       test_must_fail git config --file - some.value foo
+'
+
+test_expect_success 'editing stdin is an error' '
+       test_must_fail git config --file - --edit
+'
+
+test_expect_success 'refer config from subdirectory' '
+       mkdir x &&
+       (
+               cd x &&
+               echo strasse >expect &&
+               git config --get --file ../other-config ein.bahn >actual &&
+               test_cmp expect actual
+       )
+
+'
+
+test_expect_success 'refer config from subdirectory via --file' '
+       (
+               cd x &&
+               git config --file=../other-config --get ein.bahn >actual &&
+               test_cmp expect actual
+       )
+'
+
+cat > expect << EOF
+[ein]
+       bahn = strasse
+[anwohner]
+       park = ausweis
+EOF
+
+test_expect_success '--set in alternative file' '
+       git config --file=other-config anwohner.park ausweis &&
+       test_cmp expect other-config
+'
+
+cat > .git/config << EOF
+# Hallo
+       #Bello
+[branch "eins"]
+       x = 1
+[branch.eins]
+       y = 1
+       [branch "1 234 blabl/a"]
+weird
+EOF
+
+test_expect_success 'rename section' '
+       git config --rename-section branch.eins branch.zwei
+'
+
+cat > expect << EOF
+# Hallo
+       #Bello
+[branch "zwei"]
+       x = 1
+[branch "zwei"]
+       y = 1
+       [branch "1 234 blabl/a"]
+weird
+EOF
+
+test_expect_success 'rename succeeded' '
+       test_cmp expect .git/config
+'
+
+test_expect_success 'rename non-existing section' '
+       test_must_fail git config --rename-section \
+               branch."world domination" branch.drei
+'
+
+test_expect_success 'rename succeeded' '
+       test_cmp expect .git/config
+'
+
+test_expect_success 'rename another section' '
+       git config --rename-section branch."1 234 blabl/a" branch.drei
+'
+
+cat > expect << EOF
+# Hallo
+       #Bello
+[branch "zwei"]
+       x = 1
+[branch "zwei"]
+       y = 1
+[branch "drei"]
+weird
+EOF
+
+test_expect_success 'rename succeeded' '
+       test_cmp expect .git/config
+'
+
+cat >> .git/config << EOF
+[branch "vier"] z = 1
+EOF
+
+test_expect_success 'rename a section with a var on the same line' '
+       git config --rename-section branch.vier branch.zwei
+'
+
+cat > expect << EOF
+# Hallo
+       #Bello
+[branch "zwei"]
+       x = 1
+[branch "zwei"]
+       y = 1
+[branch "drei"]
+weird
+[branch "zwei"]
+       z = 1
+EOF
+
+test_expect_success 'rename succeeded' '
+       test_cmp expect .git/config
+'
+
+test_expect_success 'renaming empty section name is rejected' '
+       test_must_fail git config --rename-section branch.zwei ""
+'
+
+test_expect_success 'renaming to bogus section is rejected' '
+       test_must_fail git config --rename-section branch.zwei "bogus name"
+'
+
+cat >> .git/config << EOF
+  [branch "zwei"] a = 1 [branch "vier"]
+EOF
+
+test_expect_success 'remove section' '
+       git config --remove-section branch.zwei
+'
+
+cat > expect << EOF
+# Hallo
+       #Bello
+[branch "drei"]
+weird
+EOF
+
+test_expect_success 'section was removed properly' '
+       test_cmp expect .git/config
+'
+
+cat > expect << EOF
+[gitcvs]
+       enabled = true
+       dbname = %Ggitcvs2.%a.%m.sqlite
+[gitcvs "ext"]
+       dbname = %Ggitcvs1.%a.%m.sqlite
+EOF
+
+test_expect_success 'section ending' '
+       rm -f .git/config &&
+       git config gitcvs.enabled true &&
+       git config gitcvs.ext.dbname %Ggitcvs1.%a.%m.sqlite &&
+       git config gitcvs.dbname %Ggitcvs2.%a.%m.sqlite &&
+       test_cmp expect .git/config
+
+'
+
+test_expect_success numbers '
+       git config kilo.gram 1k &&
+       git config mega.ton 1m &&
+       echo 1024 >expect &&
+       echo 1048576 >>expect &&
+       git config --int --get kilo.gram >actual &&
+       git config --int --get mega.ton >>actual &&
+       test_cmp expect actual
+'
+
+test_expect_success '--int is at least 64 bits' '
+       git config giga.watts 121g &&
+       echo 129922760704 >expect &&
+       git config --int --get giga.watts >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'invalid unit' '
+       git config aninvalid.unit "1auto" &&
+       echo 1auto >expect &&
+       git config aninvalid.unit >actual &&
+       test_cmp expect actual &&
+       test_must_fail git config --int --get aninvalid.unit 2>actual &&
+       test_i18ngrep "bad numeric config value .1auto. for .aninvalid.unit. in file .git/config: invalid unit" actual
+'
+
+test_expect_success 'line number is reported correctly' '
+       printf "[bool]\n\tvar\n" >invalid &&
+       test_must_fail git config -f invalid --path bool.var 2>actual &&
+       test_i18ngrep "line 2" actual
+'
+
+test_expect_success 'invalid stdin config' '
+       echo "[broken" | test_must_fail git config --list --file - >output 2>&1 &&
+       test_i18ngrep "bad config line 1 in standard input" output
+'
+
+cat > expect << EOF
+true
+false
+true
+false
+true
+false
+true
+false
+EOF
+
+test_expect_success bool '
+
+       git config bool.true1 01 &&
+       git config bool.true2 -1 &&
+       git config bool.true3 YeS &&
+       git config bool.true4 true &&
+       git config bool.false1 000 &&
+       git config bool.false2 "" &&
+       git config bool.false3 nO &&
+       git config bool.false4 FALSE &&
+       rm -f result &&
+       for i in 1 2 3 4
+       do
+           git config --bool --get bool.true$i >>result &&
+           git config --bool --get bool.false$i >>result || return 1
+       done &&
+       test_cmp expect result'
+
+test_expect_success 'invalid bool (--get)' '
+
+       git config bool.nobool foobar &&
+       test_must_fail git config --bool --get bool.nobool'
+
+test_expect_success 'invalid bool (set)' '
+
+       test_must_fail git config --bool bool.nobool foobar'
+
+cat > expect <<\EOF
+[bool]
+       true1 = true
+       true2 = true
+       true3 = true
+       true4 = true
+       false1 = false
+       false2 = false
+       false3 = false
+       false4 = false
+EOF
+
+test_expect_success 'set --bool' '
+
+       rm -f .git/config &&
+       git config --bool bool.true1 01 &&
+       git config --bool bool.true2 -1 &&
+       git config --bool bool.true3 YeS &&
+       git config --bool bool.true4 true &&
+       git config --bool bool.false1 000 &&
+       git config --bool bool.false2 "" &&
+       git config --bool bool.false3 nO &&
+       git config --bool bool.false4 FALSE &&
+       test_cmp expect .git/config'
+
+cat > expect <<\EOF
+[int]
+       val1 = 1
+       val2 = -1
+       val3 = 5242880
+EOF
+
+test_expect_success 'set --int' '
+
+       rm -f .git/config &&
+       git config --int int.val1 01 &&
+       git config --int int.val2 -1 &&
+       git config --int int.val3 5m &&
+       test_cmp expect .git/config
+'
+
+test_expect_success 'get --bool-or-int' '
+       cat >.git/config <<-\EOF &&
+       [bool]
+       true1
+       true2 = true
+       false = false
+       [int]
+       int1 = 0
+       int2 = 1
+       int3 = -1
+       EOF
+       cat >expect <<-\EOF &&
+       true
+       true
+       false
+       0
+       1
+       -1
+       EOF
+       {
+               git config --bool-or-int bool.true1 &&
+               git config --bool-or-int bool.true2 &&
+               git config --bool-or-int bool.false &&
+               git config --bool-or-int int.int1 &&
+               git config --bool-or-int int.int2 &&
+               git config --bool-or-int int.int3
+       } >actual &&
+       test_cmp expect actual
+'
+
+cat >expect <<\EOF
+[bool]
+       true1 = true
+       false1 = false
+       true2 = true
+       false2 = false
+[int]
+       int1 = 0
+       int2 = 1
+       int3 = -1
+EOF
+
+test_expect_success 'set --bool-or-int' '
+       rm -f .git/config &&
+       git config --bool-or-int bool.true1 true &&
+       git config --bool-or-int bool.false1 false &&
+       git config --bool-or-int bool.true2 yes &&
+       git config --bool-or-int bool.false2 no &&
+       git config --bool-or-int int.int1 0 &&
+       git config --bool-or-int int.int2 1 &&
+       git config --bool-or-int int.int3 -1 &&
+       test_cmp expect .git/config
+'
+
+cat >expect <<\EOF
+[path]
+       home = ~/
+       normal = /dev/null
+       trailingtilde = foo~
+EOF
+
+test_expect_success !MINGW 'set --path' '
+       rm -f .git/config &&
+       git config --path path.home "~/" &&
+       git config --path path.normal "/dev/null" &&
+       git config --path path.trailingtilde "foo~" &&
+       test_cmp expect .git/config'
+
+if test_have_prereq !MINGW && test "${HOME+set}"
+then
+       test_set_prereq HOMEVAR
+fi
+
+cat >expect <<EOF
+$HOME/
+/dev/null
+foo~
+EOF
+
+test_expect_success HOMEVAR 'get --path' '
+       git config --get --path path.home > result &&
+       git config --get --path path.normal >> result &&
+       git config --get --path path.trailingtilde >> result &&
+       test_cmp expect result
+'
+
+cat >expect <<\EOF
+/dev/null
+foo~
+EOF
+
+test_expect_success !MINGW 'get --path copes with unset $HOME' '
+       (
+               unset HOME;
+               test_must_fail git config --get --path path.home \
+                       >result 2>msg &&
+               git config --get --path path.normal >>result &&
+               git config --get --path path.trailingtilde >>result
+       ) &&
+       test_i18ngrep "[Ff]ailed to expand.*~/" msg &&
+       test_cmp expect result
+'
+
+test_expect_success 'get --path barfs on boolean variable' '
+       echo "[path]bool" >.git/config &&
+       test_must_fail git config --get --path path.bool
+'
+
+test_expect_success 'get --expiry-date' '
+       rel="3.weeks.5.days.00:00" &&
+       rel_out="$rel ->" &&
+       cat >.git/config <<-\EOF &&
+       [date]
+       valid1 = "3.weeks.5.days 00:00"
+       valid2 = "Fri Jun 4 15:46:55 2010"
+       valid3 = "2017/11/11 11:11:11PM"
+       valid4 = "2017/11/10 09:08:07 PM"
+       valid5 = "never"
+       invalid1 = "abc"
+       EOF
+       cat >expect <<-EOF &&
+       $(test-tool date timestamp $rel)
+       1275666415
+       1510441871
+       1510348087
+       0
+       EOF
+       {
+               echo "$rel_out $(git config --expiry-date date.valid1)"
+               git config --expiry-date date.valid2 &&
+               git config --expiry-date date.valid3 &&
+               git config --expiry-date date.valid4 &&
+               git config --expiry-date date.valid5
+       } >actual &&
+       test_cmp expect actual &&
+       test_must_fail git config --expiry-date date.invalid1
+'
+
+test_expect_success 'get --type=color' '
+       rm .git/config &&
+       git config foo.color "red" &&
+       git config --get --type=color foo.color >actual.raw &&
+       test_decode_color <actual.raw >actual &&
+       echo "<RED>" >expect &&
+       test_cmp expect actual
+'
+
+cat >expect << EOF
+[foo]
+       color = red
+EOF
+
+test_expect_success 'set --type=color' '
+       rm .git/config &&
+       git config --type=color foo.color "red" &&
+       test_cmp expect .git/config
+'
+
+test_expect_success 'get --type=color barfs on non-color' '
+       echo "[foo]bar=not-a-color" >.git/config &&
+       test_must_fail git config --get --type=color foo.bar
+'
+
+test_expect_success 'set --type=color barfs on non-color' '
+       test_must_fail git config --type=color foo.color "not-a-color" 2>error &&
+       test_i18ngrep "cannot parse color" error
+'
+
+cat > expect << EOF
+[quote]
+       leading = " test"
+       ending = "test "
+       semicolon = "test;test"
+       hash = "test#test"
+EOF
+test_expect_success 'quoting' '
+       rm -f .git/config &&
+       git config quote.leading " test" &&
+       git config quote.ending "test " &&
+       git config quote.semicolon "test;test" &&
+       git config quote.hash "test#test" &&
+       test_cmp expect .git/config
+'
+
+test_expect_success 'key with newline' '
+       test_must_fail git config "key.with
+newline" 123'
+
+test_expect_success 'value with newline' 'git config key.sub value.with\\\
+newline'
+
+cat > .git/config <<\EOF
+[section]
+       ; comment \
+       continued = cont\
+inued
+       noncont   = not continued ; \
+       quotecont = "cont;\
+inued"
+EOF
+
+cat > expect <<\EOF
+section.continued=continued
+section.noncont=not continued
+section.quotecont=cont;inued
+EOF
+
+test_expect_success 'value continued on next line' '
+       git config --list > result &&
+       test_cmp result expect
+'
+
+cat > .git/config <<\EOF
+[section "sub=section"]
+       val1 = foo=bar
+       val2 = foo\nbar
+       val3 = \n\n
+       val4 =
+       val5
+EOF
+
+cat > expect <<\EOF
+section.sub=section.val1
+foo=barQsection.sub=section.val2
+foo
+barQsection.sub=section.val3
+
+
+Qsection.sub=section.val4
+Qsection.sub=section.val5Q
+EOF
+test_expect_success '--null --list' '
+       git config --null --list >result.raw &&
+       nul_to_q <result.raw >result &&
+       echo >>result &&
+       test_cmp expect result
+'
+
+test_expect_success '--null --get-regexp' '
+       git config --null --get-regexp "val[0-9]" >result.raw &&
+       nul_to_q <result.raw >result &&
+       echo >>result &&
+       test_cmp expect result
+'
+
+test_expect_success 'inner whitespace kept verbatim' '
+       git config section.val "foo       bar" &&
+       echo "foo       bar" >expect &&
+       git config section.val >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success SYMLINKS 'symlinked configuration' '
+       ln -s notyet myconfig &&
+       git config --file=myconfig test.frotz nitfol &&
+       test -h myconfig &&
+       test -f notyet &&
+       test "z$(git config --file=notyet test.frotz)" = znitfol &&
+       git config --file=myconfig test.xyzzy rezrov &&
+       test -h myconfig &&
+       test -f notyet &&
+       cat >expect <<-\EOF &&
+       nitfol
+       rezrov
+       EOF
+       {
+               git config --file=notyet test.frotz &&
+               git config --file=notyet test.xyzzy
+       } >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'nonexistent configuration' '
+       test_must_fail git config --file=doesnotexist --list &&
+       test_must_fail git config --file=doesnotexist test.xyzzy
+'
+
+test_expect_success SYMLINKS 'symlink to nonexistent configuration' '
+       ln -s doesnotexist linktonada &&
+       ln -s linktonada linktolinktonada &&
+       test_must_fail git config --file=linktonada --list &&
+       test_must_fail git config --file=linktolinktonada --list
+'
+
+test_expect_success 'check split_cmdline return' "
+       git config alias.split-cmdline-fix 'echo \"' &&
+       test_must_fail git split-cmdline-fix &&
+       echo foo > foo &&
+       git add foo &&
+       git commit -m 'initial commit' &&
+       git config branch.master.mergeoptions 'echo \"' &&
+       test_must_fail git merge master
+"
+
+test_expect_success 'git -c "key=value" support' '
+       cat >expect <<-\EOF &&
+       value
+       value
+       true
+       EOF
+       {
+               git -c core.name=value config core.name &&
+               git -c foo.CamelCase=value config foo.camelcase &&
+               git -c foo.flag config --bool foo.flag
+       } >actual &&
+       test_cmp expect actual &&
+       test_must_fail git -c name=value config core.name
+'
+
+# We just need a type-specifier here that cares about the
+# distinction internally between a NULL boolean and a real
+# string (because most of git's internal parsers do care).
+# Using "--path" works, but we do not otherwise care about
+# its semantics.
+test_expect_success 'git -c can represent empty string' '
+       echo >expect &&
+       git -c foo.empty= config --path foo.empty >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'key sanity-checking' '
+       test_must_fail git config foo=bar &&
+       test_must_fail git config foo=.bar &&
+       test_must_fail git config foo.ba=r &&
+       test_must_fail git config foo.1bar &&
+       test_must_fail git config foo."ba
+                               z".bar &&
+       test_must_fail git config . false &&
+       test_must_fail git config .foo false &&
+       test_must_fail git config foo. false &&
+       test_must_fail git config .foo. false &&
+       git config foo.bar true &&
+       git config foo."ba =z".bar false
+'
+
+test_expect_success 'git -c works with aliases of builtins' '
+       git config alias.checkconfig "-c foo.check=bar config foo.check" &&
+       echo bar >expect &&
+       git checkconfig >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'aliases can be CamelCased' '
+       test_config alias.CamelCased "rev-parse HEAD" &&
+       git CamelCased >out &&
+       git rev-parse HEAD >expect &&
+       test_cmp expect out
+'
+
+test_expect_success 'git -c does not split values on equals' '
+       echo "value with = in it" >expect &&
+       git -c core.foo="value with = in it" config core.foo >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'git -c dies on bogus config' '
+       test_must_fail git -c core.bare=foo rev-parse
+'
+
+test_expect_success 'git -c complains about empty key' '
+       test_must_fail git -c "=foo" rev-parse
+'
+
+test_expect_success 'git -c complains about empty key and value' '
+       test_must_fail git -c "" rev-parse
+'
+
+test_expect_success 'multiple git -c appends config' '
+       test_config alias.x "!git -c x.two=2 config --get-regexp ^x\.*" &&
+       cat >expect <<-\EOF &&
+       x.one 1
+       x.two 2
+       EOF
+       git -c x.one=1 x >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'last one wins: two level vars' '
+
+       # sec.var and sec.VAR are the same variable, as the first
+       # and the last level of a configuration variable name is
+       # case insensitive.
+
+       echo VAL >expect &&
+
+       git -c sec.var=val -c sec.VAR=VAL config --get sec.var >actual &&
+       test_cmp expect actual &&
+       git -c SEC.var=val -c sec.var=VAL config --get sec.var >actual &&
+       test_cmp expect actual &&
+
+       git -c sec.var=val -c sec.VAR=VAL config --get SEC.var >actual &&
+       test_cmp expect actual &&
+       git -c SEC.var=val -c sec.var=VAL config --get sec.VAR >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'last one wins: three level vars' '
+
+       # v.a.r and v.A.r are not the same variable, as the middle
+       # level of a three-level configuration variable name is
+       # case sensitive.
+
+       echo val >expect &&
+       git -c v.a.r=val -c v.A.r=VAL config --get v.a.r >actual &&
+       test_cmp expect actual &&
+       git -c v.a.r=val -c v.A.r=VAL config --get V.a.R >actual &&
+       test_cmp expect actual &&
+
+       # v.a.r and V.a.R are the same variable, as the first
+       # and the last level of a configuration variable name is
+       # case insensitive.
+
+       echo VAL >expect &&
+       git -c v.a.r=val -c v.a.R=VAL config --get v.a.r >actual &&
+       test_cmp expect actual &&
+       git -c v.a.r=val -c V.a.r=VAL config --get v.a.r >actual &&
+       test_cmp expect actual &&
+       git -c v.a.r=val -c v.a.R=VAL config --get V.a.R >actual &&
+       test_cmp expect actual &&
+       git -c v.a.r=val -c V.a.r=VAL config --get V.a.R >actual &&
+       test_cmp expect actual
+'
+
+for VAR in a .a a. a.0b a."b c". a."b c".0d
+do
+       test_expect_success "git -c $VAR=VAL rejects invalid '$VAR'" '
+               test_must_fail git -c "$VAR=VAL" config -l
+       '
+done
+
+for VAR in a.b a."b c".d
+do
+       test_expect_success "git -c $VAR=VAL works with valid '$VAR'" '
+               echo VAL >expect &&
+               git -c "$VAR=VAL" config --get "$VAR" >actual &&
+               test_cmp expect actual
+       '
+done
+
+test_expect_success 'git -c is not confused by empty environment' '
+       GIT_CONFIG_PARAMETERS="" git -c x.one=1 config --list
+'
+
+sq="'"
+test_expect_success 'detect bogus GIT_CONFIG_PARAMETERS' '
+       cat >expect <<-\EOF &&
+       env.one one
+       env.two two
+       EOF
+       GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq} ${sq}env.two=two${sq}" \
+               git config --get-regexp "env.*" >actual &&
+       test_cmp expect actual &&
+
+       cat >expect <<-EOF &&
+       env.one one${sq}
+       env.two two
+       EOF
+       GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq$sq$sq ${sq}env.two=two${sq}" \
+               git config --get-regexp "env.*" >actual &&
+       test_cmp expect actual &&
+
+       test_must_fail env \
+               GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq ${sq}env.two=two${sq}" \
+               git config --get-regexp "env.*"
+'
+
+test_expect_success 'git config --edit works' '
+       git config -f tmp test.value no &&
+       echo test.value=yes >expect &&
+       GIT_EDITOR="echo [test]value=yes >" git config -f tmp --edit &&
+       git config -f tmp --list >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'git config --edit respects core.editor' '
+       git config -f tmp test.value no &&
+       echo test.value=yes >expect &&
+       test_config core.editor "echo [test]value=yes >" &&
+       git config -f tmp --edit &&
+       git config -f tmp --list >actual &&
+       test_cmp expect actual
+'
+
+# malformed configuration files
+test_expect_success 'barf on syntax error' '
+       cat >.git/config <<-\EOF &&
+       # broken section line
+       [section]
+       key garbage
+       EOF
+       test_must_fail git config --get section.key >actual 2>error &&
+       test_i18ngrep " line 3 " error
+'
+
+test_expect_success 'barf on incomplete section header' '
+       cat >.git/config <<-\EOF &&
+       # broken section line
+       [section
+       key = value
+       EOF
+       test_must_fail git config --get section.key >actual 2>error &&
+       test_i18ngrep " line 2 " error
+'
+
+test_expect_success 'barf on incomplete string' '
+       cat >.git/config <<-\EOF &&
+       # broken section line
+       [section]
+       key = "value string
+       EOF
+       test_must_fail git config --get section.key >actual 2>error &&
+       test_i18ngrep " line 3 " error
+'
+
+test_expect_success 'urlmatch' '
+       cat >.git/config <<-\EOF &&
+       [http]
+               sslVerify
+       [http "https://weak.example.com"]
+               sslVerify = false
+               cookieFile = /tmp/cookie.txt
+       EOF
+
+       test_expect_code 1 git config --bool --get-urlmatch doesnt.exist https://good.example.com >actual &&
+       test_must_be_empty actual &&
+
+       echo true >expect &&
+       git config --bool --get-urlmatch http.SSLverify https://good.example.com >actual &&
+       test_cmp expect actual &&
+
+       echo false >expect &&
+       git config --bool --get-urlmatch http.sslverify https://weak.example.com >actual &&
+       test_cmp expect actual &&
+
+       {
+               echo http.cookiefile /tmp/cookie.txt &&
+               echo http.sslverify false
+       } >expect &&
+       git config --get-urlmatch HTTP https://weak.example.com >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'urlmatch favors more specific URLs' '
+       cat >.git/config <<-\EOF &&
+       [http "https://example.com/"]
+               cookieFile = /tmp/root.txt
+       [http "https://example.com/subdirectory"]
+               cookieFile = /tmp/subdirectory.txt
+       [http "https://user@example.com/"]
+               cookieFile = /tmp/user.txt
+       [http "https://averylonguser@example.com/"]
+               cookieFile = /tmp/averylonguser.txt
+       [http "https://preceding.example.com"]
+               cookieFile = /tmp/preceding.txt
+       [http "https://*.example.com"]
+               cookieFile = /tmp/wildcard.txt
+       [http "https://*.example.com/wildcardwithsubdomain"]
+               cookieFile = /tmp/wildcardwithsubdomain.txt
+       [http "https://trailing.example.com"]
+               cookieFile = /tmp/trailing.txt
+       [http "https://user@*.example.com/"]
+               cookieFile = /tmp/wildcardwithuser.txt
+       [http "https://sub.example.com/"]
+               cookieFile = /tmp/sub.txt
+       EOF
+
+       echo http.cookiefile /tmp/root.txt >expect &&
+       git config --get-urlmatch HTTP https://example.com >actual &&
+       test_cmp expect actual &&
+
+       echo http.cookiefile /tmp/subdirectory.txt >expect &&
+       git config --get-urlmatch HTTP https://example.com/subdirectory >actual &&
+       test_cmp expect actual &&
+
+       echo http.cookiefile /tmp/subdirectory.txt >expect &&
+       git config --get-urlmatch HTTP https://example.com/subdirectory/nested >actual &&
+       test_cmp expect actual &&
+
+       echo http.cookiefile /tmp/user.txt >expect &&
+       git config --get-urlmatch HTTP https://user@example.com/ >actual &&
+       test_cmp expect actual &&
+
+       echo http.cookiefile /tmp/subdirectory.txt >expect &&
+       git config --get-urlmatch HTTP https://averylonguser@example.com/subdirectory >actual &&
+       test_cmp expect actual &&
+
+       echo http.cookiefile /tmp/preceding.txt >expect &&
+       git config --get-urlmatch HTTP https://preceding.example.com >actual &&
+       test_cmp expect actual &&
+
+       echo http.cookiefile /tmp/wildcard.txt >expect &&
+       git config --get-urlmatch HTTP https://wildcard.example.com >actual &&
+       test_cmp expect actual &&
+
+       echo http.cookiefile /tmp/sub.txt >expect &&
+       git config --get-urlmatch HTTP https://sub.example.com/wildcardwithsubdomain >actual &&
+       test_cmp expect actual &&
+
+       echo http.cookiefile /tmp/trailing.txt >expect &&
+       git config --get-urlmatch HTTP https://trailing.example.com >actual &&
+       test_cmp expect actual &&
+
+       echo http.cookiefile /tmp/sub.txt >expect &&
+       git config --get-urlmatch HTTP https://user@sub.example.com >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'urlmatch with wildcard' '
+       cat >.git/config <<-\EOF &&
+       [http]
+               sslVerify
+       [http "https://*.example.com"]
+               sslVerify = false
+               cookieFile = /tmp/cookie.txt
+       EOF
+
+       test_expect_code 1 git config --bool --get-urlmatch doesnt.exist https://good.example.com >actual &&
+       test_must_be_empty actual &&
+
+       echo true >expect &&
+       git config --bool --get-urlmatch http.SSLverify https://example.com >actual &&
+       test_cmp expect actual &&
+
+       echo true >expect &&
+       git config --bool --get-urlmatch http.SSLverify https://good-example.com >actual &&
+       test_cmp expect actual &&
+
+       echo true >expect &&
+       git config --bool --get-urlmatch http.sslverify https://deep.nested.example.com >actual &&
+       test_cmp expect actual &&
+
+       echo false >expect &&
+       git config --bool --get-urlmatch http.sslverify https://good.example.com >actual &&
+       test_cmp expect actual &&
+
+       {
+               echo http.cookiefile /tmp/cookie.txt &&
+               echo http.sslverify false
+       } >expect &&
+       git config --get-urlmatch HTTP https://good.example.com >actual &&
+       test_cmp expect actual &&
+
+       echo http.sslverify >expect &&
+       git config --get-urlmatch HTTP https://more.example.com.au >actual &&
+       test_cmp expect actual
+'
+
+# good section hygiene
+test_expect_success '--unset last key removes section (except if commented)' '
+       cat >.git/config <<-\EOF &&
+       # some generic comment on the configuration file itself
+       # a comment specific to this "section" section.
+       [section]
+       # some intervening lines
+       # that should also be dropped
+
+       key = value
+       # please be careful when you update the above variable
+       EOF
+
+       cat >expect <<-\EOF &&
+       # some generic comment on the configuration file itself
+       # a comment specific to this "section" section.
+       [section]
+       # some intervening lines
+       # that should also be dropped
+
+       # please be careful when you update the above variable
+       EOF
+
+       git config --unset section.key &&
+       test_cmp expect .git/config &&
+
+       cat >.git/config <<-\EOF &&
+       [section]
+       key = value
+       [next-section]
+       EOF
+
+       cat >expect <<-\EOF &&
+       [next-section]
+       EOF
+
+       git config --unset section.key &&
+       test_cmp expect .git/config &&
+
+       q_to_tab >.git/config <<-\EOF &&
+       [one]
+       Qkey = "multiline \
+       QQ# with comment"
+       [two]
+       key = true
+       EOF
+       git config --unset two.key &&
+       ! grep two .git/config &&
+
+       q_to_tab >.git/config <<-\EOF &&
+       [one]
+       Qkey = "multiline \
+       QQ# with comment"
+       [one]
+       key = true
+       EOF
+       git config --unset-all one.key &&
+       test_line_count = 0 .git/config &&
+
+       q_to_tab >.git/config <<-\EOF &&
+       [one]
+       Qkey = true
+       Q# a comment not at the start
+       [two]
+       Qkey = true
+       EOF
+       git config --unset two.key &&
+       grep two .git/config &&
+
+       q_to_tab >.git/config <<-\EOF &&
+       [one]
+       Qkey = not [two "subsection"]
+       [two "subsection"]
+       [two "subsection"]
+       Qkey = true
+       [TWO "subsection"]
+       [one]
+       EOF
+       git config --unset two.subsection.key &&
+       test "not [two subsection]" = "$(git config one.key)" &&
+       test_line_count = 3 .git/config
+'
+
+test_expect_success '--unset-all removes section if empty & uncommented' '
+       cat >.git/config <<-\EOF &&
+       [section]
+       key = value1
+       key = value2
+       EOF
+
+       git config --unset-all section.key &&
+       test_line_count = 0 .git/config
+'
+
+test_expect_success 'adding a key into an empty section reuses header' '
+       cat >.git/config <<-\EOF &&
+       [section]
+       EOF
+
+       q_to_tab >expect <<-\EOF &&
+       [section]
+       Qkey = value
+       EOF
+
+       git config section.key value &&
+       test_cmp expect .git/config
+'
+
+test_expect_success POSIXPERM,PERL 'preserves existing permissions' '
+       chmod 0600 .git/config &&
+       git config imap.pass Hunter2 &&
+       perl -e \
+         "die q(badset) if ((stat(q(.git/config)))[2] & 07777) != 0600" &&
+       git config --rename-section imap pop &&
+       perl -e \
+         "die q(badrename) if ((stat(q(.git/config)))[2] & 07777) != 0600"
+'
+
+! test_have_prereq MINGW ||
+HOME="$(pwd)" # convert to Windows path
+
+test_expect_success 'set up --show-origin tests' '
+       INCLUDE_DIR="$HOME/include" &&
+       mkdir -p "$INCLUDE_DIR" &&
+       cat >"$INCLUDE_DIR"/absolute.include <<-\EOF &&
+               [user]
+                       absolute = include
+       EOF
+       cat >"$INCLUDE_DIR"/relative.include <<-\EOF &&
+               [user]
+                       relative = include
+       EOF
+       cat >"$HOME"/.gitconfig <<-EOF &&
+               [user]
+                       global = true
+                       override = global
+               [include]
+                       path = "$INCLUDE_DIR/absolute.include"
+       EOF
+       cat >.git/config <<-\EOF
+               [user]
+                       local = true
+                       override = local
+               [include]
+                       path = ../include/relative.include
+       EOF
+'
+
+test_expect_success '--show-origin with --list' '
+       cat >expect <<-EOF &&
+               file:$HOME/.gitconfig   user.global=true
+               file:$HOME/.gitconfig   user.override=global
+               file:$HOME/.gitconfig   include.path=$INCLUDE_DIR/absolute.include
+               file:$INCLUDE_DIR/absolute.include      user.absolute=include
+               file:.git/config        user.local=true
+               file:.git/config        user.override=local
+               file:.git/config        include.path=../include/relative.include
+               file:.git/../include/relative.include   user.relative=include
+               command line:   user.cmdline=true
+       EOF
+       git -c user.cmdline=true config --list --show-origin >output &&
+       test_cmp expect output
+'
+
+test_expect_success '--show-origin with --list --null' '
+       cat >expect <<-EOF &&
+               file:$HOME/.gitconfigQuser.global
+               trueQfile:$HOME/.gitconfigQuser.override
+               globalQfile:$HOME/.gitconfigQinclude.path
+               $INCLUDE_DIR/absolute.includeQfile:$INCLUDE_DIR/absolute.includeQuser.absolute
+               includeQfile:.git/configQuser.local
+               trueQfile:.git/configQuser.override
+               localQfile:.git/configQinclude.path
+               ../include/relative.includeQfile:.git/../include/relative.includeQuser.relative
+               includeQcommand line:Quser.cmdline
+               trueQ
+       EOF
+       git -c user.cmdline=true config --null --list --show-origin >output.raw &&
+       nul_to_q <output.raw >output &&
+       # The here-doc above adds a newline that the --null output would not
+       # include. Add it here to make the two comparable.
+       echo >>output &&
+       test_cmp expect output
+'
+
+test_expect_success '--show-origin with single file' '
+       cat >expect <<-\EOF &&
+               file:.git/config        user.local=true
+               file:.git/config        user.override=local
+               file:.git/config        include.path=../include/relative.include
+       EOF
+       git config --local --list --show-origin >output &&
+       test_cmp expect output
+'
+
+test_expect_success '--show-origin with --get-regexp' '
+       cat >expect <<-EOF &&
+               file:$HOME/.gitconfig   user.global true
+               file:.git/config        user.local true
+       EOF
+       git config --show-origin --get-regexp "user\.[g|l].*" >output &&
+       test_cmp expect output
+'
+
+test_expect_success '--show-origin getting a single key' '
+       cat >expect <<-\EOF &&
+               file:.git/config        local
+       EOF
+       git config --show-origin user.override >output &&
+       test_cmp expect output
+'
+
+test_expect_success 'set up custom config file' '
+       CUSTOM_CONFIG_FILE="file\" (dq) and spaces.conf" &&
+       cat >"$CUSTOM_CONFIG_FILE" <<-\EOF
+               [user]
+                       custom = true
+       EOF
+'
+
+test_expect_success !MINGW '--show-origin escape special file name characters' '
+       cat >expect <<-\EOF &&
+               file:"file\" (dq) and spaces.conf"      user.custom=true
+       EOF
+       git config --file "$CUSTOM_CONFIG_FILE" --show-origin --list >output &&
+       test_cmp expect output
+'
+
+test_expect_success '--show-origin stdin' '
+       cat >expect <<-\EOF &&
+               standard input: user.custom=true
+       EOF
+       git config --file - --show-origin --list <"$CUSTOM_CONFIG_FILE" >output &&
+       test_cmp expect output
+'
+
+test_expect_success '--show-origin stdin with file include' '
+       cat >"$INCLUDE_DIR"/stdin.include <<-EOF &&
+               [user]
+                       stdin = include
+       EOF
+       cat >expect <<-EOF &&
+               file:$INCLUDE_DIR/stdin.include include
+       EOF
+       echo "[include]path=\"$INCLUDE_DIR\"/stdin.include" \
+               | git config --show-origin --includes --file - user.stdin >output &&
+       test_cmp expect output
+'
+
+test_expect_success !MINGW '--show-origin blob' '
+       blob=$(git hash-object -w "$CUSTOM_CONFIG_FILE") &&
+       cat >expect <<-EOF &&
+               blob:$blob      user.custom=true
+       EOF
+       git config --blob=$blob --show-origin --list >output &&
+       test_cmp expect output
+'
+
+test_expect_success !MINGW '--show-origin blob ref' '
+       cat >expect <<-\EOF &&
+               blob:"master:file\" (dq) and spaces.conf"       user.custom=true
+       EOF
+       git add "$CUSTOM_CONFIG_FILE" &&
+       git commit -m "new config file" &&
+       git config --blob=master:"$CUSTOM_CONFIG_FILE" --show-origin --list >output &&
+       test_cmp expect output
+'
+
+test_expect_success '--local requires a repo' '
+       # we expect 128 to ensure that we do not simply
+       # fail to find anything and return code "1"
+       test_expect_code 128 nongit git config --local foo.bar
+'
+
+cat >.git/config <<-\EOF &&
+[core]
+foo = true
+number = 10
+big = 1M
+EOF
+
+test_expect_success 'identical modern --type specifiers are allowed' '
+       git config --type=int --type=int core.big >actual &&
+       echo 1048576 >expect &&
+       test_cmp expect actual
+'
+
+test_expect_success 'identical legacy --type specifiers are allowed' '
+       git config --int --int core.big >actual &&
+       echo 1048576 >expect &&
+       test_cmp expect actual
+'
+
+test_expect_success 'identical mixed --type specifiers are allowed' '
+       git config --int --type=int core.big >actual &&
+       echo 1048576 >expect &&
+       test_cmp expect actual
+'
+
+test_expect_success 'non-identical modern --type specifiers are not allowed' '
+       test_must_fail git config --type=int --type=bool core.big 2>error &&
+       test_i18ngrep "only one type at a time" error
+'
+
+test_expect_success 'non-identical legacy --type specifiers are not allowed' '
+       test_must_fail git config --int --bool core.big 2>error &&
+       test_i18ngrep "only one type at a time" error
+'
+
+test_expect_success 'non-identical mixed --type specifiers are not allowed' '
+       test_must_fail git config --type=int --bool core.big 2>error &&
+       test_i18ngrep "only one type at a time" error
+'
+
+test_expect_success '--type allows valid type specifiers' '
+       echo "true" >expect &&
+       git config --type=bool core.foo >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success '--no-type unsets type specifiers' '
+       echo "10" >expect &&
+       git config --type=bool --no-type core.number >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'unset type specifiers may be reset to conflicting ones' '
+       echo 1048576 >expect &&
+       git config --type=bool --no-type --type=int core.big >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success '--type rejects unknown specifiers' '
+       test_must_fail git config --type=nonsense core.foo 2>error &&
+       test_i18ngrep "unrecognized --type argument" error
+'
+
+test_expect_success '--replace-all does not invent newlines' '
+       q_to_tab >.git/config <<-\EOF &&
+       [abc]key
+       QkeepSection
+       [xyz]
+       Qkey = 1
+       [abc]
+       Qkey = a
+       EOF
+       q_to_tab >expect <<-\EOF &&
+       [abc]
+       QkeepSection
+       [xyz]
+       Qkey = 1
+       [abc]
+       Qkey = b
+       EOF
+       git config --replace-all abc.key b &&
+       test_cmp .git/config expect
+'
+
+test_done
diff --git a/t/t1300-repo-config.sh b/t/t1300-repo-config.sh
deleted file mode 100755 (executable)
index 4f8e6f5..0000000
+++ /dev/null
@@ -1,1614 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Johannes Schindelin
-#
-
-test_description='Test git config in different settings'
-
-. ./test-lib.sh
-
-test_expect_success 'clear default config' '
-       rm -f .git/config
-'
-
-cat > expect << EOF
-[core]
-       penguin = little blue
-EOF
-test_expect_success 'initial' '
-       git config core.penguin "little blue" &&
-       test_cmp expect .git/config
-'
-
-cat > expect << EOF
-[core]
-       penguin = little blue
-       Movie = BadPhysics
-EOF
-test_expect_success 'mixed case' '
-       git config Core.Movie BadPhysics &&
-       test_cmp expect .git/config
-'
-
-cat > expect << EOF
-[core]
-       penguin = little blue
-       Movie = BadPhysics
-[Cores]
-       WhatEver = Second
-EOF
-test_expect_success 'similar section' '
-       git config Cores.WhatEver Second &&
-       test_cmp expect .git/config
-'
-
-cat > expect << EOF
-[core]
-       penguin = little blue
-       Movie = BadPhysics
-       UPPERCASE = true
-[Cores]
-       WhatEver = Second
-EOF
-test_expect_success 'uppercase section' '
-       git config CORE.UPPERCASE true &&
-       test_cmp expect .git/config
-'
-
-test_expect_success 'replace with non-match' '
-       git config core.penguin kingpin !blue
-'
-
-test_expect_success 'replace with non-match (actually matching)' '
-       git config core.penguin "very blue" !kingpin
-'
-
-cat > expect << EOF
-[core]
-       penguin = very blue
-       Movie = BadPhysics
-       UPPERCASE = true
-       penguin = kingpin
-[Cores]
-       WhatEver = Second
-EOF
-
-test_expect_success 'non-match result' 'test_cmp expect .git/config'
-
-test_expect_success 'find mixed-case key by canonical name' '
-       echo Second >expect &&
-       git config cores.whatever >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'find mixed-case key by non-canonical name' '
-       echo Second >expect &&
-       git config CoReS.WhAtEvEr >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'subsections are not canonicalized by git-config' '
-       cat >>.git/config <<-\EOF &&
-       [section.SubSection]
-       key = one
-       [section "SubSection"]
-       key = two
-       EOF
-       echo one >expect &&
-       git config section.subsection.key >actual &&
-       test_cmp expect actual &&
-       echo two >expect &&
-       git config section.SubSection.key >actual &&
-       test_cmp expect actual
-'
-
-cat > .git/config <<\EOF
-[alpha]
-bar = foo
-[beta]
-baz = multiple \
-lines
-EOF
-
-test_expect_success 'unset with cont. lines' '
-       git config --unset beta.baz
-'
-
-cat > expect <<\EOF
-[alpha]
-bar = foo
-[beta]
-EOF
-
-test_expect_success 'unset with cont. lines is correct' 'test_cmp expect .git/config'
-
-cat > .git/config << EOF
-[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
-
-# empty line
-               ; comment
-               haha   ="beta" # last silly comment
-haha = hello
-       haha = bello
-[nextSection] noNewline = ouch
-EOF
-
-cp .git/config .git/config2
-
-test_expect_success 'multiple unset' '
-       git config --unset-all beta.haha
-'
-
-cat > expect << EOF
-[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
-
-# empty line
-               ; comment
-[nextSection] noNewline = ouch
-EOF
-
-test_expect_success 'multiple unset is correct' '
-       test_cmp expect .git/config
-'
-
-cp .git/config2 .git/config
-
-test_expect_success '--replace-all missing value' '
-       test_must_fail git config --replace-all beta.haha &&
-       test_cmp .git/config2 .git/config
-'
-
-rm .git/config2
-
-test_expect_success '--replace-all' '
-       git config --replace-all beta.haha gamma
-'
-
-cat > expect << EOF
-[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
-
-# empty line
-               ; comment
-       haha = gamma
-[nextSection] noNewline = ouch
-EOF
-
-test_expect_success 'all replaced' '
-       test_cmp expect .git/config
-'
-
-cat > expect << EOF
-[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
-
-# empty line
-               ; comment
-       haha = alpha
-[nextSection] noNewline = ouch
-EOF
-test_expect_success 'really mean test' '
-       git config beta.haha alpha &&
-       test_cmp expect .git/config
-'
-
-cat > expect << EOF
-[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
-
-# empty line
-               ; comment
-       haha = alpha
-[nextSection]
-       nonewline = wow
-EOF
-test_expect_success 'really really mean test' '
-       git config nextsection.nonewline wow &&
-       test_cmp expect .git/config
-'
-
-test_expect_success 'get value' '
-       echo alpha >expect &&
-       git config beta.haha >actual &&
-       test_cmp expect actual
-'
-
-cat > expect << EOF
-[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
-
-# empty line
-               ; comment
-[nextSection]
-       nonewline = wow
-EOF
-test_expect_success 'unset' '
-       git config --unset beta.haha &&
-       test_cmp expect .git/config
-'
-
-cat > expect << EOF
-[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
-
-# empty line
-               ; comment
-[nextSection]
-       nonewline = wow
-       NoNewLine = wow2 for me
-EOF
-test_expect_success 'multivar' '
-       git config nextsection.NoNewLine "wow2 for me" "for me$" &&
-       test_cmp expect .git/config
-'
-
-test_expect_success 'non-match' '
-       git config --get nextsection.nonewline !for
-'
-
-test_expect_success 'non-match value' '
-       echo wow >expect &&
-       git config --get nextsection.nonewline !for >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'multi-valued get returns final one' '
-       echo "wow2 for me" >expect &&
-       git config --get nextsection.nonewline >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'multi-valued get-all returns all' '
-       cat >expect <<-\EOF &&
-       wow
-       wow2 for me
-       EOF
-       git config --get-all nextsection.nonewline >actual &&
-       test_cmp expect actual
-'
-
-cat > expect << EOF
-[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
-
-# empty line
-               ; comment
-[nextSection]
-       nonewline = wow3
-       NoNewLine = wow2 for me
-EOF
-test_expect_success 'multivar replace' '
-       git config nextsection.nonewline "wow3" "wow$" &&
-       test_cmp expect .git/config
-'
-
-test_expect_success 'ambiguous unset' '
-       test_must_fail git config --unset nextsection.nonewline
-'
-
-test_expect_success 'invalid unset' '
-       test_must_fail git config --unset somesection.nonewline
-'
-
-cat > expect << EOF
-[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
-
-# empty line
-               ; comment
-[nextSection]
-       NoNewLine = wow2 for me
-EOF
-
-test_expect_success 'multivar unset' '
-       git config --unset nextsection.nonewline "wow3$" &&
-       test_cmp expect .git/config
-'
-
-test_expect_success 'invalid key' 'test_must_fail git config inval.2key blabla'
-
-test_expect_success 'correct key' 'git config 123456.a123 987'
-
-test_expect_success 'hierarchical section' '
-       git config Version.1.2.3eX.Alpha beta
-'
-
-cat > expect << EOF
-[beta] ; silly comment # another comment
-noIndent= sillyValue ; 'nother silly comment
-
-# empty line
-               ; comment
-[nextSection]
-       NoNewLine = wow2 for me
-[123456]
-       a123 = 987
-[Version "1.2.3eX"]
-       Alpha = beta
-EOF
-
-test_expect_success 'hierarchical section value' '
-       test_cmp expect .git/config
-'
-
-cat > expect << EOF
-beta.noindent=sillyValue
-nextsection.nonewline=wow2 for me
-123456.a123=987
-version.1.2.3eX.alpha=beta
-EOF
-
-test_expect_success 'working --list' '
-       git config --list > output &&
-       test_cmp expect output
-'
-cat > expect << EOF
-EOF
-
-test_expect_success '--list without repo produces empty output' '
-       git --git-dir=nonexistent config --list >output &&
-       test_cmp expect output
-'
-
-cat > expect << EOF
-beta.noindent
-nextsection.nonewline
-123456.a123
-version.1.2.3eX.alpha
-EOF
-
-test_expect_success '--name-only --list' '
-       git config --name-only --list >output &&
-       test_cmp expect output
-'
-
-cat > expect << EOF
-beta.noindent sillyValue
-nextsection.nonewline wow2 for me
-EOF
-
-test_expect_success '--get-regexp' '
-       git config --get-regexp in >output &&
-       test_cmp expect output
-'
-
-cat > expect << EOF
-beta.noindent
-nextsection.nonewline
-EOF
-
-test_expect_success '--name-only --get-regexp' '
-       git config --name-only --get-regexp in >output &&
-       test_cmp expect output
-'
-
-cat > expect << EOF
-wow2 for me
-wow4 for you
-EOF
-
-test_expect_success '--add' '
-       git config --add nextsection.nonewline "wow4 for you" &&
-       git config --get-all nextsection.nonewline > output &&
-       test_cmp expect output
-'
-
-cat > .git/config << EOF
-[novalue]
-       variable
-[emptyvalue]
-       variable =
-EOF
-
-test_expect_success 'get variable with no value' '
-       git config --get novalue.variable ^$
-'
-
-test_expect_success 'get variable with empty value' '
-       git config --get emptyvalue.variable ^$
-'
-
-echo novalue.variable > expect
-
-test_expect_success 'get-regexp variable with no value' '
-       git config --get-regexp novalue > output &&
-       test_cmp expect output
-'
-
-echo 'novalue.variable true' > expect
-
-test_expect_success 'get-regexp --bool variable with no value' '
-       git config --bool --get-regexp novalue > output &&
-       test_cmp expect output
-'
-
-echo 'emptyvalue.variable ' > expect
-
-test_expect_success 'get-regexp variable with empty value' '
-       git config --get-regexp emptyvalue > output &&
-       test_cmp expect output
-'
-
-echo true > expect
-
-test_expect_success 'get bool variable with no value' '
-       git config --bool novalue.variable > output &&
-       test_cmp expect output
-'
-
-echo false > expect
-
-test_expect_success 'get bool variable with empty value' '
-       git config --bool emptyvalue.variable > output &&
-       test_cmp expect output
-'
-
-test_expect_success 'no arguments, but no crash' '
-       test_must_fail git config >output 2>&1 &&
-       test_i18ngrep usage output
-'
-
-cat > .git/config << EOF
-[a.b]
-       c = d
-EOF
-
-cat > expect << EOF
-[a.b]
-       c = d
-[a]
-       x = y
-EOF
-
-test_expect_success 'new section is partial match of another' '
-       git config a.x y &&
-       test_cmp expect .git/config
-'
-
-cat > expect << EOF
-[a.b]
-       c = d
-[a]
-       x = y
-       b = c
-[b]
-       x = y
-EOF
-
-test_expect_success 'new variable inserts into proper section' '
-       git config b.x y &&
-       git config a.b c &&
-       test_cmp expect .git/config
-'
-
-test_expect_success 'alternative --file (non-existing file should fail)' '
-       test_must_fail git config --file non-existing-config -l
-'
-
-cat > other-config << EOF
-[ein]
-       bahn = strasse
-EOF
-
-cat > expect << EOF
-ein.bahn=strasse
-EOF
-
-test_expect_success 'alternative GIT_CONFIG' '
-       GIT_CONFIG=other-config git config --list >output &&
-       test_cmp expect output
-'
-
-test_expect_success 'alternative GIT_CONFIG (--file)' '
-       git config --file other-config --list >output &&
-       test_cmp expect output
-'
-
-test_expect_success 'alternative GIT_CONFIG (--file=-)' '
-       git config --file - --list <other-config >output &&
-       test_cmp expect output
-'
-
-test_expect_success 'setting a value in stdin is an error' '
-       test_must_fail git config --file - some.value foo
-'
-
-test_expect_success 'editing stdin is an error' '
-       test_must_fail git config --file - --edit
-'
-
-test_expect_success 'refer config from subdirectory' '
-       mkdir x &&
-       (
-               cd x &&
-               echo strasse >expect &&
-               git config --get --file ../other-config ein.bahn >actual &&
-               test_cmp expect actual
-       )
-
-'
-
-test_expect_success 'refer config from subdirectory via --file' '
-       (
-               cd x &&
-               git config --file=../other-config --get ein.bahn >actual &&
-               test_cmp expect actual
-       )
-'
-
-cat > expect << EOF
-[ein]
-       bahn = strasse
-[anwohner]
-       park = ausweis
-EOF
-
-test_expect_success '--set in alternative file' '
-       git config --file=other-config anwohner.park ausweis &&
-       test_cmp expect other-config
-'
-
-cat > .git/config << EOF
-# Hallo
-       #Bello
-[branch "eins"]
-       x = 1
-[branch.eins]
-       y = 1
-       [branch "1 234 blabl/a"]
-weird
-EOF
-
-test_expect_success 'rename section' '
-       git config --rename-section branch.eins branch.zwei
-'
-
-cat > expect << EOF
-# Hallo
-       #Bello
-[branch "zwei"]
-       x = 1
-[branch "zwei"]
-       y = 1
-       [branch "1 234 blabl/a"]
-weird
-EOF
-
-test_expect_success 'rename succeeded' '
-       test_cmp expect .git/config
-'
-
-test_expect_success 'rename non-existing section' '
-       test_must_fail git config --rename-section \
-               branch."world domination" branch.drei
-'
-
-test_expect_success 'rename succeeded' '
-       test_cmp expect .git/config
-'
-
-test_expect_success 'rename another section' '
-       git config --rename-section branch."1 234 blabl/a" branch.drei
-'
-
-cat > expect << EOF
-# Hallo
-       #Bello
-[branch "zwei"]
-       x = 1
-[branch "zwei"]
-       y = 1
-[branch "drei"]
-weird
-EOF
-
-test_expect_success 'rename succeeded' '
-       test_cmp expect .git/config
-'
-
-cat >> .git/config << EOF
-[branch "vier"] z = 1
-EOF
-
-test_expect_success 'rename a section with a var on the same line' '
-       git config --rename-section branch.vier branch.zwei
-'
-
-cat > expect << EOF
-# Hallo
-       #Bello
-[branch "zwei"]
-       x = 1
-[branch "zwei"]
-       y = 1
-[branch "drei"]
-weird
-[branch "zwei"]
-       z = 1
-EOF
-
-test_expect_success 'rename succeeded' '
-       test_cmp expect .git/config
-'
-
-test_expect_success 'renaming empty section name is rejected' '
-       test_must_fail git config --rename-section branch.zwei ""
-'
-
-test_expect_success 'renaming to bogus section is rejected' '
-       test_must_fail git config --rename-section branch.zwei "bogus name"
-'
-
-cat >> .git/config << EOF
-  [branch "zwei"] a = 1 [branch "vier"]
-EOF
-
-test_expect_success 'remove section' '
-       git config --remove-section branch.zwei
-'
-
-cat > expect << EOF
-# Hallo
-       #Bello
-[branch "drei"]
-weird
-EOF
-
-test_expect_success 'section was removed properly' '
-       test_cmp expect .git/config
-'
-
-cat > expect << EOF
-[gitcvs]
-       enabled = true
-       dbname = %Ggitcvs2.%a.%m.sqlite
-[gitcvs "ext"]
-       dbname = %Ggitcvs1.%a.%m.sqlite
-EOF
-
-test_expect_success 'section ending' '
-       rm -f .git/config &&
-       git config gitcvs.enabled true &&
-       git config gitcvs.ext.dbname %Ggitcvs1.%a.%m.sqlite &&
-       git config gitcvs.dbname %Ggitcvs2.%a.%m.sqlite &&
-       test_cmp expect .git/config
-
-'
-
-test_expect_success numbers '
-       git config kilo.gram 1k &&
-       git config mega.ton 1m &&
-       echo 1024 >expect &&
-       echo 1048576 >>expect &&
-       git config --int --get kilo.gram >actual &&
-       git config --int --get mega.ton >>actual &&
-       test_cmp expect actual
-'
-
-test_expect_success '--int is at least 64 bits' '
-       git config giga.watts 121g &&
-       echo 129922760704 >expect &&
-       git config --int --get giga.watts >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'invalid unit' '
-       git config aninvalid.unit "1auto" &&
-       echo 1auto >expect &&
-       git config aninvalid.unit >actual &&
-       test_cmp expect actual &&
-       test_must_fail git config --int --get aninvalid.unit 2>actual &&
-       test_i18ngrep "bad numeric config value .1auto. for .aninvalid.unit. in file .git/config: invalid unit" actual
-'
-
-test_expect_success 'line number is reported correctly' '
-       printf "[bool]\n\tvar\n" >invalid &&
-       test_must_fail git config -f invalid --path bool.var 2>actual &&
-       test_i18ngrep "line 2" actual
-'
-
-test_expect_success 'invalid stdin config' '
-       echo "[broken" | test_must_fail git config --list --file - >output 2>&1 &&
-       test_i18ngrep "bad config line 1 in standard input" output
-'
-
-cat > expect << EOF
-true
-false
-true
-false
-true
-false
-true
-false
-EOF
-
-test_expect_success bool '
-
-       git config bool.true1 01 &&
-       git config bool.true2 -1 &&
-       git config bool.true3 YeS &&
-       git config bool.true4 true &&
-       git config bool.false1 000 &&
-       git config bool.false2 "" &&
-       git config bool.false3 nO &&
-       git config bool.false4 FALSE &&
-       rm -f result &&
-       for i in 1 2 3 4
-       do
-           git config --bool --get bool.true$i >>result
-           git config --bool --get bool.false$i >>result
-        done &&
-       test_cmp expect result'
-
-test_expect_success 'invalid bool (--get)' '
-
-       git config bool.nobool foobar &&
-       test_must_fail git config --bool --get bool.nobool'
-
-test_expect_success 'invalid bool (set)' '
-
-       test_must_fail git config --bool bool.nobool foobar'
-
-cat > expect <<\EOF
-[bool]
-       true1 = true
-       true2 = true
-       true3 = true
-       true4 = true
-       false1 = false
-       false2 = false
-       false3 = false
-       false4 = false
-EOF
-
-test_expect_success 'set --bool' '
-
-       rm -f .git/config &&
-       git config --bool bool.true1 01 &&
-       git config --bool bool.true2 -1 &&
-       git config --bool bool.true3 YeS &&
-       git config --bool bool.true4 true &&
-       git config --bool bool.false1 000 &&
-       git config --bool bool.false2 "" &&
-       git config --bool bool.false3 nO &&
-       git config --bool bool.false4 FALSE &&
-       test_cmp expect .git/config'
-
-cat > expect <<\EOF
-[int]
-       val1 = 1
-       val2 = -1
-       val3 = 5242880
-EOF
-
-test_expect_success 'set --int' '
-
-       rm -f .git/config &&
-       git config --int int.val1 01 &&
-       git config --int int.val2 -1 &&
-       git config --int int.val3 5m &&
-       test_cmp expect .git/config
-'
-
-test_expect_success 'get --bool-or-int' '
-       cat >.git/config <<-\EOF &&
-       [bool]
-       true1
-       true2 = true
-       false = false
-       [int]
-       int1 = 0
-       int2 = 1
-       int3 = -1
-       EOF
-       cat >expect <<-\EOF &&
-       true
-       true
-       false
-       0
-       1
-       -1
-       EOF
-       {
-               git config --bool-or-int bool.true1 &&
-               git config --bool-or-int bool.true2 &&
-               git config --bool-or-int bool.false &&
-               git config --bool-or-int int.int1 &&
-               git config --bool-or-int int.int2 &&
-               git config --bool-or-int int.int3
-       } >actual &&
-       test_cmp expect actual
-'
-
-cat >expect <<\EOF
-[bool]
-       true1 = true
-       false1 = false
-       true2 = true
-       false2 = false
-[int]
-       int1 = 0
-       int2 = 1
-       int3 = -1
-EOF
-
-test_expect_success 'set --bool-or-int' '
-       rm -f .git/config &&
-       git config --bool-or-int bool.true1 true &&
-       git config --bool-or-int bool.false1 false &&
-       git config --bool-or-int bool.true2 yes &&
-       git config --bool-or-int bool.false2 no &&
-       git config --bool-or-int int.int1 0 &&
-       git config --bool-or-int int.int2 1 &&
-       git config --bool-or-int int.int3 -1 &&
-       test_cmp expect .git/config
-'
-
-cat >expect <<\EOF
-[path]
-       home = ~/
-       normal = /dev/null
-       trailingtilde = foo~
-EOF
-
-test_expect_success !MINGW 'set --path' '
-       rm -f .git/config &&
-       git config --path path.home "~/" &&
-       git config --path path.normal "/dev/null" &&
-       git config --path path.trailingtilde "foo~" &&
-       test_cmp expect .git/config'
-
-if test_have_prereq !MINGW && test "${HOME+set}"
-then
-       test_set_prereq HOMEVAR
-fi
-
-cat >expect <<EOF
-$HOME/
-/dev/null
-foo~
-EOF
-
-test_expect_success HOMEVAR 'get --path' '
-       git config --get --path path.home > result &&
-       git config --get --path path.normal >> result &&
-       git config --get --path path.trailingtilde >> result &&
-       test_cmp expect result
-'
-
-cat >expect <<\EOF
-/dev/null
-foo~
-EOF
-
-test_expect_success !MINGW 'get --path copes with unset $HOME' '
-       (
-               unset HOME;
-               test_must_fail git config --get --path path.home \
-                       >result 2>msg &&
-               git config --get --path path.normal >>result &&
-               git config --get --path path.trailingtilde >>result
-       ) &&
-       test_i18ngrep "[Ff]ailed to expand.*~/" msg &&
-       test_cmp expect result
-'
-
-test_expect_success 'get --path barfs on boolean variable' '
-       echo "[path]bool" >.git/config &&
-       test_must_fail git config --get --path path.bool
-'
-
-test_expect_success 'get --expiry-date' '
-       rel="3.weeks.5.days.00:00" &&
-       rel_out="$rel ->" &&
-       cat >.git/config <<-\EOF &&
-       [date]
-       valid1 = "3.weeks.5.days 00:00"
-       valid2 = "Fri Jun 4 15:46:55 2010"
-       valid3 = "2017/11/11 11:11:11PM"
-       valid4 = "2017/11/10 09:08:07 PM"
-       valid5 = "never"
-       invalid1 = "abc"
-       EOF
-       cat >expect <<-EOF &&
-       $(test-date timestamp $rel)
-       1275666415
-       1510441871
-       1510348087
-       0
-       EOF
-       {
-               echo "$rel_out $(git config --expiry-date date.valid1)"
-               git config --expiry-date date.valid2 &&
-               git config --expiry-date date.valid3 &&
-               git config --expiry-date date.valid4 &&
-               git config --expiry-date date.valid5
-       } >actual &&
-       test_cmp expect actual &&
-       test_must_fail git config --expiry-date date.invalid1
-'
-
-cat > expect << EOF
-[quote]
-       leading = " test"
-       ending = "test "
-       semicolon = "test;test"
-       hash = "test#test"
-EOF
-test_expect_success 'quoting' '
-       rm -f .git/config &&
-       git config quote.leading " test" &&
-       git config quote.ending "test " &&
-       git config quote.semicolon "test;test" &&
-       git config quote.hash "test#test" &&
-       test_cmp expect .git/config
-'
-
-test_expect_success 'key with newline' '
-       test_must_fail git config "key.with
-newline" 123'
-
-test_expect_success 'value with newline' 'git config key.sub value.with\\\
-newline'
-
-cat > .git/config <<\EOF
-[section]
-       ; comment \
-       continued = cont\
-inued
-       noncont   = not continued ; \
-       quotecont = "cont;\
-inued"
-EOF
-
-cat > expect <<\EOF
-section.continued=continued
-section.noncont=not continued
-section.quotecont=cont;inued
-EOF
-
-test_expect_success 'value continued on next line' '
-       git config --list > result &&
-       test_cmp result expect
-'
-
-cat > .git/config <<\EOF
-[section "sub=section"]
-       val1 = foo=bar
-       val2 = foo\nbar
-       val3 = \n\n
-       val4 =
-       val5
-EOF
-
-cat > expect <<\EOF
-section.sub=section.val1
-foo=barQsection.sub=section.val2
-foo
-barQsection.sub=section.val3
-
-
-Qsection.sub=section.val4
-Qsection.sub=section.val5Q
-EOF
-test_expect_success '--null --list' '
-       git config --null --list >result.raw &&
-       nul_to_q <result.raw >result &&
-       echo >>result &&
-       test_cmp expect result
-'
-
-test_expect_success '--null --get-regexp' '
-       git config --null --get-regexp "val[0-9]" >result.raw &&
-       nul_to_q <result.raw >result &&
-       echo >>result &&
-       test_cmp expect result
-'
-
-test_expect_success 'inner whitespace kept verbatim' '
-       git config section.val "foo       bar" &&
-       echo "foo       bar" >expect &&
-       git config section.val >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success SYMLINKS 'symlinked configuration' '
-       ln -s notyet myconfig &&
-       git config --file=myconfig test.frotz nitfol &&
-       test -h myconfig &&
-       test -f notyet &&
-       test "z$(git config --file=notyet test.frotz)" = znitfol &&
-       git config --file=myconfig test.xyzzy rezrov &&
-       test -h myconfig &&
-       test -f notyet &&
-       cat >expect <<-\EOF &&
-       nitfol
-       rezrov
-       EOF
-       {
-               git config --file=notyet test.frotz &&
-               git config --file=notyet test.xyzzy
-       } >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'nonexistent configuration' '
-       test_must_fail git config --file=doesnotexist --list &&
-       test_must_fail git config --file=doesnotexist test.xyzzy
-'
-
-test_expect_success SYMLINKS 'symlink to nonexistent configuration' '
-       ln -s doesnotexist linktonada &&
-       ln -s linktonada linktolinktonada &&
-       test_must_fail git config --file=linktonada --list &&
-       test_must_fail git config --file=linktolinktonada --list
-'
-
-test_expect_success 'check split_cmdline return' "
-       git config alias.split-cmdline-fix 'echo \"' &&
-       test_must_fail git split-cmdline-fix &&
-       echo foo > foo &&
-       git add foo &&
-       git commit -m 'initial commit' &&
-       git config branch.master.mergeoptions 'echo \"' &&
-       test_must_fail git merge master
-"
-
-test_expect_success 'git -c "key=value" support' '
-       cat >expect <<-\EOF &&
-       value
-       value
-       true
-       EOF
-       {
-               git -c core.name=value config core.name &&
-               git -c foo.CamelCase=value config foo.camelcase &&
-               git -c foo.flag config --bool foo.flag
-       } >actual &&
-       test_cmp expect actual &&
-       test_must_fail git -c name=value config core.name
-'
-
-# We just need a type-specifier here that cares about the
-# internal distinction between a NULL boolean and a real
-# string (because most of git's internal parsers do care).
-# Using "--path" works, but we do not otherwise care about
-# its semantics.
-test_expect_success 'git -c can represent empty string' '
-       echo >expect &&
-       git -c foo.empty= config --path foo.empty >actual &&
-       test_cmp expect actual
-'
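
A minimal illustrative sketch of the distinction the comment above describes; the behaviour of the valueless form is an inference from the "get --path barfs on boolean variable" test earlier in this file:

	git -c foo.empty  config --path foo.empty    # NULL boolean: nothing for --path to expand, so this fails
	git -c foo.empty= config --path foo.empty    # real (empty) string: prints an empty line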
-
-test_expect_success 'key sanity-checking' '
-       test_must_fail git config foo=bar &&
-       test_must_fail git config foo=.bar &&
-       test_must_fail git config foo.ba=r &&
-       test_must_fail git config foo.1bar &&
-       test_must_fail git config foo."ba
-                               z".bar &&
-       test_must_fail git config . false &&
-       test_must_fail git config .foo false &&
-       test_must_fail git config foo. false &&
-       test_must_fail git config .foo. false &&
-       git config foo.bar true &&
-       git config foo."ba =z".bar false
-'
-
-test_expect_success 'git -c works with aliases of builtins' '
-       git config alias.checkconfig "-c foo.check=bar config foo.check" &&
-       echo bar >expect &&
-       git checkconfig >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'aliases can be CamelCased' '
-       test_config alias.CamelCased "rev-parse HEAD" &&
-       git CamelCased >out &&
-       git rev-parse HEAD >expect &&
-       test_cmp expect out
-'
-
-test_expect_success 'git -c does not split values on equals' '
-       echo "value with = in it" >expect &&
-       git -c core.foo="value with = in it" config core.foo >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'git -c dies on bogus config' '
-       test_must_fail git -c core.bare=foo rev-parse
-'
-
-test_expect_success 'git -c complains about empty key' '
-       test_must_fail git -c "=foo" rev-parse
-'
-
-test_expect_success 'git -c complains about empty key and value' '
-       test_must_fail git -c "" rev-parse
-'
-
-test_expect_success 'multiple git -c appends config' '
-       test_config alias.x "!git -c x.two=2 config --get-regexp ^x\.*" &&
-       cat >expect <<-\EOF &&
-       x.one 1
-       x.two 2
-       EOF
-       git -c x.one=1 x >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'last one wins: two level vars' '
-
-       # sec.var and sec.VAR are the same variable, as the first
-       # and last levels of a configuration variable name are
-       # case insensitive.
-
-       echo VAL >expect &&
-
-       git -c sec.var=val -c sec.VAR=VAL config --get sec.var >actual &&
-       test_cmp expect actual &&
-       git -c SEC.var=val -c sec.var=VAL config --get sec.var >actual &&
-       test_cmp expect actual &&
-
-       git -c sec.var=val -c sec.VAR=VAL config --get SEC.var >actual &&
-       test_cmp expect actual &&
-       git -c SEC.var=val -c sec.var=VAL config --get sec.VAR >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'last one wins: three level vars' '
-
-       # v.a.r and v.A.r are not the same variable, as the middle
-       # level of a three-level configuration variable name is
-       # case sensitive.
-
-       echo val >expect &&
-       git -c v.a.r=val -c v.A.r=VAL config --get v.a.r >actual &&
-       test_cmp expect actual &&
-       git -c v.a.r=val -c v.A.r=VAL config --get V.a.R >actual &&
-       test_cmp expect actual &&
-
-       # v.a.r and V.a.R are the same variable, as the first
-       # and last levels of a configuration variable name are
-       # case insensitive.
-
-       echo VAL >expect &&
-       git -c v.a.r=val -c v.a.R=VAL config --get v.a.r >actual &&
-       test_cmp expect actual &&
-       git -c v.a.r=val -c V.a.r=VAL config --get v.a.r >actual &&
-       test_cmp expect actual &&
-       git -c v.a.r=val -c v.a.R=VAL config --get V.a.R >actual &&
-       test_cmp expect actual &&
-       git -c v.a.r=val -c V.a.r=VAL config --get V.a.R >actual &&
-       test_cmp expect actual
-'
-
-for VAR in a .a a. a.0b a."b c". a."b c".0d
-do
-       test_expect_success "git -c $VAR=VAL rejects invalid '$VAR'" '
-               test_must_fail git -c "$VAR=VAL" config -l
-       '
-done
-
-for VAR in a.b a."b c".d
-do
-       test_expect_success "git -c $VAR=VAL works with valid '$VAR'" '
-               echo VAL >expect &&
-               git -c "$VAR=VAL" config --get "$VAR" >actual &&
-               test_cmp expect actual
-       '
-done
-
-test_expect_success 'git -c is not confused by empty environment' '
-       GIT_CONFIG_PARAMETERS="" git -c x.one=1 config --list
-'
-
-sq="'"
-test_expect_success 'detect bogus GIT_CONFIG_PARAMETERS' '
-       cat >expect <<-\EOF &&
-       env.one one
-       env.two two
-       EOF
-       GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq} ${sq}env.two=two${sq}" \
-               git config --get-regexp "env.*" >actual &&
-       test_cmp expect actual &&
-
-       cat >expect <<-EOF &&
-       env.one one${sq}
-       env.two two
-       EOF
-       GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq$sq$sq ${sq}env.two=two${sq}" \
-               git config --get-regexp "env.*" >actual &&
-       test_cmp expect actual &&
-
-       test_must_fail env \
-               GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq ${sq}env.two=two${sq}" \
-               git config --get-regexp "env.*"
-'
-
-test_expect_success 'git config --edit works' '
-       git config -f tmp test.value no &&
-       echo test.value=yes >expect &&
-       GIT_EDITOR="echo [test]value=yes >" git config -f tmp --edit &&
-       git config -f tmp --list >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'git config --edit respects core.editor' '
-       git config -f tmp test.value no &&
-       echo test.value=yes >expect &&
-       test_config core.editor "echo [test]value=yes >" &&
-       git config -f tmp --edit &&
-       git config -f tmp --list >actual &&
-       test_cmp expect actual
-'
-
-# malformed configuration files
-test_expect_success 'barf on syntax error' '
-       cat >.git/config <<-\EOF &&
-       # broken section line
-       [section]
-       key garbage
-       EOF
-       test_must_fail git config --get section.key >actual 2>error &&
-       test_i18ngrep " line 3 " error
-'
-
-test_expect_success 'barf on incomplete section header' '
-       cat >.git/config <<-\EOF &&
-       # broken section line
-       [section
-       key = value
-       EOF
-       test_must_fail git config --get section.key >actual 2>error &&
-       test_i18ngrep " line 2 " error
-'
-
-test_expect_success 'barf on incomplete string' '
-       cat >.git/config <<-\EOF &&
-       # broken section line
-       [section]
-       key = "value string
-       EOF
-       test_must_fail git config --get section.key >actual 2>error &&
-       test_i18ngrep " line 3 " error
-'
-
-test_expect_success 'urlmatch' '
-       cat >.git/config <<-\EOF &&
-       [http]
-               sslVerify
-       [http "https://weak.example.com"]
-               sslVerify = false
-               cookieFile = /tmp/cookie.txt
-       EOF
-
-       test_expect_code 1 git config --bool --get-urlmatch doesnt.exist https://good.example.com >actual &&
-       test_must_be_empty actual &&
-
-       echo true >expect &&
-       git config --bool --get-urlmatch http.SSLverify https://good.example.com >actual &&
-       test_cmp expect actual &&
-
-       echo false >expect &&
-       git config --bool --get-urlmatch http.sslverify https://weak.example.com >actual &&
-       test_cmp expect actual &&
-
-       {
-               echo http.cookiefile /tmp/cookie.txt &&
-               echo http.sslverify false
-       } >expect &&
-       git config --get-urlmatch HTTP https://weak.example.com >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'urlmatch favors more specific URLs' '
-       cat >.git/config <<-\EOF &&
-       [http "https://example.com/"]
-               cookieFile = /tmp/root.txt
-       [http "https://example.com/subdirectory"]
-               cookieFile = /tmp/subdirectory.txt
-       [http "https://user@example.com/"]
-               cookieFile = /tmp/user.txt
-       [http "https://averylonguser@example.com/"]
-               cookieFile = /tmp/averylonguser.txt
-       [http "https://preceding.example.com"]
-               cookieFile = /tmp/preceding.txt
-       [http "https://*.example.com"]
-               cookieFile = /tmp/wildcard.txt
-       [http "https://*.example.com/wildcardwithsubdomain"]
-               cookieFile = /tmp/wildcardwithsubdomain.txt
-       [http "https://trailing.example.com"]
-               cookieFile = /tmp/trailing.txt
-       [http "https://user@*.example.com/"]
-               cookieFile = /tmp/wildcardwithuser.txt
-       [http "https://sub.example.com/"]
-               cookieFile = /tmp/sub.txt
-       EOF
-
-       echo http.cookiefile /tmp/root.txt >expect &&
-       git config --get-urlmatch HTTP https://example.com >actual &&
-       test_cmp expect actual &&
-
-       echo http.cookiefile /tmp/subdirectory.txt >expect &&
-       git config --get-urlmatch HTTP https://example.com/subdirectory >actual &&
-       test_cmp expect actual &&
-
-       echo http.cookiefile /tmp/subdirectory.txt >expect &&
-       git config --get-urlmatch HTTP https://example.com/subdirectory/nested >actual &&
-       test_cmp expect actual &&
-
-       echo http.cookiefile /tmp/user.txt >expect &&
-       git config --get-urlmatch HTTP https://user@example.com/ >actual &&
-       test_cmp expect actual &&
-
-       echo http.cookiefile /tmp/subdirectory.txt >expect &&
-       git config --get-urlmatch HTTP https://averylonguser@example.com/subdirectory >actual &&
-       test_cmp expect actual &&
-
-       echo http.cookiefile /tmp/preceding.txt >expect &&
-       git config --get-urlmatch HTTP https://preceding.example.com >actual &&
-       test_cmp expect actual &&
-
-       echo http.cookiefile /tmp/wildcard.txt >expect &&
-       git config --get-urlmatch HTTP https://wildcard.example.com >actual &&
-       test_cmp expect actual &&
-
-       echo http.cookiefile /tmp/sub.txt >expect &&
-       git config --get-urlmatch HTTP https://sub.example.com/wildcardwithsubdomain >actual &&
-       test_cmp expect actual &&
-
-       echo http.cookiefile /tmp/trailing.txt >expect &&
-       git config --get-urlmatch HTTP https://trailing.example.com >actual &&
-       test_cmp expect actual &&
-
-       echo http.cookiefile /tmp/sub.txt >expect &&
-       git config --get-urlmatch HTTP https://user@sub.example.com >actual &&
-       test_cmp expect actual
-'
-
-test_expect_success 'urlmatch with wildcard' '
-       cat >.git/config <<-\EOF &&
-       [http]
-               sslVerify
-       [http "https://*.example.com"]
-               sslVerify = false
-               cookieFile = /tmp/cookie.txt
-       EOF
-
-       test_expect_code 1 git config --bool --get-urlmatch doesnt.exist https://good.example.com >actual &&
-       test_must_be_empty actual &&
-
-       echo true >expect &&
-       git config --bool --get-urlmatch http.SSLverify https://example.com >actual &&
-       test_cmp expect actual &&
-
-       echo true >expect &&
-       git config --bool --get-urlmatch http.SSLverify https://good-example.com >actual &&
-       test_cmp expect actual &&
-
-       echo true >expect &&
-       git config --bool --get-urlmatch http.sslverify https://deep.nested.example.com >actual &&
-       test_cmp expect actual &&
-
-       echo false >expect &&
-       git config --bool --get-urlmatch http.sslverify https://good.example.com >actual &&
-       test_cmp expect actual &&
-
-       {
-               echo http.cookiefile /tmp/cookie.txt &&
-               echo http.sslverify false
-       } >expect &&
-       git config --get-urlmatch HTTP https://good.example.com >actual &&
-       test_cmp expect actual &&
-
-       echo http.sslverify >expect &&
-       git config --get-urlmatch HTTP https://more.example.com.au >actual &&
-       test_cmp expect actual
-'
-
-# good section hygiene
-test_expect_failure 'unsetting the last key in a section removes header' '
-       cat >.git/config <<-\EOF &&
-       # some generic comment on the configuration file itself
-       # a comment specific to this "section" section.
-       [section]
-       # some intervening lines
-       # that should also be dropped
-
-       key = value
-       # please be careful when you update the above variable
-       EOF
-
-       cat >expect <<-\EOF &&
-       # some generic comment on the configuration file itself
-       EOF
-
-       git config --unset section.key &&
-       test_cmp expect .git/config
-'
-
-test_expect_failure 'adding a key into an empty section reuses header' '
-       cat >.git/config <<-\EOF &&
-       [section]
-       EOF
-
-       q_to_tab >expect <<-\EOF &&
-       [section]
-       Qkey = value
-       EOF
-
-       git config section.key value &&
-       test_cmp expect .git/config
-'
-
-test_expect_success POSIXPERM,PERL 'preserves existing permissions' '
-       chmod 0600 .git/config &&
-       git config imap.pass Hunter2 &&
-       perl -e \
-         "die q(badset) if ((stat(q(.git/config)))[2] & 07777) != 0600" &&
-       git config --rename-section imap pop &&
-       perl -e \
-         "die q(badrename) if ((stat(q(.git/config)))[2] & 07777) != 0600"
-'
-
-! test_have_prereq MINGW ||
-HOME="$(pwd)" # convert to Windows path
-
-test_expect_success 'set up --show-origin tests' '
-       INCLUDE_DIR="$HOME/include" &&
-       mkdir -p "$INCLUDE_DIR" &&
-       cat >"$INCLUDE_DIR"/absolute.include <<-\EOF &&
-               [user]
-                       absolute = include
-       EOF
-       cat >"$INCLUDE_DIR"/relative.include <<-\EOF &&
-               [user]
-                       relative = include
-       EOF
-       cat >"$HOME"/.gitconfig <<-EOF &&
-               [user]
-                       global = true
-                       override = global
-               [include]
-                       path = "$INCLUDE_DIR/absolute.include"
-       EOF
-       cat >.git/config <<-\EOF
-               [user]
-                       local = true
-                       override = local
-               [include]
-                       path = ../include/relative.include
-       EOF
-'
-
-test_expect_success '--show-origin with --list' '
-       cat >expect <<-EOF &&
-               file:$HOME/.gitconfig   user.global=true
-               file:$HOME/.gitconfig   user.override=global
-               file:$HOME/.gitconfig   include.path=$INCLUDE_DIR/absolute.include
-               file:$INCLUDE_DIR/absolute.include      user.absolute=include
-               file:.git/config        user.local=true
-               file:.git/config        user.override=local
-               file:.git/config        include.path=../include/relative.include
-               file:.git/../include/relative.include   user.relative=include
-               command line:   user.cmdline=true
-       EOF
-       git -c user.cmdline=true config --list --show-origin >output &&
-       test_cmp expect output
-'
-
-test_expect_success '--show-origin with --list --null' '
-       cat >expect <<-EOF &&
-               file:$HOME/.gitconfigQuser.global
-               trueQfile:$HOME/.gitconfigQuser.override
-               globalQfile:$HOME/.gitconfigQinclude.path
-               $INCLUDE_DIR/absolute.includeQfile:$INCLUDE_DIR/absolute.includeQuser.absolute
-               includeQfile:.git/configQuser.local
-               trueQfile:.git/configQuser.override
-               localQfile:.git/configQinclude.path
-               ../include/relative.includeQfile:.git/../include/relative.includeQuser.relative
-               includeQcommand line:Quser.cmdline
-               trueQ
-       EOF
-       git -c user.cmdline=true config --null --list --show-origin >output.raw &&
-       nul_to_q <output.raw >output &&
-       # The here-doc above adds a newline that the --null output would not
-       # include. Add it here to make the two comparable.
-       echo >>output &&
-       test_cmp expect output
-'
-
-test_expect_success '--show-origin with single file' '
-       cat >expect <<-\EOF &&
-               file:.git/config        user.local=true
-               file:.git/config        user.override=local
-               file:.git/config        include.path=../include/relative.include
-       EOF
-       git config --local --list --show-origin >output &&
-       test_cmp expect output
-'
-
-test_expect_success '--show-origin with --get-regexp' '
-       cat >expect <<-EOF &&
-               file:$HOME/.gitconfig   user.global true
-               file:.git/config        user.local true
-       EOF
-       git config --show-origin --get-regexp "user\.[g|l].*" >output &&
-       test_cmp expect output
-'
-
-test_expect_success '--show-origin getting a single key' '
-       cat >expect <<-\EOF &&
-               file:.git/config        local
-       EOF
-       git config --show-origin user.override >output &&
-       test_cmp expect output
-'
-
-test_expect_success 'set up custom config file' '
-       CUSTOM_CONFIG_FILE="file\" (dq) and spaces.conf" &&
-       cat >"$CUSTOM_CONFIG_FILE" <<-\EOF
-               [user]
-                       custom = true
-       EOF
-'
-
-test_expect_success !MINGW '--show-origin escape special file name characters' '
-       cat >expect <<-\EOF &&
-               file:"file\" (dq) and spaces.conf"      user.custom=true
-       EOF
-       git config --file "$CUSTOM_CONFIG_FILE" --show-origin --list >output &&
-       test_cmp expect output
-'
-
-test_expect_success '--show-origin stdin' '
-       cat >expect <<-\EOF &&
-               standard input: user.custom=true
-       EOF
-       git config --file - --show-origin --list <"$CUSTOM_CONFIG_FILE" >output &&
-       test_cmp expect output
-'
-
-test_expect_success '--show-origin stdin with file include' '
-       cat >"$INCLUDE_DIR"/stdin.include <<-EOF &&
-               [user]
-                       stdin = include
-       EOF
-       cat >expect <<-EOF &&
-               file:$INCLUDE_DIR/stdin.include include
-       EOF
-       echo "[include]path=\"$INCLUDE_DIR\"/stdin.include" \
-               | git config --show-origin --includes --file - user.stdin >output &&
-       test_cmp expect output
-'
-
-test_expect_success !MINGW '--show-origin blob' '
-       cat >expect <<-\EOF &&
-               blob:a9d9f9e555b5c6f07cbe09d3f06fe3df11e09c08   user.custom=true
-       EOF
-       blob=$(git hash-object -w "$CUSTOM_CONFIG_FILE") &&
-       git config --blob=$blob --show-origin --list >output &&
-       test_cmp expect output
-'
-
-test_expect_success !MINGW '--show-origin blob ref' '
-       cat >expect <<-\EOF &&
-               blob:"master:file\" (dq) and spaces.conf"       user.custom=true
-       EOF
-       git add "$CUSTOM_CONFIG_FILE" &&
-       git commit -m "new config file" &&
-       git config --blob=master:"$CUSTOM_CONFIG_FILE" --show-origin --list >output &&
-       test_cmp expect output
-'
-
-test_expect_success '--local requires a repo' '
-       # we expect 128 to ensure that we do not simply
-       # fail to find anything and return code "1"
-       test_expect_code 128 nongit git config --local foo.bar
-'
-
-test_done
index f5422f1d33f5eac98e6f56ec4bf05f3f8d4c8be2..335d3f3211aa874fd3a8e0d0006dd9fc53a4e589 100755 (executable)
@@ -54,7 +54,7 @@ test_expect_success SETFACL 'Setup test repo' '
 
 test_expect_success SETFACL 'Objects creation does not break ACLs with restrictive umask' '
        # SHA1 for empty blob
-       check_perms_and_acl .git/objects/e6/9de29bb2d1d6434b8b29ae775ad8c2e48c5391
+       check_perms_and_acl .git/objects/$(echo $EMPTY_BLOB | sed -e "s,^\(..\),\1/,")
 '
 
 test_expect_success SETFACL 'git gc does not break ACLs with restrictive umask' '
index d9d2f545a4ed735e02f7d5cd6ceee7d873fdec94..f035ee40a313ae25a13bdb6a838eed3b34fd5f9d 100755 (executable)
@@ -224,7 +224,7 @@ test_expect_success 'conditional include, early config reading' '
                echo "[includeIf \"gitdir:foo/\"]path=bar6" >>.git/config &&
                echo "[test]six=6" >.git/bar6 &&
                echo 6 >expect &&
-               test-config read_early_config test.six >actual &&
+               test-tool config read_early_config test.six >actual &&
                test_cmp expect actual
        )
 '
index bafed5c9b88481992ca6cf4cd6c13596c7baf16b..3e00d1af01fb771bd3fd26dcb265a3d80da37934 100755 (executable)
@@ -18,7 +18,7 @@ check_config () {
        then
                printf "%s\n" "$@"
        fi >expect &&
-       test_expect_code $expect_code test-config "$op" "$key" >actual &&
+       test_expect_code $expect_code test-tool config "$op" "$key" >actual &&
        test_cmp expect actual
 }
 
@@ -125,7 +125,7 @@ test_expect_success 'find string value for a key' '
 '
 
 test_expect_success 'check line error when NULL string is queried' '
-       test_expect_code 128 test-config get_string case.foo 2>result &&
+       test_expect_code 128 test-tool config get_string case.foo 2>result &&
        test_i18ngrep "fatal: .*case\.foo.*\.git/config.*line 7" result
 '
 
@@ -155,13 +155,13 @@ test_expect_success 'find value from a configset' '
                baz = ball
        EOF
        echo silk >expect &&
-       test-config configset_get_value my.new config2 .git/config >actual &&
+       test-tool config configset_get_value my.new config2 .git/config >actual &&
        test_cmp expect actual
 '
 
 test_expect_success 'find value with highest priority from a configset' '
        echo hask >expect &&
-       test-config configset_get_value case.baz config2 .git/config >actual &&
+       test-tool config configset_get_value case.baz config2 .git/config >actual &&
        test_cmp expect actual
 '
 
@@ -173,20 +173,20 @@ test_expect_success 'find value_list for a key from a configset' '
        lama
        ball
        EOF
-       test-config configset_get_value case.baz config2 .git/config >actual &&
+       test-tool config configset_get_value case.baz config2 .git/config >actual &&
        test_cmp expect actual
 '
 
 test_expect_success 'proper error on non-existent files' '
        echo "Error (-1) reading configuration file non-existent-file." >expect &&
-       test_expect_code 2 test-config configset_get_value foo.bar non-existent-file 2>actual &&
+       test_expect_code 2 test-tool config configset_get_value foo.bar non-existent-file 2>actual &&
        test_cmp expect actual
 '
 
 test_expect_success 'proper error on directory "files"' '
        echo "Error (-1) reading configuration file a-directory." >expect &&
        mkdir a-directory &&
-       test_expect_code 2 test-config configset_get_value foo.bar a-directory 2>output &&
+       test_expect_code 2 test-tool config configset_get_value foo.bar a-directory 2>output &&
        grep "^warning:" output &&
        grep "^Error" output >actual &&
        test_cmp expect actual
@@ -196,7 +196,7 @@ test_expect_success POSIXPERM,SANITY 'proper error on non-accessible files' '
        chmod -r .git/config &&
        test_when_finished "chmod +r .git/config" &&
        echo "Error (-1) reading configuration file .git/config." >expect &&
-       test_expect_code 2 test-config configset_get_value foo.bar .git/config 2>output &&
+       test_expect_code 2 test-tool config configset_get_value foo.bar .git/config 2>output &&
        grep "^warning:" output &&
        grep "^Error" output >actual &&
        test_cmp expect actual
@@ -207,14 +207,14 @@ test_expect_success 'proper error on error in default config files' '
        test_when_finished "mv .git/config.old .git/config" &&
        echo "[" >>.git/config &&
        echo "fatal: bad config line 34 in file .git/config" >expect &&
-       test_expect_code 128 test-config get_value foo.bar 2>actual &&
+       test_expect_code 128 test-tool config get_value foo.bar 2>actual &&
        test_i18ncmp expect actual
 '
 
 test_expect_success 'proper error on error in custom config files' '
        echo "[" >>syntax-error &&
        echo "fatal: bad config line 1 in file syntax-error" >expect &&
-       test_expect_code 128 test-config configset_get_value foo.bar syntax-error 2>actual &&
+       test_expect_code 128 test-tool config configset_get_value foo.bar syntax-error 2>actual &&
        test_i18ncmp expect actual
 '
 
@@ -267,7 +267,7 @@ test_expect_success 'iteration shows correct origins' '
        name=
        scope=cmdline
        EOF
-       GIT_CONFIG_PARAMETERS=$cmdline_config test-config iterate >actual &&
+       GIT_CONFIG_PARAMETERS=$cmdline_config test-tool config iterate >actual &&
        test_cmp expect actual
 '
 
index 3dda215e8e2f37c049a3169cecdb3e43ddea5dfb..413642aa5672800d1b7be448bc97d175add07ee4 100755 (executable)
@@ -6,7 +6,7 @@ test_description='Test read_early_config()'
 
 test_expect_success 'read early config' '
        test_config early.config correct &&
-       test-config read_early_config early.config >output &&
+       test-tool config read_early_config early.config >output &&
        test correct = "$(cat output)"
 '
 
@@ -15,7 +15,7 @@ test_expect_success 'in a sub-directory' '
        mkdir -p sub &&
        (
                cd sub &&
-               test-config read_early_config early.config
+               test-tool config read_early_config early.config
        ) >output &&
        test sub = "$(cat output)"
 '
@@ -27,7 +27,7 @@ test_expect_success 'ceiling' '
                GIT_CEILING_DIRECTORIES="$PWD" &&
                export GIT_CEILING_DIRECTORIES &&
                cd sub &&
-               test-config read_early_config early.config
+               test-tool config read_early_config early.config
        ) >output &&
        test -z "$(cat output)"
 '
@@ -42,7 +42,7 @@ test_expect_success 'ceiling #2' '
                GIT_CEILING_DIRECTORIES="$PWD" &&
                export GIT_CEILING_DIRECTORIES XDG_CONFIG_HOME &&
                cd sub &&
-               test-config read_early_config early.config
+               test-tool config read_early_config early.config
        ) >output &&
        test xdg = "$(cat output)"
 '
@@ -54,7 +54,7 @@ test_expect_success 'read config file in right order' '
        (
                cd foo &&
                echo "[test]source = repo" >>.git/config &&
-               GIT_CONFIG_PARAMETERS=$cmdline_config test-config \
+               GIT_CONFIG_PARAMETERS=$cmdline_config test-tool config \
                        read_early_config test.source >actual &&
                cat >expected <<-\EOF &&
                home
@@ -71,7 +71,7 @@ test_with_config () {
        (
                cd throwaway &&
                echo "$*" >.git/config &&
-               test-config read_early_config early.config
+               test-tool config read_early_config early.config
        )
 }
 
diff --git a/t/t1310-config-default.sh b/t/t1310-config-default.sh
new file mode 100755 (executable)
index 0000000..6049d91
--- /dev/null
@@ -0,0 +1,36 @@
+#!/bin/sh
+
+test_description='Test git config in different settings (with --default)'
+
+. ./test-lib.sh
+
+test_expect_success 'uses --default when entry missing' '
+       echo quux >expect &&
+       git config -f config --default=quux core.foo >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'does not use --default when entry present' '
+       echo bar >expect &&
+       git -c core.foo=bar config --default=baz core.foo >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'canonicalizes --default with appropriate type' '
+       echo true >expect &&
+       git config -f config --default=yes --bool core.foo >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'dies when --default cannot be parsed' '
+       test_must_fail git config -f config --type=expiry-date --default=x --get \
+               not.a.section 2>error &&
+       test_i18ngrep "failed to format default config value" error
+'
+
+test_expect_success 'does not allow --default without --get' '
+       test_must_fail git config --default=quux --unset a.section >output 2>&1 &&
+       test_i18ngrep "\-\-default is only applicable to" output
+'
+
+test_done
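
A hedged usage sketch of the --default option these new tests exercise; the key name and fallback value are made up for illustration, and --default only takes effect together with --get:

	git config --type=int --default=100 --get custom.limit                      # prints 100 when custom.limit is unset
	git -c custom.limit=5 config --type=int --default=100 --get custom.limit    # prints 5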
index e8115df5bad8d9ef46542cfd3c96c32da0bfa4ad..a74c38b5fb22a6d274c26ed008c6bec355801e34 100755 (executable)
@@ -4,7 +4,7 @@ test_description='test main ref store api'
 
 . ./test-lib.sh
 
-RUN="test-ref-store main"
+RUN="test-tool ref-store main"
 
 test_expect_success 'pack_refs(PACK_REFS_ALL | PACK_REFS_PRUNE)' '
        test_commit one &&
@@ -45,7 +45,7 @@ test_expect_success 'rename_refs(master, new-master)' '
 '
 
 test_expect_success 'for_each_ref(refs/heads/)' '
-       $RUN for-each-ref refs/heads/ | cut -c 42- >actual &&
+       $RUN for-each-ref refs/heads/ | cut -d" " -f 2- >actual &&
        cat >expected <<-\EOF &&
        master 0x0
        new-master 0x0
@@ -71,7 +71,7 @@ test_expect_success 'verify_ref(new-master)' '
 '
 
 test_expect_success 'for_each_reflog()' '
-       $RUN for-each-reflog | sort | cut -c 42- >actual &&
+       $RUN for-each-reflog | sort -k2 | cut -c 42- >actual &&
        cat >expected <<-\EOF &&
        HEAD 0x1
        refs/heads/master 0x0
index c32d4cc4652a4496ce8fa6aa1c10c797ae7d760a..e093782cc37c495a122eb8676797b1988b828c29 100755 (executable)
@@ -4,7 +4,7 @@ test_description='test submodule ref store api'
 
 . ./test-lib.sh
 
-RUN="test-ref-store submodule:sub"
+RUN="test-tool ref-store submodule:sub"
 
 test_expect_success 'setup' '
        git init sub &&
index 8842d0329fb7947e811d4ffccff3bba9e8885d5d..2211f9831fb07f933c8c3f0c7cfd89cc5512c253 100755 (executable)
@@ -4,8 +4,8 @@ test_description='test worktree ref store api'
 
 . ./test-lib.sh
 
-RWT="test-ref-store worktree:wt"
-RMAIN="test-ref-store worktree:main"
+RWT="test-tool ref-store worktree:wt"
+RMAIN="test-tool ref-store worktree:main"
 
 test_expect_success 'setup' '
        test_commit first &&
index 6ac7734d79be21a82feeadff10064bb4ca7ad47b..596907758d5d47ae8026098a2ce4acd8c01df6aa 100755 (executable)
@@ -10,6 +10,7 @@ test_expect_success 'setup' '
        git commit -m one
 '
 
+commit=$(git rev-parse --short HEAD)
 cat >expect <<'EOF'
 Reflog: HEAD@{0} (C O Mitter <committer@example.com>)
 Reflog message: commit (initial): one
@@ -20,8 +21,8 @@ test_expect_success 'log -g shows reflog headers' '
        test_cmp expect actual
 '
 
-cat >expect <<'EOF'
-e46513e HEAD@{0}: commit (initial): one
+cat >expect <<EOF
+$commit HEAD@{0}: commit (initial): one
 EOF
 test_expect_success 'oneline reflog format' '
        git log -g -1 --oneline >actual &&
@@ -33,8 +34,8 @@ test_expect_success 'reflog default format' '
        test_cmp expect actual
 '
 
-cat >expect <<'EOF'
-commit e46513e
+cat >expect <<EOF
+commit $commit
 Reflog: HEAD@{0} (C O Mitter <committer@example.com>)
 Reflog message: commit (initial): one
 Author: A U Thor <author@example.com>
@@ -56,8 +57,8 @@ test_expect_success 'using @{now} syntax shows reflog date (multiline)' '
        test_cmp expect actual
 '
 
-cat >expect <<'EOF'
-e46513e HEAD@{Thu Apr 7 15:13:13 2005 -0700}: commit (initial): one
+cat >expect <<EOF
+$commit HEAD@{Thu Apr 7 15:13:13 2005 -0700}: commit (initial): one
 EOF
 test_expect_success 'using @{now} syntax shows reflog date (oneline)' '
        git log -g -1 --oneline HEAD@{now} >actual &&
@@ -82,8 +83,8 @@ test_expect_success 'using --date= shows reflog date (multiline)' '
        test_cmp expect actual
 '
 
-cat >expect <<'EOF'
-e46513e HEAD@{Thu Apr 7 15:13:13 2005 -0700}: commit (initial): one
+cat >expect <<EOF
+$commit HEAD@{Thu Apr 7 15:13:13 2005 -0700}: commit (initial): one
 EOF
 test_expect_success 'using --date= shows reflog date (oneline)' '
        git log -g -1 --oneline --date=default >actual &&
@@ -109,8 +110,8 @@ test_expect_success 'log.date does not invoke "--date" magic (multiline)' '
        test_cmp expect actual
 '
 
-cat >expect <<'EOF'
-e46513e HEAD@{0}: commit (initial): one
+cat >expect <<EOF
+$commit HEAD@{0}: commit (initial): one
 EOF
 test_expect_success 'log.date does not invoke "--date" magic (oneline)' '
        test_config log.date raw &&
index b06210ec5e8ffa30b59314b1c79ebc9bdb8f6476..9c0bc6525034021a1947d07faec84570d0dd629a 100755 (executable)
@@ -341,7 +341,7 @@ test_expect_success 'make_relative_path handles double slashes in GIT_DIR' '
 
 test_expect_success 'relative $GIT_WORK_TREE and git subprocesses' '
        GIT_DIR=repo.git GIT_WORK_TREE=repo.git/work \
-       test-subprocess --setup-work-tree rev-parse --show-toplevel >actual &&
+       test-tool subprocess --setup-work-tree rev-parse --show-toplevel >actual &&
        echo "$(pwd)/repo.git/work" >expected &&
        test_cmp expected actual
 '
@@ -360,7 +360,7 @@ test_expect_success 'GIT_DIR set (1)' '
        (
                cd work &&
                GIT_DIR=../gitfile git rev-parse --git-common-dir >actual &&
-               test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
+               test-tool path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
                test_cmp expect actual
        )
 '
@@ -371,7 +371,7 @@ test_expect_success 'GIT_DIR set (2)' '
        (
                cd work &&
                GIT_DIR=../gitfile git rev-parse --git-common-dir >actual &&
-               test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
+               test-tool path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
                test_cmp expect actual
        )
 '
@@ -382,7 +382,7 @@ test_expect_success 'Auto discovery' '
        (
                cd work &&
                git rev-parse --git-common-dir >actual &&
-               test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
+               test-tool path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
                test_cmp expect actual &&
                echo haha >data1 &&
                git add data1 &&
@@ -400,7 +400,7 @@ test_expect_success '$GIT_DIR/common overrides core.worktree' '
        (
                cd work &&
                git rev-parse --git-common-dir >actual &&
-               test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
+               test-tool path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
                test_cmp expect actual &&
                echo haha >data2 &&
                git add data2 &&
@@ -431,4 +431,16 @@ test_expect_success 'error out gracefully on invalid $GIT_WORK_TREE' '
        )
 '
 
+test_expect_success 'refs work with relative gitdir and work tree' '
+       git init relative &&
+       git -C relative commit --allow-empty -m one &&
+       git -C relative commit --allow-empty -m two &&
+
+       GIT_DIR=relative/.git GIT_WORK_TREE=relative git reset HEAD^ &&
+
+       git -C relative log -1 --format=%s >actual &&
+       echo one >expect &&
+       test_cmp expect actual
+'
+
 test_done
index 2ce68cc277a1ed742ce7b644de31a4a847e3529b..93c77eac45321cb24823431c4d89dc393049edbf 100755 (executable)
@@ -209,8 +209,9 @@ test_expect_success '@{u} works when tracking a local branch' '
        test refs/heads/master = "$(full_name @{u})"
 '
 
+commit=$(git rev-parse HEAD)
 cat >expect <<EOF
-commit 8f489d01d0cc65c3b0f09504ec50b5ed02a70bd5
+commit $commit
 Reflog: master@{0} (C O Mitter <committer@example.com>)
 Reflog message: branch: Created from HEAD
 Author: A U Thor <author@example.com>
@@ -224,7 +225,7 @@ test_expect_success 'log -g other@{u}' '
 '
 
 cat >expect <<EOF
-commit 8f489d01d0cc65c3b0f09504ec50b5ed02a70bd5
+commit $commit
 Reflog: master@{Thu Apr 7 15:17:13 2005 -0700} (C O Mitter <committer@example.com>)
 Reflog message: branch: Created from HEAD
 Author: A U Thor <author@example.com>
index e6854b828e2e68ad217721eb6139970b7c5958c0..972bd9c7859f52ac043b0a45500590fe182d4b6a 100755 (executable)
@@ -238,7 +238,6 @@ test_expect_success '#0: nonbare repo, no explicit configuration' '
 '
 
 test_expect_success '#1: GIT_WORK_TREE without explicit GIT_DIR is accepted' '
-       mkdir -p wt &&
        try_repo 1 "$here" unset unset "" unset \
                "$here/1/.git" "$here" "$here" 1/ \
                "$here/1/.git" "$here" "$here" 1/sub/ 2>message &&
index 079d2411450ca960d3d82fb9c7d59921c4cbe60a..c4422312f4e482e38172b1ac8b87e29d8708af2e 100755 (executable)
@@ -68,7 +68,7 @@ test_expect_success 'GIT_INDEX_VERSION takes precedence over config' '
                git config --add index.version 2 &&
                git add a 2>&1 &&
                echo 4 >expect &&
-               test-index-version <.git/index >actual &&
+               test-tool index-version <.git/index >actual &&
                test_cmp expect actual
        )
 '
index a66936fe9bde2ba20ab0e57400b6286b0710212a..e4f4c4df4ee3686e5ad1621134a827b941b38bca 100755 (executable)
@@ -11,8 +11,8 @@ sane_unset GIT_FSMONITOR_TEST
 test_expect_success 'enable split index' '
        git config splitIndex.maxPercentChange 100 &&
        git update-index --split-index &&
-       test-dump-split-index .git/index >actual &&
-       indexversion=$(test-index-version <.git/index) &&
+       test-tool dump-split-index .git/index >actual &&
+       indexversion=$(test-tool index-version <.git/index) &&
        if test "$indexversion" = "4"
        then
                own=432ef4b63f32193984f339431fd50ca796493569
@@ -39,7 +39,7 @@ test_expect_success 'add one file' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        base $base
        100644 $EMPTY_BLOB 0    one
@@ -57,8 +57,8 @@ test_expect_success 'disable split index' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       BASE=$(test-dump-split-index .git/index | grep "^own" | sed "s/own/base/") &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       BASE=$(test-tool dump-split-index .git/index | grep "^own" | sed "s/own/base/") &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        not a split index
        EOF
@@ -73,7 +73,7 @@ test_expect_success 'enable split index again, "one" now belongs to base index"'
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        replacements:
@@ -91,7 +91,7 @@ test_expect_success 'modify original file, base index untouched' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        q_to_tab >expect <<-EOF &&
        $BASE
        100644 2e0996000b7e9019eabcad29391bf0f5c7702f0b 0Q
@@ -111,7 +111,7 @@ test_expect_success 'add another file, which stays index' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        q_to_tab >expect <<-EOF &&
        $BASE
        100644 2e0996000b7e9019eabcad29391bf0f5c7702f0b 0Q
@@ -130,7 +130,7 @@ test_expect_success 'remove file not in base index' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        q_to_tab >expect <<-EOF &&
        $BASE
        100644 2e0996000b7e9019eabcad29391bf0f5c7702f0b 0Q
@@ -147,7 +147,7 @@ test_expect_success 'remove file in base index' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        replacements:
@@ -165,7 +165,7 @@ test_expect_success 'add original file back' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        100644 $EMPTY_BLOB 0    one
@@ -195,7 +195,7 @@ test_expect_success 'unify index, two files remain' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        not a split index
        EOF
@@ -229,8 +229,8 @@ test_expect_success 'set core.splitIndex config variable to true' '
        100644 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 0       two
        EOF
        test_cmp ls-files.expect ls-files.actual &&
-       BASE=$(test-dump-split-index .git/index | grep "^base") &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       BASE=$(test-tool dump-split-index .git/index | grep "^base") &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        replacements:
@@ -248,7 +248,7 @@ test_expect_success 'set core.splitIndex config variable to false' '
        100644 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 0       two
        EOF
        test_cmp ls-files.expect ls-files.actual &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        not a split index
        EOF
@@ -259,8 +259,8 @@ test_expect_success 'set core.splitIndex config variable to true' '
        git config core.splitIndex true &&
        : >three &&
        git update-index --add three &&
-       BASE=$(test-dump-split-index .git/index | grep "^base") &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       BASE=$(test-tool dump-split-index .git/index | grep "^base") &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        replacements:
@@ -269,7 +269,7 @@ test_expect_success 'set core.splitIndex config variable to true' '
        test_cmp expect actual &&
        : >four &&
        git update-index --add four &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        100644 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 0       four
@@ -283,8 +283,8 @@ test_expect_success 'check behavior with splitIndex.maxPercentChange unset' '
        git config --unset splitIndex.maxPercentChange &&
        : >five &&
        git update-index --add five &&
-       BASE=$(test-dump-split-index .git/index | grep "^base") &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       BASE=$(test-tool dump-split-index .git/index | grep "^base") &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        replacements:
@@ -293,7 +293,7 @@ test_expect_success 'check behavior with splitIndex.maxPercentChange unset' '
        test_cmp expect actual &&
        : >six &&
        git update-index --add six &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        100644 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 0       six
@@ -307,8 +307,8 @@ test_expect_success 'check splitIndex.maxPercentChange set to 0' '
        git config splitIndex.maxPercentChange 0 &&
        : >seven &&
        git update-index --add seven &&
-       BASE=$(test-dump-split-index .git/index | grep "^base") &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       BASE=$(test-tool dump-split-index .git/index | grep "^base") &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        replacements:
@@ -317,8 +317,8 @@ test_expect_success 'check splitIndex.maxPercentChange set to 0' '
        test_cmp expect actual &&
        : >eight &&
        git update-index --add eight &&
-       BASE=$(test-dump-split-index .git/index | grep "^base") &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       BASE=$(test-tool dump-split-index .git/index | grep "^base") &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        replacements:
@@ -332,12 +332,12 @@ test_expect_success 'shared index files expire after 2 weeks by default' '
        git update-index --add ten &&
        test $(ls .git/sharedindex.* | wc -l) -gt 2 &&
        just_under_2_weeks_ago=$((5-14*86400)) &&
-       test-chmtime =$just_under_2_weeks_ago .git/sharedindex.* &&
+       test-tool chmtime =$just_under_2_weeks_ago .git/sharedindex.* &&
        : >eleven &&
        git update-index --add eleven &&
        test $(ls .git/sharedindex.* | wc -l) -gt 2 &&
        just_over_2_weeks_ago=$((-1-14*86400)) &&
-       test-chmtime =$just_over_2_weeks_ago .git/sharedindex.* &&
+       test-tool chmtime =$just_over_2_weeks_ago .git/sharedindex.* &&
        : >twelve &&
        git update-index --add twelve &&
        test $(ls .git/sharedindex.* | wc -l) -le 2
@@ -345,12 +345,12 @@ test_expect_success 'shared index files expire after 2 weeks by default' '
 
 test_expect_success 'check splitIndex.sharedIndexExpire set to 16 days' '
        git config splitIndex.sharedIndexExpire "16.days.ago" &&
-       test-chmtime =$just_over_2_weeks_ago .git/sharedindex.* &&
+       test-tool chmtime =$just_over_2_weeks_ago .git/sharedindex.* &&
        : >thirteen &&
        git update-index --add thirteen &&
        test $(ls .git/sharedindex.* | wc -l) -gt 2 &&
        just_over_16_days_ago=$((-1-16*86400)) &&
-       test-chmtime =$just_over_16_days_ago .git/sharedindex.* &&
+       test-tool chmtime =$just_over_16_days_ago .git/sharedindex.* &&
        : >fourteen &&
        git update-index --add fourteen &&
        test $(ls .git/sharedindex.* | wc -l) -le 2
@@ -359,13 +359,13 @@ test_expect_success 'check splitIndex.sharedIndexExpire set to 16 days' '
 test_expect_success 'check splitIndex.sharedIndexExpire set to "never" and "now"' '
        git config splitIndex.sharedIndexExpire never &&
        just_10_years_ago=$((-365*10*86400)) &&
-       test-chmtime =$just_10_years_ago .git/sharedindex.* &&
+       test-tool chmtime =$just_10_years_ago .git/sharedindex.* &&
        : >fifteen &&
        git update-index --add fifteen &&
        test $(ls .git/sharedindex.* | wc -l) -gt 2 &&
        git config splitIndex.sharedIndexExpire now &&
        just_1_second_ago=-1 &&
-       test-chmtime =$just_1_second_ago .git/sharedindex.* &&
+       test-tool chmtime =$just_1_second_ago .git/sharedindex.* &&
        : >sixteen &&
        git update-index --add sixteen &&
        test $(ls .git/sharedindex.* | wc -l) -le 2
@@ -435,7 +435,7 @@ test_expect_success 'writing split index with null sha1 does not write cache tre
        commit=$(git commit-tree $tree -p HEAD <msg) &&
        git update-ref HEAD "$commit" &&
        GIT_ALLOW_NULL_SHA1=1 git reset --hard &&
-       (test-dump-cache-tree >cache-tree.out || true) &&
+       (test-tool dump-cache-tree >cache-tree.out || true) &&
        test_line_count = 0 cache-tree.out
 '
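The hunks above, and most of the test-script hunks that follow, apply one mechanical conversion: the standalone helper binaries (test-dump-split-index, test-chmtime, test-dump-cache-tree, and friends) are now reached through a single consolidated test-tool binary, with the old helper name becoming a subcommand. A minimal before/after sketch of the pattern; the file name is illustrative only, not taken from the tests:

    test-chmtime =-86400 some-file                 # old: one binary per helper
    test-tool chmtime =-86400 some-file            # new: same arguments, helper name as subcommand
    mtime=$(test-tool chmtime --get some-file)     # new --get mode replaces parsing "-v +0" output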
 
index bb4f2e0c631f1de7421f50b9fa64f11276fe9645..1fa670625c5be87294eec9c5fe86bf2defff2ce2 100755 (executable)
@@ -189,8 +189,12 @@ test_expect_success 'no advice given for explicit detached head state' '
 # Detached HEAD tests for GIT_PRINT_SHA1_ELLIPSIS (new format)
 test_expect_success 'describe_detached_head prints no SHA-1 ellipsis when not asked to' "
 
+       commit=$(git rev-parse --short=12 master^) &&
+       commit2=$(git rev-parse --short=12 master~2) &&
+       commit3=$(git rev-parse --short=12 master~3) &&
+
        # The first detach operation is more chatty than the following ones.
-       cat >1st_detach <<-'EOF' &&
+       cat >1st_detach <<-EOF &&
        Note: checking out 'HEAD^'.
 
        You are in 'detached HEAD' state. You can look around, make experimental
@@ -202,18 +206,18 @@ test_expect_success 'describe_detached_head prints no SHA-1 ellipsis when not as
 
          git checkout -b <new-branch-name>
 
-       HEAD is now at 7c7cd714e262 three
+       HEAD is now at \$commit three
        EOF
 
        # The remaining ones just show info about previous and current HEADs.
-       cat >2nd_detach <<-'EOF' &&
-       Previous HEAD position was 7c7cd714e262 three
-       HEAD is now at 139b20d8e6c5 two
+       cat >2nd_detach <<-EOF &&
+       Previous HEAD position was \$commit three
+       HEAD is now at \$commit2 two
        EOF
 
-       cat >3rd_detach <<-'EOF' &&
-       Previous HEAD position was 139b20d8e6c5 two
-       HEAD is now at d79ce1670bdc one
+       cat >3rd_detach <<-EOF &&
+       Previous HEAD position was \$commit2 two
+       HEAD is now at \$commit3 one
        EOF
 
        reset &&
@@ -261,8 +265,12 @@ test_expect_success 'describe_detached_head prints no SHA-1 ellipsis when not as
 # Detached HEAD tests for GIT_PRINT_SHA1_ELLIPSIS (old format)
 test_expect_success 'describe_detached_head does print SHA-1 ellipsis when asked to' "
 
+       commit=$(git rev-parse --short=12 master^) &&
+       commit2=$(git rev-parse --short=12 master~2) &&
+       commit3=$(git rev-parse --short=12 master~3) &&
+
        # The first detach operation is more chatty than the following ones.
-       cat >1st_detach <<-'EOF' &&
+       cat >1st_detach <<-EOF &&
        Note: checking out 'HEAD^'.
 
        You are in 'detached HEAD' state. You can look around, make experimental
@@ -274,18 +282,18 @@ test_expect_success 'describe_detached_head does print SHA-1 ellipsis when asked
 
          git checkout -b <new-branch-name>
 
-       HEAD is now at 7c7cd714e262... three
+       HEAD is now at \$commit... three
        EOF
 
        # The remaining ones just show info about previous and current HEADs.
-       cat >2nd_detach <<-'EOF' &&
-       Previous HEAD position was 7c7cd714e262... three
-       HEAD is now at 139b20d8e6c5... two
+       cat >2nd_detach <<-EOF &&
+       Previous HEAD position was \$commit... three
+       HEAD is now at \$commit2... two
        EOF
 
-       cat >3rd_detach <<-'EOF' &&
-       Previous HEAD position was 139b20d8e6c5... two
-       HEAD is now at d79ce1670bdc... one
+       cat >3rd_detach <<-EOF &&
+       Previous HEAD position was \$commit2... two
+       HEAD is now at \$commit3... one
        EOF
 
        reset &&
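The detached-HEAD hunks above share one idea: rather than hard-coding abbreviated commit names such as 7c7cd714e262, which only match one particular object history, the expected output is built from "git rev-parse --short=12" at run time. A minimal sketch of the substitution; the revision and subject line are illustrative only:

    commit=$(git rev-parse --short=12 master^) &&
    echo "HEAD is now at $commit three" >expect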
index f46d0499bc6ea95236f7ca05060b8f8fbbdbbc50..fc3eb43b890977bf793f6ed70e6d6caed2902407 100755 (executable)
@@ -68,13 +68,13 @@ test_expect_success 'do not touch files that are already up-to-date' '
        git add file1 file2 &&
        git commit -m base &&
        echo modified >file1 &&
-       test-chmtime =1000000000 file2 &&
+       test-tool chmtime =1000000000 file2 &&
        git update-index -q --refresh &&
        git checkout HEAD -- file1 file2 &&
        echo one >expect &&
        test_cmp expect file1 &&
-       echo "1000000000        file2" >expect &&
-       test-chmtime -v +0 file2 >actual &&
+       echo "1000000000" >expect &&
+       test-tool chmtime --get file2 >actual &&
        test_cmp expect actual
 '
 
index d0d2e4f7ec3310ec51da7144fa87151129f393c0..224049892423075dd7d03bb15229f4a11302d66f 100755 (executable)
@@ -198,13 +198,25 @@ test_expect_success '"add" with <branch> omitted' '
        test_cmp_rev HEAD bat
 '
 
-test_expect_success '"add" auto-vivify does not clobber existing branch' '
-       test_commit c1 &&
-       test_commit c2 &&
-       git branch precious HEAD~1 &&
-       test_must_fail git worktree add precious &&
-       test_cmp_rev HEAD~1 precious &&
-       test_path_is_missing precious
+test_expect_success '"add" checks out existing branch of dwimd name' '
+       git branch dwim HEAD~1 &&
+       git worktree add dwim &&
+       test_cmp_rev HEAD~1 dwim &&
+       (
+               cd dwim &&
+               test_cmp_rev HEAD dwim
+       )
+'
+
+test_expect_success '"add <path>" dwim fails with checked out branch' '
+       git checkout -b test-branch &&
+       test_must_fail git worktree add test-branch &&
+       test_path_is_missing test-branch
+'
+
+test_expect_success '"add --force" with existing dwimd name doesnt die' '
+       git checkout test-branch &&
+       git worktree add --force test-branch
 '
 
 test_expect_success '"add" no auto-vivify with --detach and <branch> omitted' '
index a0f1e3bb800ec6943648eeffb9e7ca84e328fa41..b7d6d5d45adf6067ab2f39801f658f778f9b2855 100755 (executable)
@@ -78,10 +78,9 @@ test_expect_success 'not prune locked checkout' '
 
 test_expect_success 'not prune recent checkouts' '
        test_when_finished rm -r .git/worktrees &&
-       mkdir zz &&
-       mkdir -p .git/worktrees/jlm &&
-       echo "$(pwd)"/zz >.git/worktrees/jlm/gitdir &&
-       rmdir zz &&
+       git worktree add jlm HEAD &&
+       test -d .git/worktrees/jlm &&
+       rm -rf jlm &&
        git worktree prune --verbose --expire=2.days.ago &&
        test -d .git/worktrees/jlm
 '
index 5d5b3632ba0a7cf5364ab4db1df8ca7ea285b4d5..5f7d45b7b7fa91a497d6f8d96a7235fc0024919e 100755 (executable)
@@ -72,12 +72,11 @@ test_expect_success 'move locked worktree' '
 '
 
 test_expect_success 'move worktree' '
-       toplevel="$(pwd)" &&
        git worktree move source destination &&
        test_path_is_missing source &&
        git worktree list --porcelain >out &&
-       grep "^worktree.*/destination" out &&
-       ! grep "^worktree.*/source" out &&
+       grep "^worktree.*/destination$" out &&
+       ! grep "^worktree.*/source$" out &&
        git -C destination log --format=%s >actual2 &&
        echo init >expected2 &&
        test_cmp expected2 actual2
@@ -93,7 +92,7 @@ test_expect_success 'move worktree to another dir' '
        test_when_finished "git worktree move some-dir/destination destination" &&
        test_path_is_missing destination &&
        git worktree list --porcelain >out &&
-       grep "^worktree.*/some-dir/destination" out &&
+       grep "^worktree.*/some-dir/destination$" out &&
        git -C some-dir/destination log --format=%s >actual2 &&
        echo init >expected2 &&
        test_cmp expected2 actual2
index c8bce8c2e4314aaf466019438818293102c12c9c..685ec45639a5e9a2bf929e10aa30976e9a4c456b 100755 (executable)
@@ -8,19 +8,20 @@ test_description='git update-index --again test.
 
 . ./test-lib.sh
 
-cat > expected <<\EOF
-100644 3b18e512dba79e4c8300dd08aeb37f8e728b8dad 0      file1
-100644 9db8893856a8a02eaa73470054b7c1c5a7c82e47 0      file2
-EOF
-test_expect_success 'update-index --add' \
-       'echo hello world >file1 &&
-        echo goodbye people >file2 &&
-        git update-index --add file1 file2 &&
-        git ls-files -s >current &&
-        cmp current expected'
+test_expect_success 'update-index --add' '
+       echo hello world >file1 &&
+       echo goodbye people >file2 &&
+       git update-index --add file1 file2 &&
+       git ls-files -s >current &&
+       cat >expected <<-EOF &&
+       100644 $(git hash-object file1) 0       file1
+       100644 $(git hash-object file2) 0       file2
+       EOF
+       cmp current expected
+'
 
-test_expect_success 'update-index --again' \
-       'rm -f file1 &&
+test_expect_success 'update-index --again' '
+       rm -f file1 &&
        echo hello everybody >file2 &&
        if git update-index --again
        then
@@ -29,25 +30,23 @@ test_expect_success 'update-index --again' \
        else
                echo happy - failed as expected
        fi &&
-        git ls-files -s >current &&
-        cmp current expected'
+       git ls-files -s >current &&
+       cmp current expected
+'
 
-cat > expected <<\EOF
-100644 0f1ae1422c2bf43f117d3dbd715c988a9ed2103f 0      file2
-EOF
-test_expect_success 'update-index --remove --again' \
-       'git update-index --remove --again &&
-        git ls-files -s >current &&
-        cmp current expected'
+test_expect_success 'update-index --remove --again' '
+       git update-index --remove --again &&
+       git ls-files -s >current &&
+       cat >expected <<-EOF &&
+       100644 $(git hash-object file2) 0       file2
+       EOF
+       cmp current expected
+'
 
 test_expect_success 'first commit' 'git commit -m initial'
 
-cat > expected <<\EOF
-100644 53ab446c3f4e42ce9bb728a0ccb283a101be4979 0      dir1/file3
-100644 0f1ae1422c2bf43f117d3dbd715c988a9ed2103f 0      file2
-EOF
-test_expect_success 'update-index again' \
-       'mkdir -p dir1 &&
+test_expect_success 'update-index again' '
+       mkdir -p dir1 &&
        echo hello world >dir1/file3 &&
        echo goodbye people >file2 &&
        git update-index --add file2 dir1/file3 &&
@@ -55,30 +54,38 @@ test_expect_success 'update-index again' \
        echo happy >dir1/file3 &&
        git update-index --again &&
        git ls-files -s >current &&
-       cmp current expected'
+       cat >expected <<-EOF &&
+       100644 $(git hash-object dir1/file3) 0  dir1/file3
+       100644 $(git hash-object file2) 0       file2
+       EOF
+       cmp current expected
+'
 
-cat > expected <<\EOF
-100644 d7fb3f695f06c759dbf3ab00046e7cc2da22d10f 0      dir1/file3
-100644 0f1ae1422c2bf43f117d3dbd715c988a9ed2103f 0      file2
-EOF
-test_expect_success 'update-index --update from subdir' \
-       'echo not so happy >file2 &&
+file2=$(git hash-object file2)
+test_expect_success 'update-index --update from subdir' '
+       echo not so happy >file2 &&
        (cd dir1 &&
        cat ../file2 >file3 &&
        git update-index --again
        ) &&
        git ls-files -s >current &&
-       cmp current expected'
+       cat >expected <<-EOF &&
+       100644 $(git hash-object dir1/file3) 0  dir1/file3
+       100644 $file2 0 file2
+       EOF
+       test_cmp current expected
+'
 
-cat > expected <<\EOF
-100644 594fb5bb1759d90998e2bf2a38261ae8e243c760 0      dir1/file3
-100644 0f1ae1422c2bf43f117d3dbd715c988a9ed2103f 0      file2
-EOF
-test_expect_success 'update-index --update with pathspec' \
-       'echo very happy >file2 &&
+test_expect_success 'update-index --update with pathspec' '
+       echo very happy >file2 &&
        cat file2 >dir1/file3 &&
        git update-index --again dir1/ &&
        git ls-files -s >current &&
-       cmp current expected'
+       cat >expected <<-EOF &&
+       100644 $(git hash-object dir1/file3) 0  dir1/file3
+       100644 $file2 0 file2
+       EOF
+       cmp current expected
+'
 
 test_done
index cc830da58d920718b7b0a990a359afa9dd783b4c..7e2e7dd4ae5842bc49ccaf5dc116deb3b3d6b993 100755 (executable)
@@ -33,7 +33,7 @@ test_expect_success 'setup' '
 '
 
 test_expect_success 'index is at version 2' '
-       test "$(test-index-version < .git/index)" = 2
+       test "$(test-tool index-version < .git/index)" = 2
 '
 
 test_expect_success 'update-index --skip-worktree' '
@@ -42,7 +42,7 @@ test_expect_success 'update-index --skip-worktree' '
 '
 
 test_expect_success 'index is at version 3 after having some skip-worktree entries' '
-       test "$(test-index-version < .git/index)" = 3
+       test "$(test-tool index-version < .git/index)" = 3
 '
 
 test_expect_success 'ls-files -t' '
@@ -55,7 +55,7 @@ test_expect_success 'update-index --no-skip-worktree' '
 '
 
 test_expect_success 'index version is back to 2 when there is no skip-worktree entry' '
-       test "$(test-index-version < .git/index)" = 2
+       test "$(test-tool index-version < .git/index)" = 2
 '
 
 test_done
index 32ac6e09bdc81acfb8de5cf887302794d20c8ece..1db7e6a1abbebb63f811fa6ecbcd1db67607298e 100755 (executable)
@@ -85,9 +85,9 @@ test_expect_success '--chmod=+x and chmod=-x in the same argument list' '
        >B &&
        git add A B &&
        git update-index --chmod=+x A --chmod=-x B &&
-       cat >expect <<-\EOF &&
-       100755 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 0       A
-       100644 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 0       B
+       cat >expect <<-EOF &&
+       100755 $EMPTY_BLOB 0    A
+       100644 $EMPTY_BLOB 0    B
        EOF
        git ls-files --stage A B >actual &&
        test_cmp expect actual
index bdf5198b7eff11f9a7bfb57239f5905d92fe4bf7..08af596ba6c6b032eb1696c3beaba7c1776b4fd5 100755 (executable)
@@ -4,7 +4,7 @@ test_description='Test the lazy init name hash with various folder structures'
 
 . ./test-lib.sh
 
-if test 1 -eq $($GIT_BUILD_DIR/t/helper/test-online-cpus)
+if test 1 -eq $($GIT_BUILD_DIR/t/helper/test-tool online-cpus)
 then
        skip_all='skipping lazy-init tests, single cpu'
        test_done
@@ -21,7 +21,7 @@ test_expect_success 'no buffer overflow in lazy_init_name_hash' '
        ) |
        sed "s/^/100644 $EMPTY_BLOB     /" |
        git update-index --index-info &&
-       test-lazy-init-name-hash -m
+       test-tool lazy-init-name-hash -m
 '
 
 test_done
index c1fc6ca7301eaa9b15ef091ce592989956efc156..dce102130fb77ddda7cdc9aa9211507d9c2f2e09 100755 (executable)
@@ -79,12 +79,12 @@ match_with_function() {
        if test "$match_expect" = 1
        then
                test_expect_success "$match_function: match '$text' '$pattern'" "
-                       test-wildmatch $match_function '$text' '$pattern'
+                       test-tool wildmatch $match_function '$text' '$pattern'
                "
        elif test "$match_expect" = 0
        then
                test_expect_success "$match_function: no match '$text' '$pattern'" "
-                       test_must_fail test-wildmatch $match_function '$text' '$pattern'
+                       test_must_fail test-tool wildmatch $match_function '$text' '$pattern'
                "
        else
                test_expect_success "PANIC: Test framework error. Unknown matches value $match_expect" 'false'
@@ -148,7 +148,7 @@ match_with_ls_files() {
 match() {
        if test "$#" = 6
        then
-               # When test-wildmatch and git ls-files produce the same
+               # When test-tool wildmatch and git ls-files produce the same
                # result.
                match_glob=$1
                match_file_glob=$match_glob
@@ -204,19 +204,19 @@ match() {
                fi
        '
 
-       # $1: Case sensitive glob match: test-wildmatch & ls-files
+       # $1: Case sensitive glob match: test-tool wildmatch & ls-files
        match_with_function "$text" "$pattern" $match_glob "wildmatch"
        match_with_ls_files "$text" "$pattern" $match_file_glob "wildmatch" " --glob-pathspecs"
 
-       # $2: Case insensitive glob match: test-wildmatch & ls-files
+       # $2: Case insensitive glob match: test-tool wildmatch & ls-files
        match_with_function "$text" "$pattern" $match_iglob "iwildmatch"
        match_with_ls_files "$text" "$pattern" $match_file_iglob "iwildmatch" " --glob-pathspecs --icase-pathspecs"
 
-       # $3: Case sensitive path match: test-wildmatch & ls-files
+       # $3: Case sensitive path match: test-tool wildmatch & ls-files
        match_with_function "$text" "$pattern" $match_pathmatch "pathmatch"
        match_with_ls_files "$text" "$pattern" $match_file_pathmatch "pathmatch" ""
 
-       # $4: Case insensitive path match: test-wildmatch & ls-files
+       # $4: Case insensitive path match: test-tool wildmatch & ls-files
        match_with_function "$text" "$pattern" $match_pathmatchi "ipathmatch"
        match_with_ls_files "$text" "$pattern" $match_file_pathmatchi "ipathmatch" " --icase-pathspecs"
 }
index 6c0b7ea4addc8f1569b1b85f58dd3072fb863f33..c0ef946811dd534dfa5e704c992b6ab1fab82c35 100755 (executable)
@@ -6,6 +6,7 @@
 test_description='git branch assorted tests'
 
 . ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-rebase.sh
 
 test_expect_success 'prepare a trivial repository' '
        echo Hello >A &&
@@ -1246,6 +1247,29 @@ test_expect_success '--merged is incompatible with --no-merged' '
        test_must_fail git branch --merged HEAD --no-merged HEAD
 '
 
+test_expect_success '--list during rebase' '
+       test_when_finished "reset_rebase" &&
+       git checkout master &&
+       FAKE_LINES="1 edit 2" &&
+       export FAKE_LINES &&
+       set_fake_editor &&
+       git rebase -i HEAD~2 &&
+       git branch --list >actual &&
+       test_i18ngrep "rebasing master" actual
+'
+
+test_expect_success '--list during rebase from detached HEAD' '
+       test_when_finished "reset_rebase && git checkout master" &&
+       git checkout master^0 &&
+       oid=$(git rev-parse --short HEAD) &&
+       FAKE_LINES="1 edit 2" &&
+       export FAKE_LINES &&
+       set_fake_editor &&
+       git rebase -i HEAD~2 &&
+       git branch --list >actual &&
+       test_i18ngrep "rebasing detached HEAD $oid" actual
+'
+
 test_expect_success 'tracking with unexpected .fetch refspec' '
        rm -rf a b c d &&
        git init a &&
index 86bf909ee3dfca78f678fdefe961772dcd78d6b1..61748088ebcbed700a570bc0e297f48435844340 100755 (executable)
@@ -22,7 +22,7 @@ test_expect_success 'setup: create a few commits with notes' '
        git commit -m 3rd &&
        COMMIT_FILE=.git/objects/5e/e1c35e83ea47cd3cc4f8cbee0568915fbbbd29 &&
        test -f $COMMIT_FILE &&
-       test-chmtime =+0 $COMMIT_FILE &&
+       test-tool chmtime =+0 $COMMIT_FILE &&
        git notes add -m "Note #3"
 '
 
index 3b905406df79187f70c828f72e9e2dc187f1be57..59c766540e5361af0eab19e33d1d61ef650bc72d 100755 (executable)
@@ -711,13 +711,13 @@ test_expect_success 'rebase -i continue with unstaged submodule' '
 test_expect_success 'avoid unnecessary reset' '
        git checkout master &&
        git reset --hard &&
-       test-chmtime =123456789 file3 &&
+       test-tool chmtime =123456789 file3 &&
        git update-index --refresh &&
        HEAD=$(git rev-parse HEAD) &&
        set_fake_editor &&
        git rebase -i HEAD~4 &&
        test $HEAD = $(git rev-parse HEAD) &&
-       MTIME=$(test-chmtime -v +0 file3 | sed 's/[^0-9].*$//') &&
+       MTIME=$(test-tool chmtime --get file3) &&
        test 123456789 = $MTIME
 '
 
@@ -927,10 +927,8 @@ test_expect_success 'rebase --exec works without -i ' '
 test_expect_success 'rebase -i --exec without <CMD>' '
        git reset --hard execute &&
        set_fake_editor &&
-       test_must_fail git rebase -i --exec 2>tmp &&
-       sed -e "1d" tmp >actual &&
-       test_must_fail git rebase -h >expected &&
-       test_cmp expected actual &&
+       test_must_fail git rebase -i --exec 2>actual &&
+       test_i18ngrep "requires a value" actual &&
        git checkout master
 '
 
index 7c91a85f43a7a11295819adc5da5f8e5fca9e4ea..03bf1b8a3b3df2e44ed0f70dc176c25af9ac244a 100755 (executable)
@@ -24,7 +24,7 @@ test_expect_success 'interactive rebase --continue works with touched file' '
        git checkout master &&
 
        FAKE_LINES="edit 1" git rebase -i HEAD^ &&
-       test-chmtime =-60 F1 &&
+       test-tool chmtime =-60 F1 &&
        git rebase --continue
 '
 
@@ -36,7 +36,7 @@ test_expect_success 'non-interactive rebase --continue works with touched file'
        test_must_fail git rebase --onto master master topic &&
        echo "Resolved" >F2 &&
        git add F2 &&
-       test-chmtime =-60 F1 &&
+       test-tool chmtime =-60 F1 &&
        git rebase --continue
 '
 
@@ -88,6 +88,55 @@ test_expect_success 'rebase passes merge strategy options correctly' '
        git rebase --continue
 '
 
+test_expect_success '--skip after failed fixup cleans commit message' '
+       test_when_finished "test_might_fail git rebase --abort" &&
+       git checkout -b with-conflicting-fixup &&
+       test_commit wants-fixup &&
+       test_commit "fixup! wants-fixup" wants-fixup.t 1 wants-fixup-1 &&
+       test_commit "fixup! wants-fixup" wants-fixup.t 2 wants-fixup-2 &&
+       test_commit "fixup! wants-fixup" wants-fixup.t 3 wants-fixup-3 &&
+       test_must_fail env FAKE_LINES="1 fixup 2 squash 4" \
+               git rebase -i HEAD~4 &&
+
+       : now there is a conflict, and comments in the commit message &&
+       git show HEAD >out &&
+       grep "fixup! wants-fixup" out &&
+
+       : skip and continue &&
+       echo "cp \"\$1\" .git/copy.txt" | write_script copy-editor.sh &&
+       (test_set_editor "$PWD/copy-editor.sh" && git rebase --skip) &&
+
+       : the user should not have had to edit the commit message &&
+       test_path_is_missing .git/copy.txt &&
+
+       : now the comments in the commit message should have been cleaned up &&
+       git show HEAD >out &&
+       ! grep "fixup! wants-fixup" out &&
+
+       : now, let us ensure that "squash" is handled correctly &&
+       git reset --hard wants-fixup-3 &&
+       test_must_fail env FAKE_LINES="1 squash 4 squash 2 squash 4" \
+               git rebase -i HEAD~4 &&
+
+       : the first squash failed, but there are two more in the chain &&
+       (test_set_editor "$PWD/copy-editor.sh" &&
+        test_must_fail git rebase --skip) &&
+
+       : not the final squash, no need to edit the commit message &&
+       test_path_is_missing .git/copy.txt &&
+
+       : The first squash was skipped, therefore: &&
+       git show HEAD >out &&
+       test_i18ngrep "# This is a combination of 2 commits" out &&
+
+       (test_set_editor "$PWD/copy-editor.sh" && git rebase --skip) &&
+       git show HEAD >out &&
+       test_i18ngrep ! "# This is a combination" out &&
+
+       : Final squash failed, but there was still a squash &&
+       test_i18ngrep "# This is a combination of 2 commits" .git/copy.txt
+'
+
 test_expect_success 'setup rerere database' '
        rm -fr .git/rebase-* &&
        git reset --hard commit-new-file-F3-on-topic-branch &&
index 68fe2003ef5f74073cafa4741bee31ade85cf5c0..e7438ad06acedee06ced9995a47e454a2eef3e2e 100755 (executable)
@@ -199,7 +199,7 @@ test_run_rebase () {
        "
 }
 test_run_rebase success ''
-test_run_rebase failure -m
+test_run_rebase success -m
 test_run_rebase success -i
 test_run_rebase failure -p
 
@@ -214,9 +214,10 @@ test_run_rebase () {
        "
 }
 test_run_rebase success ''
-test_run_rebase failure -m
-test_run_rebase failure -i
+test_run_rebase success -m
+test_run_rebase success -i
 test_run_rebase failure -p
+test_run_rebase success --rebase-merges
 
 #       m
 #      /
index 2afb56470184b18da19740f0a68abb31da96fd82..f6993b7e14d91617b337bfaefa717ec8591fb28f 100755 (executable)
@@ -12,6 +12,13 @@ cat >file <<EOF
 a
 EOF
 
+# Expected commit message for initial commit after rebase --signoff
+cat >expected-initial-signed <<EOF
+Initial empty commit
+
+Signed-off-by: $(git var GIT_COMMITTER_IDENT | sed -e "s/>.*/>/")
+EOF
+
 # Expected commit message after rebase --signoff
 cat >expected-signed <<EOF
 first
@@ -43,4 +50,35 @@ test_expect_success 'rebase --no-signoff does not add a sign-off line' '
        test_cmp expected-unsigned actual
 '
 
+test_expect_success 'rebase --exec --signoff adds a sign-off line' '
+       test_when_finished "rm exec" &&
+       git commit --amend -m "first" &&
+       git rebase --exec "touch exec" --signoff HEAD^ &&
+       test_path_is_file exec &&
+       git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
+       test_cmp expected-signed actual
+'
+
+test_expect_success 'rebase --root --signoff adds a sign-off line' '
+       git commit --amend -m "first" &&
+       git rebase --root --keep-empty --signoff &&
+       git cat-file commit HEAD^ | sed -e "1,/^\$/d" >actual &&
+       test_cmp expected-initial-signed actual &&
+       git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
+       test_cmp expected-signed actual
+'
+
+test_expect_success 'rebase -i --signoff fails' '
+       git commit --amend -m "first" &&
+       git rebase -i --signoff HEAD^ &&
+       git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
+       test_cmp expected-signed actual
+'
+
+test_expect_success 'rebase -m --signoff fails' '
+       git commit --amend -m "first" &&
+       git rebase -m --signoff HEAD^ &&
+       git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
+       test_cmp expected-signed actual
+'
 test_done
diff --git a/t/t3430-rebase-merges.sh b/t/t3430-rebase-merges.sh
new file mode 100755 (executable)
index 0000000..3d4dfdf
--- /dev/null
@@ -0,0 +1,244 @@
+#!/bin/sh
+#
+# Copyright (c) 2018 Johannes E. Schindelin
+#
+
+test_description='git rebase -i --rebase-merges
+
+This test runs git rebase "interactively", retaining the branch structure by
+recreating merge commits.
+
+Initial setup:
+
+    -- B --                   (first)
+   /       \
+ A - C - D - E - H            (master)
+       \       /
+         F - G                (second)
+'
+. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-rebase.sh
+
+test_cmp_graph () {
+       cat >expect &&
+       git log --graph --boundary --format=%s "$@" >output &&
+       sed "s/ *$//" <output >output.trimmed &&
+       test_cmp expect output.trimmed
+}
+
+test_expect_success 'setup' '
+       write_script replace-editor.sh <<-\EOF &&
+       mv "$1" "$(git rev-parse --git-path ORIGINAL-TODO)"
+       cp script-from-scratch "$1"
+       EOF
+
+       test_commit A &&
+       git checkout -b first &&
+       test_commit B &&
+       git checkout master &&
+       test_commit C &&
+       test_commit D &&
+       git merge --no-commit B &&
+       test_tick &&
+       git commit -m E &&
+       git tag -m E E &&
+       git checkout -b second C &&
+       test_commit F &&
+       test_commit G &&
+       git checkout master &&
+       git merge --no-commit G &&
+       test_tick &&
+       git commit -m H &&
+       git tag -m H H
+'
+
+test_expect_success 'create completely different structure' '
+       cat >script-from-scratch <<-\EOF &&
+       label onto
+
+       # onebranch
+       pick G
+       pick D
+       label onebranch
+
+       # second
+       reset onto
+       pick B
+       label second
+
+       reset onto
+       merge -C H second
+       merge onebranch # Merge the topic branch '\''onebranch'\''
+       EOF
+       test_config sequence.editor \""$PWD"/replace-editor.sh\" &&
+       test_tick &&
+       git rebase -i -r A &&
+       test_cmp_graph <<-\EOF
+       *   Merge the topic branch '\''onebranch'\''
+       |\
+       | * D
+       | * G
+       * |   H
+       |\ \
+       | |/
+       |/|
+       | * B
+       |/
+       * A
+       EOF
+'
+
+test_expect_success 'generate correct todo list' '
+       cat >expect <<-\EOF &&
+       label onto
+
+       reset onto
+       pick d9df450 B
+       label E
+
+       reset onto
+       pick 5dee784 C
+       label branch-point
+       pick ca2c861 F
+       pick 088b00a G
+       label H
+
+       reset branch-point # C
+       pick 12bd07b D
+       merge -C 2051b56 E # E
+       merge -C 233d48a H # H
+
+       EOF
+
+       grep -v "^#" <.git/ORIGINAL-TODO >output &&
+       test_cmp expect output
+'
+
+test_expect_success '`reset` refuses to overwrite untracked files' '
+       git checkout -b refuse-to-reset &&
+       test_commit dont-overwrite-untracked &&
+       git checkout @{-1} &&
+       : >dont-overwrite-untracked.t &&
+       echo "reset refs/tags/dont-overwrite-untracked" >script-from-scratch &&
+       test_config sequence.editor \""$PWD"/replace-editor.sh\" &&
+       test_must_fail git rebase -r HEAD &&
+       git rebase --abort
+'
+
+test_expect_success 'failed `merge` writes patch (may be rescheduled, too)' '
+       test_when_finished "test_might_fail git rebase --abort" &&
+       git checkout -b conflicting-merge A &&
+
+       : fail because of conflicting untracked file &&
+       >G.t &&
+       echo "merge -C H G" >script-from-scratch &&
+       test_config sequence.editor \""$PWD"/replace-editor.sh\" &&
+       test_tick &&
+       test_must_fail git rebase -ir HEAD &&
+       grep "^merge -C .* G$" .git/rebase-merge/done &&
+       grep "^merge -C .* G$" .git/rebase-merge/git-rebase-todo &&
+       test_path_is_file .git/rebase-merge/patch &&
+
+       : fail because of merge conflict &&
+       rm G.t .git/rebase-merge/patch &&
+       git reset --hard &&
+       test_commit conflicting-G G.t not-G conflicting-G &&
+       test_must_fail git rebase --continue &&
+       ! grep "^merge -C .* G$" .git/rebase-merge/git-rebase-todo &&
+       test_path_is_file .git/rebase-merge/patch
+'
+
+test_expect_success 'with a branch tip that was cherry-picked already' '
+       git checkout -b already-upstream master &&
+       base="$(git rev-parse --verify HEAD)" &&
+
+       test_commit A1 &&
+       test_commit A2 &&
+       git reset --hard $base &&
+       test_commit B1 &&
+       test_tick &&
+       git merge -m "Merge branch A" A2 &&
+
+       git checkout -b upstream-with-a2 $base &&
+       test_tick &&
+       git cherry-pick A2 &&
+
+       git checkout already-upstream &&
+       test_tick &&
+       git rebase -i -r upstream-with-a2 &&
+       test_cmp_graph upstream-with-a2.. <<-\EOF
+       *   Merge branch A
+       |\
+       | * A1
+       * | B1
+       |/
+       o A2
+       EOF
+'
+
+test_expect_success 'do not rebase cousins unless asked for' '
+       git checkout -b cousins master &&
+       before="$(git rev-parse --verify HEAD)" &&
+       test_tick &&
+       git rebase -r HEAD^ &&
+       test_cmp_rev HEAD $before &&
+       test_tick &&
+       git rebase --rebase-merges=rebase-cousins HEAD^ &&
+       test_cmp_graph HEAD^.. <<-\EOF
+       *   Merge the topic branch '\''onebranch'\''
+       |\
+       | * D
+       | * G
+       |/
+       o H
+       EOF
+'
+
+test_expect_success 'refs/rewritten/* is worktree-local' '
+       git worktree add wt &&
+       cat >wt/script-from-scratch <<-\EOF &&
+       label xyz
+       exec GIT_DIR=../.git git rev-parse --verify refs/rewritten/xyz >a || :
+       exec git rev-parse --verify refs/rewritten/xyz >b
+       EOF
+
+       test_config -C wt sequence.editor \""$PWD"/replace-editor.sh\" &&
+       git -C wt rebase -i HEAD &&
+       test_must_be_empty wt/a &&
+       test_cmp_rev HEAD "$(cat wt/b)"
+'
+
+test_expect_success 'post-rewrite hook and fixups work for merges' '
+       git checkout -b post-rewrite &&
+       test_commit same1 &&
+       git reset --hard HEAD^ &&
+       test_commit same2 &&
+       git merge -m "to fix up" same1 &&
+       echo same old same old >same2.t &&
+       test_tick &&
+       git commit --fixup HEAD same2.t &&
+       fixup="$(git rev-parse HEAD)" &&
+
+       mkdir -p .git/hooks &&
+       test_when_finished "rm .git/hooks/post-rewrite" &&
+       echo "cat >actual" | write_script .git/hooks/post-rewrite &&
+
+       test_tick &&
+       git rebase -i --autosquash -r HEAD^^^ &&
+       printf "%s %s\n%s %s\n%s %s\n%s %s\n" >expect $(git rev-parse \
+               $fixup^^2 HEAD^2 \
+               $fixup^^ HEAD^ \
+               $fixup^ HEAD \
+               $fixup HEAD) &&
+       test_cmp expect actual
+'
+
+test_expect_success 'refuse to merge ancestors of HEAD' '
+       echo "merge HEAD^" >script-from-scratch &&
+       test_config -C wt sequence.editor \""$PWD"/replace-editor.sh\" &&
+       before="$(git rev-parse HEAD)" &&
+       git rebase -i HEAD &&
+       test_cmp_rev HEAD $before
+'
+
+test_done
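The new t3430 script drives "git rebase -i --rebase-merges" (also spelled -r) through hand-written todo lists. Beyond the usual pick, those lists rely on three instructions: label stores the current HEAD under refs/rewritten/<name>, reset moves back to a stored label (or any revision), and merge recreates a merge commit, reusing the original commit's message when given -C. A small list in that vocabulary; the commit names are placeholders, not the ones used in the test:

    label onto
    pick <C> C
    label branch-point
    pick <F> F
    label topic
    reset branch-point        # replay the other side of the merge
    pick <D> D
    merge -C <E> topic        # recreate merge commit E, merging the "topic" label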
index 783bdbf59db07ebf2afac83165d31b86fd6b95db..d1c68af8c50e75255baa618ea297549b548c0fc2 100755 (executable)
@@ -86,7 +86,7 @@ test_expect_success 'cherry-pick on stat-dirty working tree' '
        (
                cd copy &&
                git checkout initial &&
-               test-chmtime +40 oops &&
+               test-tool chmtime +40 oops &&
                git cherry-pick added
        )
 '
@@ -141,7 +141,7 @@ test_expect_success 'cherry-pick "-" works with arguments' '
        test_cmp expect actual
 '
 
-test_expect_failure 'cherry-pick works with dirty renamed file' '
+test_expect_success 'cherry-pick works with dirty renamed file' '
        test_commit to-rename &&
        git checkout -b unrelated &&
        test_commit unrelated &&
@@ -150,9 +150,8 @@ test_expect_failure 'cherry-pick works with dirty renamed file' '
        test_tick &&
        git commit -m renamed &&
        echo modified >renamed &&
-       test_must_fail git cherry-pick refs/heads/unrelated >out &&
-       test_i18ngrep "Refusing to lose dirty file at renamed" out &&
-       test $(git rev-parse :0:renamed) = $(git rev-parse HEAD^:to-rename.t) &&
+       git cherry-pick refs/heads/unrelated >out &&
+       test $(git rev-parse :0:renamed) = $(git rev-parse HEAD~2:to-rename.t) &&
        grep -q "^modified$" renamed
 '
 
index 0acf4b14614c750d3c2324d8a941aa77d5c7ad56..21b4f194a2466b64023017b9711473f15187c638 100755 (executable)
@@ -247,9 +247,9 @@ test_expect_success '--abort after last commit in sequence' '
 test_expect_success 'cherry-pick does not implicitly stomp an existing operation' '
        pristine_detach initial &&
        test_expect_code 1 git cherry-pick base..anotherpick &&
-       test-chmtime -v +0 .git/sequencer >expect &&
+       test-tool chmtime --get .git/sequencer >expect &&
        test_expect_code 128 git cherry-pick unrelatedpick &&
-       test-chmtime -v +0 .git/sequencer >actual &&
+       test-tool chmtime --get .git/sequencer >actual &&
        test_cmp expect actual
 '
 
index 46f15169f55c03742717f36f8d0e99241391b2ea..b8fbdefcdc34ffa6fb54fc1ad375a583a194fb38 100755 (executable)
@@ -232,7 +232,7 @@ test_expect_success 'Call "rm" from outside the work tree' '
 test_expect_success 'refresh index before checking if it is up-to-date' '
 
        git reset --hard &&
-       test-chmtime -86400 frotz/nitfol &&
+       test-tool chmtime -86400 frotz/nitfol &&
        git rm frotz/nitfol &&
        test ! -f frotz/nitfol
 
index 2748805642201d7c514792bab8d8b3940fb4086c..07af05d7aee65c51245431292a21ec38eef19f53 100755 (executable)
@@ -187,7 +187,7 @@ test_expect_success 'git add --refresh with pathspec' '
        echo >foo && echo >bar && echo >baz &&
        git add foo bar baz && H=$(git rev-parse :foo) && git rm -f foo &&
        echo "100644 $H 3       foo" | git update-index --index-info &&
-       test-chmtime -60 bar baz &&
+       test-tool chmtime -60 bar baz &&
        >expect &&
        git add --refresh bar >actual &&
        test_cmp expect actual &&
index bfde4057ad2afcdd3bd38cbb43ffa2ce241aaa67..3ea5b9bb3ff0a4e439b1fc6cd8f9df86386f4126 100755 (executable)
@@ -228,4 +228,56 @@ test_expect_success 'stash previously ignored file' '
        test_path_is_file ignored.d/foo
 '
 
+test_expect_success 'stash -u -- <untracked> doesnt print error' '
+       >untracked &&
+       git stash push -u -- untracked 2>actual &&
+       test_path_is_missing untracked &&
+       test_line_count = 0 actual
+'
+
+test_expect_success 'stash -u -- <untracked> leaves rest of working tree in place' '
+       >tracked &&
+       git add tracked &&
+       >untracked &&
+       git stash push -u -- untracked &&
+       test_path_is_missing untracked &&
+       test_path_is_file tracked
+'
+
+test_expect_success 'stash -u -- <tracked> <untracked> clears changes in both' '
+       >tracked &&
+       git add tracked &&
+       >untracked &&
+       git stash push -u -- tracked untracked &&
+       test_path_is_missing tracked &&
+       test_path_is_missing untracked
+'
+
+test_expect_success 'stash --all -- <ignored> stashes ignored file' '
+       >ignored.d/bar &&
+       git stash push --all -- ignored.d/bar &&
+       test_path_is_missing ignored.d/bar
+'
+
+test_expect_success 'stash --all -- <tracked> <ignored> clears changes in both' '
+       >tracked &&
+       git add tracked &&
+       >ignored.d/bar &&
+       git stash push --all -- tracked ignored.d/bar &&
+       test_path_is_missing tracked &&
+       test_path_is_missing ignored.d/bar
+'
+
+test_expect_success 'stash -u -- <ignored> leaves ignored file alone' '
+       >ignored.d/bar &&
+       git stash push -u -- ignored.d/bar &&
+       test_path_is_file ignored.d/bar
+'
+
+test_expect_success 'stash -u -- <non-existent> shows no changes when there are none' '
+	git stash push -u -- non-existent >actual &&
+       echo "No local changes to save" >expect &&
+       test_i18ncmp expect actual
+'
+
 test_done
index a07816d5605f30ccb7f7ce6fe83288fff535538c..bf4030371a9fc8f5abe7e88d84c98a097b9821dd 100755 (executable)
@@ -138,6 +138,18 @@ test_expect_success 'favour same basenames over different ones' '
        test_i18ngrep "renamed: .*path1 -> subdir/path1" out
 '
 
+test_expect_success 'test diff.renames=true for git status' '
+       git -c diff.renames=true status >out &&
+       test_i18ngrep "renamed: .*path1 -> subdir/path1" out
+'
+
+test_expect_success 'test diff.renames=false for git status' '
+       git -c diff.renames=false status >out &&
+       test_i18ngrep ! "renamed: .*path1 -> subdir/path1" out &&
+       test_i18ngrep "new file: .*subdir/path1" out &&
+       test_i18ngrep "deleted: .*[^/]path1" out
+'
+
 test_expect_success 'favour same basenames even with minor differences' '
        git show HEAD:path1 | sed "s/15/16/" > subdir/path1 &&
        git status >out &&
index 13e7f621ab79f95cc7c3057d9de5710813049102..cf0f3a1ee75dd28c6b5ce2a5db7c3ad6afafd21f 100755 (executable)
@@ -73,7 +73,7 @@ test_expect_success 'diff identical, but newly created symlink and file' '
        >expected &&
        rm -f frotz nitfol &&
        echo xyzzy >nitfol &&
-       test-chmtime +10 nitfol &&
+       test-tool chmtime +10 nitfol &&
        if test_have_prereq SYMLINKS
        then
                ln -s xyzzy frotz
index 3f9a24fd56c801d1a75abb6cc4f4e8928c2dc427..f8d853595b99bfff64521a502ee062b7e5f35e5f 100755 (executable)
@@ -76,7 +76,7 @@ test_expect_success setup '
 
        mkdir dir3 &&
        cp dir/sub dir3/sub &&
-       test-chmtime +1 dir3/sub &&
+       test-tool chmtime +1 dir3/sub &&
 
        git config log.showroot false &&
        git commit --amend &&
index 482112ca339f05fc31e6f9c2c6168971a1121e87..6ea08fd5e9c21bc1efe9726c8745dd1397c3ddab 100755 (executable)
@@ -1661,6 +1661,15 @@ test_expect_success 'format-patch --base with --attach' '
        test_write_lines 1 2 >expect &&
        test_cmp expect actual
 '
+test_expect_success 'format-patch --attach cover-letter only is non-multipart' '
+       test_when_finished "rm -fr patches" &&
+       git format-patch -o patches --cover-letter --attach=mimemime --base=HEAD~ -1 &&
+       ! egrep "^--+mimemime" patches/0000*.patch &&
+       egrep "^--+mimemime$" patches/0001*.patch >output &&
+       test_line_count = 2 output &&
+       egrep "^--+mimemime--$" patches/0001*.patch >output &&
+       test_line_count = 1 output
+'
 
 test_expect_success 'format-patch --pretty=mboxrd' '
        sp=" " &&
index 2f1737fcef185486dc626d616c37fdadc11deba1..0352bf81a90a38adf14fb7a980c98600e1f650b2 100755 (executable)
@@ -147,7 +147,7 @@ test_expect_success 'git diff --ignore-all-space, both files outside repo' '
 '
 
 test_expect_success 'git diff --quiet ignores stat-change only entries' '
-       test-chmtime +10 a &&
+       test-tool chmtime +10 a &&
        echo modified >>b &&
        test_expect_code 1 git diff --quiet
 '
index 16432781d2e0d52c33dfb1444fcce4459b417e8b..9d8d3c72e7efe68b8e0011c1ec9b77d9a0820b30 100755 (executable)
@@ -171,7 +171,7 @@ test_expect_success 'am --skip leaves index stat info alone' '
        git checkout -f --orphan skip-stat-info &&
        git reset &&
        test_commit skip-should-be-untouched &&
-       test-chmtime =0 skip-should-be-untouched.t &&
+       test-tool chmtime =0 skip-should-be-untouched.t &&
        git update-index --refresh &&
        git diff-files --exit-code --quiet &&
        test_must_fail git am 0001-*.patch &&
@@ -183,7 +183,7 @@ test_expect_success 'am --abort leaves index stat info alone' '
        git checkout -f --orphan abort-stat-info &&
        git reset &&
        test_commit abort-should-be-untouched &&
-       test-chmtime =0 abort-should-be-untouched.t &&
+       test-tool chmtime =0 abort-should-be-untouched.t &&
        git update-index --refresh &&
        git diff-files --exit-code --quiet &&
        test_must_fail git am 0001-*.patch &&
index d97d2bebc9850c8ba96a8263ed081fced2ecc153..eaf18c81cbffe012663fae3a537ef8302d6c7b31 100755 (executable)
@@ -166,7 +166,7 @@ test_expect_success 'first postimage wins' '
        git commit -q -a -m "prefer first over second" &&
        test -f $rr/postimage &&
 
-       oldmtimepost=$(test-chmtime -v -60 $rr/postimage | cut -f 1) &&
+       oldmtimepost=$(test-tool chmtime --get -60 $rr/postimage) &&
 
        git checkout -b third master &&
        git show second^:a1 | sed "s/To die: t/To die! T/" >a1 &&
@@ -179,7 +179,7 @@ test_expect_success 'first postimage wins' '
 '
 
 test_expect_success 'rerere updates postimage timestamp' '
-       newmtimepost=$(test-chmtime -v +0 $rr/postimage | cut -f 1) &&
+       newmtimepost=$(test-tool chmtime --get $rr/postimage) &&
        test $oldmtimepost -lt $newmtimepost
 '
 
@@ -220,9 +220,9 @@ test_expect_success 'set up for garbage collection tests' '
        almost_60_days_ago=$((60-60*86400)) &&
        just_over_60_days_ago=$((-1-60*86400)) &&
 
-       test-chmtime =$just_over_60_days_ago $rr/preimage &&
-       test-chmtime =$almost_60_days_ago $rr/postimage &&
-       test-chmtime =$almost_15_days_ago $rr2/preimage
+       test-tool chmtime =$just_over_60_days_ago $rr/preimage &&
+       test-tool chmtime =$almost_60_days_ago $rr/postimage &&
+       test-tool chmtime =$almost_15_days_ago $rr2/preimage
 '
 
 test_expect_success 'gc preserves young or recently used records' '
@@ -232,8 +232,8 @@ test_expect_success 'gc preserves young or recently used records' '
 '
 
 test_expect_success 'old records rest in peace' '
-       test-chmtime =$just_over_60_days_ago $rr/postimage &&
-       test-chmtime =$just_over_15_days_ago $rr2/preimage &&
+       test-tool chmtime =$just_over_60_days_ago $rr/postimage &&
+       test-tool chmtime =$just_over_15_days_ago $rr2/preimage &&
        git rerere gc &&
        ! test -f $rr/preimage &&
        ! test -f $rr2/preimage
@@ -249,8 +249,8 @@ rerere_gc_custom_expiry_test () {
                >"$rr/postimage" &&
 
                two_days_ago=$((-2*86400)) &&
-               test-chmtime =$two_days_ago "$rr/preimage" &&
-               test-chmtime =$two_days_ago "$rr/postimage" &&
+               test-tool chmtime =$two_days_ago "$rr/preimage" &&
+               test-tool chmtime =$two_days_ago "$rr/postimage" &&
 
                find .git/rr-cache -type f | sort >original &&
 
@@ -512,7 +512,7 @@ test_expect_success 'multiple identical conflicts' '
        count_pre_post 2 0 &&
 
        # Pretend that the conflicts were made quite some time ago
-       find .git/rr-cache/ -type f | xargs test-chmtime -172800 &&
+       test-tool chmtime -172800 $(find .git/rr-cache/ -type f) &&
 
        # Unresolved entries have not expired yet
        git -c gc.rerereresolved=5 -c gc.rerereunresolved=5 rerere gc &&
@@ -568,7 +568,7 @@ test_expect_success 'multiple identical conflicts' '
        git rerere &&
 
        # Pretend that the resolutions are old again
-       find .git/rr-cache/ -type f | xargs test-chmtime -172800 &&
+       test-tool chmtime -172800 $(find .git/rr-cache/ -type f) &&
 
        # Resolved entries have not expired yet
        git -c gc.rerereresolved=5 -c gc.rerereunresolved=5 rerere gc &&
index da10478f59da1a301edf7def229d37fbc964dce9..ff6649ed9a70721523da3c55142a9622b152243a 100755 (executable)
@@ -127,6 +127,11 @@ test_expect_success !MINGW 'shortlog can read --format=raw output' '
        test_cmp expect out
 '
 
+test_expect_success 'shortlog from non-git directory refuses extra arguments' '
+       test_must_fail env GIT_DIR=non-existing git shortlog foo 2>out &&
+       test_i18ngrep "too many arguments" out
+'
+
 test_expect_success 'shortlog should add newline when input line matches wraplen' '
        cat >expect <<\EOF &&
 A U Thor (2):
index fe2d4f15a73f082c516a03b1877c4cf82982138a..2a97b27b0a68f94ab7204764ced5fd1457da3d9c 100755 (executable)
@@ -101,7 +101,7 @@ test_expect_success \
      ten=0123456789 && hundred=$ten$ten$ten$ten$ten$ten$ten$ten$ten$ten &&
      echo long filename >a/four$hundred &&
      mkdir a/bin &&
-     test-genrandom "frotz" 500000 >a/bin/sh &&
+     test-tool genrandom "frotz" 500000 >a/bin/sh &&
      printf "A\$Format:%s\$O" "$SUBSTFORMAT" >a/substfile1 &&
      printf "A not substituted O" >a/substfile2 &&
      if test_have_prereq SYMLINKS; then
@@ -192,7 +192,7 @@ test_expect_success \
     'validate file modification time' \
     'mkdir extract &&
      "$TAR" xf b.tar -C extract a/a &&
-     test-chmtime -v +0 extract/a/a |cut -f 1 >b.mtime &&
+     test-tool chmtime --get extract/a/a >b.mtime &&
      echo "1117231200" >expected.mtime &&
      test_cmp expected.mtime b.mtime'
 
index 9c68b992511b7098df0570ca37e0add468a8a093..54eff03851dfcb6ba0d4a41a4d0a221edde43729 100755 (executable)
@@ -16,8 +16,8 @@ test_expect_success \
      perl -e "print \"a\" x 4096;" > a &&
      perl -e "print \"b\" x 4096;" > b &&
      perl -e "print \"c\" x 4096;" > c &&
-     test-genrandom "seed a" 2097152 > a_big &&
-     test-genrandom "seed b" 2097152 > b_big &&
+     test-tool genrandom "seed a" 2097152 > a_big &&
+     test-tool genrandom "seed b" 2097152 > b_big &&
      git update-index --add a a_big b b_big c &&
      cat c >d && echo foo >>d && git update-index --add d &&
      tree=$(git write-tree) &&
@@ -311,8 +311,8 @@ test_expect_success 'unpacking with --strict' '
        rm -f .git/index &&
        tail -n 10 LIST | git update-index --index-info &&
        ST=$(git write-tree) &&
-       PACK5=$( git rev-list --objects "$LIST" "$LI" "$ST" | \
-               git pack-objects test-5 ) &&
+       git rev-list --objects "$LIST" "$LI" "$ST" >actual &&
+       PACK5=$( git pack-objects test-5 <actual ) &&
        PACK6=$( (
                        echo "$LIST"
                        echo "$LI"
@@ -358,8 +358,8 @@ test_expect_success 'index-pack with --strict' '
        rm -f .git/index &&
        tail -n 10 LIST | git update-index --index-info &&
        ST=$(git write-tree) &&
-       PACK5=$( git rev-list --objects "$LIST" "$LI" "$ST" | \
-               git pack-objects test-5 ) &&
+       git rev-list --objects "$LIST" "$LI" "$ST" >actual &&
+       PACK5=$( git pack-objects test-5 <actual ) &&
        PACK6=$( (
                        echo "$LIST"
                        echo "$LI"
@@ -457,6 +457,11 @@ test_expect_success !PTHREADS,C_LOCALE_OUTPUT 'pack-objects --threads=N or pack.
        grep -F "no threads support, ignoring pack.threads" err
 '
 
+test_expect_success 'pack-objects in too-many-packs mode' '
+       GIT_TEST_FULL_IN_PACK_ARRAY=1 git repack -ad &&
+       git fsck
+'
+
 #
 # WARNING!
 #
index cae8c2e8822ccc1e464e3f5b71c99c1f6b1c2323..76f9798ab958cae2414cbcc496bbb22af85f0ac5 100755 (executable)
@@ -12,7 +12,7 @@ test_expect_success \
      for i in a b c
      do
          echo $i >$i &&
-         test-genrandom "$i" 32768 >>$i &&
+        test-tool genrandom "$i" 32768 >>$i &&
          git update-index --add $i || return 1
      done &&
      echo d >d && cat c >>d && git update-index --add d &&
index d695a6082edf69c6ab377ea825519097f84162f3..bb9b8bb3097c05f6e28c37fa24fa27d5bb5b805d 100755 (executable)
@@ -15,17 +15,17 @@ test_expect_success \
      while test $i -le 100
      do
          iii=$(printf '%03i' $i)
-         test-genrandom "bar" 200 > wide_delta_$iii &&
-         test-genrandom "baz $iii" 50 >> wide_delta_$iii &&
-         test-genrandom "foo"$i 100 > deep_delta_$iii &&
-         test-genrandom "foo"$(expr $i + 1) 100 >> deep_delta_$iii &&
-         test-genrandom "foo"$(expr $i + 2) 100 >> deep_delta_$iii &&
+        test-tool genrandom "bar" 200 > wide_delta_$iii &&
+        test-tool genrandom "baz $iii" 50 >> wide_delta_$iii &&
+        test-tool genrandom "foo"$i 100 > deep_delta_$iii &&
+        test-tool genrandom "foo"$(expr $i + 1) 100 >> deep_delta_$iii &&
+        test-tool genrandom "foo"$(expr $i + 2) 100 >> deep_delta_$iii &&
          echo $iii >file_$iii &&
-         test-genrandom "$iii" 8192 >>file_$iii &&
+        test-tool genrandom "$iii" 8192 >>file_$iii &&
          git update-index --add file_$iii deep_delta_$iii wide_delta_$iii &&
          i=$(expr $i + 1) || return 1
      done &&
-     { echo 101 && test-genrandom 100 8192; } >file_101 &&
+     { echo 101 && test-tool genrandom 100 8192; } >file_101 &&
      git update-index --add file_101 &&
      tree=$(git write-tree) &&
      commit=$(git commit-tree $tree </dev/null) && {
index 5940ce2084a6e9cc935a0e5784ff8b6bff8e035d..3634e258f8bf66c2c1917c1598f6f21486c08684 100755 (executable)
@@ -19,14 +19,14 @@ test_description='resilience to pack corruptions with redundant objects'
 # 3) object header is always 2 bytes.
 
 create_test_files() {
-    test-genrandom "foo" 2000 > file_1 &&
-    test-genrandom "foo" 1800 > file_2 &&
-    test-genrandom "foo" 1800 > file_3 &&
+    test-tool genrandom "foo" 2000 > file_1 &&
+    test-tool genrandom "foo" 1800 > file_2 &&
+    test-tool genrandom "foo" 1800 > file_3 &&
     echo " base " >> file_1 &&
     echo " delta1 " >> file_2 &&
     echo " delta delta2 " >> file_3 &&
-    test-genrandom "bar" 150 >> file_2 &&
-    test-genrandom "baz" 100 >> file_3
+    test-tool genrandom "bar" 150 >> file_2 &&
+    test-tool genrandom "baz" 100 >> file_3
 }
 
 create_new_pack() {
index 6694c19a1eecf10117b843bbacbc5bb47924c9c8..f20f03c1039256f0bc674e3969176a2c592919dd 100755 (executable)
@@ -15,7 +15,7 @@ add_blob() {
        BLOB_FILE=.git/objects/$(echo $BLOB | sed "s/^../&\//") &&
        verbose test $((1 + $before)) = $(git count-objects | sed "s/ .*//") &&
        test_path_is_file $BLOB_FILE &&
-       test-chmtime =+0 $BLOB_FILE
+       test-tool chmtime =+0 $BLOB_FILE
 }
 
 test_expect_success setup '
@@ -33,7 +33,7 @@ test_expect_success 'prune stale packs' '
        orig_pack=$(echo .git/objects/pack/*.pack) &&
        : > .git/objects/tmp_1.pack &&
        : > .git/objects/tmp_2.pack &&
-       test-chmtime =-86501 .git/objects/tmp_1.pack &&
+       test-tool chmtime =-86501 .git/objects/tmp_1.pack &&
        git prune --expire 1.day &&
        test_path_is_file $orig_pack &&
        test_path_is_file .git/objects/tmp_2.pack &&
@@ -47,7 +47,7 @@ test_expect_success 'prune --expire' '
        git prune --expire=1.hour.ago &&
        verbose test $((1 + $before)) = $(git count-objects | sed "s/ .*//") &&
        test_path_is_file $BLOB_FILE &&
-       test-chmtime =-86500 $BLOB_FILE &&
+       test-tool chmtime =-86500 $BLOB_FILE &&
        git prune --expire 1.day &&
        verbose test $before = $(git count-objects | sed "s/ .*//") &&
        test_path_is_missing $BLOB_FILE
@@ -57,11 +57,11 @@ test_expect_success 'prune --expire' '
 test_expect_success 'gc: implicit prune --expire' '
 
        add_blob &&
-       test-chmtime =-$((2*$week-30)) $BLOB_FILE &&
+       test-tool chmtime =-$((2*$week-30)) $BLOB_FILE &&
        git gc &&
        verbose test $((1 + $before)) = $(git count-objects | sed "s/ .*//") &&
        test_path_is_file $BLOB_FILE &&
-       test-chmtime =-$((2*$week+1)) $BLOB_FILE &&
+       test-tool chmtime =-$((2*$week+1)) $BLOB_FILE &&
        git gc &&
        verbose test $before = $(git count-objects | sed "s/ .*//") &&
        test_path_is_missing $BLOB_FILE
@@ -141,7 +141,7 @@ test_expect_success 'prune: do not prune heads listed as an argument' '
 test_expect_success 'gc --no-prune' '
 
        add_blob &&
-       test-chmtime =-$((5001*$day)) $BLOB_FILE &&
+       test-tool chmtime =-$((5001*$day)) $BLOB_FILE &&
        git config gc.pruneExpire 2.days.ago &&
        git gc --no-prune &&
        verbose test 1 = $(git count-objects | sed "s/ .*//") &&
@@ -163,7 +163,7 @@ test_expect_success 'gc respects gc.pruneExpire' '
 test_expect_success 'gc --prune=<date>' '
 
        add_blob &&
-       test-chmtime =-$((5001*$day)) $BLOB_FILE &&
+       test-tool chmtime =-$((5001*$day)) $BLOB_FILE &&
        git gc --prune=5002.days.ago &&
        test_path_is_file $BLOB_FILE &&
        git gc --prune=5000.days.ago &&
@@ -205,7 +205,7 @@ test_expect_success 'prune --expire=never' '
 
 test_expect_success 'gc: prune old objects after local clone' '
        add_blob &&
-       test-chmtime =-$((2*$week+1)) $BLOB_FILE &&
+       test-tool chmtime =-$((2*$week+1)) $BLOB_FILE &&
        git clone --no-hardlinks . aclone &&
        (
                cd aclone &&
@@ -320,4 +320,14 @@ test_expect_success 'prune: handle HEAD reflog in multiple worktrees' '
        test_cmp expected actual
 '
 
+test_expect_success 'prune: handle expire option correctly' '
+       test_must_fail git prune --expire 2>error &&
+       test_i18ngrep "requires a value" error &&
+
+       test_must_fail git prune --expire=nyah 2>error &&
+       test_i18ngrep "malformed expiration" error &&
+
+       git prune --no-expire
+'
+
 test_done
index 20e2473a03b645d690b25598bc0cb2e421034b6c..423c0a475f7e87b4c32abf60ef85ccbb898eca11 100755 (executable)
@@ -264,9 +264,9 @@ test_expect_success 'pack with missing parent' '
 '
 
 test_expect_success JGIT 'we can read jgit bitmaps' '
-       git clone . compat-jgit &&
+       git clone --bare . compat-jgit.git &&
        (
-               cd compat-jgit &&
+               cd compat-jgit.git &&
                rm -f .git/objects/pack/*.bitmap &&
                jgit gc &&
                git rev-list --test-bitmap HEAD
@@ -274,9 +274,9 @@ test_expect_success JGIT 'we can read jgit bitmaps' '
 '
 
 test_expect_success JGIT 'jgit can read our bitmaps' '
-       git clone . compat-us &&
+       git clone --bare . compat-us.git &&
        (
-               cd compat-us &&
+               cd compat-us.git &&
                git repack -adb &&
                # jgit gc will barf if it does not like our bitmaps
                jgit gc
@@ -284,7 +284,7 @@ test_expect_success JGIT 'jgit can read our bitmaps' '
 '
 
 test_expect_success 'splitting packs does not generate bogus bitmaps' '
-       test-genrandom foo $((1024 * 1024)) >rand &&
+       test-tool genrandom foo $((1024 * 1024)) >rand &&
        git add rand &&
        git commit -m "commit with big file" &&
        git -c pack.packSizeLimit=500k repack -adb &&
index 9372508c993e72ad99da004ca2400df81c721006..4fe4ad9d6166d9a82cb944fadfc0317b5052b973 100755 (executable)
@@ -163,8 +163,8 @@ test_expect_success 'bogus offset inside v2 extended table' '
 
 test_expect_success 'bogus OFS_DELTA in packfile' '
        # Generate a pack with a delta in it.
-       base=$(test-genrandom foo 3000 | git hash-object --stdin -w) &&
-       delta=$(test-genrandom foo 2000 | git hash-object --stdin -w) &&
+       base=$(test-tool genrandom foo 3000 | git hash-object --stdin -w) &&
+       delta=$(test-tool genrandom foo 2000 | git hash-object --stdin -w) &&
        do_pack "$base $delta" --delta-base-offset &&
        rm -f .git/objects/??/* &&
 
index f7dbdfb412f3ee139ce88e56d1cd8be166d3bbd2..f31995d3d28d9fcd590c91aed7686aef87e332af 100755 (executable)
@@ -73,7 +73,7 @@ make_pack () {
 }
 
 test_expect_success 'setup' '
-       test-genrandom base 4096 >base &&
+       test-tool genrandom base 4096 >base &&
        for i in one two
        do
                # we want shared content here to encourage deltas...
index 2ed479b712aed7a8f11c8495c0eba72788eb4f26..0f06c40eb13f31d8f06962dd1c88c5ff5810618f 100755 (executable)
@@ -47,7 +47,7 @@ test_description='pack-objects breaks long cross-pack delta chains'
 # repeatedly-modified file to generate the delta chain).
 
 test_expect_success 'create series of packs' '
-       test-genrandom foo 4096 >content &&
+       test-tool genrandom foo 4096 >content &&
        prev= &&
        for i in $(test_seq 1 10)
        do
diff --git a/t/t5318-commit-graph.sh b/t/t5318-commit-graph.sh
new file mode 100755 (executable)
index 0000000..a380419
--- /dev/null
@@ -0,0 +1,224 @@
+#!/bin/sh
+
+test_description='commit graph'
+. ./test-lib.sh
+
+test_expect_success 'setup full repo' '
+       mkdir full &&
+       cd "$TRASH_DIRECTORY/full" &&
+       git init &&
+       git config core.commitGraph true &&
+       objdir=".git/objects"
+'
+
+test_expect_success 'write graph with no packs' '
+       cd "$TRASH_DIRECTORY/full" &&
+       git commit-graph write --object-dir . &&
+       test_path_is_file info/commit-graph
+'
+
+test_expect_success 'create commits and repack' '
+       cd "$TRASH_DIRECTORY/full" &&
+       for i in $(test_seq 3)
+       do
+               test_commit $i &&
+               git branch commits/$i
+       done &&
+       git repack
+'
+
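+# Run the same command twice, once with commit-graph reads enabled and once
+# with them disabled, and verify that the output is identical.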
+graph_git_two_modes() {
+       git -c core.commitGraph=true $1 >output &&
+       git -c core.commitGraph=false $1 >expect &&
+       test_cmp expect output
+}
+
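+# Check that a handful of read-only commands (log, branch, merge-base) give
+# the same answers with and without the commit-graph in the repository
+# $TRASH_DIRECTORY/<dir>, comparing <branch> against <compare>.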
+graph_git_behavior() {
+       MSG=$1
+       DIR=$2
+       BRANCH=$3
+       COMPARE=$4
+       test_expect_success "check normal git operations: $MSG" '
+               cd "$TRASH_DIRECTORY/$DIR" &&
+               graph_git_two_modes "log --oneline $BRANCH" &&
+               graph_git_two_modes "log --topo-order $BRANCH" &&
+               graph_git_two_modes "log --graph $COMPARE..$BRANCH" &&
+               graph_git_two_modes "branch -vv" &&
+               graph_git_two_modes "merge-base -a $BRANCH $COMPARE"
+       '
+}
+
+graph_git_behavior 'no graph' full commits/3 commits/1
+
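+# Build the output we expect from "git commit-graph read" and compare it.
+# The header line shows the file header fields: 43475048 is the "CGPH"
+# signature in hex, followed by the format version, the hash version, the
+# number of chunks and a reserved byte.  The three required chunks are
+# oid_fanout, oid_lookup and commit_metadata; any extra chunk names are
+# passed in as $2 (e.g. "large_edges").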
+graph_read_expect() {
+       OPTIONAL=""
+       NUM_CHUNKS=3
+       if test -n "$2"
+       then
+               OPTIONAL=" $2"
+               NUM_CHUNKS=$((3 + $(echo "$2" | wc -w)))
+       fi
+       cat >expect <<- EOF
+       header: 43475048 1 1 $NUM_CHUNKS 0
+       num_commits: $1
+       chunks: oid_fanout oid_lookup commit_metadata$OPTIONAL
+       EOF
+       git commit-graph read >output &&
+       test_cmp expect output
+}
+
+test_expect_success 'write graph' '
+       cd "$TRASH_DIRECTORY/full" &&
+       graph1=$(git commit-graph write) &&
+       test_path_is_file $objdir/info/commit-graph &&
+       graph_read_expect "3"
+'
+
+graph_git_behavior 'graph exists' full commits/3 commits/1
+
+test_expect_success 'Add more commits' '
+       cd "$TRASH_DIRECTORY/full" &&
+       git reset --hard commits/1 &&
+       for i in $(test_seq 4 5)
+       do
+               test_commit $i &&
+               git branch commits/$i
+       done &&
+       git reset --hard commits/2 &&
+       for i in $(test_seq 6 7)
+       do
+               test_commit $i &&
+               git branch commits/$i
+       done &&
+       git reset --hard commits/2 &&
+       git merge commits/4 &&
+       git branch merge/1 &&
+       git reset --hard commits/4 &&
+       git merge commits/6 &&
+       git branch merge/2 &&
+       git reset --hard commits/3 &&
+       git merge commits/5 commits/7 &&
+       git branch merge/3 &&
+       git repack
+'
+
+# Current graph structure:
+#
+#   __M3___
+#  /   |   \
+# 3 M1 5 M2 7
+# |/  \|/  \|
+# 2    4    6
+# |___/____/
+# 1
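+#
+# (M1, M2 and M3 are the merge/1, merge/2 and merge/3 commits; M3 is an
+# octopus merge of commits 3, 5 and 7.)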
+
+test_expect_success 'write graph with merges' '
+       cd "$TRASH_DIRECTORY/full" &&
+       git commit-graph write &&
+       test_path_is_file $objdir/info/commit-graph &&
+       graph_read_expect "10" "large_edges"
+'
+
+graph_git_behavior 'merge 1 vs 2' full merge/1 merge/2
+graph_git_behavior 'merge 1 vs 3' full merge/1 merge/3
+graph_git_behavior 'merge 2 vs 3' full merge/2 merge/3
+
+test_expect_success 'Add one more commit' '
+       cd "$TRASH_DIRECTORY/full" &&
+       test_commit 8 &&
+       git branch commits/8 &&
+       ls $objdir/pack | grep idx >existing-idx &&
+       git repack &&
+       ls $objdir/pack | grep idx | grep -v --file=existing-idx >new-idx
+'
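+
+# existing-idx names the pack indexes that were present before this repack,
+# so new-idx ends up listing only the .idx created for commit 8; it is fed
+# to "git commit-graph write --stdin-packs" below, which should produce the
+# nine commits reachable from commit 8.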
+
+# Current graph structure:
+#
+#      8
+#      |
+#   __M3___
+#  /   |   \
+# 3 M1 5 M2 7
+# |/  \|/  \|
+# 2    4    6
+# |___/____/
+# 1
+
+graph_git_behavior 'mixed mode, commit 8 vs merge 1' full commits/8 merge/1
+graph_git_behavior 'mixed mode, commit 8 vs merge 2' full commits/8 merge/2
+
+test_expect_success 'write graph with new commit' '
+       cd "$TRASH_DIRECTORY/full" &&
+       git commit-graph write &&
+       test_path_is_file $objdir/info/commit-graph &&
+       graph_read_expect "11" "large_edges"
+'
+
+graph_git_behavior 'full graph, commit 8 vs merge 1' full commits/8 merge/1
+graph_git_behavior 'full graph, commit 8 vs merge 2' full commits/8 merge/2
+
+test_expect_success 'write graph with nothing new' '
+       cd "$TRASH_DIRECTORY/full" &&
+       git commit-graph write &&
+       test_path_is_file $objdir/info/commit-graph &&
+       graph_read_expect "11" "large_edges"
+'
+
+graph_git_behavior 'cleared graph, commit 8 vs merge 1' full commits/8 merge/1
+graph_git_behavior 'cleared graph, commit 8 vs merge 2' full commits/8 merge/2
+
+test_expect_success 'build graph from latest pack with closure' '
+       cd "$TRASH_DIRECTORY/full" &&
+       git commit-graph write --stdin-packs <new-idx &&
+       test_path_is_file $objdir/info/commit-graph &&
+       graph_read_expect "9" "large_edges"
+'
+
+graph_git_behavior 'graph from pack, commit 8 vs merge 1' full commits/8 merge/1
+graph_git_behavior 'graph from pack, commit 8 vs merge 2' full commits/8 merge/2
+
+test_expect_success 'build graph from commits with closure' '
+       cd "$TRASH_DIRECTORY/full" &&
+       git tag -a -m "merge" tag/merge merge/2 &&
+       git rev-parse tag/merge >commits-in &&
+       git rev-parse merge/1 >>commits-in &&
+       git commit-graph write --stdin-commits <commits-in &&
+       test_path_is_file $objdir/info/commit-graph &&
+       graph_read_expect "6"
+'
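+
+# merge/2 reaches commits 1, 2, 4 and 6, and merge/1 adds only itself, which
+# accounts for the six commits expected above.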
+
+graph_git_behavior 'graph from commits, commit 8 vs merge 1' full commits/8 merge/1
+graph_git_behavior 'graph from commits, commit 8 vs merge 2' full commits/8 merge/2
+
+test_expect_success 'build graph from commits with append' '
+       cd "$TRASH_DIRECTORY/full" &&
+       git rev-parse merge/3 | git commit-graph write --stdin-commits --append &&
+       test_path_is_file $objdir/info/commit-graph &&
+       graph_read_expect "10" "large_edges"
+'
+
+graph_git_behavior 'append graph, commit 8 vs merge 1' full commits/8 merge/1
+graph_git_behavior 'append graph, commit 8 vs merge 2' full commits/8 merge/2
+
+test_expect_success 'setup bare repo' '
+       cd "$TRASH_DIRECTORY" &&
+       git clone --bare --no-local full bare &&
+       cd bare &&
+       git config core.commitGraph true &&
+       baredir="./objects"
+'
+
+graph_git_behavior 'bare repo, commit 8 vs merge 1' bare commits/8 merge/1
+graph_git_behavior 'bare repo, commit 8 vs merge 2' bare commits/8 merge/2
+
+test_expect_success 'write graph in bare repo' '
+       cd "$TRASH_DIRECTORY/bare" &&
+       git commit-graph write &&
+       test_path_is_file $baredir/info/commit-graph &&
+       graph_read_expect "11" "large_edges"
+'
+
+graph_git_behavior 'bare repo with graph, commit 8 vs merge 1' bare commits/8 merge/1
+graph_git_behavior 'bare repo with graph, commit 8 vs merge 2' bare commits/8 merge/2
+
+test_done
index d375d7110d102d6b3ea194e4f09a9b5f391ed496..911eae1bf7518485ace0fbd417e3ea0bbb18a9cf 100755 (executable)
@@ -180,7 +180,7 @@ test_expect_success 'receive-pack runs auto-gc in remote repo' '
            # And create a file that follows the temporary object naming
            # convention for the auto-gc to remove
            : >.git/objects/tmp_test_object &&
-           test-chmtime =-1209601 .git/objects/tmp_test_object
+           test-tool chmtime =-1209601 .git/objects/tmp_test_object
        ) &&
        (
            cd parent &&
index 2b8c0bac7db47ef7b37024ecea95ed0e37d5364f..2762f420bc2c670b42eaefa0c1d33bc397fb4b2f 100755 (executable)
@@ -56,7 +56,7 @@ test_expect_success 'deleted branches have their tracking branches removed' '
 test_expect_success 'already deleted tracking branches ignored' '
        git branch -d -r origin/b3 &&
        git push origin :b3 >output 2>&1 &&
-       ! grep error output
+       ! grep "^error: " output
 '
 
 test_done
index da9ac0055721237f177d3d475e56ddb38b25eff1..ae5a530a2dc61a7926abfd471e3325d1638ca457 100755 (executable)
@@ -840,8 +840,8 @@ test_expect_success C_LOCALE_OUTPUT 'fetch aligned output' '
        test_commit looooooooooooong-tag &&
        (
                cd full-output &&
-               git -c fetch.output=full fetch origin 2>&1 | \
-                       grep -e "->" | cut -c 22- >../actual
+               git -c fetch.output=full fetch origin >actual 2>&1 &&
+               grep -e "->" actual | cut -c 22- >../actual
        ) &&
        cat >expect <<-\EOF &&
        master               -> origin/master
@@ -855,8 +855,8 @@ test_expect_success C_LOCALE_OUTPUT 'fetch compact output' '
        test_commit extraaa &&
        (
                cd compact &&
-               git -c fetch.output=compact fetch origin 2>&1 | \
-                       grep -e "->" | cut -c 22- >../actual
+               git -c fetch.output=compact fetch origin >actual 2>&1 &&
+               grep -e "->" actual | cut -c 22- >../actual
        ) &&
        cat >expect <<-\EOF &&
        master     -> origin/*
index 02106c9226605f3b241160a8b46e6dbf3a9d3fc0..6a949484d090ea2df02603f9d82bf4f203a799e9 100755 (executable)
@@ -10,6 +10,9 @@ test_expect_success setup '
        test_tick &&
        git commit -m initial &&
        git tag mark &&
+       git tag mark1.1 &&
+       git tag mark1.2 &&
+       git tag mark1.10 &&
        git show-ref --tags -d | sed -e "s/ /   /" >expected.tag &&
        (
                echo "$(git rev-parse HEAD)     HEAD"
@@ -39,6 +42,39 @@ test_expect_success 'ls-remote self' '
        test_cmp expected.all actual
 '
 
+test_expect_success 'ls-remote --sort="version:refname" --tags self' '
+       cat >expect <<-EOF &&
+       $(git rev-parse mark)   refs/tags/mark
+       $(git rev-parse mark1.1)        refs/tags/mark1.1
+       $(git rev-parse mark1.2)        refs/tags/mark1.2
+       $(git rev-parse mark1.10)       refs/tags/mark1.10
+       EOF
+       git ls-remote --sort="version:refname" --tags self >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'ls-remote --sort="-version:refname" --tags self' '
+       cat >expect <<-EOF &&
+       $(git rev-parse mark1.10)       refs/tags/mark1.10
+       $(git rev-parse mark1.2)        refs/tags/mark1.2
+       $(git rev-parse mark1.1)        refs/tags/mark1.1
+       $(git rev-parse mark)   refs/tags/mark
+       EOF
+       git ls-remote --sort="-version:refname" --tags self >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'ls-remote --sort="-refname" --tags self' '
+       cat >expect <<-EOF &&
+       $(git rev-parse mark1.2)        refs/tags/mark1.2
+       $(git rev-parse mark1.10)       refs/tags/mark1.10
+       $(git rev-parse mark1.1)        refs/tags/mark1.1
+       $(git rev-parse mark)   refs/tags/mark
+       EOF
+       git ls-remote --sort="-refname" --tags self >actual &&
+       test_cmp expect actual
+'
+
 test_expect_success 'dies when no remote specified and no default remotes found' '
        test_must_fail git ls-remote
 '
@@ -131,7 +167,7 @@ test_expect_success 'Report no-match with --exit-code' '
 
 test_expect_success 'Report match with --exit-code' '
        git ls-remote --exit-code other.git "refs/tags/*" >actual &&
-       git ls-remote . tags/mark >expect &&
+       git ls-remote . tags/mark* >expect &&
        test_cmp expect actual
 '
 
@@ -171,13 +207,17 @@ test_expect_success 'overrides work between mixed transfer/upload-pack hideRefs'
 '
 
 test_expect_success 'ls-remote --symref' '
-       cat >expect <<-\EOF &&
+       git fetch origin &&
+       cat >expect <<-EOF &&
        ref: refs/heads/master  HEAD
-       1bd44cb9d13204b0fe1958db0082f5028a16eb3a        HEAD
-       1bd44cb9d13204b0fe1958db0082f5028a16eb3a        refs/heads/master
-       1bd44cb9d13204b0fe1958db0082f5028a16eb3a        refs/remotes/origin/HEAD
-       1bd44cb9d13204b0fe1958db0082f5028a16eb3a        refs/remotes/origin/master
-       1bd44cb9d13204b0fe1958db0082f5028a16eb3a        refs/tags/mark
+       $(git rev-parse HEAD)   HEAD
+       $(git rev-parse refs/heads/master)      refs/heads/master
+       $(git rev-parse HEAD)   refs/remotes/origin/HEAD
+       $(git rev-parse refs/remotes/origin/master)     refs/remotes/origin/master
+       $(git rev-parse refs/tags/mark) refs/tags/mark
+       $(git rev-parse refs/tags/mark1.1)      refs/tags/mark1.1
+       $(git rev-parse refs/tags/mark1.10)     refs/tags/mark1.10
+       $(git rev-parse refs/tags/mark1.2)      refs/tags/mark1.2
        EOF
        git ls-remote --symref >actual &&
        test_cmp expect actual
index 177897ea0b1e00cc4ec0f0e43510411ab19f1681..3e8940eee5d5793c4b49b02586ab460b355a0f56 100755 (executable)
@@ -94,6 +94,9 @@ mk_child() {
 }
 
 check_push_result () {
+       test $# -ge 3 ||
+       error "bug in the test script: check_push_result requires at least 3 parameters"
+
        repo_name="$1"
        shift
 
@@ -553,10 +556,7 @@ test_expect_success 'branch.*.pushremote config order is irrelevant' '
 test_expect_success 'push with dry-run' '
 
        mk_test testrepo heads/master &&
-       (
-               cd testrepo &&
-               old_commit=$(git show-ref -s --verify refs/heads/master)
-       ) &&
+       old_commit=$(git -C testrepo show-ref -s --verify refs/heads/master) &&
        git push --dry-run testrepo : &&
        check_push_result testrepo $old_commit heads/master
 '
@@ -612,7 +612,7 @@ test_expect_success 'push does not update local refs on failure' '
        chmod +x testrepo/.git/hooks/pre-receive &&
        (
                cd child &&
-               git pull .. master
+               git pull .. master &&
                test_must_fail git push &&
                test $(git rev-parse master) != \
                        $(git rev-parse remotes/origin/master)
@@ -1418,7 +1418,7 @@ test_expect_success 'receive.denyCurrentBranch = updateInstead' '
                cd testrepo &&
                git reset --hard HEAD^ &&
                test $(git -C .. rev-parse HEAD^) = $(git rev-parse HEAD) &&
-               test-chmtime +100 path1
+               test-tool chmtime +100 path1
        ) &&
        git push testrepo master &&
        (
index 21340e89c9650e43fda9a6176c6fe814360edb8b..a2af693068fa455838c97df1fefe38bd630ceb2e 100755 (executable)
@@ -377,5 +377,17 @@ test_expect_success 'push status output scrubs password' '
        grep "^To $HTTPD_URL/smart/test_repo.git" status
 '
 
+test_expect_success 'colorize errors/hints' '
+       cd "$ROOT_PATH"/test_repo_clone &&
+       test_must_fail git -c color.transport=always -c color.advice=always \
+               -c color.push=always \
+               push origin origin/master^:master 2>act &&
+       test_decode_color <act >decoded &&
+       test_i18ngrep "<RED>.*rejected.*<RESET>" decoded &&
+       test_i18ngrep "<RED>error: failed to push some refs" decoded &&
+       test_i18ngrep "<YELLOW>hint: " decoded &&
+       test_i18ngrep ! "^hint: " decoded
+'
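+
+# test_decode_color rewrites ANSI color escapes as readable tokens such as
+# <RED>, <YELLOW> and <RESET>, which is what the test_i18ngrep patterns
+# above match against.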
+
 stop_httpd
 test_done
index 10cb0be2b7ea42e5a1767edcd3515f933e6ef4af..0b0e987fdb73fcd7d8f54c393ee7deb15a4b5b8c 100755 (executable)
@@ -44,7 +44,7 @@ test_pack_input_limit () {
 }
 
 test_expect_success "create known-size (1024 bytes) commit" '
-       test-genrandom foo 1024 >one-k &&
+       test-tool genrandom foo 1024 >one-k &&
        git add one-k &&
        test_commit one-k
 '
index 113c87007f31abced0d93b3702f0b54f50ff4679..faaa51ccc562545c18180410e68483019a80832d 100755 (executable)
@@ -39,7 +39,7 @@ test_expect_success 'push to repo path with path separator (colon)' '
        # so make it likely for us to generate a delta by having
        # a non-trivial file with multiple versions.
 
-       test-genrandom foo 4096 >file.bin &&
+       test-tool genrandom foo 4096 >file.bin &&
        git add file.bin &&
        git commit -m bin &&
 
index 8552184e741fe2465e746a3ac42d19edddb15576..6d7d88ccc906a4c73285550ec80cd3fe67a764ee 100755 (executable)
@@ -169,6 +169,17 @@ test_expect_success 'fetch changes via manual http-fetch' '
        test_cmp file clone2/file
 '
 
+test_expect_success 'manual http-fetch without -a works just as well' '
+       cp -R clone-tmpl clone3 &&
+
+       HEAD=$(git rev-parse --verify HEAD) &&
+       (cd clone3 &&
+        git http-fetch -w heads/master-new $HEAD $(git config remote.origin.url) &&
+        git checkout master-new &&
+        test $HEAD = $(git rev-parse --verify HEAD)) &&
+       test_cmp file clone3/file
+'
+
 test_expect_success 'http remote detects correct HEAD' '
        git push public master:other &&
        (cd clone &&
index 90e0d6f0fe935970c0941bfef2af39bb15d2f959..84a955770a017e68d0cc1e928929e5a043d64e86 100755 (executable)
@@ -3,10 +3,16 @@
 test_description='test git-http-backend'
 . ./test-lib.sh
 . "$TEST_DIRECTORY"/lib-httpd.sh
+
+if ! test_have_prereq CURL; then
+       skip_all='skipping raw http-backend tests, curl not available'
+       test_done
+fi
+
 start_httpd
 
 GET() {
-       curl --include "$HTTPD_URL/$SMART/repo.git/$1" >out 2>/dev/null &&
+       curl --include "$HTTPD_URL/$SMART/repo.git/$1" >out &&
        tr '\015' Q <out |
        sed '
                s/Q$//
@@ -19,7 +25,7 @@ GET() {
 POST() {
        curl --include --data "$2" \
        --header "Content-Type: application/x-$1-request" \
-       "$HTTPD_URL/smart/repo.git/$1" >out 2>/dev/null &&
+       "$HTTPD_URL/smart/repo.git/$1" >out &&
        tr '\015' Q <out |
        sed '
                s/Q$//
index 191d6d3a780325b6f3e294b6b42540569ef129d1..df822d9a3e9e7c7b4b7031ffa75716cd7ba6103a 100755 (executable)
@@ -21,7 +21,7 @@ test_expect_success CLONE_2GB 'setup' '
         do
                printf "Generating blob $i/$blobcount\r" >&2 &&
                printf "blob\nmark :$i\ndata $blobsize\n" &&
-               #test-genrandom $i $blobsize &&
+               #test-tool genrandom $i $blobsize &&
                printf "%-${blobsize}s" $i &&
                echo "M 100644 :$i $i" >> commit
                i=$(($i+1)) ||
diff --git a/t/t5701-git-serve.sh b/t/t5701-git-serve.sh
new file mode 100755 (executable)
index 0000000..011a579
--- /dev/null
@@ -0,0 +1,197 @@
+#!/bin/sh
+
+test_description='test git-serve and server commands'
+
+. ./test-lib.sh
+
+test_expect_success 'test capability advertisement' '
+       cat >expect <<-EOF &&
+       version 2
+       agent=git/$(git version | cut -d" " -f3)
+       ls-refs
+       fetch=shallow
+       server-option
+       0000
+       EOF
+
+       git serve --advertise-capabilities >out &&
+       test-pkt-line unpack <out >actual &&
+       test_cmp actual expect
+'
+
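+# test-pkt-line converts between pkt-line framing and plain text: "pack"
+# prefixes each input line with a four-hex-digit length (so "command=ls-refs"
+# becomes roughly "0014command=ls-refs"), while "unpack" strips the framing
+# again.  "0000" is a flush packet and "0001" is a delimiter packet that
+# separates a command's capabilities from its arguments.
+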
+test_expect_success 'stateless-rpc flag does not list capabilities' '
+       # Empty request
+       test-pkt-line pack >in <<-EOF &&
+       0000
+       EOF
+       git serve --stateless-rpc >out <in &&
+       test_must_be_empty out &&
+
+       # EOF
+       git serve --stateless-rpc >out &&
+       test_must_be_empty out
+'
+
+test_expect_success 'request invalid capability' '
+       test-pkt-line pack >in <<-EOF &&
+       foobar
+       0000
+       EOF
+       test_must_fail git serve --stateless-rpc 2>err <in &&
+       test_i18ngrep "unknown capability" err
+'
+
+test_expect_success 'request with no command' '
+       test-pkt-line pack >in <<-EOF &&
+       agent=git/test
+       0000
+       EOF
+       test_must_fail git serve --stateless-rpc 2>err <in &&
+       test_i18ngrep "no command requested" err
+'
+
+test_expect_success 'request invalid command' '
+       test-pkt-line pack >in <<-EOF &&
+       command=foo
+       agent=git/test
+       0000
+       EOF
+       test_must_fail git serve --stateless-rpc 2>err <in &&
+       test_i18ngrep "invalid command" err
+'
+
+# Test the basics of ls-refs
+#
+test_expect_success 'setup some refs and tags' '
+       test_commit one &&
+       git branch dev master &&
+       test_commit two &&
+       git symbolic-ref refs/heads/release refs/heads/master &&
+       git tag -a -m "annotated tag" annotated-tag
+'
+
+test_expect_success 'basics of ls-refs' '
+       test-pkt-line pack >in <<-EOF &&
+       command=ls-refs
+       0000
+       EOF
+
+       cat >expect <<-EOF &&
+       $(git rev-parse HEAD) HEAD
+       $(git rev-parse refs/heads/dev) refs/heads/dev
+       $(git rev-parse refs/heads/master) refs/heads/master
+       $(git rev-parse refs/heads/release) refs/heads/release
+       $(git rev-parse refs/tags/annotated-tag) refs/tags/annotated-tag
+       $(git rev-parse refs/tags/one) refs/tags/one
+       $(git rev-parse refs/tags/two) refs/tags/two
+       0000
+       EOF
+
+       git serve --stateless-rpc <in >out &&
+       test-pkt-line unpack <out >actual &&
+       test_cmp actual expect
+'
+
+test_expect_success 'basic ref-prefixes' '
+       test-pkt-line pack >in <<-EOF &&
+       command=ls-refs
+       0001
+       ref-prefix refs/heads/master
+       ref-prefix refs/tags/one
+       0000
+       EOF
+
+       cat >expect <<-EOF &&
+       $(git rev-parse refs/heads/master) refs/heads/master
+       $(git rev-parse refs/tags/one) refs/tags/one
+       0000
+       EOF
+
+       git serve --stateless-rpc <in >out &&
+       test-pkt-line unpack <out >actual &&
+       test_cmp actual expect
+'
+
+test_expect_success 'refs/heads prefix' '
+       test-pkt-line pack >in <<-EOF &&
+       command=ls-refs
+       0001
+       ref-prefix refs/heads/
+       0000
+       EOF
+
+       cat >expect <<-EOF &&
+       $(git rev-parse refs/heads/dev) refs/heads/dev
+       $(git rev-parse refs/heads/master) refs/heads/master
+       $(git rev-parse refs/heads/release) refs/heads/release
+       0000
+       EOF
+
+       git serve --stateless-rpc <in >out &&
+       test-pkt-line unpack <out >actual &&
+       test_cmp actual expect
+'
+
+test_expect_success 'peel parameter' '
+       test-pkt-line pack >in <<-EOF &&
+       command=ls-refs
+       0001
+       peel
+       ref-prefix refs/tags/
+       0000
+       EOF
+
+       cat >expect <<-EOF &&
+       $(git rev-parse refs/tags/annotated-tag) refs/tags/annotated-tag peeled:$(git rev-parse refs/tags/annotated-tag^{})
+       $(git rev-parse refs/tags/one) refs/tags/one
+       $(git rev-parse refs/tags/two) refs/tags/two
+       0000
+       EOF
+
+       git serve --stateless-rpc <in >out &&
+       test-pkt-line unpack <out >actual &&
+       test_cmp actual expect
+'
+
+test_expect_success 'symrefs parameter' '
+       test-pkt-line pack >in <<-EOF &&
+       command=ls-refs
+       0001
+       symrefs
+       ref-prefix refs/heads/
+       0000
+       EOF
+
+       cat >expect <<-EOF &&
+       $(git rev-parse refs/heads/dev) refs/heads/dev
+       $(git rev-parse refs/heads/master) refs/heads/master
+       $(git rev-parse refs/heads/release) refs/heads/release symref-target:refs/heads/master
+       0000
+       EOF
+
+       git serve --stateless-rpc <in >out &&
+       test-pkt-line unpack <out >actual &&
+       test_cmp actual expect
+'
+
+test_expect_success 'sending server-options' '
+       test-pkt-line pack >in <<-EOF &&
+       command=ls-refs
+       server-option=hello
+       server-option=world
+       0001
+       ref-prefix HEAD
+       0000
+       EOF
+
+       cat >expect <<-EOF &&
+       $(git rev-parse HEAD) HEAD
+       0000
+       EOF
+
+       git serve --stateless-rpc <in >out &&
+       test-pkt-line unpack <out >actual &&
+       test_cmp actual expect
+'
+
+test_done
diff --git a/t/t5702-protocol-v2.sh b/t/t5702-protocol-v2.sh
new file mode 100755 (executable)
index 0000000..dbfd069
--- /dev/null
@@ -0,0 +1,305 @@
+#!/bin/sh
+
+test_description='test git wire-protocol version 2'
+
+TEST_NO_CREATE_REPO=1
+
+. ./test-lib.sh
+
+# Test protocol v2 with 'git://' transport
+#
+. "$TEST_DIRECTORY"/lib-git-daemon.sh
+start_git_daemon --export-all --enable=receive-pack
+daemon_parent=$GIT_DAEMON_DOCUMENT_ROOT_PATH/parent
+
+test_expect_success 'create repo to be served by git-daemon' '
+       git init "$daemon_parent" &&
+       test_commit -C "$daemon_parent" one
+'
+
+test_expect_success 'list refs with git:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+               ls-remote --symref "$GIT_DAEMON_URL/parent" >actual &&
+
+       # Client requested to use protocol v2
+       grep "git> .*\\\0\\\0version=2\\\0$" log &&
+       # Server responded using protocol v2
+       grep "git< version 2" log &&
+
+       git ls-remote --symref "$GIT_DAEMON_URL/parent" >expect &&
+       test_cmp actual expect
+'
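+
+# On git:// the client appends extra parameters after two NUL bytes, so a v2
+# request line in the packet trace looks something like
+# "git-upload-pack /parent\0host=...\0\0version=2\0"; the greps above and
+# below look for that "\0\0version=2\0" tail.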
+
+test_expect_success 'ref advertisement is filtered with ls-remote using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+               ls-remote "$GIT_DAEMON_URL/parent" master >actual &&
+
+       cat >expect <<-EOF &&
+       $(git -C "$daemon_parent" rev-parse refs/heads/master)$(printf "\t")refs/heads/master
+       EOF
+
+       test_cmp actual expect
+'
+
+test_expect_success 'clone with git:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+               clone "$GIT_DAEMON_URL/parent" daemon_child &&
+
+       git -C daemon_child log -1 --format=%s >actual &&
+       git -C "$daemon_parent" log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       # Client requested to use protocol v2
+       grep "clone> .*\\\0\\\0version=2\\\0$" log &&
+       # Server responded using protocol v2
+       grep "clone< version 2" log
+'
+
+test_expect_success 'fetch with git:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       test_commit -C "$daemon_parent" two &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -C daemon_child -c protocol.version=2 \
+               fetch &&
+
+       git -C daemon_child log -1 --format=%s origin/master >actual &&
+       git -C "$daemon_parent" log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       # Client requested to use protocol v2
+       grep "fetch> .*\\\0\\\0version=2\\\0$" log &&
+       # Server responded using protocol v2
+       grep "fetch< version 2" log
+'
+
+test_expect_success 'pull with git:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -C daemon_child -c protocol.version=2 \
+               pull &&
+
+       git -C daemon_child log -1 --format=%s >actual &&
+       git -C "$daemon_parent" log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       # Client requested to use protocol v2
+       grep "fetch> .*\\\0\\\0version=2\\\0$" log &&
+       # Server responded using protocol v2
+       grep "fetch< version 2" log
+'
+
+test_expect_success 'push with git:// and a config of v2 does not request v2' '
+       test_when_finished "rm -f log" &&
+
+       # Until protocol v2 for push is designed, make sure that a client
+       # with protocol.version configured to use v2 falls back to v0
+       # instead.
+
+       test_commit -C daemon_child three &&
+
+       # Push to another branch, as the target repository has the
+       # master branch checked out and we cannot push into it.
+       GIT_TRACE_PACKET="$(pwd)/log" git -C daemon_child -c protocol.version=2 \
+               push origin HEAD:client_branch &&
+
+       git -C daemon_child log -1 --format=%s >actual &&
+       git -C "$daemon_parent" log -1 --format=%s client_branch >expect &&
+       test_cmp expect actual &&
+
+       # Client did not request to use protocol v2
+       ! grep "push> .*\\\0\\\0version=2\\\0$" log &&
+       # Server did not respond using protocol v2
+       ! grep "push< version 2" log
+'
+
+stop_git_daemon
+
+# Test protocol v2 with 'file://' transport
+#
+test_expect_success 'create repo to be served by file:// transport' '
+       git init file_parent &&
+       test_commit -C file_parent one
+'
+
+test_expect_success 'list refs with file:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+               ls-remote --symref "file://$(pwd)/file_parent" >actual &&
+
+       # Server responded using protocol v2
+       grep "git< version 2" log &&
+
+       git ls-remote --symref "file://$(pwd)/file_parent" >expect &&
+       test_cmp actual expect
+'
+
+test_expect_success 'ref advertisement is filtered with ls-remote using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+               ls-remote "file://$(pwd)/file_parent" master >actual &&
+
+       cat >expect <<-EOF &&
+       $(git -C file_parent rev-parse refs/heads/master)$(printf "\t")refs/heads/master
+       EOF
+
+       test_cmp actual expect
+'
+
+test_expect_success 'server-options are sent when using ls-remote' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+               ls-remote -o hello -o world "file://$(pwd)/file_parent" master >actual &&
+
+       cat >expect <<-EOF &&
+       $(git -C file_parent rev-parse refs/heads/master)$(printf "\t")refs/heads/master
+       EOF
+
+       test_cmp actual expect &&
+       grep "server-option=hello" log &&
+       grep "server-option=world" log
+'
+
+
+test_expect_success 'clone with file:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+               clone "file://$(pwd)/file_parent" file_child &&
+
+       git -C file_child log -1 --format=%s >actual &&
+       git -C file_parent log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       # Server responded using protocol v2
+       grep "clone< version 2" log
+'
+
+test_expect_success 'fetch with file:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       test_commit -C file_parent two &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -C file_child -c protocol.version=2 \
+               fetch origin &&
+
+       git -C file_child log -1 --format=%s origin/master >actual &&
+       git -C file_parent log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       # Server responded using protocol v2
+       grep "fetch< version 2" log
+'
+
+test_expect_success 'ref advertisement is filtered during fetch using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       test_commit -C file_parent three &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -C file_child -c protocol.version=2 \
+               fetch origin master &&
+
+       git -C file_child log -1 --format=%s origin/master >actual &&
+       git -C file_parent log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       ! grep "refs/tags/one" log &&
+       ! grep "refs/tags/two" log &&
+       ! grep "refs/tags/three" log
+'
+
+test_expect_success 'server-options are sent when fetching' '
+       test_when_finished "rm -f log" &&
+
+       test_commit -C file_parent four &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -C file_child -c protocol.version=2 \
+               fetch -o hello -o world origin master &&
+
+       git -C file_child log -1 --format=%s origin/master >actual &&
+       git -C file_parent log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       grep "server-option=hello" log &&
+       grep "server-option=world" log
+'
+
+# Test protocol v2 with 'http://' transport
+#
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'create repo to be served by http:// transport' '
+       git init "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" &&
+       git -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" config http.receivepack true &&
+       test_commit -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" one
+'
+
+test_expect_success 'clone with http:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" GIT_TRACE_CURL="$(pwd)/log" git -c protocol.version=2 \
+               clone "$HTTPD_URL/smart/http_parent" http_child &&
+
+       git -C http_child log -1 --format=%s >actual &&
+       git -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       # Client requested to use protocol v2
+       grep "Git-Protocol: version=2" log &&
+       # Server responded using protocol v2
+       grep "git< version 2" log
+'
+
+test_expect_success 'fetch with http:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       test_commit -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" two &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -C http_child -c protocol.version=2 \
+               fetch &&
+
+       git -C http_child log -1 --format=%s origin/master >actual &&
+       git -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       # Server responded using protocol v2
+       grep "git< version 2" log
+'
+
+test_expect_success 'push with http:// and a config of v2 does not request v2' '
+       test_when_finished "rm -f log" &&
+       # Until protocol v2 for push is designed, make sure that a client
+       # with protocol.version configured to use v2 falls back to v0
+       # instead.
+
+       test_commit -C http_child three &&
+
+       # Push to another branch, as the target repository has the
+       # master branch checked out and we cannot push into it.
+       GIT_TRACE_PACKET="$(pwd)/log" git -C http_child -c protocol.version=2 \
+               push origin HEAD:client_branch &&
+
+       git -C http_child log -1 --format=%s >actual &&
+       git -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" log -1 --format=%s client_branch >expect &&
+       test_cmp expect actual &&
+
+       # Client did not request to use protocol v2
+       ! grep "Git-Protocol: version=2" log &&
+       # Server did not respond using protocol v2
+       ! grep "git< version 2" log
+'
+
+
+stop_httpd
+
+test_done
index 05ddc69cf2ad9470a125fda47fc450401f94902b..7504ba47511bf5df27dd54d7090fdd920285db07 100755 (executable)
@@ -110,4 +110,13 @@ do
        "
 
 done
+
+test_expect_success 'show advice that grafts are deprecated' '
+       git show HEAD 2>err &&
+       test_i18ngrep "git replace" err &&
+       test_config advice.graftFileDeprecated false &&
+       git show HEAD 2>err &&
+       test_i18ngrep ! "git replace" err
+'
+
 test_done
index c01f721f13dba71fae681d6ba5100e02e58de4d8..b760c223c6a56609b7dace5353671aec8243b85d 100755 (executable)
@@ -247,7 +247,7 @@ test_expect_success 'merge of identical changes in a renamed file' '
        git reset --hard HEAD^ &&
        git checkout change &&
        GIT_MERGE_VERBOSITY=3 git merge change+rename >out &&
-       test_i18ngrep "^Skipped B" out
+       test_i18ngrep "^Skipped B" out
 '
 
 test_expect_success 'setup for rename + d/f conflicts' '
@@ -635,10 +635,9 @@ test_expect_success 'setup avoid unnecessary update, normal rename' '
 
 test_expect_success 'avoid unnecessary update, normal rename' '
        git checkout -q avoid-unnecessary-update-1^0 &&
-       test-chmtime =1000000000 rename &&
-       test-chmtime -v +0 rename >expect &&
+       test-tool chmtime --get =1000000000 rename >expect &&
        git merge merge-branch-1 &&
-       test-chmtime -v +0 rename >actual &&
+       test-tool chmtime --get rename >actual &&
        test_cmp expect actual # "rename" should have stayed intact
 '
 
@@ -668,10 +667,9 @@ test_expect_success 'setup to test avoiding unnecessary update, with D/F conflic
 
 test_expect_success 'avoid unnecessary update, with D/F conflict' '
        git checkout -q avoid-unnecessary-update-2^0 &&
-       test-chmtime =1000000000 df &&
-       test-chmtime -v +0 df >expect &&
+       test-tool chmtime --get =1000000000 df >expect &&
        git merge merge-branch-2 &&
-       test-chmtime -v +0 df >actual &&
+       test-tool chmtime --get df >actual &&
        test_cmp expect actual # "df" should have stayed intact
 '
 
@@ -700,10 +698,9 @@ test_expect_success 'setup avoid unnecessary update, dir->(file,nothing)' '
 
 test_expect_success 'avoid unnecessary update, dir->(file,nothing)' '
        git checkout -q master^0 &&
-       test-chmtime =1000000000 df &&
-       test-chmtime -v +0 df >expect &&
+       test-tool chmtime --get =1000000000 df >expect &&
        git merge side &&
-       test-chmtime -v +0 df >actual &&
+       test-tool chmtime --get df >actual &&
        test_cmp expect actual # "df" should have stayed intact
 '
 
@@ -730,10 +727,9 @@ test_expect_success 'setup avoid unnecessary update, modify/delete' '
 
 test_expect_success 'avoid unnecessary update, modify/delete' '
        git checkout -q master^0 &&
-       test-chmtime =1000000000 file &&
-       test-chmtime -v +0 file >expect &&
+       test-tool chmtime --get =1000000000 file >expect &&
        test_must_fail git merge side &&
-       test-chmtime -v +0 file >actual &&
+       test-tool chmtime --get file >actual &&
        test_cmp expect actual # "file" should have stayed intact
 '
 
@@ -759,10 +755,9 @@ test_expect_success 'setup avoid unnecessary update, rename/add-dest' '
 
 test_expect_success 'avoid unnecessary update, rename/add-dest' '
        git checkout -q master^0 &&
-       test-chmtime =1000000000 newfile &&
-       test-chmtime -v +0 newfile >expect &&
+       test-tool chmtime --get =1000000000 newfile >expect &&
        git merge side &&
-       test-chmtime -v +0 newfile >actual &&
+       test-tool chmtime --get newfile >actual &&
        test_cmp expect actual # "file" should have stayed intact
 '
 
diff --git a/t/t6043-merge-rename-directories.sh b/t/t6043-merge-rename-directories.sh
new file mode 100755 (executable)
index 0000000..2e28f29
--- /dev/null
@@ -0,0 +1,3998 @@
+#!/bin/sh
+
+test_description="recursive merge with directory renames"
+# includes checking of many corner cases, with a similar methodology to:
+#   t6042: corner cases with renames but not criss-cross merges
+#   t6036: corner cases with both renames and criss-cross merges
+#
+# The setup for all of them, pictorially, is:
+#
+#      A
+#      o
+#     / \
+#  O o   ?
+#     \ /
+#      o
+#      B
+#
+# To help make it easier to follow the flow of tests, they have been
+# divided into sections and each test will start with a quick explanation
+# of what commits O, A, and B contain.
+#
+# Notation:
+#    z/{b,c}   means  files z/b and z/c both exist
+#    x/d_1     means  file x/d exists with content d1.  (Purpose of the
+#                     underscore notation is to differentiate different
+#                     files that might be renamed into each other's paths.)
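+#
+#    For example, "Commit A: y/{b,c}" means commit A contains exactly the
+#    files y/b and y/c, and y/wham_1 vs. z/wham_2 are two different files
+#    that both want to end up at the same path after the renames.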
+
+. ./test-lib.sh
+
+
+###########################################################################
+# SECTION 1: Basic cases we should be able to handle
+###########################################################################
+
+# Testcase 1a, Basic directory rename.
+#   Commit O: z/{b,c}
+#   Commit A: y/{b,c}
+#   Commit B: z/{b,c,d,e/f}
+#   Expected: y/{b,c,d,e/f}
+
+test_expect_success '1a-setup: Simple directory rename detection' '
+       test_create_repo 1a &&
+       (
+               cd 1a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo d >z/d &&
+               mkdir z/e &&
+               echo f >z/e/f &&
+               git add z/d z/e/f &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1a-check: Simple directory rename detection' '
+       (
+               cd 1a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d HEAD:y/e/f &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:z/d    B:z/e/f &&
+               test_cmp expect actual &&
+
+               git hash-object y/d >actual &&
+               git rev-parse B:z/d >expect &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse HEAD:z/d &&
+               test_must_fail git rev-parse HEAD:z/e/f &&
+               test_path_is_missing z/d &&
+               test_path_is_missing z/e/f
+       )
+'
+
+# Testcase 1b, Merge a directory with another
+#   Commit O: z/{b,c},   y/d
+#   Commit A: z/{b,c,e}, y/d
+#   Commit B: y/{b,c,d}
+#   Expected: y/{b,c,d,e}
+
+test_expect_success '1b-setup: Merge a directory with another' '
+       test_create_repo 1b &&
+       (
+               cd 1b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir y &&
+               echo d >y/d &&
+               git add z y &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo e >z/e &&
+               git add z/e &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z/b y &&
+               git mv z/c y &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1b-check: Merge a directory with another' '
+       (
+               cd 1b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d HEAD:y/e &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:y/d    A:z/e &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:z/e
+       )
+'
+
+# Testcase 1c, Transitive renaming
+#   (Related to testcases 3a and 6d -- when should a transitive rename apply?)
+#   (Related to testcases 9c and 9d -- can transitivity repeat?)
+#   (Related to testcase 12b -- joint-transitivity?)
+#   Commit O: z/{b,c},   x/d
+#   Commit A: y/{b,c},   x/d
+#   Commit B: z/{b,c,d}
+#   Expected: y/{b,c,d}  (because x/d -> z/d -> y/d)
+
+test_expect_success '1c-setup: Transitive renaming' '
+       test_create_repo 1c &&
+       (
+               cd 1c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1c-check: Transitive renaming' '
+       (
+               cd 1c &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:x/d &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:x/d &&
+               test_must_fail git rev-parse HEAD:z/d &&
+               test_path_is_missing z/d
+       )
+'
+
+# Testcase 1d, Directory renames (merging two directories into one new one)
+#              cause a rename/rename(2to1) conflict
+#   (Related to testcases 1c and 7b)
+#   Commit O. z/{b,c},        y/{d,e}
+#   Commit A. x/{b,c},        y/{d,e,m,wham_1}
+#   Commit B. z/{b,c,n,wham_2}, x/{d,e}
+#   Expected: x/{b,c,d,e,m,n}, CONFLICT:(y/wham_1 & z/wham_2 -> x/wham)
+#   Note: y/m & z/n should definitely move into x.  By the same token, both
+#         y/wham_1 & z/wham_2 should too...giving us a conflict.
+
+test_expect_success '1d-setup: Directory renames cause a rename/rename(2to1) conflict' '
+       test_create_repo 1d &&
+       (
+               cd 1d &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir y &&
+               echo d >y/d &&
+               echo e >y/e &&
+               git add z y &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z x &&
+               echo m >y/m &&
+               echo wham1 >y/wham &&
+               git add y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv y x &&
+               echo n >z/n &&
+               echo wham2 >z/wham &&
+               git add z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1d-check: Directory renames cause a rename/rename(2to1) conflict' '
+       (
+               cd 1d &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 8 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       :0:x/b :0:x/c :0:x/d :0:x/e :0:x/m :0:x/n &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:y/d  O:y/e  A:y/m  B:z/n &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse :0:x/wham &&
+               git rev-parse >actual \
+                       :2:x/wham :3:x/wham &&
+               git rev-parse >expect \
+                        A:y/wham  B:z/wham &&
+               test_cmp expect actual &&
+
+               test_path_is_missing x/wham &&
+               test_path_is_file x/wham~HEAD &&
+               test_path_is_file x/wham~B^0 &&
+
+               git hash-object >actual \
+                       x/wham~HEAD x/wham~B^0 &&
+               git rev-parse >expect \
+                       A:y/wham    B:z/wham &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 1e, Renamed directory, with all filenames being renamed too
+#   (Related to testcases 9f & 9g)
+#   Commit O: z/{oldb,oldc}
+#   Commit A: y/{newb,newc}
+#   Commit B: z/{oldb,oldc,d}
+#   Expected: y/{newb,newc,d}
+
+test_expect_success '1e-setup: Renamed directory, with all files being renamed too' '
+       test_create_repo 1e &&
+       (
+               cd 1e &&
+
+               mkdir z &&
+               echo b >z/oldb &&
+               echo c >z/oldc &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir y &&
+               git mv z/oldb y/newb &&
+               git mv z/oldc y/newc &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1e-check: Renamed directory, with all files being renamed too' '
+       (
+               cd 1e &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/newb HEAD:y/newc HEAD:y/d &&
+               git rev-parse >expect \
+                       O:z/oldb    O:z/oldc    B:z/d &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:z/d
+       )
+'
+
+# Testcase 1f, Split a directory into two other directories
+#   (Related to testcases 3a, all of section 2, and all of section 4)
+#   Commit O: z/{b,c,d,e,f}
+#   Commit A: z/{b,c,d,e,f,g}
+#   Commit B: y/{b,c}, x/{d,e,f}
+#   Expected: y/{b,c}, x/{d,e,f,g}
+
+test_expect_success '1f-setup: Split a directory into two other directories' '
+       test_create_repo 1f &&
+       (
+               cd 1f &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >z/d &&
+               echo e >z/e &&
+               echo f >z/f &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo g >z/g &&
+               git add z/g &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir y &&
+               mkdir x &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               git mv z/d x/ &&
+               git mv z/e x/ &&
+               git mv z/f x/ &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1f-check: Split a directory into two other directories' '
+       (
+               cd 1f &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:x/d HEAD:x/e HEAD:x/f HEAD:x/g &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:z/d    O:z/e    O:z/f    A:z/g &&
+               test_cmp expect actual &&
+               test_path_is_missing z/g &&
+               test_must_fail git rev-parse HEAD:z/g
+       )
+'
+
+###########################################################################
+# Rules suggested by testcases in section 1:
+#
+#   We should still detect the directory rename even if it wasn't the
+#   directory itself that was renamed but the individual files within it
+#   (see 1b).
+#
+#   If renames split a directory into two or more others, the directory
+#   with the most renames "wins" (see 1c).  However, see the testcases
+#   in section 2, plus testcases 3a and 4a.
+###########################################################################
+
+
+###########################################################################
+# SECTION 2: Split into multiple directories, with equal number of paths
+#
+# Explore the splitting-a-directory rules a bit; what happens in the
+# edge cases?
+#
+# Note that there is a closely related case of a directory not being
+# split on either side of history, but being renamed differently on
+# each side.  See testcase 8e for that.
+###########################################################################
+
+# Testcase 2a, Directory split into two on one side, with equal numbers of paths
+#   Commit O: z/{b,c}
+#   Commit A: y/b, w/c
+#   Commit B: z/{b,c,d}
+#   Expected: y/b, w/c, z/d, with warning about z/ -> (y/ vs. w/) conflict
+test_expect_success '2a-setup: Directory split into two on one side, with equal numbers of paths' '
+       test_create_repo 2a &&
+       (
+               cd 2a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir y &&
+               mkdir w &&
+               git mv z/b y/ &&
+               git mv z/c w/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '2a-check: Directory split into two on one side, with equal numbers of paths' '
+       (
+               cd 2a &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT.*directory rename split" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:w/c :0:z/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:z/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 2b, Directory split into two on one side, with equal numbers of paths
+#   Commit O: z/{b,c}
+#   Commit A: y/b, w/c
+#   Commit B: z/{b,c}, x/d
+#   Expected: y/b, w/c, x/d; No warning about z/ -> (y/ vs. w/) conflict
+test_expect_success '2b-setup: Directory split into two on one side, with equal numbers of paths' '
+       test_create_repo 2b &&
+       (
+               cd 2b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir y &&
+               mkdir w &&
+               git mv z/b y/ &&
+               git mv z/c w/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir x &&
+               echo d >x/d &&
+               git add x/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '2b-check: Directory split into two on one side, with equal numbers of paths' '
+       (
+               cd 2b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 >out &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:w/c :0:x/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:x/d &&
+               test_cmp expect actual &&
+               test_i18ngrep ! "CONFLICT.*directory rename split" out
+       )
+'
+
+###########################################################################
+# Rules suggested by section 2:
+#
+#   None; the rule was already covered in section 1.  These testcases are
+#   here just to make sure the conflict resolution and necessary warning
+#   messages are handled correctly.
+###########################################################################
+
+
+###########################################################################
+# SECTION 3: Path in question is the source path for some rename already
+#
+# Combining cases from Section 1 and trying to handle them could lead to
+# directory rename detection being over-applied.  So, this section
+# provides some good testcases to check that the implementation doesn't go
+# too far.
+###########################################################################
+
+# Testcase 3a, Avoid implicit rename if involved as source on other side
+#   (Related to testcases 1c, 1f, and 9h)
+#   Commit O: z/{b,c,d}
+#   Commit A: z/{b,c,d} (no change)
+#   Commit B: y/{b,c}, x/d
+#   Expected: y/{b,c}, x/d
+test_expect_success '3a-setup: Avoid implicit rename if involved as source on other side' '
+       test_create_repo 3a &&
+       (
+               cd 3a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_tick &&
+               git commit --allow-empty -m "A" &&
+
+               git checkout B &&
+               mkdir y &&
+               mkdir x &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               git mv z/d x/ &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '3a-check: Avoid implicit rename if involved as source on other side' '
+       (
+               cd 3a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:x/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:z/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 3b, Avoid implicit rename if involved as source on other side
+#   (Related to testcases 5c and 7c, also kind of 1e and 1f)
+#   Commit O: z/{b,c,d}
+#   Commit A: y/{b,c}, x/d
+#   Commit B: z/{b,c}, w/d
+#   Expected: y/{b,c}, CONFLICT:(z/d -> x/d vs. w/d)
+#   NOTE: We're particularly checking that since z/d is already involved as
+#         a source in a file rename on the same side of history, we don't
+#         get it involved in directory rename detection.  If it were, we might
+#         end up with CONFLICT:(z/d -> y/d vs. x/d vs. w/d), i.e. a
+#         rename/rename/rename(1to3) conflict, which is just weird.
+test_expect_success '3b-setup: Avoid implicit rename if involved as source on current side' '
+       test_create_repo 3b &&
+       (
+               cd 3b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir y &&
+               mkdir x &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               git mv z/d x/ &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir w &&
+               git mv z/d w/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '3b-check: Avoid implicit rename if involved as source on current side' '
+       (
+               cd 3b &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep CONFLICT.*rename/rename.*z/d.*x/d.*w/d out &&
+               test_i18ngrep ! CONFLICT.*rename/rename.*y/d out &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 3 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
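+               # index stages: 0 = merged, 1 = merge base, 2 = ours (HEAD), 3 = theirs (B^0)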
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :1:z/d :2:x/d :3:w/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:z/d  O:z/d  O:z/d &&
+               test_cmp expect actual &&
+
+               test_path_is_missing z/d &&
+               git hash-object >actual \
+                       x/d   w/d &&
+               git rev-parse >expect \
+                       O:z/d O:z/d &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# Rules suggested by section 3:
+#
+#   Avoid directory-rename-detection for a path, if that path is the source
+#   of a rename on either side of a merge.
+###########################################################################
+
+
+###########################################################################
+# SECTION 4: Partially renamed directory; still exists on both sides of merge
+#
+# What if we were to attempt to do directory rename detection when someone
+# "mostly" moved a directory but still left some files around, or,
+# equivalently, fully renamed a directory in one commit and then recreated
+# that directory in a later commit adding some new files and then tried to
+# merge?
+#
+# It's hard to divine user intent in these cases, because you can make an
+# argument that, depending on the intermediate history of the side being
+# merged, some users will want files in that directory to
+# automatically be detected and renamed, while users with a different
+# intermediate history wouldn't want that rename to happen.
+#
+# I think that it is best to simply not have directory rename detection
+# apply to such cases.  My reasoning for this is four-fold: (1) it's
+# easiest for users in general to figure out what happened if we don't
+# apply directory rename detection in any such case, (2) it's an easy rule
+# to explain ["We don't do directory rename detection if the directory
+# still exists on both sides of the merge"], (3) we can get some hairy
+# edge/corner cases that would be really confusing and possibly not even
+# representable in the index if we were to even try, and [related to 3] (4)
+# attempting to resolve this issue of divining user intent by examining
+# intermediate history goes against the spirit of three-way merges and is a
+# path towards crazy corner cases that are far more complex than what we're
+# already dealing with.
+#
+# Note that the wording of the rule ("We don't do directory rename
+# detection if the directory still exists on both sides of the merge.")
+# also excludes "renaming" of a directory into a subdirectory of itself
+# (e.g. /some/dir/* -> /some/dir/subdir/*).  It may be possible to carve
+# out an exception for "renaming"-beneath-itself cases without opening
+# weird edge/corner cases for other partial directory renames, but for now
+# we are keeping the rule simple.
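+#
+# As an illustration of the excluded case (not one of the testcases below):
+# if one side moves every file in some/dir/ with something like
+#     git mv some/dir/file1 some/dir/subdir/file1
+# then some/dir/ still exists on both sides (it now holds subdir/), so files
+# the other side adds to some/dir/ stay where they are instead of being
+# pulled into some/dir/subdir/.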
+#
+# This section contains a test for a partially-renamed-directory case.
+###########################################################################
+
+# Testcase 4a, Directory split, with original directory still present
+#   (Related to testcase 1f)
+#   Commit O: z/{b,c,d,e}
+#   Commit A: y/{b,c,d}, z/e
+#   Commit B: z/{b,c,d,e,f}
+#   Expected: y/{b,c,d}, z/{e,f}
+#   NOTE: Even though most files from z moved to y, we don't want f to follow.
+
+test_expect_success '4a-setup: Directory split, with original directory still present' '
+       test_create_repo 4a &&
+       (
+               cd 4a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >z/d &&
+               echo e >z/e &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir y &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               git mv z/d y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo f >z/f &&
+               git add z/f &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '4a-check: Directory split, with original directory still present' '
+       (
+               cd 4a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d HEAD:z/e HEAD:z/f &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:z/d    O:z/e    B:z/f &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# Rules suggested by section 4:
+#
+#   Directory-rename-detection should be turned off for any directories (as
+#   a source for renames) that exist on both sides of the merge.  (The "as
+#   a source for renames" clarification is due to cases like 1c where
+#   the target directory exists on both sides and we do want the rename
+#   detection.)  But, sadly, see testcase 8b.
+###########################################################################
+
+
+###########################################################################
+# SECTION 5: Files/directories in the way of subset of to-be-renamed paths
+#
+# Implicitly renaming files due to a detected directory rename could run
+# into problems if there are files or directories in the way of the paths
+# we want to rename.  Explore such cases in this section.
+###########################################################################
+
+# Testcase 5a, Merge directories, other side adds files to original and target
+#   Commit O: z/{b,c},       y/d
+#   Commit A: z/{b,c,e_1,f}, y/{d,e_2}
+#   Commit B: y/{b,c,d}
+#   Expected: z/e_1, y/{b,c,d,e_2,f} + CONFLICT warning
+#   NOTE: While directory rename detection is active here, causing z/f to
+#         become y/f, we did not apply it for z/e_1 because that would
+#         give us an add/add conflict for y/e_1 vs y/e_2.  The problem with
+#         this add/add is that both versions of y/e are from the same side
+#         of history, giving us no way to represent this conflict in the
+#         index.
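+#         (Each path has only one slot per index stage -- 1:base, 2:ours,
+#         3:theirs -- so two competing adds that both come from the same
+#         side cannot both be recorded.)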
+
+test_expect_success '5a-setup: Merge directories, other side adds files to original and target' '
+       test_create_repo 5a &&
+       (
+               cd 5a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir y &&
+               echo d >y/d &&
+               git add z y &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo e1 >z/e &&
+               echo f >z/f &&
+               echo e2 >y/e &&
+               git add z/e z/f y/e &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '5a-check: Merge directories, other side adds files to original and target' '
+       (
+               cd 5a &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT.*implicit dir rename" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :0:y/d :0:y/e :0:z/e :0:y/f &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:y/d  A:y/e  A:z/e  A:z/f &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 5b, Rename/delete in order to get add/add/add conflict
+#   (Related to testcase 8d; these may appear slightly inconsistent to users;
+#    Also related to testcases 7d and 7e)
+#   Commit O: z/{b,c,d_1}
+#   Commit A: y/{b,c,d_2}
+#   Commit B: z/{b,c,d_1,e}, y/d_3
+#   Expected: y/{b,c,e}, CONFLICT(add/add: y/d_2 vs. y/d_3)
+#   NOTE: If z/d_1 in commit B were to be involved in dir rename detection, as
+#         we normally would since z/ is being renamed to y/, then this would be
+#         a rename/delete (z/d_1 -> y/d_1 vs. deleted) AND an add/add/add
+#         conflict of y/d_1 vs. y/d_2 vs. y/d_3.  Add/add/add is not
+#         representable in the index, so the existence of y/d_3 needs to
+#         cause us to bail on directory rename detection for that path, falling
+#         back to git behavior without the directory rename detection.
+
+test_expect_success '5b-setup: Rename/delete in order to get add/add/add conflict' '
+       test_create_repo 5b &&
+       (
+               cd 5b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d1 >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/d &&
+               git mv z y &&
+               echo d2 >y/d &&
+               git add y/d &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir y &&
+               echo d3 >y/d &&
+               echo e >z/e &&
+               git add y/d z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '5b-check: Rename/delete in order to get add/add/add conflict' '
+       (
+               cd 5b &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (add/add).* y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :0:y/e :2:y/d :3:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:z/e  A:y/d  B:y/d &&
+               test_cmp expect actual &&
+
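+               # y/d has no stage-1 (merge base) entry; d_2 and d_3 are both new adds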
+               test_must_fail git rev-parse :1:y/d &&
+               test_path_is_file y/d
+       )
+'
+
+# Testcase 5c, Transitive rename would cause rename/rename/rename/add/add/add
+#   (Directory rename detection would result in transitive rename vs.
+#    rename/rename(1to2) and turn it into a rename/rename(1to3).  Further,
+#    rename paths conflict with separate adds on the other side)
+#   (Related to testcases 3b and 7c)
+#   Commit O: z/{b,c}, x/d_1
+#   Commit A: y/{b,c,d_2}, w/d_1
+#   Commit B: z/{b,c,d_1,e}, w/d_3, y/d_4
+#   Expected: A mess, but only a rename/rename(1to2)/add/add mess.  Use the
+#             presence of y/d_4 in B to avoid doing transitive rename of
+#             x/d_1 -> z/d_1 -> y/d_1, so that the only paths we have at
+#             y/d are y/d_2 and y/d_4.  We still do the move from z/e to y/e,
+#             though, because it doesn't have anything in the way.
+
+test_expect_success '5c-setup: Transitive rename would cause rename/rename/rename/add/add/add' '
+       test_create_repo 5c &&
+       (
+               cd 5c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d1 >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               echo d2 >y/d &&
+               git add y/d &&
+               git mv x w &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/ &&
+               mkdir w &&
+               mkdir y &&
+               echo d3 >w/d &&
+               echo d4 >y/d &&
+               echo e >z/e &&
+               git add w/ y/ z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '5c-check: Transitive rename would cause rename/rename/rename/add/add/add' '
+       (
+               cd 5c &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/rename).*x/d.*w/d.*z/d" out &&
+               test_i18ngrep "CONFLICT (add/add).* y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 9 out &&
+               git ls-files -u >out &&
+               test_line_count = 6 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :0:y/e &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:z/e &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse :1:y/d &&
+               git rev-parse >actual \
+                       :2:w/d :3:w/d :1:x/d :2:y/d :3:y/d :3:z/d &&
+               git rev-parse >expect \
+                        O:x/d  B:w/d  O:x/d  A:y/d  B:y/d  O:x/d &&
+               test_cmp expect actual &&
+
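+               # the two versions that collide at w/d are written out to the
+               # uniquified paths w/d~HEAD and w/d~B^0 rather than to w/d itself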
+               git hash-object >actual \
+                       w/d~HEAD w/d~B^0 z/d &&
+               git rev-parse >expect \
+                       O:x/d    B:w/d   O:x/d &&
+               test_cmp expect actual &&
+               test_path_is_missing x/d &&
+               test_path_is_file y/d &&
+               grep -q "<<<<" y/d  # conflict markers should be present
+       )
+'
+
+# Testcase 5d, Directory/file/file conflict due to directory rename
+#   Commit O: z/{b,c}
+#   Commit A: y/{b,c,d_1}
+#   Commit B: z/{b,c,d_2,f}, y/d/e
+#   Expected: y/{b,c,d/e,f}, z/d_2, CONFLICT(file/directory), y/d_1~HEAD
+#   Note: The fact that y/d/ exists in B makes us bail on directory rename
+#         detection for z/d_2, but that doesn't prevent us from applying the
+#         directory rename detection for z/f -> y/f.
+
+test_expect_success '5d-setup: Directory/file/file conflict due to directory rename' '
+       test_create_repo 5d &&
+       (
+               cd 5d &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               echo d1 >y/d &&
+               git add y/d &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir -p y/d &&
+               echo e >y/d/e &&
+               echo d2 >z/d &&
+               echo f >z/f &&
+               git add y/d/e z/d z/f &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '5d-check: Directory/file/file conflict due to directory rename' '
+       (
+               cd 5d &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (file/directory).*y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 2 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :0:z/d :0:y/f :2:y/d :0:y/d/e &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:z/d  B:z/f  A:y/d  B:y/d/e &&
+               test_cmp expect actual &&
+
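+               # the file y/d from commit A was moved aside to y/d~HEAD to
+               # make room for the y/d/ directory coming from B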
+               git hash-object y/d~HEAD >actual &&
+               git rev-parse A:y/d >expect &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# Rules suggested by section 5:
+#
+#   If a subset of to-be-renamed files have a file or directory in the way,
+#   "turn off" the directory rename for those specific sub-paths, falling
+#   back to old handling.  But, sadly, see testcases 8a and 8b.
+###########################################################################
+
+
+###########################################################################
+# SECTION 6: Same side of the merge was the one that did the rename
+#
+# It may sound obvious that you only want to apply implicit directory
+# renames to directories if the _other_ side of history did the renaming.
+# If you did make an implementation that didn't explicitly enforce this
+# rule, the majority of cases that would fall under this section would
+# also be solved by following the rules from the above sections.  But
+# there are still a few that stick out, so this section covers them just
+# to make sure we also get them right.
+###########################################################################
+
+# Testcase 6a, Tricky rename/delete
+#   Commit O: z/{b,c,d}
+#   Commit A: z/b
+#   Commit B: y/{b,c}, z/d
+#   Expected: y/b, CONFLICT(rename/delete, z/c -> y/c vs. NULL)
+#   Note: We're just checking here that the rename of z/b and z/c to put
+#         them under y/ doesn't accidentally catch z/d and make it look like
+#         it is also involved in a rename/delete conflict.
+
+test_expect_success '6a-setup: Tricky rename/delete' '
+       test_create_repo 6a &&
+       (
+               cd 6a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/c &&
+               git rm z/d &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir y &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '6a-check: Tricky rename/delete' '
+       (
+               cd 6a &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/delete).*z/c.*y/c" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 2 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :3:y/c &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 6b, Same rename done on both sides
+#   (Related to testcases 6c and 8e)
+#   Commit O: z/{b,c}
+#   Commit A: y/{b,c}
+#   Commit B: y/{b,c}, z/d
+#   Expected: y/{b,c}, z/d
+#   Note: If we did directory rename detection here, we'd move z/d into y/,
+#         but B did that rename and still decided to put the file into z/,
+#         so we probably shouldn't apply directory rename detection for it.
+
+test_expect_success '6b-setup: Same rename done on both sides' '
+       test_create_repo 6b &&
+       (
+               cd 6b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               mkdir z &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '6b-check: Same rename done on both sides' '
+       (
+               cd 6b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:z/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:z/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 6c, Rename only done on same side
+#   (Related to testcases 6b and 8e)
+#   Commit O: z/{b,c}
+#   Commit A: z/{b,c} (no change)
+#   Commit B: y/{b,c}, z/d
+#   Expected: y/{b,c}, z/d
+#   NOTE: Seems obvious, but just checking that the implementation doesn't
+#         "accidentally detect a rename" and give us y/{b,c,d}.
+
+test_expect_success '6c-setup: Rename only done on same side' '
+       test_create_repo 6c &&
+       (
+               cd 6c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_tick &&
+               git commit --allow-empty -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               mkdir z &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '6c-check: Rename only done on same side' '
+       (
+               cd 6c &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:z/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:z/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 6d, We don't always want transitive renaming
+#   (Related to testcase 1c)
+#   Commit O: z/{b,c}, x/d
+#   Commit A: z/{b,c}, x/d (no change)
+#   Commit B: y/{b,c}, z/d
+#   Expected: y/{b,c}, z/d
+#   NOTE: Again, this seems obvious but just checking that the implementation
+#         doesn't "accidentally detect a rename" and give us y/{b,c,d}.
+
+test_expect_success '6d-setup: We do not always want transitive renaming' '
+       test_create_repo 6d &&
+       (
+               cd 6d &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_tick &&
+               git commit --allow-empty -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               git mv x z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '6d-check: We do not always want transitive renaming' '
+       (
+               cd 6d &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:z/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:x/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 6e, Add/add from one side
+#   Commit O: z/{b,c}
+#   Commit A: z/{b,c} (no change)
+#   Commit B: y/{b,c,d_1}, z/d_2
+#   Expected: y/{b,c,d_1}, z/d_2
+#   NOTE: Again, this seems obvious but just checking that the implementation
+#         doesn't "accidentally detect a rename" and give us y/{b,c} +
+#         add/add conflict on y/d_1 vs y/d_2.
+
+test_expect_success '6e-setup: Add/add from one side' '
+       test_create_repo 6e &&
+       (
+               cd 6e &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_tick &&
+               git commit --allow-empty -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               echo d1 > y/d &&
+               mkdir z &&
+               echo d2 > z/d &&
+               git add y/d z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '6e-check: Add/add from one side' '
+       (
+               cd 6e &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d HEAD:z/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:y/d    B:z/d &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# Rules suggested by section 6:
+#
+#   Only apply implicit directory renames to directories if the other
+#   side of history is the one doing the renaming.
+###########################################################################
+
+
+###########################################################################
+# SECTION 7: More involved Edge/Corner cases
+#
+# The ruleset we have generated in the above sections seems to provide
+# well-defined merges.  But can we find edge/corner cases that either (a)
+# are harder for users to understand, or (b) have a resolution that is
+# non-intuitive or suboptimal?
+#
+# The testcases in this section dive into cases that I've tried to craft in
+# a way to find some that might be surprising to users or difficult for
+# them to understand (the next section will look at non-intuitive or
+# suboptimal merge results).  Some of the testcases are similar to ones
+# from past sections, but have been simplified to try to highlight error
+# messages using a "modified" path (due to the directory rename).  Are
+# users okay with these?
+#
+# In my opinion, when testcases from this section are difficult to
+# understand, the difficulty comes from the testcase itself rather than from
+# the directory renaming (similar to how t6042 and t6036 have difficult
+# resolutions because the problem setup itself is complex).  And I don't
+# think the error messages are a problem.
+#
+# On the other hand, the testcases in section 8 worry me slightly more...
+###########################################################################
+
+# Testcase 7a, rename-dir vs. rename-dir (NOT split evenly) PLUS add-other-file
+#   Commit O: z/{b,c}
+#   Commit A: y/{b,c}
+#   Commit B: w/b, x/c, z/d
+#   Expected: y/d, CONFLICT(rename/rename for both z/b and z/c)
+#   NOTE: There's a rename of z/ here, y/ has more renames, so z/d -> y/d.
+
+test_expect_success '7a-setup: rename-dir vs. rename-dir (NOT split evenly) PLUS add-other-file' '
+       test_create_repo 7a &&
+       (
+               cd 7a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir w &&
+               mkdir x &&
+               git mv z/b w/ &&
+               git mv z/c x/ &&
+               echo d > z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '7a-check: rename-dir vs. rename-dir (NOT split evenly) PLUS add-other-file' '
+       (
+               cd 7a &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/rename).*z/b.*y/b.*w/b" out &&
+               test_i18ngrep "CONFLICT (rename/rename).*z/c.*y/c.*x/c" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 7 out &&
+               git ls-files -u >out &&
+               test_line_count = 6 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :1:z/b :2:y/b :3:w/b :1:z/c :2:y/c :3:x/c :0:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/b  O:z/b  O:z/c  O:z/c  O:z/c  B:z/d &&
+               test_cmp expect actual &&
+
+               git hash-object >actual \
+                       y/b   w/b   y/c   x/c &&
+               git rev-parse >expect \
+                       O:z/b O:z/b O:z/c O:z/c &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 7b, rename/rename(2to1), but only due to transitive rename
+#   (Related to testcase 1d)
+#   Commit O: z/{b,c},     x/d_1, w/d_2
+#   Commit A: y/{b,c,d_2}, x/d_1
+#   Commit B: z/{b,c,d_1},        w/d_2
+#   Expected: y/{b,c}, CONFLICT(rename/rename(2to1): x/d_1, w/d_2 -> y/d)
+
+test_expect_success '7b-setup: rename/rename(2to1), but only due to transitive rename' '
+       test_create_repo 7b &&
+       (
+               cd 7b &&
+
+               mkdir z &&
+               mkdir x &&
+               mkdir w &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d1 > x/d &&
+               echo d2 > w/d &&
+               git add z x w &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               git mv w/d y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/ &&
+               rmdir x &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '7b-check: rename/rename(2to1), but only due to transitive rename' '
+       (
+               cd 7b &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :2:y/d :3:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:w/d  O:x/d &&
+               test_cmp expect actual &&
+
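+               # y/d itself is not written; each of the colliding versions is
+               # kept at its own suffixed path instead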
+               test_path_is_missing y/d &&
+               test_path_is_file y/d~HEAD &&
+               test_path_is_file y/d~B^0 &&
+
+               git hash-object >actual \
+                       y/d~HEAD y/d~B^0 &&
+               git rev-parse >expect \
+                       O:w/d    O:x/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 7c, rename/rename(1to...2or3); transitive rename may add complexity
+#   (Related to testcases 3b and 5c)
+#   Commit O: z/{b,c}, x/d
+#   Commit A: y/{b,c}, w/d
+#   Commit B: z/{b,c,d}
+#   Expected: y/{b,c}, CONFLICT(x/d -> w/d vs. y/d)
+#   NOTE: z/ was renamed to y/, so we want to report
+#         neither CONFLICT(x/d -> w/d vs. z/d)
+#         nor CONFLICT(x/d -> w/d vs. y/d vs. z/d)
+
+test_expect_success '7c-setup: rename/rename(1to...2or3); transitive rename may add complexity' '
+       test_create_repo 7c &&
+       (
+               cd 7c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               git mv x w &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/ &&
+               rmdir x &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '7c-check: rename/rename(1to...2or3); transitive rename may add complexity' '
+       (
+               cd 7c &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/rename).*x/d.*w/d.*y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 3 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :1:x/d :2:w/d :3:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:x/d  O:x/d  O:x/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 7d, transitive rename involved in rename/delete; how is it reported?
+#   (Related somewhat to testcases 5b and 8d)
+#   Commit O: z/{b,c}, x/d
+#   Commit A: y/{b,c}
+#   Commit B: z/{b,c,d}
+#   Expected: y/{b,c}, CONFLICT(delete x/d vs rename to y/d)
+#   NOTE: z->y so NOT CONFLICT(delete x/d vs rename to z/d)
+
+test_expect_success '7d-setup: transitive rename involved in rename/delete; how is it reported?' '
+       test_create_repo 7d &&
+       (
+               cd 7d &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               git rm -rf x &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/ &&
+               rmdir x &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '7d-check: transitive rename involved in rename/delete; how is it reported?' '
+       (
+               cd 7d &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/delete).*x/d.*y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :3:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:x/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 7e, transitive rename in rename/delete AND dirs in the way
+#   (Very similar to 'both rename source and destination involved in D/F conflict' from t6022-merge-rename.sh)
+#   (Also related to testcases 9c and 9d)
+#   Commit O: z/{b,c},     x/d_1
+#   Commit A: y/{b,c,d/g}, x/d/f
+#   Commit B: z/{b,c,d_1}
+#   Expected: rename/delete(x/d_1->y/d_1 vs. None) + D/F conflict on y/d
+#             y/{b,c,d/g}, y/d_1~B^0, x/d/f
+
+#   NOTE: The main path of interest here is d_1 and where it ends up, but
+#         this is actually a case that has two potential directory renames
+#         involved and D/F conflict(s), so it makes sense to walk through
+#         each step.
+#
+#         Commit A renames z/ -> y/.  Thus everything that B adds to z/
+#         should be instead moved to y/.  This gives us the D/F conflict on
+#         y/d because x/d_1 -> z/d_1 -> y/d_1 conflicts with y/d/g.
+#
+#         Further, commit B renames x/ -> z/, thus everything A adds to x/
+#         should instead be moved to z/...BUT we removed z/ and renamed it
+#         to y/, so maybe everything should move not from x/ to z/, but
+#         from x/ to z/ to y/.  Doing so might make sense from the logic so
+#         far, but note that commit A had both an x/ and a y/; it did the
+#         renaming of z/ to y/ and created x/d/f, and it clearly made these
+#         things separate, so it doesn't make much sense to push these
+#         together.  Doing so is what I'd call a doubly transitive rename;
+#         see testcases 9c and 9d for further discussion of this issue and
+#         how it's resolved.
+
+test_expect_success '7e-setup: transitive rename in rename/delete AND dirs in the way' '
+       test_create_repo 7e &&
+       (
+               cd 7e &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d1 >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               git rm x/d &&
+               mkdir -p x/d &&
+               mkdir -p y/d &&
+               echo f >x/d/f &&
+               echo g >y/d/g &&
+               git add x/d/f y/d/g &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/ &&
+               rmdir x &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '7e-check: transitive rename in rename/delete AND dirs in the way' '
+       (
+               cd 7e &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/delete).*x/d.*y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 2 out &&
+
+               git rev-parse >actual \
+                       :0:x/d/f :0:y/d/g :0:y/b :0:y/c :3:y/d &&
+               git rev-parse >expect \
+                        A:x/d/f  A:y/d/g  O:z/b  O:z/c  O:x/d &&
+               test_cmp expect actual &&
+
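+               # the x/d content from B maps to y/d, but y/d is a directory,
+               # so the file lands at y/d~B^0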
+               git hash-object y/d~B^0 >actual &&
+               git rev-parse O:x/d >expect &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# SECTION 8: Suboptimal merges
+#
+# As alluded to in the last section, the ruleset we have built up for
+# detecting directory renames unfortunately has some special cases where it
+# results in slightly suboptimal or non-intuitive behavior.  This section
+# explores these cases.
+#
+# To be fair, we already had non-intuitive or suboptimal behavior for most
+# of these cases in git before introducing implicit directory rename
+# detection, but it'd be nice if there was a modified ruleset out there
+# that handled these cases a bit better.
+###########################################################################
+
+# Testcase 8a, Dual-directory rename, one into the others' way
+#   Commit O. x/{a,b},   y/{c,d}
+#   Commit A. x/{a,b,e}, y/{c,d,f}
+#   Commit B. y/{a,b},   z/{c,d}
+#
+# Possible Resolutions:
+#   w/o dir-rename detection: y/{a,b,f},   z/{c,d},   x/e
+#   Currently expected:       y/{a,b,e,f}, z/{c,d}
+#   Optimal:                  y/{a,b,e},   z/{c,d,f}
+#
+# Note: Both x and y got renamed and it would be nice to detect both.  We do
+# better with directory rename detection than git did without it, but the
+# simple rule from section 5 prevents us from handling this as optimally as
+# we potentially could.
+
+test_expect_success '8a-setup: Dual-directory rename, one into the others way' '
+       test_create_repo 8a &&
+       (
+               cd 8a &&
+
+               mkdir x &&
+               mkdir y &&
+               echo a >x/a &&
+               echo b >x/b &&
+               echo c >y/c &&
+               echo d >y/d &&
+               git add x y &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo e >x/e &&
+               echo f >y/f &&
+               git add x/e y/f &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv y z &&
+               git mv x y &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '8a-check: Dual-directory rename, one into the others way' '
+       (
+               cd 8a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/a HEAD:y/b HEAD:y/e HEAD:y/f HEAD:z/c HEAD:z/d &&
+               git rev-parse >expect \
+                       O:x/a    O:x/b    A:x/e    A:y/f    O:y/c    O:y/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 8b, Dual-directory rename, one into the others' way, with conflicting filenames
+#   Commit O. x/{a_1,b_1},     y/{a_2,b_2}
+#   Commit A. x/{a_1,b_1,e_1}, y/{a_2,b_2,e_2}
+#   Commit B. y/{a_1,b_1},     z/{a_2,b_2}
+#
+#   w/o dir-rename detection: y/{a_1,b_1,e_2}, z/{a_2,b_2}, x/e_1
+#   Currently expected:       <same>
+#   Scary:                    y/{a_1,b_1},     z/{a_2,b_2}, CONFLICT(add/add, e_1 vs. e_2)
+#   Optimal:                  y/{a_1,b_1,e_1}, z/{a_2,b_2,e_2}
+#
+# Note: Very similar to 8a, except instead of 'e' and 'f' in directories x and
+# y, both are named 'e'.  Without directory rename detection, neither file
+# moves directories.  Implement directory rename detection suboptimally, and
+# you get an add/add conflict, but both files were added in commit A, so this
+# is an add/add conflict where one side of history added both files --
+# something we can't represent in the index.  Obviously, we'd prefer the last
+# resolution, but our previous rules are too coarse to allow it.  Using both
+# the rules from section 4 and section 5 saves us from the Scary resolution,
+# making us fall back to pre-directory-rename-detection behavior for both
+# e_1 and e_2.
+
+test_expect_success '8b-setup: Dual-directory rename, one into the others way, with conflicting filenames' '
+       test_create_repo 8b &&
+       (
+               cd 8b &&
+
+               mkdir x &&
+               mkdir y &&
+               echo a1 >x/a &&
+               echo b1 >x/b &&
+               echo a2 >y/a &&
+               echo b2 >y/b &&
+               git add x y &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo e1 >x/e &&
+               echo e2 >y/e &&
+               git add x/e y/e &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv y z &&
+               git mv x y &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '8b-check: Dual-directory rename, one into the others way, with conflicting filenames' '
+       (
+               cd 8b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/a HEAD:y/b HEAD:z/a HEAD:z/b HEAD:x/e HEAD:y/e &&
+               git rev-parse >expect \
+                       O:x/a    O:x/b    O:y/a    O:y/b    A:x/e    A:y/e &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 8c, modify/delete or rename+modify/delete?
+#   (Related to testcases 5b, 8d, and 9h)
+#   Commit O: z/{b,c,d}
+#   Commit A: y/{b,c}
+#   Commit B: z/{b,c,d_modified,e}
+#   Expected: y/{b,c,e}, CONFLICT(modify/delete: on z/d)
+#
+#   Note: It could easily be argued that the correct resolution here is
+#         y/{b,c,e}, CONFLICT(rename/delete: z/d -> y/d vs deleted)
+#         and that the modified version of d should be present in y/ after
+#         the merge, just marked as conflicted.  Indeed, I previously did
+#         argue that.  But applying directory renames to the side of
+#         history where a file is merely modified results in spurious
+#         rename/rename(1to2) conflicts -- see testcase 9h.  See also
+#         notes in 8d.
+
+test_expect_success '8c-setup: modify/delete or rename+modify/delete?' '
+       test_create_repo 8c &&
+       (
+               cd 8c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               test_seq 1 10 >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/d &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo 11 >z/d &&
+               test_chmod +x z/d &&
+               echo e >z/e &&
+               git add z/d z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '8c-check: modify/delete or rename+modify/delete' '
+       (
+               cd 8c &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (modify/delete).* z/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :0:y/e :1:z/d :3:z/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:z/e  O:z/d  B:z/d &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse :2:z/d &&
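+               # mode 100755 confirms the executable bit added on B was preserved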
+               git ls-files -s z/d | grep ^100755 &&
+               test_path_is_file z/d &&
+               test_path_is_missing y/d
+       )
+'
+
+# Testcase 8d, rename/delete...or not?
+#   (Related to testcase 5b; these may appear slightly inconsistent to users;
+#    Also related to testcases 7d and 7e)
+#   Commit O: z/{b,c,d}
+#   Commit A: y/{b,c}
+#   Commit B: z/{b,c,d,e}
+#   Expected: y/{b,c,e}
+#
+#   Note: It would also be somewhat reasonable to resolve this as
+#             y/{b,c,e}, CONFLICT(rename/delete: z/d -> y/d or deleted)
+#
+#   In this case, I'm leaning towards the expected resolution above: commit A
+#   was the one that deleted z/d and it also did the rename of z to y, so the
+#   two "conflicts" (rename vs. delete) both come from commit A, which is
+#   illogical.  Conflicts
+#   during merging are supposed to be about opposite sides doing things
+#   differently.
+
+test_expect_success '8d-setup: rename/delete...or not?' '
+       test_create_repo 8d &&
+       (
+               cd 8d &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               test_seq 1 10 >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/d &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo e >z/e &&
+               git add z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '8d-check: rename/delete...or not?' '
+       (
+               cd 8d &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/e &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:z/e &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 8e, Both sides rename, one side adds to original directory
+#   Commit O: z/{b,c}
+#   Commit A: y/{b,c}
+#   Commit B: w/{b,c}, z/d
+#
+# Possible Resolutions:
+#   w/o dir-rename detection: z/d, CONFLICT(z/b -> y/b vs. w/b),
+#                                  CONFLICT(z/c -> y/c vs. w/c)
+#   Currently expected:       y/d, CONFLICT(z/b -> y/b vs. w/b),
+#                                  CONFLICT(z/c -> y/c vs. w/c)
+#   Optimal:                  ??
+#
+# Notes: In commit A, directory z got renamed to y.  In commit B, directory z
+#        did NOT get renamed; the directory is still present; commit B is
+#        instead considered to have merely renamed a subset of the paths in
+#        directory z to somewhere else.  Therefore, the directory rename done
+#        in commit A to z/ applies to z/d and maps it to y/d.
+#
+#        It's possible that users would get confused about this, but what
+#        should we do instead?  Silently leaving it at z/d seems just as bad
+#        or maybe even worse.  Perhaps we could print a big warning about z/d
+#        and how we're moving it to y/d in this case, but when I started
+#        thinking about the ramifications of doing that, I could not rule out
+#        that it would open up other weird edge and corner cases, so I just
+#        punted.
+
+test_expect_success '8e-setup: Both sides rename, one side adds to original directory' '
+       test_create_repo 8e &&
+       (
+               cd 8e &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z w &&
+               mkdir z &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '8e-check: Both sides rename, one side adds to original directory' '
+       (
+               cd 8e &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep CONFLICT.*rename/rename.*z/c.*y/c.*w/c out &&
+               test_i18ngrep CONFLICT.*rename/rename.*z/b.*y/b.*w/b out &&
+
+               git ls-files -s >out &&
+               test_line_count = 7 out &&
+               git ls-files -u >out &&
+               test_line_count = 6 out &&
+               git ls-files -o >out &&
+               test_line_count = 2 out &&
+
+               git rev-parse >actual \
+                       :1:z/b :2:y/b :3:w/b :1:z/c :2:y/c :3:w/c :0:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/b  O:z/b  O:z/c  O:z/c  O:z/c  B:z/d &&
+               test_cmp expect actual &&
+
+               git hash-object >actual \
+                       y/b   w/b   y/c   w/c &&
+               git rev-parse >expect \
+                       O:z/b O:z/b O:z/c O:z/c &&
+               test_cmp expect actual &&
+
+               test_path_is_missing z/b &&
+               test_path_is_missing z/c
+       )
+'
+
+###########################################################################
+# SECTION 9: Other testcases
+#
+# This section consists of miscellaneous testcases I thought of during
+# the implementation which round out the testing.
+###########################################################################
+
+# Testcase 9a, Inner renamed directory within outer renamed directory
+#   (Related to testcase 1f)
+#   Commit O: z/{b,c,d/{e,f,g}}
+#   Commit A: y/{b,c}, x/w/{e,f,g}
+#   Commit B: z/{b,c,d/{e,f,g,h},i}
+#   Expected: y/{b,c,i}, x/w/{e,f,g,h}
+#   NOTE: The only reason this one is interesting is that when a directory
+#         is split into multiple other directories, we pick the destination
+#         by weight, i.e. by which directory had the most paths going to it.
+#         A naive implementation of that could take the new file in commit B
+#         at z/i to x/w/i or x/i.
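+#
+#         As an illustrative aside (equivalent to what the check below
+#         verifies), after the merge one could run
+#             git ls-tree -r --name-only HEAD
+#         and expect to see y/{b,c,i} and x/w/{e,f,g,h}, with nothing left
+#         under z/.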
+
+test_expect_success '9a-setup: Inner renamed directory within outer renamed directory' '
+       test_create_repo 9a &&
+       (
+               cd 9a &&
+
+               mkdir -p z/d &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo e >z/d/e &&
+               echo f >z/d/f &&
+               echo g >z/d/g &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir x &&
+               git mv z/d x/w &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo h >z/d/h &&
+               echo i >z/i &&
+               git add z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9a-check: Inner renamed directory within outer renamed directory' '
+       (
+               cd 9a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 7 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/i &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:z/i &&
+               test_cmp expect actual &&
+
+               git rev-parse >actual \
+                       HEAD:x/w/e HEAD:x/w/f HEAD:x/w/g HEAD:x/w/h &&
+               git rev-parse >expect \
+                       O:z/d/e    O:z/d/f    O:z/d/g    B:z/d/h &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 9b, Transitive rename with content merge
+#   (Related to testcase 1c)
+#   Commit O: z/{b,c},   x/d_1
+#   Commit A: y/{b,c},   x/d_2
+#   Commit B: z/{b,c,d_3}
+#   Expected: y/{b,c,d_merged}
+
+test_expect_success '9b-setup: Transitive rename with content merge' '
+       test_create_repo 9b &&
+       (
+               cd 9b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               test_seq 1 10 >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_seq 1 11 >x/d &&
+               git add x/d &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               test_seq 0 10 >x/d &&
+               git mv x/d z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9b-check: Transitive rename with content merge' '
+       (
+               cd 9b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               test_seq 0 11 >expected &&
+               test_cmp expected y/d &&
+               git add expected &&
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    :0:expected &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:x/d &&
+               test_must_fail git rev-parse HEAD:z/d &&
+               test_path_is_missing z/d &&
+
+               test $(git rev-parse HEAD:y/d) != $(git rev-parse O:x/d) &&
+               test $(git rev-parse HEAD:y/d) != $(git rev-parse A:x/d) &&
+               test $(git rev-parse HEAD:y/d) != $(git rev-parse B:z/d)
+       )
+'
+
+# Testcase 9c, Doubly transitive rename?
+#   (Related to testcase 1c, 7e, and 9d)
+#   Commit O: z/{b,c},     x/{d,e},    w/f
+#   Commit A: y/{b,c},     x/{d,e,f,g}
+#   Commit B: z/{b,c,d,e},             w/f
+#   Expected: y/{b,c,d,e}, x/{f,g}
+#
+#   NOTE: x/f and x/g may be slightly confusing here.  The rename from w/f to
+#         x/f is clear.  Let's look beyond that.  Here's the logic:
+#            Commit B renamed x/ -> z/
+#            Commit A renamed z/ -> y/
+#         So, we could possibly further rename x/f to z/f to y/f, a doubly
+#         transitive rename.  However, where does it end?  We can chain these
+#         indefinitely (see testcase 9d).  What if there is a D/F conflict
+#         at z/f/ or y/f/?  Or just another file conflict at one of those
+#         paths?  In the case of an N-long chain of transitive renamings,
+#         where do we "abort" the rename?  Can the user make sense of
+#         the resulting conflict and resolve it?
+#
+#         To avoid this confusion I use the simple rule that if the other side
+#         of history did a directory rename to a path that your side renamed
+#         away, then ignore that particular rename from the other side of
+#         history for any implicit directory renames.
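+#
+#         Applied to this testcase (illustrative summary): commit A renamed
+#         z/ away (to y/), so commit B's x/ -> z/ directory rename is not
+#         applied to x/f, the file commit A moved from w/f into x/.  That is
+#         what the "Avoiding applying x -> z rename to x/f" warning checked
+#         below is about, and why x/f stays at x/f instead of being mapped
+#         on to z/f and then y/f.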
+
+test_expect_success '9c-setup: Doubly transitive rename?' '
+       test_create_repo 9c &&
+       (
+               cd 9c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d >x/d &&
+               echo e >x/e &&
+               mkdir w &&
+               echo f >w/f &&
+               git add z x w &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               git mv w/f x/ &&
+               echo g >x/g &&
+               git add x/g &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/d &&
+               git mv x/e z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9c-check: Doubly transitive rename?' '
+       (
+               cd 9c &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 >out &&
+               test_i18ngrep "WARNING: Avoiding applying x -> z rename to x/f" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d HEAD:y/e HEAD:x/f HEAD:x/g &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:x/d    O:x/e    O:w/f    A:x/g &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 9d, N-fold transitive rename?
+#   (Related to testcase 9c...and 1c and 7e)
+#   Commit O: z/a, y/b, x/c, w/d, v/e, u/f
+#   Commit A:  y/{a,b},  w/{c,d},  u/{e,f}
+#   Commit B: z/{a,t}, x/{b,c}, v/{d,e}, u/f
+#   Expected: <see NOTE first>
+#
+#   NOTE: z/ -> y/ (in commit A)
+#         y/ -> x/ (in commit B)
+#         x/ -> w/ (in commit A)
+#         w/ -> v/ (in commit B)
+#         v/ -> u/ (in commit A)
+#         So, if we add a file to z, say z/t, where should it end up?  In u?
+#         What if there's another file or directory named 't' in one of the
+#         intervening directories and/or in u itself?  Also, shouldn't the
+#         same logic that places 't' in u/ also move ALL other files to u/?
+#         What if there are file or directory conflicts in any of them?  If
+#         we attempted to do N-way (N-fold? N-ary? N-uple?) transitive renames
+#         like this, would the user have any hope of understanding any
+#         conflicts or how their working tree ended up?  I think not, so I'm
+#         ruling out N-ary transitive renames for N>1.
+#
+#   Therefore our expected result is:
+#     z/t, y/a, x/b, w/c, u/d, u/e, u/f
+#   The reason that v/d DOES get transitively renamed to u/d is that u/ is not
+#   itself renamed anywhere.  A slightly sub-optimal result, but it uses fairly
+#   simple rules that are consistent with what we need for all the other
+#   testcases and simplifies things for the user.
+
+test_expect_success '9d-setup: N-way transitive rename?' '
+       test_create_repo 9d &&
+       (
+               cd 9d &&
+
+               mkdir z y x w v u &&
+               echo a >z/a &&
+               echo b >y/b &&
+               echo c >x/c &&
+               echo d >w/d &&
+               echo e >v/e &&
+               echo f >u/f &&
+               git add z y x w v u &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/a y/ &&
+               git mv x/c w/ &&
+               git mv v/e u/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo t >z/t &&
+               git mv y/b x/ &&
+               git mv w/d v/ &&
+               git add z/t &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9d-check: N-way transitive rename?' '
+       (
+               cd 9d &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 >out &&
+               test_i18ngrep "WARNING: Avoiding applying z -> y rename to z/t" out &&
+               test_i18ngrep "WARNING: Avoiding applying y -> x rename to y/a" out &&
+               test_i18ngrep "WARNING: Avoiding applying x -> w rename to x/b" out &&
+               test_i18ngrep "WARNING: Avoiding applying w -> v rename to w/c" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 7 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:z/t \
+                       HEAD:y/a HEAD:x/b HEAD:w/c \
+                       HEAD:u/d HEAD:u/e HEAD:u/f &&
+               git rev-parse >expect \
+                       B:z/t    \
+                       O:z/a    O:y/b    O:x/c    \
+                       O:w/d    O:v/e    A:u/f &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 9e, N-to-1 whammo
+#   (Related to testcase 9c...and 1c and 7e)
+#   Commit O: dir1/{a,b}, dir2/{d,e}, dir3/{g,h}, dirN/{j,k}
+#   Commit A: dir1/{a,b,c,yo}, dir2/{d,e,f,yo}, dir3/{g,h,i,yo}, dirN/{j,k,l,yo}
+#   Commit B: combined/{a,b,d,e,g,h,j,k}
+#   Expected: combined/{a,b,c,d,e,f,g,h,i,j,k,l}, CONFLICT(Nto1) warnings,
+#             dir1/yo, dir2/yo, dir3/yo, dirN/yo
+
+test_expect_success '9e-setup: N-to-1 whammo' '
+       test_create_repo 9e &&
+       (
+               cd 9e &&
+
+               mkdir dir1 dir2 dir3 dirN &&
+               echo a >dir1/a &&
+               echo b >dir1/b &&
+               echo d >dir2/d &&
+               echo e >dir2/e &&
+               echo g >dir3/g &&
+               echo h >dir3/h &&
+               echo j >dirN/j &&
+               echo k >dirN/k &&
+               git add dir* &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo c  >dir1/c &&
+               echo yo >dir1/yo &&
+               echo f  >dir2/f &&
+               echo yo >dir2/yo &&
+               echo i  >dir3/i &&
+               echo yo >dir3/yo &&
+               echo l  >dirN/l &&
+               echo yo >dirN/yo &&
+               git add dir* &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv dir1 combined &&
+               git mv dir2/* combined/ &&
+               git mv dir3/* combined/ &&
+               git mv dirN/* combined/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success C_LOCALE_OUTPUT '9e-check: N-to-1 whammo' '
+       (
+               cd 9e &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               grep "CONFLICT (implicit dir rename): Cannot map more than one path to combined/yo" out >error_line &&
+               grep -q dir1/yo error_line &&
+               grep -q dir2/yo error_line &&
+               grep -q dir3/yo error_line &&
+               grep -q dirN/yo error_line &&
+
+               git ls-files -s >out &&
+               test_line_count = 16 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 2 out &&
+
+               git rev-parse >actual \
+                       :0:combined/a :0:combined/b :0:combined/c \
+                       :0:combined/d :0:combined/e :0:combined/f \
+                       :0:combined/g :0:combined/h :0:combined/i \
+                       :0:combined/j :0:combined/k :0:combined/l &&
+               git rev-parse >expect \
+                        O:dir1/a      O:dir1/b      A:dir1/c \
+                        O:dir2/d      O:dir2/e      A:dir2/f \
+                        O:dir3/g      O:dir3/h      A:dir3/i \
+                        O:dirN/j      O:dirN/k      A:dirN/l &&
+               test_cmp expect actual &&
+
+               git rev-parse >actual \
+                       :0:dir1/yo :0:dir2/yo :0:dir3/yo :0:dirN/yo &&
+               git rev-parse >expect \
+                        A:dir1/yo  A:dir2/yo  A:dir3/yo  A:dirN/yo &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 9f, Renamed directory that only contained immediate subdirs
+#   (Related to testcases 1e & 9g)
+#   Commit O: goal/{a,b}/$more_files
+#   Commit A: priority/{a,b}/$more_files
+#   Commit B: goal/{a,b}/$more_files, goal/c
+#   Expected: priority/{a,b}/$more_files, priority/c
+
+test_expect_success '9f-setup: Renamed directory that only contained immediate subdirs' '
+       test_create_repo 9f &&
+       (
+               cd 9f &&
+
+               mkdir -p goal/a &&
+               mkdir -p goal/b &&
+               echo foo >goal/a/foo &&
+               echo bar >goal/b/bar &&
+               echo baz >goal/b/baz &&
+               git add goal &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv goal/ priority &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo c >goal/c &&
+               git add goal/c &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9f-check: Renamed directory that only contained immediate subdirs' '
+       (
+               cd 9f &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       HEAD:priority/a/foo \
+                       HEAD:priority/b/bar \
+                       HEAD:priority/b/baz \
+                       HEAD:priority/c &&
+               git rev-parse >expect \
+                       O:goal/a/foo \
+                       O:goal/b/bar \
+                       O:goal/b/baz \
+                       B:goal/c &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:goal/c
+       )
+'
+
+# Testcase 9g, Renamed directory that only contained immediate subdirs, immediate subdirs renamed
+#   (Related to testcases 1e & 9f)
+#   Commit O: goal/{a,b}/$more_files
+#   Commit A: priority/{alpha,beta}/$more_files
+#   Commit B: goal/{a,b}/$more_files, goal/c
+#   Expected: priority/{alpha,beta}/$more_files, priority/c
+
+test_expect_success '9g-setup: Renamed directory that only contained immediate subdirs, immediate subdirs renamed' '
+       test_create_repo 9g &&
+       (
+               cd 9g &&
+
+               mkdir -p goal/a &&
+               mkdir -p goal/b &&
+               echo foo >goal/a/foo &&
+               echo bar >goal/b/bar &&
+               echo baz >goal/b/baz &&
+               git add goal &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir priority &&
+               git mv goal/a/ priority/alpha &&
+               git mv goal/b/ priority/beta &&
+               rmdir goal/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo c >goal/c &&
+               git add goal/c &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_failure '9g-check: Renamed directory that only contained immediate subdirs, immediate subdirs renamed' '
+       (
+               cd 9g &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       HEAD:priority/alpha/foo \
+                       HEAD:priority/beta/bar  \
+                       HEAD:priority/beta/baz  \
+                       HEAD:priority/c &&
+               git rev-parse >expect \
+                       O:goal/a/foo \
+                       O:goal/b/bar \
+                       O:goal/b/baz \
+                       B:goal/c &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:goal/c
+       )
+'
+
+# Testcase 9h, Avoid implicit rename if involved as source on other side
+#   (Extremely closely related to testcase 3a)
+#   Commit O: z/{b,c,d_1}
+#   Commit A: z/{b,c,d_2}
+#   Commit B: y/{b,c}, x/d_1
+#   Expected: y/{b,c}, x/d_2
+#   NOTE: If we applied the z/ -> y/ rename to z/d, then we'd end up with
+#         a rename/rename(1to2) conflict (z/d -> y/d vs. x/d)
+test_expect_success '9h-setup: Avoid dir rename on merely modified path' '
+       test_create_repo 9h &&
+       (
+               cd 9h &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               printf "1\n2\n3\n4\n5\n6\n7\n8\nd\n" >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_tick &&
+               echo more >>z/d &&
+               git add z/d &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir y &&
+               mkdir x &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               git mv z/d x/ &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9h-check: Avoid dir rename on merely modified path' '
+       (
+               cd 9h &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:x/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    A:z/d &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# Rules suggested by section 9:
+#
+#   If the other side of history did a directory rename to a path that your
+#   side renamed away, then ignore that particular rename from the other
+#   side of history for any implicit directory renames.
+###########################################################################
+
+###########################################################################
+# SECTION 10: Handling untracked files
+#
+# unpack_trees(), upon which the recursive merge algorithm is based, aborts
+# the operation if untracked or dirty files would be deleted or overwritten
+# by the merge.  Unfortunately, unpack_trees() does not understand renames,
+# and if it doesn't abort, then it muddies up the working directory before
+# we even get to the point of detecting renames, so we need some special
+# handling, at least in the case of directory renames.
+###########################################################################
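+
+# As a purely illustrative aside (not exercised by any test below), a user
+# whose merge aborts with "untracked working tree files would be overwritten
+# by merge" can typically move the untracked file out of the way and retry,
+# for example (hypothetical paths):
+#
+#     mv z/c z/c.orig
+#     git merge B^0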
+
+# Testcase 10a, Overwrite untracked: normal rename/delete
+#   Commit O: z/{b,c_1}
+#   Commit A: z/b + untracked z/c + untracked z/d
+#   Commit B: z/{b,d_1}
+#   Expected: Aborted Merge +
+#       ERROR_MSG(untracked working tree files would be overwritten by merge)
+
+test_expect_success '10a-setup: Overwrite untracked with normal rename/delete' '
+       test_create_repo 10a &&
+       (
+               cd 10a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z/c z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '10a-check: Overwrite untracked with normal rename/delete' '
+       (
+               cd 10a &&
+
+               git checkout A^0 &&
+               echo very >z/c &&
+               echo important >z/d &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "The following untracked working tree files would be overwritten by merge" err &&
+
+               git ls-files -s >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               echo very >expect &&
+               test_cmp expect z/c &&
+
+               echo important >expect &&
+               test_cmp expect z/d &&
+
+               git rev-parse HEAD:z/b >actual &&
+               git rev-parse O:z/b >expect &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 10b, Overwrite untracked: dir rename + delete
+#   Commit O: z/{b,c_1}
+#   Commit A: y/b + untracked y/{c,d,e}
+#   Commit B: z/{b,d_1,e}
+#   Expected: Failed Merge; y/b + untracked y/c + untracked y/d on disk +
+#             z/c_1 -> z/d_1 rename recorded at stage 3 for y/d +
+#       ERROR_MSG(refusing to lose untracked file at 'y/d')
+
+test_expect_success '10b-setup: Overwrite untracked with dir rename + delete' '
+       test_create_repo 10b &&
+       (
+               cd 10b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/c &&
+               git mv z/ y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z/c z/d &&
+               echo e >z/e &&
+               git add z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '10b-check: Overwrite untracked with dir rename + delete' '
+       (
+               cd 10b &&
+
+               git checkout A^0 &&
+               echo very >y/c &&
+               echo important >y/d &&
+               echo contents >y/e &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "CONFLICT (rename/delete).*Version B\^0 of y/d left in tree at y/d~B\^0" out &&
+               test_i18ngrep "Error: Refusing to lose untracked file at y/e; writing to y/e~B\^0 instead" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 5 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :3:y/d :3:y/e &&
+               git rev-parse >expect \
+                       O:z/b  O:z/c  B:z/e &&
+               test_cmp expect actual &&
+
+               echo very >expect &&
+               test_cmp expect y/c &&
+
+               echo important >expect &&
+               test_cmp expect y/d &&
+
+               echo contents >expect &&
+               test_cmp expect y/e
+       )
+'
+
+# Testcase 10c, Overwrite untracked: dir rename/rename(1to2)
+#   Commit O: z/{a,b}, x/{c,d}
+#   Commit A: y/{a,b}, w/c, x/d + different untracked y/c
+#   Commit B: z/{a,b,c}, x/d
+#   Expected: Failed Merge; y/{a,b} + x/d + untracked y/c +
+#             CONFLICT(rename/rename) x/c -> w/c vs y/c +
+#             y/c~B^0 +
+#             ERROR_MSG(Refusing to lose untracked file at y/c)
+
+test_expect_success '10c-setup: Overwrite untracked with dir rename/rename(1to2)' '
+       test_create_repo 10c &&
+       (
+               cd 10c &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >z/b &&
+               echo c >x/c &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir w &&
+               git mv x/c w/c &&
+               git mv z/ y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/c z/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '10c-check: Overwrite untracked with dir rename/rename(1to2)' '
+       (
+               cd 10c &&
+
+               git checkout A^0 &&
+               echo important >y/c &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+               test_i18ngrep "Refusing to lose untracked file at y/c; adding as y/c~B\^0 instead" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 3 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       :0:y/a :0:y/b :0:x/d :1:x/c :2:w/c :3:y/c &&
+               git rev-parse >expect \
+                        O:z/a  O:z/b  O:x/d  O:x/c  O:x/c  O:x/c &&
+               test_cmp expect actual &&
+
+               git hash-object y/c~B^0 >actual &&
+               git rev-parse O:x/c >expect &&
+               test_cmp expect actual &&
+
+               echo important >expect &&
+               test_cmp expect y/c
+       )
+'
+
+# Testcase 10d, Delete untracked w/ dir rename/rename(2to1)
+#   Commit O: z/{a,b,c_1},        x/{d,e,f_2}
+#   Commit A: y/{a,b},            x/{d,e,f_2,wham_1} + untracked y/wham
+#   Commit B: z/{a,b,c_1,wham_2}, y/{d,e}
+#   Expected: Failed Merge; y/{a,b,d,e} + untracked y/{wham,wham~B^0,wham~HEAD} +
+#             CONFLICT(rename/rename) z/c_1 vs x/f_2 -> y/wham
+#             ERROR_MSG(Refusing to lose untracked file at y/wham)
+
+test_expect_success '10d-setup: Delete untracked with dir rename/rename(2to1)' '
+       test_create_repo 10d &&
+       (
+               cd 10d &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >x/d &&
+               echo e >x/e &&
+               echo f >x/f &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/c x/wham &&
+               git mv z/ y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/f z/wham &&
+               git mv x/ y/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '10d-check: Delete untracked with dir rename/rename(2to1)' '
+       (
+               cd 10d &&
+
+               git checkout A^0 &&
+               echo important >y/wham &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+               test_i18ngrep "Refusing to lose untracked file at y/wham" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       :0:y/a :0:y/b :0:y/d :0:y/e :2:y/wham :3:y/wham &&
+               git rev-parse >expect \
+                        O:z/a  O:z/b  O:x/d  O:x/e  O:z/c     O:x/f &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse :1:y/wham &&
+
+               echo important >expect &&
+               test_cmp expect y/wham &&
+
+               git hash-object >actual \
+                       y/wham~B^0 y/wham~HEAD &&
+               git rev-parse >expect \
+                       O:x/f      O:z/c &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 10e, Does git complain about untracked file that's not in the way?
+#   Commit O: z/{a,b}
+#   Commit A: y/{a,b} + untracked z/c
+#   Commit B: z/{a,b,c}
+#   Expected: y/{a,b,c} + untracked z/c
+
+test_expect_success '10e-setup: Does git complain about untracked file that is not really in the way?' '
+       test_create_repo 10e &&
+       (
+               cd 10e &&
+
+               mkdir z &&
+               echo a >z/a &&
+               echo b >z/b &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/ y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo c >z/c &&
+               git add z/c &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_failure '10e-check: Does git complain about untracked file that is not really in the way?' '
+       (
+               cd 10e &&
+
+               git checkout A^0 &&
+               mkdir z &&
+               echo random >z/c &&
+
+               git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep ! "following untracked working tree files would be overwritten by merge" err &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       :0:y/a :0:y/b :0:y/c &&
+               git rev-parse >expect \
+                        O:z/a  O:z/b  B:z/c &&
+               test_cmp expect actual &&
+
+               echo random >expect &&
+               test_cmp expect z/c
+       )
+'
+
+###########################################################################
+# SECTION 11: Handling dirty (not up-to-date) files
+#
+# unpack_trees(), upon which the recursive merge algorithm is based, aborts
+# the operation if untracked or dirty files would be deleted or overwritten
+# by the merge.  Unfortunately, unpack_trees() does not understand renames,
+# and if it doesn't abort, then it muddies up the working directory before
+# we even get to the point of detecting renames, so we need some special
+# handling.  This was true even of normal renames, but there are additional
+# codepaths that need special handling with directory renames.  Add
+# testcases for both renamed-by-directory-rename-detection and standard
+# rename cases.
+###########################################################################
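+
+# As an illustrative aside (not exercised by any test below), local
+# modifications that would otherwise block such a merge can usually be set
+# aside and reapplied afterwards, e.g.:
+#
+#     git stash
+#     git merge B^0
+#     git stash pop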
+
+# Testcase 11a, Avoid losing dirty contents with simple rename
+#   Commit O: z/{a,b_v1},
+#   Commit A: z/{a,c_v1}, and z/c_v1 has uncommitted mods
+#   Commit B: z/{a,b_v2}
+#   Expected: ERROR_MSG(Refusing to lose dirty file at z/c) +
+#             z/a, staged version of z/c has sha1sum matching B:z/b_v2,
+#             z/c~HEAD with contents of B:z/b_v2,
+#             z/c with uncommitted mods on top of A:z/c_v1
+
+test_expect_success '11a-setup: Avoid losing dirty contents with simple rename' '
+       test_create_repo 11a &&
+       (
+               cd 11a &&
+
+               mkdir z &&
+               echo a >z/a &&
+               test_seq 1 10 >z/b &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/b z/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo 11 >>z/b &&
+               git add z/b &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11a-check: Avoid losing dirty contents with simple rename' '
+       (
+               cd 11a &&
+
+               git checkout A^0 &&
+               echo stuff >>z/c &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "Refusing to lose dirty file at z/c" out &&
+
+               test_seq 1 10 >expected &&
+               echo stuff >>expected &&
+               test_cmp expected z/c &&
+
+               git ls-files -s >out &&
+               test_line_count = 2 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       :0:z/a :2:z/c &&
+               git rev-parse >expect \
+                        O:z/a  B:z/b &&
+               test_cmp expect actual &&
+
+               git hash-object z/c~HEAD >actual &&
+               git rev-parse B:z/b >expect &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 11b, Avoid losing dirty file involved in directory rename
+#   Commit O: z/a,         x/{b,c_v1}
+#   Commit A: z/{a,c_v1},  x/b,       and z/c_v1 has uncommitted mods
+#   Commit B: y/a,         x/{b,c_v2}
+#   Expected: y/{a,c_v2}, x/b, z/c_v1 with uncommitted mods untracked,
+#             ERROR_MSG(Refusing to lose dirty file at z/c)
+
+
+test_expect_success '11b-setup: Avoid losing dirty file involved in directory rename' '
+       test_create_repo 11b &&
+       (
+               cd 11b &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >x/b &&
+               test_seq 1 10 >x/c &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv x/c z/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               echo 11 >>x/c &&
+               git add x/c &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11b-check: Avoid losing dirty file involved in directory rename' '
+       (
+               cd 11b &&
+
+               git checkout A^0 &&
+               echo stuff >>z/c &&
+
+               git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "Refusing to lose dirty file at z/c" out &&
+
+               grep -q stuff z/c &&
+               test_seq 1 10 >expected &&
+               echo stuff >>expected &&
+               test_cmp expected z/c &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -m >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       :0:x/b :0:y/a :0:y/c &&
+               git rev-parse >expect \
+                        O:x/b  O:z/a  B:x/c &&
+               test_cmp expect actual &&
+
+               git hash-object y/c >actual &&
+               git rev-parse B:x/c >expect &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 11c, Avoid losing not-up-to-date with rename + D/F conflict
+#   Commit O: y/a,         x/{b,c_v1}
+#   Commit A: y/{a,c_v1},  x/b,       and y/c_v1 has uncommitted mods
+#   Commit B: y/{a,c/d},   x/{b,c_v2}
+#   Expected: Abort_msg("following files would be overwritten by merge") +
+#             y/c left untouched (still has uncommitted mods)
+
+test_expect_success '11c-setup: Avoid losing not-uptodate with rename + D/F conflict' '
+       test_create_repo 11c &&
+       (
+               cd 11c &&
+
+               mkdir y x &&
+               echo a >y/a &&
+               echo b >x/b &&
+               test_seq 1 10 >x/c &&
+               git add y x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv x/c y/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir y/c &&
+               echo d >y/c/d &&
+               echo 11 >>x/c &&
+               git add x/c y/c/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11c-check: Avoid losing not-uptodate with rename + D/F conflict' '
+       (
+               cd 11c &&
+
+               git checkout A^0 &&
+               echo stuff >>y/c &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "following files would be overwritten by merge" err &&
+
+               grep -q stuff y/c &&
+               test_seq 1 10 >expected &&
+               echo stuff >>expected &&
+               test_cmp expected y/c &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -m >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out
+       )
+'
+
+# Testcase 11d, Avoid losing not-up-to-date with rename + D/F conflict
+#   Commit O: z/a,         x/{b,c_v1}
+#   Commit A: z/{a,c_v1},  x/b,       and z/c_v1 has uncommitted mods
+#   Commit B: y/{a,c/d},   x/{b,c_v2}
+#   Expected: D/F conflict (y/c_v2 vs. y/c/d) +
+#             Warning_Msg("Refusing to lose dirty file at z/c") +
+#             y/{a,c~HEAD,c/d}, x/b, now-untracked z/c_v1 with uncommitted mods
+
+test_expect_success '11d-setup: Avoid losing not-uptodate with rename + D/F conflict' '
+       test_create_repo 11d &&
+       (
+               cd 11d &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >x/b &&
+               test_seq 1 10 >x/c &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv x/c z/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               mkdir y/c &&
+               echo d >y/c/d &&
+               echo 11 >>x/c &&
+               git add x/c y/c/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11d-check: Avoid losing not-uptodate with rename + D/F conflict' '
+       (
+               cd 11d &&
+
+               git checkout A^0 &&
+               echo stuff >>z/c &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "Refusing to lose dirty file at z/c" out &&
+
+               grep -q stuff z/c &&
+               test_seq 1 10 >expected &&
+               echo stuff >>expected &&
+               test_cmp expected z/c &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 5 out &&
+
+               git rev-parse >actual \
+                       :0:x/b :0:y/a :0:y/c/d :3:y/c &&
+               git rev-parse >expect \
+                        O:x/b  O:z/a  B:y/c/d  B:x/c &&
+               test_cmp expect actual &&
+
+               git hash-object y/c~HEAD >actual &&
+               git rev-parse B:x/c >expect &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 11e, Avoid deleting not-up-to-date with dir rename/rename(1to2)/add
+#   Commit O: z/{a,b},      x/{c_1,d}
+#   Commit A: y/{a,b,c_2},  x/d, w/c_1, and y/c_2 has uncommitted mods
+#   Commit B: z/{a,b,c_1},  x/d
+#   Expected: Failed Merge; y/{a,b} + x/d +
+#             CONFLICT(rename/rename) x/c_1 -> w/c_1 vs y/c_1 +
+#             ERROR_MSG(Refusing to lose dirty file at y/c)
+#             y/c~B^0 has O:x/c_1 contents
+#             y/c~HEAD has A:y/c_2 contents
+#             y/c has dirty file from before merge
+
+test_expect_success '11e-setup: Avoid deleting not-uptodate with dir rename/rename(1to2)/add' '
+       test_create_repo 11e &&
+       (
+               cd 11e &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >z/b &&
+               echo c >x/c &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/ y/ &&
+               echo different >y/c &&
+               mkdir w &&
+               git mv x/c w/ &&
+               git add y/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/c z/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11e-check: Avoid deleting not-uptodate with dir rename/rename(1to2)/add' '
+       (
+               cd 11e &&
+
+               git checkout A^0 &&
+               echo mods >>y/c &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+               test_i18ngrep "Refusing to lose dirty file at y/c" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 7 out &&
+               git ls-files -u >out &&
+               test_line_count = 4 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               echo different >expected &&
+               echo mods >>expected &&
+               test_cmp expected y/c &&
+
+               git rev-parse >actual \
+                       :0:y/a :0:y/b :0:x/d :1:x/c :2:w/c :2:y/c :3:y/c &&
+               git rev-parse >expect \
+                        O:z/a  O:z/b  O:x/d  O:x/c  O:x/c  A:y/c  O:x/c &&
+               test_cmp expect actual &&
+
+               git hash-object >actual \
+                       y/c~B^0 y/c~HEAD &&
+               git rev-parse >expect \
+                       O:x/c   A:y/c &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 11f, Avoid deleting not-up-to-date w/ dir rename/rename(2to1)
+#   Commit O: z/{a,b},        x/{c_1,d_2}
+#   Commit A: y/{a,b,wham_1}, x/d_2, except y/wham has uncommitted mods
+#   Commit B: z/{a,b,wham_2}, x/c_1
+#   Expected: Failed Merge; y/{a,b} + untracked y/{wham~B^0,wham~HEAD} +
+#             y/wham with dirty changes from before merge +
+#             CONFLICT(rename/rename) x/c vs x/d -> y/wham
+#             ERROR_MSG(Refusing to lose dirty file at y/wham)
+
+test_expect_success '11f-setup: Avoid deleting not-uptodate with dir rename/rename(2to1)' '
+       test_create_repo 11f &&
+       (
+               cd 11f &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >z/b &&
+               test_seq 1 10 >x/c &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/ y/ &&
+               git mv x/c y/wham &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/wham &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11f-check: Avoid deleting not-uptodate with dir rename/rename(2to1)' '
+       (
+               cd 11f &&
+
+               git checkout A^0 &&
+               echo important >>y/wham &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+               test_i18ngrep "Refusing to lose dirty file at y/wham" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               test_seq 1 10 >expected &&
+               echo important >>expected &&
+               test_cmp expected y/wham &&
+
+               test_must_fail git rev-parse :1:y/wham &&
+               git hash-object >actual \
+                       y/wham~B^0 y/wham~HEAD &&
+               git rev-parse >expect \
+                       O:x/d      O:x/c &&
+               test_cmp expect actual &&
+
+               git rev-parse >actual \
+                       :0:y/a :0:y/b :2:y/wham :3:y/wham &&
+               git rev-parse >expect \
+                        O:z/a  O:z/b  O:x/c     O:x/d &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# SECTION 12: Everything else
+#
+# Tests suggested by others.  Tests added after implementation completed
+# and submitted.  Grab bag.
+###########################################################################
+
+# Testcase 12a, Moving one directory hierarchy into another
+#   (Related to testcase 9a)
+#   Commit O: node1/{leaf1,leaf2}, node2/{leaf3,leaf4}
+#   Commit A: node1/{leaf1,leaf2,node2/{leaf3,leaf4}}
+#   Commit B: node1/{leaf1,leaf2,leaf5}, node2/{leaf3,leaf4,leaf6}
+#   Expected: node1/{leaf1,leaf2,leaf5,node2/{leaf3,leaf4,leaf6}}
+
+test_expect_success '12a-setup: Moving one directory hierarchy into another' '
+       test_create_repo 12a &&
+       (
+               cd 12a &&
+
+               mkdir -p node1 node2 &&
+               echo leaf1 >node1/leaf1 &&
+               echo leaf2 >node1/leaf2 &&
+               echo leaf3 >node2/leaf3 &&
+               echo leaf4 >node2/leaf4 &&
+               git add node1 node2 &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv node2/ node1/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo leaf5 >node1/leaf5 &&
+               echo leaf6 >node2/leaf6 &&
+               git add node1 node2 &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '12a-check: Moving one directory hierarchy into another' '
+       (
+               cd 12a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+
+               git rev-parse >actual \
+                       HEAD:node1/leaf1 HEAD:node1/leaf2 HEAD:node1/leaf5 \
+                       HEAD:node1/node2/leaf3 \
+                       HEAD:node1/node2/leaf4 \
+                       HEAD:node1/node2/leaf6 &&
+               git rev-parse >expect \
+                       O:node1/leaf1    O:node1/leaf2    B:node1/leaf5 \
+                       O:node2/leaf3 \
+                       O:node2/leaf4 \
+                       B:node2/leaf6 &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 12b, Moving two directory hierarchies into each other
+#   (Related to testcases 1c and 12c)
+#   Commit O: node1/{leaf1, leaf2}, node2/{leaf3, leaf4}
+#   Commit A: node1/{leaf1, leaf2, node2/{leaf3, leaf4}}
+#   Commit B: node2/{leaf3, leaf4, node1/{leaf1, leaf2}}
+#   Expected: node1/node2/node1/{leaf1, leaf2},
+#             node2/node1/node2/{leaf3, leaf4}
+#   NOTE: Without directory renames, we would expect
+#                   node2/node1/{leaf1, leaf2},
+#                   node1/node2/{leaf3, leaf4}
+#         with directory rename detection, we note that
+#             commit A renames node2/ -> node1/node2/
+#             commit B renames node1/ -> node2/node1/
+#         therefore, applying those directory renames to the initial result
+#         (making all four paths experience a transitive renaming), yields
+#         the expected result.
+#
+#         You may ask, is it weird to have two directories rename each other?
+#         To which, I can do no more than shrug my shoulders and say that
+#         even simple rules give weird results when given weird inputs.
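+#
+#         Worked through for one path (illustrative): commit B moves
+#         node1/leaf1 to node2/node1/leaf1; commit A's node2/ -> node1/node2/
+#         rename is then applied to that new path, giving
+#         node1/node2/node1/leaf1, which is exactly what the check below
+#         expects.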
+
+test_expect_success '12b-setup: Moving one directory hierarchy into another' '
+       test_create_repo 12b &&
+       (
+               cd 12b &&
+
+               mkdir -p node1 node2 &&
+               echo leaf1 >node1/leaf1 &&
+               echo leaf2 >node1/leaf2 &&
+               echo leaf3 >node2/leaf3 &&
+               echo leaf4 >node2/leaf4 &&
+               git add node1 node2 &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv node2/ node1/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv node1/ node2/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '12b-check: Moving one directory hierarchy into another' '
+       (
+               cd 12b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       HEAD:node1/node2/node1/leaf1 \
+                       HEAD:node1/node2/node1/leaf2 \
+                       HEAD:node2/node1/node2/leaf3 \
+                       HEAD:node2/node1/node2/leaf4 &&
+               git rev-parse >expect \
+                       O:node1/leaf1 \
+                       O:node1/leaf2 \
+                       O:node2/leaf3 \
+                       O:node2/leaf4 &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 12c, Moving two directory hierarchies into each other w/ content merge
+#   (Related to testcase 12b)
+#   Commit O: node1/{       leaf1_1, leaf2_1}, node2/{leaf3_1, leaf4_1}
+#   Commit A: node1/{       leaf1_2, leaf2_2,  node2/{leaf3_2, leaf4_2}}
+#   Commit B: node2/{node1/{leaf1_3, leaf2_3},        leaf3_3, leaf4_3}
+#   Expected: Content merge conflicts for each of:
+#               node1/node2/node1/{leaf1, leaf2},
+#               node2/node1/node2/{leaf3, leaf4}
+#   NOTE: This is *exactly* like 12b, except that every path is modified on
+#         each side of the merge.
+
+test_expect_success '12c-setup: Moving two directory hierarchies into each other w/ content merge' '
+       test_create_repo 12c &&
+       (
+               cd 12c &&
+
+               mkdir -p node1 node2 &&
+               printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf1\n" >node1/leaf1 &&
+               printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf2\n" >node1/leaf2 &&
+               printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf3\n" >node2/leaf3 &&
+               printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf4\n" >node2/leaf4 &&
+               git add node1 node2 &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv node2/ node1/ &&
+               for i in $(git ls-files); do echo side A >>$i; done &&
+               git add -u &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv node1/ node2/ &&
+               for i in $(git ls-files); do echo side B >>$i; done &&
+               git add -u &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '12c-check: Moving two directory hierarchies into each other w/ content merge' '
+       (
+               cd 12c &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 &&
+
+               git ls-files -u >out &&
+               test_line_count = 12 out &&
+
+               git rev-parse >actual \
+                       :1:node1/node2/node1/leaf1 \
+                       :1:node1/node2/node1/leaf2 \
+                       :1:node2/node1/node2/leaf3 \
+                       :1:node2/node1/node2/leaf4 \
+                       :2:node1/node2/node1/leaf1 \
+                       :2:node1/node2/node1/leaf2 \
+                       :2:node2/node1/node2/leaf3 \
+                       :2:node2/node1/node2/leaf4 \
+                       :3:node1/node2/node1/leaf1 \
+                       :3:node1/node2/node1/leaf2 \
+                       :3:node2/node1/node2/leaf3 \
+                       :3:node2/node1/node2/leaf4 &&
+               git rev-parse >expect \
+                       O:node1/leaf1 \
+                       O:node1/leaf2 \
+                       O:node2/leaf3 \
+                       O:node2/leaf4 \
+                       A:node1/leaf1 \
+                       A:node1/leaf2 \
+                       A:node1/node2/leaf3 \
+                       A:node1/node2/leaf4 \
+                       B:node2/node1/leaf1 \
+                       B:node2/node1/leaf2 \
+                       B:node2/leaf3 \
+                       B:node2/leaf4 &&
+               test_cmp expect actual
+       )
+'
+
+test_done
diff --git a/t/t6046-merge-skip-unneeded-updates.sh b/t/t6046-merge-skip-unneeded-updates.sh
new file mode 100755 (executable)
index 0000000..fcefffc
--- /dev/null
@@ -0,0 +1,761 @@
+#!/bin/sh
+
+test_description="merge cases"
+
+# The setup for all of them, pictorially, is:
+#
+#      A
+#      o
+#     / \
+#  O o   ?
+#     \ /
+#      o
+#      B
+#
+# To help make it easier to follow the flow of tests, they have been
+# divided into sections and each test will start with a quick explanation
+# of what commits O, A, and B contain.
+#
+# Notation:
+#    z/{b,c}   means  files z/b and z/c both exist
+#    x/d_1     means  file x/d exists with content d1.  (Purpose of the
+#                     underscore notation is to differentiate different
+#                     files that might be renamed into each other's paths.)
+
+. ./test-lib.sh
+
+
+###########################################################################
+# SECTION 1: Cases involving no renames (one side has subset of changes of
+#            the other side)
+###########################################################################
+
+# Testcase 1a, Changes on A, subset of changes on B
+#   Commit O: b_1
+#   Commit A: b_2
+#   Commit B: b_3
+#   Expected: b_2
+
+test_expect_success '1a-setup: Modify(A)/Modify(B), change on B subset of A' '
+       test_create_repo 1a &&
+       (
+               cd 1a &&
+
+               test_write_lines 1 2 3 4 5 6 7 8 9 10 >b &&
+               git add b &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_write_lines 1 2 3 4 5 5.5 6 7 8 9 10 10.5 >b &&
+               git add b &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               test_write_lines 1 2 3 4 5 5.5 6 7 8 9 10 >b &&
+               git add b &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1a-check-L: Modify(A)/Modify(B), change on B subset of A' '
+       test_when_finished "git -C 1a reset --hard" &&
+       test_when_finished "git -C 1a clean -fd" &&
+       (
+               cd 1a &&
+
+               git checkout A^0 &&
+
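+               # Pin b to a known mtime and record it, so we can verify below
+               # that the merge left the (already up-to-date) file untouched.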
+               test-tool chmtime =31337 b &&
+               test-tool chmtime -v +0 b >expected-mtime &&
+
+               GIT_MERGE_VERBOSITY=3 git merge -s recursive B^0 >out 2>err &&
+
+               test_i18ngrep "Skipped b" out &&
+               test_must_be_empty err &&
+
+               test-tool chmtime -v +0 b >actual-mtime &&
+               test_cmp expected-mtime actual-mtime &&
+
+               git ls-files -s >index_files &&
+               test_line_count = 1 index_files &&
+
+               git rev-parse >actual HEAD:b &&
+               git rev-parse >expect A:b &&
+               test_cmp expect actual &&
+
+               git hash-object b   >actual &&
+               git rev-parse   A:b >expect &&
+               test_cmp expect actual
+       )
+'
+
+test_expect_success '1a-check-R: Modify(A)/Modify(B), change on B subset of A' '
+       test_when_finished "git -C 1a reset --hard" &&
+       test_when_finished "git -C 1a clean -fd" &&
+       (
+               cd 1a &&
+
+               git checkout B^0 &&
+
+               GIT_MERGE_VERBOSITY=3 git merge -s recursive A^0 >out 2>err &&
+
+               test_i18ngrep "Auto-merging b" out &&
+               test_must_be_empty err &&
+
+               git ls-files -s >index_files &&
+               test_line_count = 1 index_files &&
+
+               git rev-parse >actual HEAD:b &&
+               git rev-parse >expect A:b &&
+               test_cmp expect actual &&
+
+               git hash-object b   >actual &&
+               git rev-parse   A:b >expect &&
+               test_cmp expect actual
+       )
+'
+
+
+###########################################################################
+# SECTION 2: Cases involving basic renames
+###########################################################################
+
+# Testcase 2a, Changes on A, rename on B
+#   Commit O: b_1
+#   Commit A: b_2
+#   Commit B: c_1
+#   Expected: c_2
+
+test_expect_success '2a-setup: Modify(A)/rename(B)' '
+       test_create_repo 2a &&
+       (
+               cd 2a &&
+
+               test_seq 1 10 >b &&
+               git add b &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_seq 1 11 >b &&
+               git add b &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv b c &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '2a-check-L: Modify/rename, merge into modify side' '
+       test_when_finished "git -C 2a reset --hard" &&
+       test_when_finished "git -C 2a clean -fd" &&
+       (
+               cd 2a &&
+
+               git checkout A^0 &&
+
+               GIT_MERGE_VERBOSITY=3 git merge -s recursive B^0 >out 2>err &&
+
+               test_i18ngrep ! "Skipped c" out &&
+               test_must_be_empty err &&
+
+               git ls-files -s >index_files &&
+               test_line_count = 1 index_files &&
+
+               git rev-parse >actual HEAD:c &&
+               git rev-parse >expect A:b &&
+               test_cmp expect actual &&
+
+               git hash-object c   >actual &&
+               git rev-parse   A:b >expect &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse HEAD:b &&
+               test_path_is_missing b
+       )
+'
+
+test_expect_success '2a-check-R: Modify/rename, merge into rename side' '
+       test_when_finished "git -C 2a reset --hard" &&
+       test_when_finished "git -C 2a clean -fd" &&
+       (
+               cd 2a &&
+
+               git checkout B^0 &&
+
+               GIT_MERGE_VERBOSITY=3 git merge -s recursive A^0 >out 2>err &&
+
+               test_i18ngrep ! "Skipped c" out &&
+               test_must_be_empty err &&
+
+               git ls-files -s >index_files &&
+               test_line_count = 1 index_files &&
+
+               git rev-parse >actual HEAD:c &&
+               git rev-parse >expect A:b &&
+               test_cmp expect actual &&
+
+               git hash-object c   >actual &&
+               git rev-parse   A:b >expect &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse HEAD:b &&
+               test_path_is_missing b
+       )
+'
+
+# Testcase 2b, Changed and renamed on A, subset of changes on B
+#   Commit O: b_1
+#   Commit A: c_2
+#   Commit B: b_3
+#   Expected: c_2
+
+test_expect_success '2b-setup: Rename+Mod(A)/Mod(B), B mods subset of A' '
+       test_create_repo 2b &&
+       (
+               cd 2b &&
+
+               test_write_lines 1 2 3 4 5 6 7 8 9 10 >b &&
+               git add b &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_write_lines 1 2 3 4 5 5.5 6 7 8 9 10 10.5 >b &&
+               git add b &&
+               git mv b c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               test_write_lines 1 2 3 4 5 5.5 6 7 8 9 10 >b &&
+               git add b &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '2b-check-L: Rename+Mod(A)/Mod(B), B mods subset of A' '
+       test_when_finished "git -C 2b reset --hard" &&
+       test_when_finished "git -C 2b clean -fd" &&
+       (
+               cd 2b &&
+
+               git checkout A^0 &&
+
+               test-tool chmtime =31337 c &&
+               test-tool chmtime -v +0 c >expected-mtime &&
+
+               GIT_MERGE_VERBOSITY=3 git merge -s recursive B^0 >out 2>err &&
+
+               test_i18ngrep "Skipped c" out &&
+               test_must_be_empty err &&
+
+               test-tool chmtime -v +0 c >actual-mtime &&
+               test_cmp expected-mtime actual-mtime &&
+
+               git ls-files -s >index_files &&
+               test_line_count = 1 index_files &&
+
+               git rev-parse >actual HEAD:c &&
+               git rev-parse >expect A:c &&
+               test_cmp expect actual &&
+
+               git hash-object c   >actual &&
+               git rev-parse   A:c >expect &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse HEAD:b &&
+               test_path_is_missing b
+       )
+'
+
+test_expect_success '2b-check-R: Rename+Mod(A)/Mod(B), B mods subset of A' '
+       test_when_finished "git -C 2b reset --hard" &&
+       test_when_finished "git -C 2b clean -fd" &&
+       (
+               cd 2b &&
+
+               git checkout B^0 &&
+
+               GIT_MERGE_VERBOSITY=3 git merge -s recursive A^0 >out 2>err &&
+
+               test_i18ngrep "Auto-merging c" out &&
+               test_must_be_empty err &&
+
+               git ls-files -s >index_files &&
+               test_line_count = 1 index_files &&
+
+               git rev-parse >actual HEAD:c &&
+               git rev-parse >expect A:c &&
+               test_cmp expect actual &&
+
+               git hash-object c   >actual &&
+               git rev-parse   A:c >expect &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse HEAD:b &&
+               test_path_is_missing b
+       )
+'
+
+# Testcase 2c, Changes on A, rename on B
+#   Commit O: b_1
+#   Commit A: b_2, c_3
+#   Commit B: c_1
+#   Expected: rename/add conflict c_2 vs c_3
+#
+#   NOTE: Since A modified b_1->b_2, and B renamed b_1->c_1, the threeway
+#         merge of those files should result in c_2.  We then should have a
+#         rename/add conflict between c_2 and c_3.  However, if we note in
+#         merge_content() that A had the right contents (b_2 has same
+#         contents as c_2, just at a different name), and that A had the
+#         right path present (c_3 existed) and thus decides that it can
+#         skip the update, then we're in trouble.  This test verifies we do
+#         not make that particular mistake.
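+#
+#         (Concretely: c_1 still has b_1's contents, so the three-way merge
+#         of b_1, b_2 and c_1 produces b_2's contents at path c, i.e. c_2,
+#         which then collides with the c_3 that A added independently.)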
+
+test_expect_success '2c-setup: Modify b & add c VS rename b->c' '
+       test_create_repo 2c &&
+       (
+               cd 2c &&
+
+               test_seq 1 10 >b &&
+               git add b &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_seq 1 11 >b &&
+               echo whatever >c &&
+               git add b c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv b c &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '2c-check: Modify b & add c VS rename b->c' '
+       (
+               cd 2c &&
+
+               git checkout A^0 &&
+
+               GIT_MERGE_VERBOSITY=3 test_must_fail git merge -s recursive B^0 >out 2>err &&
+
+               test_i18ngrep "CONFLICT (rename/add): Rename b->c" out &&
+               test_i18ngrep ! "Skipped c" out &&
+               test_must_be_empty err
+
+               # FIXME: rename/add conflicts are horribly broken right now;
+               # when I get back to my patch series fixing it and
+               # rename/rename(2to1) conflicts to bring them in line with
+               # how add/add conflicts behave, then checks like the below
+               # could be added.  But that patch series is waiting until
+               # the rename-directory-detection series lands, which this
+               # is part of.  And in the mean time, I do not want to further
+               # enforce broken behavior.  So for now, the main test is the
+               # one above that err is an empty file.
+
+               #git ls-files -s >index_files &&
+               #test_line_count = 2 index_files &&
+
+               #git rev-parse >actual :2:c :3:c &&
+               #git rev-parse >expect A:b  A:c  &&
+               #test_cmp expect actual &&
+
+               #git cat-file -p A:b >>merged &&
+               #git cat-file -p A:c >>merge-me &&
+               #>empty &&
+               #test_must_fail git merge-file \
+               #       -L "Temporary merge branch 1" \
+               #       -L "" \
+               #       -L "Temporary merge branch 2" \
+               #       merged empty merge-me &&
+               #sed -e "s/^\([<=>]\)/\1\1\1/" merged >merged-internal &&
+
+               #git hash-object c               >actual &&
+               #git hash-object merged-internal >expect &&
+               #test_cmp expect actual &&
+
+               #test_path_is_missing b
+       )
+'
+
+
+###########################################################################
+# SECTION 3: Cases involving directory renames
+#
+# NOTE:
+#   Directory renames only apply when one side renames a directory, and the
+#   other side adds or renames a path into that directory.  Applying the
+#   directory rename to that new path creates a new pathname that didn't
+#   exist on either side of history.  Thus, it is impossible for the
+#   merge contents to already be at the right path, so all of these checks
+#   exist just to make sure that updates are not skipped.
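+#   For instance, in testcase 3a below A moves bq into foo/ while B renames
+#   foo/ -> bar/, so the merge result must appear at bar/bq, a path that
+#   existed on neither side before the merge.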
+###########################################################################
+
+# Testcase 3a, Change + rename into dir foo on A, dir rename foo->bar on B
+#   Commit O: bq_1, foo/whatever
+#   Commit A: foo/{bq_2, whatever}
+#   Commit B: bq_1, bar/whatever
+#   Expected: bar/{bq_2, whatever}
+
+test_expect_success '3a-setup: bq_1->foo/bq_2 on A, foo/->bar/ on B' '
+       test_create_repo 3a &&
+       (
+               cd 3a &&
+
+               mkdir foo &&
+               test_seq 1 10 >bq &&
+               test_write_lines a b c d e f g h i j k >foo/whatever &&
+               git add bq foo/whatever &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_seq 1 11 >bq &&
+               git add bq &&
+               git mv bq foo/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv foo/ bar/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '3a-check-L: bq_1->foo/bq_2 on A, foo/->bar/ on B' '
+       test_when_finished "git -C 3a reset --hard" &&
+       test_when_finished "git -C 3a clean -fd" &&
+       (
+               cd 3a &&
+
+               git checkout A^0 &&
+
+               GIT_MERGE_VERBOSITY=3 git merge -s recursive B^0 >out 2>err &&
+
+               test_i18ngrep ! "Skipped bar/bq" out &&
+               test_must_be_empty err &&
+
+               git ls-files -s >index_files &&
+               test_line_count = 2 index_files &&
+
+               git rev-parse >actual HEAD:bar/bq HEAD:bar/whatever &&
+               git rev-parse >expect A:foo/bq    A:foo/whatever &&
+               test_cmp expect actual &&
+
+               git hash-object bar/bq   bar/whatever   >actual &&
+               git rev-parse   A:foo/bq A:foo/whatever >expect &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse HEAD:bq HEAD:foo/bq &&
+               test_path_is_missing bq foo/bq foo/whatever
+       )
+'
+
+test_expect_success '3a-check-R: bq_1->foo/bq_2 on A, foo/->bar/ on B' '
+       test_when_finished "git -C 3a reset --hard" &&
+       test_when_finished "git -C 3a clean -fd" &&
+       (
+               cd 3a &&
+
+               git checkout B^0 &&
+
+               GIT_MERGE_VERBOSITY=3 git merge -s recursive A^0 >out 2>err &&
+
+               test_i18ngrep ! "Skipped bar/bq" out &&
+               test_must_be_empty err &&
+
+               git ls-files -s >index_files &&
+               test_line_count = 2 index_files &&
+
+               git rev-parse >actual HEAD:bar/bq HEAD:bar/whatever &&
+               git rev-parse >expect A:foo/bq    A:foo/whatever &&
+               test_cmp expect actual &&
+
+               git hash-object bar/bq   bar/whatever   >actual &&
+               git rev-parse   A:foo/bq A:foo/whatever >expect &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse HEAD:bq HEAD:foo/bq &&
+               test_path_is_missing bq foo/bq foo/whatever
+       )
+'
+
+# Testcase 3b, rename into dir foo on A, dir rename foo->bar + change on B
+#   Commit O: bq_1, foo/whatever
+#   Commit A: foo/{bq_1, whatever}
+#   Commit B: bq_2, bar/whatever
+#   Expected: bar/{bq_2, whatever}
+
+test_expect_success '3b-setup: bq_1->foo/ on A, foo/->bar/ + bq_1->bq_2 on B' '
+       test_create_repo 3b &&
+       (
+               cd 3b &&
+
+               mkdir foo &&
+               test_seq 1 10 >bq &&
+               test_write_lines a b c d e f g h i j k >foo/whatever &&
+               git add bq foo/whatever &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv bq foo/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               test_seq 1 11 >bq &&
+               git add bq &&
+               git mv foo/ bar/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '3b-check-L: bq_1->foo/ on A, foo/->bar/ + bq_1->bq_2 on B' '
+       test_when_finished "git -C 3b reset --hard" &&
+       test_when_finished "git -C 3b clean -fd" &&
+       (
+               cd 3b &&
+
+               git checkout A^0 &&
+
+               GIT_MERGE_VERBOSITY=3 git merge -s recursive B^0 >out 2>err &&
+
+               test_i18ngrep ! "Skipped bar/bq" out &&
+               test_must_be_empty err &&
+
+               git ls-files -s >index_files &&
+               test_line_count = 2 index_files &&
+
+               git rev-parse >actual HEAD:bar/bq HEAD:bar/whatever &&
+               git rev-parse >expect B:bq        A:foo/whatever &&
+               test_cmp expect actual &&
+
+               git hash-object bar/bq bar/whatever   >actual &&
+               git rev-parse   B:bq   A:foo/whatever >expect &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse HEAD:bq HEAD:foo/bq &&
+               test_path_is_missing bq foo/bq foo/whatever
+       )
+'
+
+test_expect_success '3b-check-R: bq_1->foo/ on A, foo/->bar/ + bq_1->bq_2 on B' '
+       test_when_finished "git -C 3b reset --hard" &&
+       test_when_finished "git -C 3b clean -fd" &&
+       (
+               cd 3b &&
+
+               git checkout B^0 &&
+
+               GIT_MERGE_VERBOSITY=3 git merge -s recursive A^0 >out 2>err &&
+
+               test_i18ngrep ! "Skipped bar/bq" out &&
+               test_must_be_empty err &&
+
+               git ls-files -s >index_files &&
+               test_line_count = 2 index_files &&
+
+               git rev-parse >actual HEAD:bar/bq HEAD:bar/whatever &&
+               git rev-parse >expect B:bq        A:foo/whatever &&
+               test_cmp expect actual &&
+
+               git hash-object bar/bq bar/whatever   >actual &&
+               git rev-parse   B:bq   A:foo/whatever >expect &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse HEAD:bq HEAD:foo/bq &&
+               test_path_is_missing bq foo/bq foo/whatever
+       )
+'
+
+###########################################################################
+# SECTION 4: Cases involving dirty changes
+###########################################################################
+
+# Testcase 4a, Changed on A, subset of changes on B, locally modified
+#   Commit O: b_1
+#   Commit A: b_2
+#   Commit B: b_3
+#   Working copy: b_4
+#   Expected: b_2 for merge, b_4 in working copy
+
+test_expect_success '4a-setup: Change on A, change on B subset of A, dirty mods present' '
+       test_create_repo 4a &&
+       (
+               cd 4a &&
+
+               test_write_lines 1 2 3 4 5 6 7 8 9 10 >b &&
+               git add b &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_write_lines 1 2 3 4 5 5.5 6 7 8 9 10 10.5 >b &&
+               git add b &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               test_write_lines 1 2 3 4 5 5.5 6 7 8 9 10 >b &&
+               git add b &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+# NOTE: For as long as we continue using unpack_trees() without index_only
+#   set to true, it will error out on a case like this claiming that the locally
+#   modified file would be overwritten by the merge.  Getting this testcase
+#   correct requires doing the merge in-memory first, then realizing that no
+#   updates to the file are necessary, and thus that we can just leave the path
+#   alone.
+test_expect_failure '4a-check: Change on A, change on B subset of A, dirty mods present' '
+       test_when_finished "git -C 4a reset --hard" &&
+       test_when_finished "git -C 4a clean -fd" &&
+       (
+               cd 4a &&
+
+               git checkout A^0 &&
+               echo "File rewritten" >b &&
+
+               test-tool chmtime =31337 b &&
+               test-tool chmtime -v +0 b >expected-mtime &&
+
+               GIT_MERGE_VERBOSITY=3 git merge -s recursive B^0 >out 2>err &&
+
+               test_i18ngrep "Skipped b" out &&
+               test_must_be_empty err &&
+
+               test-tool chmtime -v +0 b >actual-mtime &&
+               test_cmp expected-mtime actual-mtime &&
+
+               git ls-files -s >index_files &&
+               test_line_count = 1 index_files &&
+
+               git rev-parse >actual :0:b &&
+               git rev-parse >expect A:b &&
+               test_cmp expect actual &&
+
+               git hash-object b >actual &&
+               echo "File rewritten" | git hash-object --stdin >expect &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 4b, Changed+renamed on A, subset of changes on B, locally modified
+#   Commit O: b_1
+#   Commit A: c_2
+#   Commit B: b_3
+#   Working copy: c_4
+#   Expected: c_2
+
+test_expect_success '4b-setup: Rename+Mod(A)/Mod(B), change on B subset of A, dirty mods present' '
+       test_create_repo 4b &&
+       (
+               cd 4b &&
+
+               test_write_lines 1 2 3 4 5 6 7 8 9 10 >b &&
+               git add b &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_write_lines 1 2 3 4 5 5.5 6 7 8 9 10 10.5 >b &&
+               git add b &&
+               git mv b c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               test_write_lines 1 2 3 4 5 5.5 6 7 8 9 10 >b &&
+               git add b &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '4b-check: Rename+Mod(A)/Mod(B), change on B subset of A, dirty mods present' '
+       test_when_finished "git -C 4b reset --hard" &&
+       test_when_finished "git -C 4b clean -fd" &&
+       (
+               cd 4b &&
+
+               git checkout A^0 &&
+               echo "File rewritten" >c &&
+
+               test-tool chmtime =31337 c &&
+               test-tool chmtime -v +0 c >expected-mtime &&
+
+               GIT_MERGE_VERBOSITY=3 git merge -s recursive B^0 >out 2>err &&
+
+               test_i18ngrep "Skipped c" out &&
+               test_must_be_empty err &&
+
+               test-tool chmtime -v +0 c >actual-mtime &&
+               test_cmp expected-mtime actual-mtime &&
+
+               git ls-files -s >index_files &&
+               test_line_count = 1 index_files &&
+
+               git rev-parse >actual :0:c &&
+               git rev-parse >expect A:c &&
+               test_cmp expect actual &&
+
+               git hash-object c >actual &&
+               echo "File rewritten" | git hash-object --stdin >expect &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse HEAD:b &&
+               test_path_is_missing b
+       )
+'
+
+test_done
index c630aba657e9c78b6dee4a72be34ed3bd03c01ee..d174bfed309c1adf5cfbafc7b9cc6a6b1c6033ee 100755 (executable)
@@ -444,4 +444,32 @@ test_expect_success GPG '--graft on a commit with a mergetag' '
        git replace -d $HASH10
 '
 
+test_expect_success '--convert-graft-file' '
+       git checkout -b with-graft-file &&
+       test_commit root2 &&
+       git reset --hard root2^ &&
+       test_commit root1 &&
+       test_commit after-root1 &&
+       test_tick &&
+       git merge -m merge-root2 root2 &&
+
+       : add and convert graft file &&
+       printf "%s\n%s %s\n\n# comment\n%s\n" \
+               $(git rev-parse HEAD^^ HEAD^ HEAD^^ HEAD^2) \
+               >.git/info/grafts &&
+       git replace --convert-graft-file &&
+       test_path_is_missing .git/info/grafts &&
+
+       : verify that the history is now "grafted" &&
+       git rev-list HEAD >out &&
+       test_line_count = 4 out &&
+
+       : create invalid graft file and verify that it is not deleted &&
+       test_when_finished "rm -f .git/info/grafts" &&
+       echo $EMPTY_BLOB $EMPTY_TREE >.git/info/grafts &&
+       test_must_fail git replace --convert-graft-file 2>err &&
+       test_i18ngrep "$EMPTY_BLOB $EMPTY_TREE" err &&
+       test_i18ngrep "$EMPTY_BLOB $EMPTY_TREE" .git/info/grafts
+'
+
 test_done
index 41b0be575d523071152c19a224a7ef914ce2ee1a..818435f04e49b50965cd674bf046eb3d494fa939 100755 (executable)
@@ -5,6 +5,13 @@ test_description='basic git gc tests
 
 . ./test-lib.sh
 
+test_expect_success 'setup' '
+       # do not let the amount of physical memory affect gc
+       # behavior; make sure we always pack everything into one
+       # pack by default
+       git config gc.bigPackThreshold 2g
+'
+
 test_expect_success 'gc empty repository' '
        git gc
 '
@@ -43,6 +50,31 @@ test_expect_success 'gc is not aborted due to a stale symref' '
        )
 '
 
+test_expect_success 'gc --keep-largest-pack' '
+       test_create_repo keep-pack &&
+       (
+               cd keep-pack &&
+               test_commit one &&
+               test_commit two &&
+               test_commit three &&
+               git gc &&
+               ( cd .git/objects/pack && ls *.pack ) >pack-list &&
+               test_line_count = 1 pack-list &&
+               BASE_PACK=.git/objects/pack/pack-*.pack &&
+               test_commit four &&
+               git repack -d &&
+               test_commit five &&
+               git repack -d &&
+               ( cd .git/objects/pack && ls *.pack ) >pack-list &&
+               test_line_count = 3 pack-list &&
+               git gc --keep-largest-pack &&
+               ( cd .git/objects/pack && ls *.pack ) >pack-list &&
+               test_line_count = 2 pack-list &&
+               test_path_is_file $BASE_PACK &&
+               git fsck
+       )
+'
+
 test_expect_success 'auto gc with too many loose objects does not attempt to create bitmaps' '
        test_config gc.auto 3 &&
        test_config gc.autodetach false &&
@@ -87,7 +119,7 @@ test_expect_success 'background auto gc does not run if gc.log is present and re
        test_must_fail git gc --auto 2>err &&
        test_i18ngrep "^error:" err &&
        test_config gc.logexpiry 5.days &&
-       test-chmtime =-345600 .git/gc.log &&
+       test-tool chmtime =-345600 .git/gc.log &&
        test_must_fail git gc --auto &&
        test_config gc.logexpiry 2.days &&
        run_and_wait_for_auto_gc &&
index 394b169eada7968875e931c5a43f25a808cfc18f..033871ee5f35c1a143aec6d2b30c5116bf3f94f5 100755 (executable)
@@ -72,8 +72,7 @@ for repack in '' true; do
        '
 
        test_expect_success "simulate time passing ($title)" '
-               find .git/objects -type f |
-               xargs test-chmtime -v -86400
+               test-tool chmtime --get -86400 $(find .git/objects -type f)
        '
 
        test_expect_success "start writing new commit with old blob ($title)" '
@@ -103,8 +102,7 @@ for repack in '' true; do
 
        test_expect_success "abandon objects again ($title)" '
                git reset --hard HEAD^ &&
-               find .git/objects -type f |
-               xargs test-chmtime -v -86400
+               test-tool chmtime --get -86400 $(find .git/objects -type f)
        '
 
        test_expect_success "start writing new commit with same tree ($title)" '
index d4e6485a26eef8985856413a1bf977c95f6ec345..cc3fd2baf2b80817ffc39ab67a988dcf31dbc537 100755 (executable)
@@ -21,8 +21,8 @@ test_expect_success \
 
 test_expect_success \
     'checking the commit' \
-    'git diff-tree -r -M --name-status  HEAD^ HEAD | \
-    grep "^R100..*path0/COPYING..*path1/COPYING"'
+    'git diff-tree -r -M --name-status  HEAD^ HEAD >actual &&
+    grep "^R100..*path0/COPYING..*path1/COPYING" actual'
 
 test_expect_success \
     'moving the file back into subdirectory' \
@@ -35,8 +35,8 @@ test_expect_success \
 
 test_expect_success \
     'checking the commit' \
-    'git diff-tree -r -M --name-status  HEAD^ HEAD | \
-    grep "^R100..*path1/COPYING..*path0/COPYING"'
+    'git diff-tree -r -M --name-status  HEAD^ HEAD >actual &&
+    grep "^R100..*path1/COPYING..*path0/COPYING" actual'
 
 test_expect_success \
     'mv --dry-run does not move file' \
@@ -122,10 +122,9 @@ test_expect_success \
 
 test_expect_success \
     'checking the commit' \
-    'git diff-tree -r -M --name-status  HEAD^ HEAD | \
-     grep "^R100..*path0/COPYING..*path2/COPYING" &&
-     git diff-tree -r -M --name-status  HEAD^ HEAD | \
-     grep "^R100..*path0/README..*path2/README"'
+    'git diff-tree -r -M --name-status  HEAD^ HEAD >actual &&
+     grep "^R100..*path0/COPYING..*path2/COPYING" actual &&
+     grep "^R100..*path0/README..*path2/README" actual'
 
 test_expect_success \
     'succeed when source is a prefix of destination' \
@@ -141,10 +140,9 @@ test_expect_success \
 
 test_expect_success \
     'checking the commit' \
-    'git diff-tree -r -M --name-status  HEAD^ HEAD | \
-     grep "^R100..*path2/COPYING..*path1/path2/COPYING" &&
-     git diff-tree -r -M --name-status  HEAD^ HEAD | \
-     grep "^R100..*path2/README..*path1/path2/README"'
+    'git diff-tree -r -M --name-status  HEAD^ HEAD >actual &&
+     grep "^R100..*path2/COPYING..*path1/path2/COPYING" actual &&
+     grep "^R100..*path2/README..*path1/path2/README" actual'
 
 test_expect_success \
     'do not move directory over existing directory' \
@@ -497,7 +495,7 @@ test_expect_success 'moving a submodule in nested directories' '
        test_cmp expect actual
 '
 
-test_expect_failure 'moving nested submodules' '
+test_expect_success 'moving nested submodules' '
        git commit -am "cleanup commit" &&
        mkdir sub_nested_nested &&
        (cd sub_nested_nested &&
index 7cb60799be1a109e2210350137c8754a5bc4bb7d..ec4b160ddb9f966044e729f35cc1edfcc79eed14 100755 (executable)
@@ -187,7 +187,8 @@ test_expect_success 'author information is preserved' '
                        test \$GIT_COMMIT != $(git rev-parse master) || \
                        echo Hallo" \
                preserved-author) &&
-       test 1 = $(git rev-list --author="B V Uips" preserved-author | wc -l)
+       git rev-list --author="B V Uips" preserved-author >actual &&
+       test_line_count = 1 actual
 '
 
 test_expect_success "remove a certain author's commits" '
@@ -205,7 +206,8 @@ test_expect_success "remove a certain author's commits" '
        cnt1=$(git rev-list master | wc -l) &&
        cnt2=$(git rev-list removed-author | wc -l) &&
        test $cnt1 -eq $(($cnt2 + 1)) &&
-       test 0 = $(git rev-list --author="B V Uips" removed-author | wc -l)
+       git rev-list --author="B V Uips" removed-author >actual &&
+       test_line_count = 0 actual
 '
 
 test_expect_success 'barf on invalid name' '
@@ -258,7 +260,8 @@ test_expect_success 'Subdirectory filter with disappearing trees' '
        git commit -m "Re-adding foo" &&
 
        git filter-branch -f --subdirectory-filter foo &&
-       test $(git rev-list master | wc -l) = 3
+       git rev-list master >actual &&
+       test_line_count = 3 actual
 '
 
 test_expect_success 'Tag name filtering retains tag message' '
@@ -470,4 +473,18 @@ test_expect_success 'tree-filter deals with object name vs pathname ambiguity' '
        git show HEAD:$ambiguous
 '
 
+test_expect_success 'rewrite repository including refs that point at non-commit object' '
+       test_when_finished "git reset --hard original" &&
+       tree=$(git rev-parse HEAD^{tree}) &&
+       test_when_finished "git replace -d $tree" &&
+       echo A >new &&
+       git add new &&
+       new_tree=$(git write-tree) &&
+       git replace $tree $new_tree &&
+       git tag -a -m "tag to a tree" treetag $new_tree &&
+       git reset --hard HEAD &&
+       git filter-branch -f -- --all >filter-output 2>&1 &&
+       ! fgrep fatal filter-output
+'
+
 test_done
index 2aac77af701989dc16980268155d6e40500354bb..d7b319e919c83ca677737840f70075c173364209 100755 (executable)
@@ -363,7 +363,7 @@ test_expect_success 'tag -l <pattern> -l <pattern> works, as our buggy documenta
 '
 
 test_expect_success 'listing tags in column' '
-       COLUMNS=40 git tag -l --column=row >actual &&
+       COLUMNS=41 git tag -l --column=row >actual &&
        cat >expected <<\EOF &&
 a1      aa1     cba     t210    t211
 v0.2.1  v1.0    v1.0.1  v1.1.3
@@ -1056,7 +1056,18 @@ test_expect_success GPG \
        git tag -s -F sigblanknonlfile blanknonlfile-signed-tag &&
        get_tag_msg blanknonlfile-signed-tag >actual &&
        test_cmp expect actual &&
-       git tag -v signed-tag
+       git tag -v blanknonlfile-signed-tag
+'
+
+test_expect_success GPG 'signed tag with embedded PGP message' '
+       cat >msg <<-\EOF &&
+       -----BEGIN PGP MESSAGE-----
+
+       this is not a real PGP message
+       -----END PGP MESSAGE-----
+       EOF
+       git tag -s -F msg confusing-pgp-message &&
+       git tag -v confusing-pgp-message
 '
 
 # messages with commented lines for signed tags:
index 29e5043b9452b9c32a0d8ad437fb33745b296ac6..b2ca77b3384c97954991eae9c5af89c4d8d8035e 100755 (executable)
@@ -111,14 +111,8 @@ do
        '
 done
 
-if echo 'echo space > "$1"' > "e space.sh"
-then
-       # FS supports spaces in filenames
-       test_set_prereq SPACES_IN_FILENAMES
-fi
-
-test_expect_success SPACES_IN_FILENAMES 'editor with a space' '
-
+test_expect_success 'editor with a space' '
+       echo "echo space >\$1" >"e space.sh" &&
        chmod a+x "e space.sh" &&
        GIT_EDITOR="./e\ space.sh" git commit --amend &&
        test space = "$(git show -s --pretty=format:%s)"
@@ -126,7 +120,7 @@ test_expect_success SPACES_IN_FILENAMES 'editor with a space' '
 '
 
 unset GIT_EDITOR
-test_expect_success SPACES_IN_FILENAMES 'core.editor with a space' '
+test_expect_success 'core.editor with a space' '
 
        git config core.editor \"./e\ space.sh\" &&
        git commit --amend &&
index a39e69a3ebd1c39ddf8feb861f8a39c8315aa7f8..152104412f212190c18279b64e80bb9f58242d47 100755 (executable)
@@ -821,6 +821,21 @@ test_expect_success 'moving the superproject does not break submodules' '
        )
 '
 
+test_expect_success 'moving the submodule does not break the superproject' '
+       (
+               cd addtest2 &&
+               git submodule status
+       ) >actual &&
+       sed -e "s/^ \([^ ]* repo\) .*/-\1/" <actual >expect &&
+       mv addtest2/repo addtest2/repo.bak &&
+       test_when_finished "mv addtest2/repo.bak addtest2/repo" &&
+       (
+               cd addtest2 &&
+               git submodule status
+       ) >actual &&
+       test_cmp expect actual
+'
+
 test_expect_success 'submodule add --name allows to replace a submodule with another at the same path' '
        (
                cd addtest2 &&
index 46c09c77654597b2ee501271b4688cc6137df8f1..0bde5850ac547c90dadd9e21341ebad80a1471e6 100755 (executable)
@@ -41,7 +41,7 @@ test_expect_success 'configuration parsing with error' '
        EOF
        (
                cd repo &&
-               test_must_fail test-submodule-config "" s 2>actual &&
+               test_must_fail test-tool submodule-config "" s 2>actual &&
                test_i18ngrep "bad config" actual
        )
 '
@@ -55,7 +55,7 @@ EOF
 
 test_expect_success 'test parsing and lookup of submodule config by path' '
        (cd super &&
-               test-submodule-config \
+               test-tool submodule-config \
                        HEAD^ a \
                        HEAD b \
                        HEAD^ submodule \
@@ -67,7 +67,7 @@ test_expect_success 'test parsing and lookup of submodule config by path' '
 
 test_expect_success 'test parsing and lookup of submodule config by name' '
        (cd super &&
-               test-submodule-config --name \
+               test-tool submodule-config --name \
                        HEAD^ a \
                        HEAD a \
                        HEAD^ submodule \
@@ -89,7 +89,7 @@ test_expect_success 'error in one submodule config lets continue' '
                git add .gitmodules &&
                mv .gitmodules.bak .gitmodules &&
                git commit -m "add error" &&
-               test-submodule-config \
+               test-tool submodule-config \
                        HEAD b \
                        HEAD submodule \
                                >actual &&
@@ -100,7 +100,7 @@ test_expect_success 'error in one submodule config lets continue' '
 test_expect_success 'error message contains blob reference' '
        (cd super &&
                sha1=$(git rev-parse HEAD) &&
-               test-submodule-config \
+               test-tool submodule-config \
                        HEAD b \
                        HEAD submodule \
                                2>actual_err &&
@@ -114,9 +114,9 @@ test_expect_success 'using different treeishs works' '
                git tag new_tag &&
                tree=$(git rev-parse HEAD^{tree}) &&
                commit=$(git rev-parse HEAD^{commit}) &&
-               test-submodule-config $commit b >expect &&
-               test-submodule-config $tree b >actual.1 &&
-               test-submodule-config new_tag b >actual.2 &&
+               test-tool submodule-config $commit b >expect &&
+               test-tool submodule-config $tree b >actual.1 &&
+               test-tool submodule-config new_tag b >actual.2 &&
                test_cmp expect actual.1 &&
                test_cmp expect actual.2
        )
@@ -130,7 +130,7 @@ test_expect_success 'error in history in fetchrecursesubmodule lets continue' '
                git config --unset -f .gitmodules \
                        submodule.submodule.fetchrecursesubmodules &&
                git commit -m "add error in fetchrecursesubmodules" &&
-               test-submodule-config \
+               test-tool submodule-config \
                        HEAD b \
                        HEAD submodule \
                                >actual &&
index fa61b1a4ee4a1d5ba457cb30e5cc045f724ed503..9dbbd01fc07724e378a864e3fe735269665a9bc3 100755 (executable)
@@ -52,6 +52,18 @@ test_expect_success PERL 'can use paths with --interactive' '
        git reset --hard HEAD^
 '
 
+test_expect_success 'removed files and relative paths' '
+       test_when_finished "rm -rf foo" &&
+       git init foo &&
+       >foo/foo.txt &&
+       git -C foo add foo.txt &&
+       git -C foo commit -m first &&
+       git -C foo rm foo.txt &&
+
+       mkdir -p foo/bar &&
+       git -C foo/bar commit -m second ../foo.txt
+'
+
 test_expect_success 'using invalid commit with -C' '
        test_must_fail git commit --allow-empty -C bogus
 '
index 50052e28727dab74037a115003b6083256d7f344..18a40257fbb3226a328fdd520231972c111e6265 100755 (executable)
@@ -1672,12 +1672,12 @@ test_expect_success '"Initial commit" should not be noted in commit template' '
 '
 
 test_expect_success '--no-optional-locks prevents index update' '
-       test-chmtime =1234567890 .git/index &&
+       test-tool chmtime =1234567890 .git/index &&
        git --no-optional-locks status &&
-       test-chmtime -v +0 .git/index >out &&
+       test-tool chmtime --get .git/index >out &&
        grep ^1234567890 out &&
        git status &&
-       test-chmtime -v +0 .git/index >out &&
+       test-tool chmtime --get .git/index >out &&
        ! grep ^1234567890 out
 '
 
index 9c422bcd7cc8c1b2525579ca3a7ffa6ee412ba42..dd8ab7ede182fc3c3da840fee083bf23a94ce13c 100755 (executable)
@@ -92,7 +92,7 @@ test_expect_success 'will not overwrite removed file with staged changes' '
        test_cmp important c1.c
 '
 
-test_expect_failure 'will not overwrite unstaged changes in renamed file' '
+test_expect_success 'will not overwrite unstaged changes in renamed file' '
        git reset --hard c1 &&
        git mv c1.c other.c &&
        git commit -m rename &&
index 6061a04147a06dba0d049cbb5d31f8049c2210d0..6162e2a8e66f6f0e42e0a8ba6ee3b728e8e84918 100755 (executable)
@@ -4,6 +4,12 @@ test_description='git repack works correctly'
 
 . ./test-lib.sh
 
+commit_and_pack() {
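+       # Create one commit (named by "$@") and pack the new loose objects
+       # into their own packfile; print the resulting pack-<sha1>.pack name.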
+       test_commit "$@" >/dev/null &&
+       SHA1=$(git pack-objects --all --unpacked --incremental .git/objects/pack/pack </dev/null) &&
+       echo pack-${SHA1}.pack
+}
+
 test_expect_success 'objects in packs marked .keep are not repacked' '
        echo content1 > file1 &&
        echo content2 > file2 &&
@@ -194,7 +200,26 @@ test_expect_success 'objects made unreachable by grafts only are kept' '
        git reflog expire --expire=$test_tick --expire-unreachable=$test_tick --all &&
        git repack -a -d &&
        git cat-file -t $H1
-       '
+'
+
+test_expect_success 'repack --keep-pack' '
+       test_create_repo keep-pack &&
+       (
+               cd keep-pack &&
+               P1=$(commit_and_pack 1) &&
+               P2=$(commit_and_pack 2) &&
+               P3=$(commit_and_pack 3) &&
+               P4=$(commit_and_pack 4) &&
+               ls .git/objects/pack/*.pack >old-counts &&
+               test_line_count = 4 old-counts &&
+               git repack -a -d --keep-pack $P1 --keep-pack $P4 &&
+               ls .git/objects/pack/*.pack >new-counts &&
+               grep -q $P1 new-counts &&
+               grep -q $P4 new-counts &&
+               test_line_count = 3 new-counts &&
+               git fsck
+       )
+'
 
 test_done
 
index 987573c41fcd4aee3d5f8fd8aa587c97551c1872..48261ba0805cd21e64bdf471f2835a7075782a6c 100755 (executable)
@@ -55,8 +55,8 @@ test_expect_success '-A with -d option leaves unreachable objects unpacked' '
 
 compare_mtimes ()
 {
-       read tref rest &&
-       while read t rest; do
+       read tref &&
+       while read t; do
                test "$tref" = "$t" || return 1
        done
 }
@@ -90,7 +90,7 @@ test_expect_success 'unpacked objects receive timestamp of pack file' '
        tmppack=".git/objects/pack/tmp_pack" &&
        ln "$packfile" "$tmppack" &&
        git repack -A -l -d &&
-       test-chmtime -v +0 "$tmppack" "$fsha1path" "$csha1path" "$tsha1path" \
+       test-tool chmtime --get "$tmppack" "$fsha1path" "$csha1path" "$tsha1path" \
                > mtimes &&
        compare_mtimes < mtimes
 '
@@ -103,7 +103,7 @@ test_expect_success 'do not bother loosening old objects' '
        git prune-packed &&
        git cat-file -p $obj1 &&
        git cat-file -p $obj2 &&
-       test-chmtime =-86400 .git/objects/pack/pack-$pack2.pack &&
+       test-tool chmtime =-86400 .git/objects/pack/pack-$pack2.pack &&
        git repack -A -d --unpack-unreachable=1.hour.ago &&
        git cat-file -p $obj1 &&
        test_must_fail git cat-file -p $obj2
@@ -117,7 +117,7 @@ test_expect_success 'keep packed objects found only in index' '
        git reset HEAD^ &&
        git reflog expire --expire=now --all &&
        git add file &&
-       test-chmtime =-86400 .git/objects/pack/* &&
+       test-tool chmtime =-86400 .git/objects/pack/* &&
        git gc --prune=1.hour.ago &&
        git cat-file blob :file
 '
index 0059a1f837882c504e717135c9c92bc5b1d7f62c..0c685d35986eebf4dd72861b650c82fb6b515b51 100755 (executable)
@@ -12,7 +12,7 @@ test_expect_success GETTEXT_LOCALE 'setup' '
 '
 
 test_have_prereq GETTEXT_LOCALE &&
-test-regex "HALLÓ" "Halló" ICASE &&
+test-tool regex "HALLÓ" "Halló" ICASE &&
 test_set_prereq REGEX_LOCALE
 
 test_expect_success REGEX_LOCALE 'grep literal string, no -F' '
index b28a028f5503508a4a09c6a07950027d253a1846..7e8894a4a70648fd12d3ab4425f1beac2c3e4641 100755 (executable)
@@ -4,7 +4,7 @@ test_description='check that example code compiles and runs'
 . ./test-lib.sh
 
 test_expect_success 'decorate' '
-       test-example-decorate
+       test-tool example-decorate
 '
 
 test_done
index 8a8ba65a2ae583aa5d0b0526e604a8f9613b5a5e..c937330a5f3a7f6b5c4d8e406564bd88dd0c889b 100755 (executable)
@@ -288,12 +288,12 @@ test_expect_success 'able to dcommit to a subdirectory' '
 
 test_expect_success 'dcommit should not fail with a touched file' '
        test_commit "commit-new-file-foo2" foo2 &&
-       test-chmtime =-60 foo &&
+       test-tool chmtime =-60 foo &&
        git svn dcommit
 '
 
 test_expect_success 'rebase should not fail with a touched file' '
-       test-chmtime =-60 foo &&
+       test-tool chmtime =-60 foo &&
        git svn rebase
 '
 
index cd480edf1606fda8d973ee3decb8e18e03356153..a735fa37170fca27b48fba218a408cb2ea6bac8a 100755 (executable)
@@ -33,8 +33,8 @@ test_expect_success 'init and fetch a moved directory' '
        git svn fetch -i thunk &&
        test "$(git rev-parse --verify refs/remotes/thunk@2)" \
           = "$(git rev-parse --verify refs/remotes/thunk~1)" &&
-       test "$(git cat-file blob refs/remotes/thunk:readme |\
-                sed -n -e "3p")" = goodbye &&
+       git cat-file blob refs/remotes/thunk:readme >actual &&
+       test "$(sed -n -e "3p" actual)" = goodbye &&
        test -z "$(git config --get svn-remote.svn.fetch \
                 "^trunk:refs/remotes/thunk@2$")"
        '
@@ -48,8 +48,8 @@ test_expect_success 'init and fetch from one svn-remote' '
         git svn fetch -i svn/thunk &&
        test "$(git rev-parse --verify refs/remotes/svn/trunk)" \
           = "$(git rev-parse --verify refs/remotes/svn/thunk~1)" &&
-       test "$(git cat-file blob refs/remotes/svn/thunk:readme |\
-                sed -n -e "3p")" = goodbye
+       git cat-file blob refs/remotes/svn/thunk:readme >actual &&
+       test "$(sed -n -e "3p" actual)" = goodbye
         '
 
 test_expect_success 'follow deleted parent' '
@@ -107,7 +107,8 @@ test_expect_success 'follow deleted directory' '
        git svn init --minimize-url -i glob "$svnrepo"/glob &&
        git svn fetch -i glob &&
        test "$(git cat-file blob refs/remotes/glob:blob/bye)" = hi &&
-       test "$(git ls-tree refs/remotes/glob | wc -l )" -eq 1
+       git ls-tree refs/remotes/glob >actual &&
+       test_line_count = 1 actual
        '
 
 # ref: r9270 of the Subversion repository: (http://svn.collab.net/repos/svn)
@@ -204,8 +205,9 @@ test_expect_success "follow-parent is atomic" '
 test_expect_success "track multi-parent paths" '
        svn_cmd cp -m "resurrect /glob" "$svnrepo"/r9270 "$svnrepo"/glob &&
        git svn multi-fetch &&
-       test $(git cat-file commit refs/remotes/glob | \
-              grep "^parent " | wc -l) -eq 2
+       git cat-file commit refs/remotes/glob >actual &&
+       grep "^parent " actual >actual2 &&
+       test_line_count = 2 actual2
        '
 
 test_expect_success "multi-fetch continues to work" "
index a94286c8ec89823805989f4363072417e9c20165..6990f64364200c75f46d9f6ee3d67ebea3549d5b 100755 (executable)
@@ -47,8 +47,8 @@ test_expect_success 'test refspec globbing' '
        git config --add svn-remote.svn.tags\
                         "tags/*/src/a:refs/remotes/tags/*" &&
        git svn multi-fetch &&
-       git log --pretty=oneline refs/remotes/tags/end | \
-           sed -e "s/^.\{41\}//" > output.end &&
+       git log --pretty=oneline refs/remotes/tags/end >actual &&
+       sed -e "s/^.\{41\}//" actual >output.end &&
        test_cmp expect.end output.end &&
        test "$(git rev-parse refs/remotes/tags/end~1)" = \
                "$(git rev-parse refs/remotes/branches/start)" &&
@@ -75,14 +75,16 @@ test_expect_success 'test left-hand-side only globbing' '
                svn_cmd commit -m "try to try"
        ) &&
        git svn fetch two &&
-       test $(git rev-list refs/remotes/two/tags/end | wc -l) -eq 6 &&
-       test $(git rev-list refs/remotes/two/branches/start | wc -l) -eq 3 &&
+       git rev-list refs/remotes/two/tags/end >actual &&
+       test_line_count = 6 actual &&
+       git rev-list refs/remotes/two/branches/start >actual &&
+       test_line_count = 3 actual &&
        test $(git rev-parse refs/remotes/two/branches/start~2) = \
             $(git rev-parse refs/remotes/two/trunk) &&
        test $(git rev-parse refs/remotes/two/tags/end~3) = \
             $(git rev-parse refs/remotes/two/branches/start) &&
-       git log --pretty=oneline refs/remotes/two/tags/end | \
-           sed -e "s/^.\{41\}//" > output.two &&
+       git log --pretty=oneline refs/remotes/two/tags/end >actual &&
+       sed -e "s/^.\{41\}//" actual >output.two &&
        test_cmp expect.two output.two
        '
 
index 8d99e848d47634ea340885fde48bc798a2c0b7a2..c1e7542a371330ecd6b27899cf3f6d4d07d78a29 100755 (executable)
@@ -47,8 +47,8 @@ test_expect_success 'test refspec globbing' '
        git config --add svn-remote.svn.tags\
                         "tags/*/src/a:refs/remotes/tags/*" &&
        git svn multi-fetch &&
-       git log --pretty=oneline refs/remotes/tags/end | \
-           sed -e "s/^.\{41\}//" > output.end &&
+       git log --pretty=oneline refs/remotes/tags/end >actual &&
+       sed -e "s/^.\{41\}//" actual >output.end &&
        test_cmp expect.end output.end &&
        test "$(git rev-parse refs/remotes/tags/end~1)" = \
                "$(git rev-parse refs/remotes/branches/v1/start)" &&
@@ -75,14 +75,16 @@ test_expect_success 'test left-hand-side only globbing' '
                svn_cmd commit -m "try to try"
        ) &&
        git svn fetch two &&
-       test $(git rev-list refs/remotes/two/tags/end | wc -l) -eq 6 &&
-       test $(git rev-list refs/remotes/two/branches/v1/start | wc -l) -eq 3 &&
+       git rev-list refs/remotes/two/tags/end >actual &&
+       test_line_count = 6 actual &&
+       git rev-list refs/remotes/two/branches/v1/start >actual &&
+       test_line_count = 3 actual &&
        test $(git rev-parse refs/remotes/two/branches/v1/start~2) = \
             $(git rev-parse refs/remotes/two/trunk) &&
        test $(git rev-parse refs/remotes/two/tags/end~3) = \
             $(git rev-parse refs/remotes/two/branches/v1/start) &&
-       git log --pretty=oneline refs/remotes/two/tags/end | \
-           sed -e "s/^.\{41\}//" > output.two &&
+       git log --pretty=oneline refs/remotes/two/tags/end >actual &&
+       sed -e "s/^.\{41\}//" actual >output.two &&
        test_cmp expect.two output.two
        '
 cat > expect.four <<EOF
@@ -124,14 +126,16 @@ test_expect_success 'test another branch' '
        git config --add svn-remote.four.tags \
                         "tags/*:refs/remotes/four/tags/*" &&
        git svn fetch four &&
-       test $(git rev-list refs/remotes/four/tags/next | wc -l) -eq 5 &&
-       test $(git rev-list refs/remotes/four/branches/v2/start | wc -l) -eq 3 &&
+       git rev-list refs/remotes/four/tags/next >actual &&
+       test_line_count = 5 actual &&
+       git rev-list refs/remotes/four/branches/v2/start >actual &&
+       test_line_count = 3 actual &&
        test $(git rev-parse refs/remotes/four/branches/v2/start~2) = \
             $(git rev-parse refs/remotes/four/trunk) &&
        test $(git rev-parse refs/remotes/four/tags/next~2) = \
             $(git rev-parse refs/remotes/four/branches/v2/start) &&
-       git log --pretty=oneline refs/remotes/four/tags/next | \
-           sed -e "s/^.\{41\}//" > output.four &&
+       git log --pretty=oneline refs/remotes/four/tags/next >actual &&
+       sed -e "s/^.\{41\}//" actual >output.four &&
        test_cmp expect.four output.four
        '
 
index dde0a3c2229abab27d1592e740db35ebfed544aa..ad37d980c91dd303ba975bc501748e05fd82efe5 100755 (executable)
@@ -21,37 +21,37 @@ uuid=161ce429-a9dd-4828-af4a-52023f968c89
 
 bar_url=http://mayonaise/svnrepo/bar
 test_expect_success 'verify metadata for /bar' "
-       git cat-file commit refs/remotes/bar | \
-          grep '^git-svn-id: $bar_url@12 $uuid$' &&
-       git cat-file commit refs/remotes/bar~1 | \
-          grep '^git-svn-id: $bar_url@11 $uuid$' &&
-       git cat-file commit refs/remotes/bar~2 | \
-          grep '^git-svn-id: $bar_url@10 $uuid$' &&
-       git cat-file commit refs/remotes/bar~3 | \
-          grep '^git-svn-id: $bar_url@9 $uuid$' &&
-       git cat-file commit refs/remotes/bar~4 | \
-          grep '^git-svn-id: $bar_url@6 $uuid$' &&
-       git cat-file commit refs/remotes/bar~5 | \
-          grep '^git-svn-id: $bar_url@1 $uuid$'
+       git cat-file commit refs/remotes/bar >actual &&
+       grep '^git-svn-id: $bar_url@12 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~1 >actual &&
+       grep '^git-svn-id: $bar_url@11 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~2 >actual &&
+       grep '^git-svn-id: $bar_url@10 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~3 >actual &&
+       grep '^git-svn-id: $bar_url@9 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~4 >actual &&
+       grep '^git-svn-id: $bar_url@6 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~5 >actual &&
+       grep '^git-svn-id: $bar_url@1 $uuid$' actual
        "
 
 e_url=http://mayonaise/svnrepo/dir/a/b/c/d/e
 test_expect_success 'verify metadata for /dir/a/b/c/d/e' "
-       git cat-file commit refs/remotes/e | \
-          grep '^git-svn-id: $e_url@1 $uuid$'
+       git cat-file commit refs/remotes/e >actual &&
+       grep '^git-svn-id: $e_url@1 $uuid$' actual
        "
 
 dir_url=http://mayonaise/svnrepo/dir
 test_expect_success 'verify metadata for /dir' "
-       git cat-file commit refs/remotes/dir | \
-          grep '^git-svn-id: $dir_url@2 $uuid$' &&
-       git cat-file commit refs/remotes/dir~1 | \
-          grep '^git-svn-id: $dir_url@1 $uuid$'
+       git cat-file commit refs/remotes/dir >actual &&
+       grep '^git-svn-id: $dir_url@2 $uuid$' actual &&
+       git cat-file commit refs/remotes/dir~1 >actual &&
+       grep '^git-svn-id: $dir_url@1 $uuid$' actual
        "
 
 test_expect_success 'find commit based on SVN revision number' "
-        git svn find-rev r12 |
-           grep $(git rev-parse HEAD)
+       git svn find-rev r12 >actual &&
+       grep $(git rev-parse HEAD) actual
         "
 
 test_expect_success 'empty rebase' "
index 22b6e5ee7d8c274b7fe60c9f10de45eaf4d3c385..6c9307355137fe86ed16552113f0d60b007e3eca 100755 (executable)
@@ -20,32 +20,32 @@ uuid=161ce429-a9dd-4828-af4a-52023f968c89
 
 bar_url=http://mayonaise/svnrepo/bar
 test_expect_success 'verify metadata for /bar' "
-       git cat-file commit refs/remotes/bar | \
-          grep '^git-svn-id: $bar_url@12 $uuid$' &&
-       git cat-file commit refs/remotes/bar~1 | \
-          grep '^git-svn-id: $bar_url@11 $uuid$' &&
-       git cat-file commit refs/remotes/bar~2 | \
-          grep '^git-svn-id: $bar_url@10 $uuid$' &&
-       git cat-file commit refs/remotes/bar~3 | \
-          grep '^git-svn-id: $bar_url@9 $uuid$' &&
-       git cat-file commit refs/remotes/bar~4 | \
-          grep '^git-svn-id: $bar_url@6 $uuid$' &&
-       git cat-file commit refs/remotes/bar~5 | \
-          grep '^git-svn-id: $bar_url@1 $uuid$'
+       git cat-file commit refs/remotes/bar >actual &&
+       grep '^git-svn-id: $bar_url@12 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~1 >actual &&
+       grep '^git-svn-id: $bar_url@11 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~2 >actual &&
+       grep '^git-svn-id: $bar_url@10 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~3 >actual &&
+       grep '^git-svn-id: $bar_url@9 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~4 >actual &&
+       grep '^git-svn-id: $bar_url@6 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~5 >actual &&
+       grep '^git-svn-id: $bar_url@1 $uuid$' actual
        "
 
 e_url=http://mayonaise/svnrepo/dir/a/b/c/d/e
 test_expect_success 'verify metadata for /dir/a/b/c/d/e' "
-       git cat-file commit refs/remotes/e | \
-          grep '^git-svn-id: $e_url@1 $uuid$'
+       git cat-file commit refs/remotes/e >actual &&
+       grep '^git-svn-id: $e_url@1 $uuid$' actual
        "
 
 dir_url=http://mayonaise/svnrepo/dir
 test_expect_success 'verify metadata for /dir' "
-       git cat-file commit refs/remotes/dir | \
-          grep '^git-svn-id: $dir_url@2 $uuid$' &&
-       git cat-file commit refs/remotes/dir~1 | \
-          grep '^git-svn-id: $dir_url@1 $uuid$'
+       git cat-file commit refs/remotes/dir >actual &&
+       grep '^git-svn-id: $dir_url@2 $uuid$' actual &&
+       git cat-file commit refs/remotes/dir~1 >actual &&
+       grep '^git-svn-id: $dir_url@1 $uuid$' actual
        "
 
 test_done
index 50bca62def6b0632cd89d1c0cfb2d98b76e6aa1d..32317d6bca5f45314a46082ecd9aa68061853904 100755 (executable)
@@ -68,7 +68,8 @@ test_debug 'gitk --all & sleep 1'
 test_expect_success 'verify pre-merge ancestry' "
        test x\$(git rev-parse --verify refs/heads/svn^2) = \
             x\$(git rev-parse --verify refs/heads/merge) &&
-       git cat-file commit refs/heads/svn^ | grep '^friend$'
+       git cat-file commit refs/heads/svn^ >actual &&
+       grep '^friend$' actual
        "
 
 test_expect_success 'git svn dcommit merges' "
@@ -82,12 +83,13 @@ test_expect_success 'verify post-merge ancestry' "
             x\$(git rev-parse --verify refs/remotes/origin/trunk) &&
        test x\$(git rev-parse --verify refs/heads/svn^2) = \
             x\$(git rev-parse --verify refs/heads/merge) &&
-       git cat-file commit refs/heads/svn^ | grep '^friend$'
+       git cat-file commit refs/heads/svn^ >actual &&
+       grep '^friend$' actual
        "
 
 test_expect_success 'verify merge commit message' "
-       git rev-list --pretty=raw -1 refs/heads/svn | \
-         grep \"    Merge branch 'merge' into svn\"
+       git rev-list --pretty=raw -1 refs/heads/svn >actual &&
+       grep \"    Merge branch 'merge' into svn\" actual
        "
 
 test_done
index 41264818ccdd85abb4b0a17c8a508d4bcbfe57f5..d8262854bbee335aec293b8f75b9979e60b91171 100755 (executable)
@@ -26,11 +26,12 @@ test_expect_success 'start import with incomplete authors file' '
 test_expect_success 'imported 2 revisions successfully' '
        (
                cd x
-               test "$(git rev-list refs/remotes/git-svn | wc -l)" -eq 2 &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn | \
-                 grep "^author BBBBBBB BBBBBBB <bb@example\.com> " &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn~1 | \
-                 grep "^author AAAAAAA AAAAAAA <aa@example\.com> "
+               git rev-list refs/remotes/git-svn >actual &&
+               test_line_count = 2 actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn >actual &&
+               grep "^author BBBBBBB BBBBBBB <bb@example\.com> " actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn~1 >actual &&
+               grep "^author AAAAAAA AAAAAAA <aa@example\.com> " actual
        )
        '
 
@@ -43,11 +44,12 @@ test_expect_success 'continues to import once authors have been added' '
        (
                cd x
                git svn fetch --authors-file=../svn-authors &&
-               test "$(git rev-list refs/remotes/git-svn | wc -l)" -eq 4 &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn | \
-                 grep "^author DDDDDDD DDDDDDD <dd@example\.com> " &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn~1 | \
-                 grep "^author CCCCCCC CCCCCCC <cc@example\.com> "
+               git rev-list refs/remotes/git-svn >actual &&
+               test_line_count = 4 actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn >actual &&
+               grep "^author DDDDDDD DDDDDDD <dd@example\.com> " actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn~1 >actual &&
+               grep "^author CCCCCCC CCCCCCC <cc@example\.com> " actual
        )
        '
 
@@ -102,12 +104,28 @@ test_expect_success !MINGW 'fresh clone with svn.authors-file in config' '
                test x"$HOME"/svn-authors = x"$(git config svn.authorsfile)" &&
                git svn clone "$svnrepo" gitconfig.clone &&
                cd gitconfig.clone &&
-               nr_ex=$(git log | grep "^Author:.*example.com" | wc -l) &&
-               nr_rev=$(git rev-list HEAD | wc -l) &&
+               git log >actual &&
+               nr_ex=$(grep "^Author:.*example.com" actual | wc -l) &&
+               git rev-list HEAD >actual &&
+               nr_rev=$(wc -l <actual) &&
                test $nr_rev -eq $nr_ex
        )
 '
 
+cat >> svn-authors <<EOF
+ff = FFFFFFF FFFFFFF <>
+EOF
+
+test_expect_success 'authors-file imported user without email' '
+       svn_cmd mkdir -m aa/branches/ff --username ff "$svnrepo/aa/branches/ff" &&
+       (
+               cd aa-work &&
+               git svn fetch --authors-file=../svn-authors &&
+               git rev-list -1 --pretty=raw refs/remotes/origin/ff | \
+                 grep "^author FFFFFFF FFFFFFF <> "
+       )
+       '
+
 test_debug 'GIT_DIR=gitconfig.clone/.git git log'
 
 test_done
index 7d7e9d46bc6bf40f52efffede6b369f052c46843..93ef44fae8f28149344b9125530807ed49727e68 100755 (executable)
@@ -9,7 +9,9 @@ test_description='git svn authors prog tests'
 
 write_script svn-authors-prog "$PERL_PATH" <<-\EOF
        $_ = shift;
-       if (s/-sub$//)  {
+       if (s/-hermit//) {
+               print "$_ <>\n";
+       } elsif (s/-sub$//)  {
                print "$_ <$_\@sub.example.com>\n";
        } else {
                print "$_ <$_\@example.com>\n";
@@ -37,44 +39,67 @@ test_expect_success 'import authors with prog and file' '
 test_expect_success 'imported 6 revisions successfully' '
        (
                cd x
-               test "$(git rev-list refs/remotes/git-svn | wc -l)" -eq 6
+               git rev-list refs/remotes/git-svn >actual &&
+               test_line_count = 6 actual
        )
 '
 
 test_expect_success 'authors-prog ran correctly' '
        (
                cd x
-               git rev-list -1 --pretty=raw refs/remotes/git-svn~1 | \
-                 grep "^author ee-foo <ee-foo@example\.com> " &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn~2 | \
-                 grep "^author dd <dd@sub\.example\.com> " &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn~3 | \
-                 grep "^author cc <cc@sub\.example\.com> " &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn~4 | \
-                 grep "^author bb <bb@example\.com> " &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn~5 | \
-                 grep "^author aa <aa@example\.com> "
+               git rev-list -1 --pretty=raw refs/remotes/git-svn~1 >actual &&
+               grep "^author ee-foo <ee-foo@example\.com> " actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn~2 >actual &&
+               grep "^author dd <dd@sub\.example\.com> " actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn~3 >actual &&
+               grep "^author cc <cc@sub\.example\.com> " actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn~4 >actual &&
+               grep "^author bb <bb@example\.com> " actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn~5 >actual &&
+               grep "^author aa <aa@example\.com> " actual
        )
 '
 
 test_expect_success 'authors-file overrode authors-prog' '
        (
                cd x
-               git rev-list -1 --pretty=raw refs/remotes/git-svn | \
-                 grep "^author FFFFFFF FFFFFFF <fFf@other\.example\.com> "
+               git rev-list -1 --pretty=raw refs/remotes/git-svn >actual &&
+               grep "^author FFFFFFF FFFFFFF <fFf@other\.example\.com> " actual
        )
 '
 
 git --git-dir=x/.git config --unset svn.authorsfile
 git --git-dir=x/.git config --unset svn.authorsprog
 
+test_expect_success 'authors-prog imported user without email' '
+       svn mkdir -m gg --username gg-hermit "$svnrepo"/gg &&
+       (
+               cd x &&
+               git svn fetch --authors-prog=../svn-authors-prog &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn | \
+                 grep "^author gg <> "
+       )
+'
+
+test_expect_success 'imported without authors-prog and authors-file' '
+       svn mkdir -m hh --username hh "$svnrepo"/hh &&
+       (
+               uuid=$(svn info "$svnrepo" |
+                       sed -n "s/^Repository UUID: //p") &&
+               cd x &&
+               git svn fetch &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn | \
+                 grep "^author hh <hh@$uuid> "
+       )
+'
+
 test_expect_success 'authors-prog handled special characters in username' '
        svn mkdir -m bad --username "xyz; touch evil" "$svnrepo"/bad &&
        (
                cd x &&
                git svn --authors-prog=../svn-authors-prog fetch &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn |
-               grep "^author xyz; touch evil <xyz; touch evil@example\.com> " &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn >actual &&
+               grep "^author xyz; touch evil <xyz; touch evil@example\.com> " actual &&
                ! test -f evil
        )
 '
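The new cases above exercise --authors-prog mappings that yield an author with an empty e-mail ("name <>"). The program receives the SVN username as its single argument and must print one "Name <email>" line; a minimal shell equivalent of the Perl script used by these tests might look like:

    #!/bin/sh
    # sketch of an authors-prog: usernames ending in -hermit map to an
    # author with an empty e-mail, everything else to user@example.com
    user="$1"
    case "$user" in
    *-hermit) echo "${user%-hermit} <>" ;;
    *)        echo "$user <$user@example.com>" ;;
    esac

It is wired up the same way as in the tests, e.g. "git svn fetch --authors-prog=../svn-authors-prog".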
index 372ef156850098928ac1ed402ab2228460af3c00..8cb2b5c69cfc89c4613fc3f7e4f2153a251433c7 100755 (executable)
@@ -16,10 +16,10 @@ test_expect_success 'load svn repo' "
        "
 
 test_expect_success 'verify uuid' "
-       git cat-file commit refs/remotes/git-svn~0 | \
-          grep '^git-svn-id: .*@2 $uuid$' &&
-       git cat-file commit refs/remotes/git-svn~1 | \
-          grep '^git-svn-id: .*@1 $uuid$'
+       git cat-file commit refs/remotes/git-svn~0 >actual &&
+       grep '^git-svn-id: .*@2 $uuid$' actual &&
+       git cat-file commit refs/remotes/git-svn~1 >actual &&
+       grep '^git-svn-id: .*@1 $uuid$' actual
        "
 
 test_done
index 8b22f2272cca47cd9187accdaed3ce1f12b6c9e1..bdf6e849993bff79a5e04c5b8a1d7df77118091f 100755 (executable)
@@ -48,8 +48,8 @@ test_expect_success 'test refspec prefixed globbing' '
        git config --add svn-remote.svn.tags\
                         "tags/t_*/src/a:refs/remotes/tags/t_*" &&
        git svn multi-fetch &&
-       git log --pretty=oneline refs/remotes/tags/t_end | \
-           sed -e "s/^.\{41\}//" >output.end &&
+       git log --pretty=oneline refs/remotes/tags/t_end >actual &&
+       sed -e "s/^.\{41\}//" actual >output.end &&
        test_cmp expect.end output.end &&
        test "$(git rev-parse refs/remotes/tags/t_end~1)" = \
                "$(git rev-parse refs/remotes/branches/b_start)" &&
@@ -78,14 +78,16 @@ test_expect_success 'test left-hand-side only prefixed globbing' '
                svn_cmd commit -m "try to try"
        ) &&
        git svn fetch two &&
-       test $(git rev-list refs/remotes/two/tags/t_end | wc -l) -eq 6 &&
-       test $(git rev-list refs/remotes/two/branches/b_start | wc -l) -eq 3 &&
+       git rev-list refs/remotes/two/tags/t_end >actual &&
+       test_line_count = 6 actual &&
+       git rev-list refs/remotes/two/branches/b_start >actual &&
+       test_line_count = 3 actual &&
        test $(git rev-parse refs/remotes/two/branches/b_start~2) = \
             $(git rev-parse refs/remotes/two/trunk) &&
        test $(git rev-parse refs/remotes/two/tags/t_end~3) = \
             $(git rev-parse refs/remotes/two/branches/b_start) &&
-       git log --pretty=oneline refs/remotes/two/tags/t_end | \
-           sed -e "s/^.\{41\}//" >output.two &&
+       git log --pretty=oneline refs/remotes/two/tags/t_end >actual &&
+       sed -e "s/^.\{41\}//" actual >output.two &&
        test_cmp expect.two output.two
        '
 
@@ -118,14 +120,16 @@ test_expect_success 'test prefixed globs match just prefix' '
                svn_cmd up
        ) &&
        git svn fetch three &&
-       test $(git rev-list refs/remotes/three/branches/b_ | wc -l) -eq 2 &&
-       test $(git rev-list refs/remotes/three/tags/t_ | wc -l) -eq 3 &&
+       git rev-list refs/remotes/three/branches/b_ >actual &&
+       test_line_count = 2 actual &&
+       git rev-list refs/remotes/three/tags/t_ >actual &&
+       test_line_count = 3 actual &&
        test $(git rev-parse refs/remotes/three/branches/b_~1) = \
             $(git rev-parse refs/remotes/three/trunk) &&
        test $(git rev-parse refs/remotes/three/tags/t_~1) = \
             $(git rev-parse refs/remotes/three/branches/b_) &&
-       git log --pretty=oneline refs/remotes/three/tags/t_ | \
-           sed -e "s/^.\{41\}//" >output.three &&
+       git log --pretty=oneline refs/remotes/three/tags/t_ >actual &&
+       sed -e "s/^.\{41\}//" actual >output.three &&
        test_cmp expect.three output.three
        '
 
@@ -186,14 +190,16 @@ test_expect_success 'test globbing in the middle of the word' '
                svn_cmd up
        ) &&
        git svn fetch five &&
-       test $(git rev-list refs/remotes/five/branches/abcde | wc -l) -eq 2 &&
-       test $(git rev-list refs/remotes/five/tags/fghij | wc -l) -eq 3 &&
+       git rev-list refs/remotes/five/branches/abcde >actual &&
+       test_line_count = 2 actual &&
+       git rev-list refs/remotes/five/tags/fghij >actual &&
+       test_line_count = 3 actual &&
        test $(git rev-parse refs/remotes/five/branches/abcde~1) = \
             $(git rev-parse refs/remotes/five/trunk) &&
        test $(git rev-parse refs/remotes/five/tags/fghij~1) = \
             $(git rev-parse refs/remotes/five/branches/abcde) &&
-       git log --pretty=oneline refs/remotes/five/tags/fghij | \
-           sed -e "s/^.\{41\}//" >output.five &&
+       git log --pretty=oneline refs/remotes/five/tags/fghij >actual &&
+       sed -e "s/^.\{41\}//" actual >output.five &&
        test_cmp expect.five output.five
        '
 
index e4d06accc458191d0d52fe325b3118ac019e96c2..dc79df7b042b75ca192ff1a860eb0c22a9059a8e 100755 (executable)
@@ -2654,7 +2654,7 @@ test_expect_success 'R: corrupt lines do not mess marks file' '
 ##
 test_expect_success 'R: blob bigger than threshold' '
        blobsize=$((2*1024*1024 + 53)) &&
-       test-genrandom bar $blobsize >expect &&
+       test-tool genrandom bar $blobsize >expect &&
        cat >input <<-INPUT_END &&
        commit refs/heads/big-file
        committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
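The test-genrandom, test-chmtime and test-date invocations scattered through these scripts change because the stand-alone helpers were folded into a single t/helper/test-tool binary this cycle; the old names become subcommands, as in the hunks above and below:

    test-tool genrandom bar $blobsize >expect    # was: test-genrandom
    test-tool chmtime +5 file                    # was: test-chmtime
    test-tool date is64bit                       # was: test-date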
index 866ddf60581e3fea1afdbe28e71e7f3137da5722..6a392e87bcc17712961c548c0ee84e11269aef0f 100755 (executable)
@@ -43,20 +43,20 @@ test_expect_success 'fast-export | fast-import' '
        MUSS=$(git rev-parse --verify muss) &&
        mkdir new &&
        git --git-dir=new/.git init &&
-       git fast-export --all |
+       git fast-export --all >actual &&
        (cd new &&
         git fast-import &&
         test $MASTER = $(git rev-parse --verify refs/heads/master) &&
         test $REIN = $(git rev-parse --verify refs/tags/rein) &&
         test $WER = $(git rev-parse --verify refs/heads/wer) &&
-        test $MUSS = $(git rev-parse --verify refs/tags/muss))
+        test $MUSS = $(git rev-parse --verify refs/tags/muss)) <actual
 
 '
 
 test_expect_success 'fast-export master~2..master' '
 
-       git fast-export master~2..master |
-               sed "s/master/partial/" |
+       git fast-export master~2..master >actual &&
+       sed "s/master/partial/" actual |
                (cd new &&
                 git fast-import &&
                 test $MASTER != $(git rev-parse --verify refs/heads/partial) &&
@@ -74,11 +74,12 @@ test_expect_success 'iso-8859-1' '
        test_tick &&
        echo rosten >file &&
        git commit -s -m den file &&
-       git fast-export wer^..wer |
-               sed "s/wer/i18n/" |
+       git fast-export wer^..wer >iso8859-1.fi &&
+       sed "s/wer/i18n/" iso8859-1.fi |
                (cd new &&
                 git fast-import &&
-                git cat-file commit i18n | grep "Áéí óú")
+                git cat-file commit i18n >actual &&
+                grep "Áéí óú" actual)
 
 '
 test_expect_success 'import/export-marks' '
@@ -87,20 +88,14 @@ test_expect_success 'import/export-marks' '
        git fast-export --export-marks=tmp-marks HEAD &&
        test -s tmp-marks &&
        test_line_count = 3 tmp-marks &&
-       test $(
-               git fast-export --import-marks=tmp-marks\
-               --export-marks=tmp-marks HEAD |
-               grep ^commit |
-               wc -l) \
-       -eq 0 &&
+       git fast-export --import-marks=tmp-marks \
+               --export-marks=tmp-marks HEAD >actual &&
+       test $(grep ^commit actual | wc -l) -eq 0 &&
        echo change > file &&
        git commit -m "last commit" file &&
-       test $(
-               git fast-export --import-marks=tmp-marks \
-               --export-marks=tmp-marks HEAD |
-               grep ^commit\  |
-               wc -l) \
-       -eq 1 &&
+       git fast-export --import-marks=tmp-marks \
+               --export-marks=tmp-marks HEAD >actual &&
+       test $(grep ^commit\  actual | wc -l) -eq 1 &&
        test_line_count = 4 tmp-marks
 
 '
@@ -184,7 +179,7 @@ test_expect_success 'submodule fast-export | fast-import' '
        rm -rf new &&
        mkdir new &&
        git --git-dir=new/.git init &&
-       git fast-export --signed-tags=strip --all |
+       git fast-export --signed-tags=strip --all >actual &&
        (cd new &&
         git fast-import &&
         test "$SUBENT1" = "$(git ls-tree refs/heads/master^ sub)" &&
@@ -192,7 +187,7 @@ test_expect_success 'submodule fast-export | fast-import' '
         git checkout master &&
         git submodule init &&
         git submodule update &&
-        cmp sub/file ../sub/file)
+        cmp sub/file ../sub/file) <actual
 
 '
 
@@ -367,12 +362,14 @@ test_expect_success 'path limiting with import-marks does not lose unmodified fi
        echo more content >> file &&
        test_tick &&
        git commit -mnext file &&
-       git fast-export --import-marks=marks simple -- file file0 | grep file0
+       git fast-export --import-marks=marks simple -- file file0 >actual &&
+       grep file0 actual
 '
 
 test_expect_success 'full-tree re-shows unmodified files'        '
        git checkout -f simple &&
-       test $(git fast-export --full-tree simple | grep -c file0) -eq 3
+       git fast-export --full-tree simple >actual &&
+       test $(grep -c file0 actual) -eq 3
 '
 
 test_expect_success 'set-up a few more tags for tag export tests' '
@@ -505,8 +502,8 @@ test_expect_success 'refs are updated even if no commits need to be exported' '
 '
 
 test_expect_success 'use refspec' '
-       git fast-export --refspec refs/heads/master:refs/heads/foobar master | \
-               grep "^commit " | sort | uniq > actual &&
+       git fast-export --refspec refs/heads/master:refs/heads/foobar master >actual2 &&
+       grep "^commit " actual2 | sort | uniq >actual &&
        echo "commit refs/heads/foobar" > expected &&
        test_cmp expected actual
 '
@@ -534,10 +531,29 @@ test_expect_success 'when using -C, do not declare copy when source of copy is a
        git -C src commit -m 2nd_commit &&
 
        test_create_repo dst &&
-       git -C src fast-export --all -C | git -C dst fast-import &&
+       git -C src fast-export --all -C >actual &&
+       git -C dst fast-import <actual &&
        git -C src show >expected &&
        git -C dst show >actual &&
        test_cmp expected actual
 '
 
+test_expect_success 'merge commit gets exported with --import-marks' '
+       test_create_repo merging &&
+       (
+               cd merging &&
+               test_commit initial &&
+               git checkout -b topic &&
+               test_commit on-topic &&
+               git checkout master &&
+               test_commit on-master &&
+               test_tick &&
+               git merge --no-ff -m Yeah topic &&
+
+               echo ":1 $(git rev-parse HEAD^^)" >marks &&
+               git fast-export --import-marks=marks master >out &&
+               grep Yeah out
+       )
+'
+
 test_done
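The new --import-marks test above seeds the marks file by hand with a single ":1 <commit>" line, the same format fast-export writes with --export-marks. The usual incremental round trip, also exercised by the import/export-marks test earlier in this file, is roughly:

    git fast-export --export-marks=marks master >stream1 &&
    # ... more history is created ...
    git fast-export --import-marks=marks --export-marks=marks master >stream2
    # stream2 contains only objects not already recorded in "marks"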
index eb9a8ed197b4c6383a4c688f5e5c3a3fcb102b12..1fc9b33aeb5f095816fba72e85c94f0455994418 100755 (executable)
@@ -237,7 +237,7 @@ test_expect_success 'ignore apple' '
        build_gendouble &&
        (
                cd "$cli" &&
-               test-genrandom apple 1024 >double.png &&
+               test-tool genrandom apple 1024 >double.png &&
                "$PYTHON_PATH" "$TRASH_DIRECTORY/gendouble.py" >%double.png &&
                p4 add -t apple double.png &&
                p4 submit -d appledouble
index d950c7d665498c484f83d127cd2363def5169a89..d5c367510049607ce33db73ffc1869a0652fe663 100755 (executable)
@@ -28,7 +28,7 @@ test_expect_success 'shell metachars in filenames' '
                echo f2 >"file with spaces" &&
                git add "file with spaces" &&
                git commit -m "add files" &&
-               P4EDITOR="test-chmtime +5" git p4 submit
+               P4EDITOR="test-tool chmtime +5" git p4 submit
        ) &&
        (
                cd "$cli" &&
@@ -47,7 +47,7 @@ test_expect_success 'deleting with shell metachars' '
                git rm foo\$bar &&
                git rm file\ with\ spaces &&
                git commit -m "remove files" &&
-               P4EDITOR="test-chmtime +5" git p4 submit
+               P4EDITOR="test-tool chmtime +5" git p4 submit
        ) &&
        (
                cd "$cli" &&
index bda222aa0270f3a93fa494b308aa174ebc942eca..783c6ad1653142d174e4ddc6d49e1f631121be51 100755 (executable)
@@ -53,7 +53,7 @@ test_expect_success 'preserve users' '
                git commit --author "Alice <alice@example.com>" -m "a change by alice" file1 &&
                git commit --author "Bob <bob@example.com>" -m "a change by bob" file2 &&
                git config git-p4.skipSubmitEditCheck true &&
-               P4EDITOR="test-chmtime +5" P4USER=alice P4PASSWD=secret &&
+               P4EDITOR="test-tool chmtime +5" P4USER=alice P4PASSWD=secret &&
                export P4EDITOR P4USER P4PASSWD &&
                git p4 commit --preserve-user &&
                p4_check_commit_author file1 alice &&
@@ -71,7 +71,7 @@ test_expect_success 'refuse to preserve users without perms' '
                git config git-p4.skipSubmitEditCheck true &&
                echo "username-noperms: a change by alice" >>file1 &&
                git commit --author "Alice <alice@example.com>" -m "perms: a change by alice" file1 &&
-               P4EDITOR="test-chmtime +5" P4USER=bob P4PASSWD=secret &&
+               P4EDITOR="test-tool chmtime +5" P4USER=bob P4PASSWD=secret &&
                export P4EDITOR P4USER P4PASSWD &&
                test_must_fail git p4 commit --preserve-user &&
                ! git diff --exit-code HEAD..p4/master
@@ -89,7 +89,7 @@ test_expect_success 'preserve user where author is unknown to p4' '
                git commit --author "Bob <bob@example.com>" -m "preserve: a change by bob" file1 &&
                echo "username-unknown: a change by charlie" >>file1 &&
                git commit --author "Charlie <charlie@example.com>" -m "preserve: a change by charlie" file1 &&
-               P4EDITOR="test-chmtime +5" P4USER=alice P4PASSWD=secret &&
+               P4EDITOR="test-tool chmtime +5" P4USER=alice P4PASSWD=secret &&
                export P4EDITOR P4USER P4PASSWD &&
                test_must_fail git p4 commit --preserve-user &&
                ! git diff --exit-code HEAD..p4/master &&
index 6dc6df032ec0c15b39dccd24ade52580fc115add..3c22f74bd436b7c6d94d5c29d5b2e3e3510d58e6 100755 (executable)
@@ -26,7 +26,7 @@ test_expect_success 'EDITOR with options' '
                cd "$git" &&
                echo change >file1 &&
                git commit -m "change" file1 &&
-               P4EDITOR=": >\"$git/touched\" && test-chmtime +5" git p4 submit &&
+               P4EDITOR=": >\"$git/touched\" && test-tool chmtime +5" git p4 submit &&
                test_path_is_file "$git/touched"
        )
 '
index b7f5b1e632fb27a0448239361d6b4207be4b9908..1b34caa1e1a5e86cd2edccbb1559009b35022e10 100755 (executable)
@@ -1454,6 +1454,12 @@ test_expect_success 'completion used <cmd> completion for alias: !f() { : git <c
        EOF
 '
 
+test_expect_success 'completion without explicit _git_xxx function' '
+       test_completion "git version --" <<-\EOF
+       --build-options Z
+       EOF
+'
+
 test_expect_failure 'complete with tilde expansion' '
        git init tmp && cd tmp &&
        test_when_finished "cd .. && rm -rf tmp" &&
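The added completion test checks that "git version --" offers --build-options even though the completion script defines no _git_version function. Presumably the fallback asks the command itself for its option list via the --git-completion-helper machinery introduced in this cycle (an assumption; the mechanism is not shown in this hunk), roughly:

    # hypothetical probe of the fallback path
    git version --git-completion-helper
    # expected to print something like: --build-options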
index b895366feef6027ac00bf47271b2530e8b7e7162..2b2181dca09089ed36d10ee8f6f67eedda8cf352 100644 (file)
@@ -145,12 +145,28 @@ test_pause () {
        "$SHELL_PATH" <&6 >&5 2>&7
 }
 
-# Wrap git in gdb. Adding this to a command can make it easier to
-# understand what is going on in a failing test.
+# Wrap git with a debugger. Adding this to a command can make it easier
+# to understand what is going on in a failing test.
 #
-# Example: "debug git checkout master".
+# Examples:
+#     debug git checkout master
+#     debug --debugger=nemiver git $ARGS
+#     debug -d "valgrind --tool=memcheck --track-origins=yes" git $ARGS
 debug () {
-        GIT_TEST_GDB=1 "$@" <&6 >&5 2>&7
+       case "$1" in
+       -d)
+               GIT_DEBUGGER="$2" &&
+               shift 2
+               ;;
+       --debugger=*)
+               GIT_DEBUGGER="${1#*=}" &&
+               shift 1
+               ;;
+       *)
+               GIT_DEBUGGER=1
+               ;;
+       esac &&
+       GIT_DEBUGGER="${GIT_DEBUGGER}" "$@" <&6 >&5 2>&7
 }
 
 # Call test_commit with the arguments
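debug() now exports the chosen debugger through GIT_DEBUGGER instead of the old GIT_TEST_GDB flag, with plain "debug git ..." keeping gdb as the default. The consuming side lives in the bin-wrappers, which are outside this excerpt; a sketch of how such a wrapper might honor the variable (an assumption, not the actual wrap-for-bin.sh change):

    if test -n "$GIT_DEBUGGER"
    then
        test "$GIT_DEBUGGER" = 1 && GIT_DEBUGGER="gdb --args"
        exec ${GIT_DEBUGGER} "$GIT_EXEC_PATH/git" "$@"
    fi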
@@ -278,8 +294,20 @@ write_script () {
 # The single parameter is the prerequisite tag (a simple word, in all
 # capital letters by convention).
 
+test_unset_prereq () {
+       ! test_have_prereq "$1" ||
+       satisfied_prereq="${satisfied_prereq% $1 *} ${satisfied_prereq#* $1 }"
+}
+
 test_set_prereq () {
-       satisfied_prereq="$satisfied_prereq$1 "
+       case "$1" in
+       !*)
+               test_unset_prereq "${1#!}"
+               ;;
+       *)
+               satisfied_prereq="$satisfied_prereq$1 "
+               ;;
+       esac
 }
 satisfied_prereq=" "
 lazily_testable_prereq= lazily_tested_prereq=
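test_set_prereq now accepts a leading "!", which routes through the new test_unset_prereq to splice an already-satisfied prerequisite back out of $satisfied_prereq. A hypothetical use is forcing a prerequisite off for a whole script so the guarded tests are skipped:

    test_set_prereq !SYMLINKS
    test_expect_success SYMLINKS 'uses symlinks' '
        ln -s target link        # skipped once SYMLINKS is retracted
    '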
@@ -782,11 +810,8 @@ verbose () {
 # otherwise.
 
 test_must_be_empty () {
-       if ! test -f "$1"
-       then
-               echo "'$1' is missing"
-               return 1
-       elif test -s "$1"
+       test_path_is_file "$1" &&
+       if test -s "$1"
        then
                echo "'$1' is not empty, it contains:"
                cat "$1"
index 7740d511d289f44bb1313308fe49d5894f64b3c2..f3771ab195bad599e908b2ceb1d956fed5213e9b 100644 (file)
@@ -963,10 +963,10 @@ test -d "$GIT_BUILD_DIR"/templates/blt || {
        error "You haven't built things yet, have you?"
 }
 
-if ! test -x "$GIT_BUILD_DIR"/t/helper/test-chmtime
+if ! test -x "$GIT_BUILD_DIR"/t/helper/test-tool
 then
-       echo >&2 'You need to build test-chmtime:'
-       echo >&2 'Run "make t/helper/test-chmtime" in the source (toplevel) directory'
+       echo >&2 'You need to build test-tool:'
+       echo >&2 'Run "make t/helper/test-tool" in the source (toplevel) directory'
        exit 1
 fi
 
@@ -1106,12 +1106,7 @@ test_lazy_prereq UTF8_NFD_TO_NFC '
        auml=$(printf "\303\244")
        aumlcdiar=$(printf "\141\314\210")
        >"$auml" &&
-       case "$(echo *)" in
-       "$aumlcdiar")
-               true ;;
-       *)
-               false ;;
-       esac
+       test -f "$aumlcdiar"
 '
 
 test_lazy_prereq AUTOIDENT '
@@ -1206,5 +1201,9 @@ test_lazy_prereq LONG_IS_64BIT '
        test 8 -le "$(build_option sizeof-long)"
 '
 
-test_lazy_prereq TIME_IS_64BIT 'test-date is64bit'
-test_lazy_prereq TIME_T_IS_64BIT 'test-date time_t-is64bit'
+test_lazy_prereq TIME_IS_64BIT 'test-tool date is64bit'
+test_lazy_prereq TIME_T_IS_64BIT 'test-tool date time_t-is64bit'
+
+test_lazy_prereq CURL '
+       curl --version
+'
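The new lazy CURL prerequisite is satisfied only when a curl binary actually runs, so tests that shell out to curl can guard themselves on it; a hypothetical example:

    test_expect_success CURL 'fetch info/refs with curl' '
        curl --silent "$HTTPD_URL/dumb/repo.git/info/refs" >refs &&
        test -s refs
    '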
diff --git a/tag.c b/tag.c
index 66210fd4772778ac4c95925ec4670717dd27da62..3d37c1bd251c5f8c5eb06ede72ab57b323888709 100644 (file)
--- a/tag.c
+++ b/tag.c
@@ -41,20 +41,20 @@ int gpg_verify_tag(const struct object_id *oid, const char *name_to_report,
        unsigned long size;
        int ret;
 
-       type = sha1_object_info(oid->hash, NULL);
+       type = oid_object_info(the_repository, oid, NULL);
        if (type != OBJ_TAG)
                return error("%s: cannot verify a non-tag object of type %s.",
                                name_to_report ?
                                name_to_report :
-                               find_unique_abbrev(oid->hash, DEFAULT_ABBREV),
+                               find_unique_abbrev(oid, DEFAULT_ABBREV),
                                type_name(type));
 
-       buf = read_sha1_file(oid->hash, &type, &size);
+       buf = read_object_file(oid, &type, &size);
        if (!buf)
                return error("%s: unable to read file.",
                                name_to_report ?
                                name_to_report :
-                               find_unique_abbrev(oid->hash, DEFAULT_ABBREV));
+                               find_unique_abbrev(oid, DEFAULT_ABBREV));
 
        ret = run_gpg_verify(buf, size, flags);
 
@@ -182,7 +182,7 @@ int parse_tag(struct tag *item)
 
        if (item->object.parsed)
                return 0;
-       data = read_sha1_file(item->object.oid.hash, &type, &size);
+       data = read_object_file(&item->object.oid, &type, &size);
        if (!data)
                return error("Could not read %s",
                             oid_to_hex(&item->object.oid));
index b2d9280f104aec7095dd2b37ff1f7165355b5388..fea3f55545cbf193f823db468c7640bfd48d9bbd 100644 (file)
@@ -6,6 +6,7 @@
 #include "strbuf.h"
 #include "argv-array.h"
 #include "quote.h"
+#include "object-store.h"
 
 struct tmp_objdir {
        struct strbuf path;
diff --git a/trace.c b/trace.c
index 7f3b08e148044c6c94cbef03ae265e134391357a..fc623e91fdd7ed8268922ae0460cfbd6903f3800 100644 (file)
--- a/trace.c
+++ b/trace.c
@@ -26,6 +26,7 @@
 
 struct trace_key trace_default_key = { "GIT_TRACE", 0, 0, 0 };
 struct trace_key trace_perf_key = TRACE_KEY_INIT(PERFORMANCE);
+struct trace_key trace_setup_key = TRACE_KEY_INIT(SETUP);
 
 /* Get a trace file descriptor from "key" env variable. */
 static int get_trace_fd(struct trace_key *key)
@@ -300,11 +301,10 @@ static const char *quote_crnl(const char *path)
 /* FIXME: move prefix to startup_info struct and get rid of this arg */
 void trace_repo_setup(const char *prefix)
 {
-       static struct trace_key key = TRACE_KEY_INIT(SETUP);
        const char *git_work_tree;
        char *cwd;
 
-       if (!trace_want(&key))
+       if (!trace_want(&trace_setup_key))
                return;
 
        cwd = xgetcwd();
@@ -315,11 +315,11 @@ void trace_repo_setup(const char *prefix)
        if (!prefix)
                prefix = "(null)";
 
-       trace_printf_key(&key, "setup: git_dir: %s\n", quote_crnl(get_git_dir()));
-       trace_printf_key(&key, "setup: git_common_dir: %s\n", quote_crnl(get_git_common_dir()));
-       trace_printf_key(&key, "setup: worktree: %s\n", quote_crnl(git_work_tree));
-       trace_printf_key(&key, "setup: cwd: %s\n", quote_crnl(cwd));
-       trace_printf_key(&key, "setup: prefix: %s\n", quote_crnl(prefix));
+       trace_printf_key(&trace_setup_key, "setup: git_dir: %s\n", quote_crnl(get_git_dir()));
+       trace_printf_key(&trace_setup_key, "setup: git_common_dir: %s\n", quote_crnl(get_git_common_dir()));
+       trace_printf_key(&trace_setup_key, "setup: worktree: %s\n", quote_crnl(git_work_tree));
+       trace_printf_key(&trace_setup_key, "setup: cwd: %s\n", quote_crnl(cwd));
+       trace_printf_key(&trace_setup_key, "setup: prefix: %s\n", quote_crnl(prefix));
 
        free(cwd);
 }
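Promoting the SETUP key to a global trace_setup_key does not change how the trace is driven: with the TRACE_KEY_INIT naming scheme in trace.h the controlling variable is GIT_TRACE_SETUP, so the repository-discovery trace emitted by trace_repo_setup() can be requested as, for example:

    GIT_TRACE_SETUP=1 git status
    # stderr then carries lines such as:
    #   setup: git_dir: .git
    #   setup: worktree: /path/to/worktree
    #   setup: cwd: /path/to/worktree
    #   setup: prefix: (null)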
diff --git a/trace.h b/trace.h
index 88055abef7342ee638f6597a6628bcdf4baeace9..2b6a1bc17c2cc1a8642d8c7bd460808638f28d77 100644 (file)
--- a/trace.h
+++ b/trace.h
@@ -15,6 +15,7 @@ extern struct trace_key trace_default_key;
 
 #define TRACE_KEY_INIT(name) { "GIT_TRACE_" #name, 0, 0, 0 }
 extern struct trace_key trace_perf_key;
+extern struct trace_key trace_setup_key;
 
 extern void trace_repo_setup(const char *prefix);
 extern int trace_want(struct trace_key *key);
index 3f380d87d99eab317d5ac567b43e3cea05885145..11f1055b47e5e204a272e3588f74f161bdf73895 100644 (file)
@@ -12,6 +12,7 @@
 #include "argv-array.h"
 #include "refs.h"
 #include "transport-internal.h"
+#include "protocol.h"
 
 static int debug;
 
@@ -26,6 +27,7 @@ struct helper_data {
                option : 1,
                push : 1,
                connect : 1,
+               stateless_connect : 1,
                signed_tags : 1,
                check_connectivity : 1,
                no_disconnect_req : 1,
@@ -49,7 +51,7 @@ static void sendline(struct helper_data *helper, struct strbuf *buffer)
                die_errno("Full write to remote helper failed");
 }
 
-static int recvline_fh(FILE *helper, struct strbuf *buffer, const char *name)
+static int recvline_fh(FILE *helper, struct strbuf *buffer)
 {
        strbuf_reset(buffer);
        if (debug)
@@ -67,7 +69,7 @@ static int recvline_fh(FILE *helper, struct strbuf *buffer, const char *name)
 
 static int recvline(struct helper_data *helper, struct strbuf *buffer)
 {
-       return recvline_fh(helper->out, buffer, helper->name);
+       return recvline_fh(helper->out, buffer);
 }
 
 static void write_constant(int fd, const char *str)
@@ -188,6 +190,8 @@ static struct child_process *get_helper(struct transport *transport)
                        refspecs[refspec_nr++] = xstrdup(arg);
                } else if (!strcmp(capname, "connect")) {
                        data->connect = 1;
+               } else if (!strcmp(capname, "stateless-connect")) {
+                       data->stateless_connect = 1;
                } else if (!strcmp(capname, "signed-tags")) {
                        data->signed_tags = 1;
                } else if (skip_prefix(capname, "export-marks ", &arg)) {
@@ -545,14 +549,13 @@ static int fetch_with_import(struct transport *transport,
        return 0;
 }
 
-static int process_connect_service(struct transport *transport,
-                                  const char *name, const char *exec)
+static int run_connect(struct transport *transport, struct strbuf *cmdbuf)
 {
        struct helper_data *data = transport->data;
-       struct strbuf cmdbuf = STRBUF_INIT;
-       struct child_process *helper;
-       int r, duped, ret = 0;
+       int ret = 0;
+       int duped;
        FILE *input;
+       struct child_process *helper;
 
        helper = get_helper(transport);
 
@@ -568,44 +571,61 @@ static int process_connect_service(struct transport *transport,
        input = xfdopen(duped, "r");
        setvbuf(input, NULL, _IONBF, 0);
 
+       sendline(data, cmdbuf);
+       if (recvline_fh(input, cmdbuf))
+               exit(128);
+
+       if (!strcmp(cmdbuf->buf, "")) {
+               data->no_disconnect_req = 1;
+               if (debug)
+                       fprintf(stderr, "Debug: Smart transport connection "
+                               "ready.\n");
+               ret = 1;
+       } else if (!strcmp(cmdbuf->buf, "fallback")) {
+               if (debug)
+                       fprintf(stderr, "Debug: Falling back to dumb "
+                               "transport.\n");
+       } else {
+               die("Unknown response to connect: %s",
+                       cmdbuf->buf);
+       }
+
+       fclose(input);
+       return ret;
+}
+
+static int process_connect_service(struct transport *transport,
+                                  const char *name, const char *exec)
+{
+       struct helper_data *data = transport->data;
+       struct strbuf cmdbuf = STRBUF_INIT;
+       int ret = 0;
+
        /*
         * Handle --upload-pack and friends. This is fire and forget...
         * just warn if it fails.
         */
        if (strcmp(name, exec)) {
-               r = set_helper_option(transport, "servpath", exec);
+               int r = set_helper_option(transport, "servpath", exec);
                if (r > 0)
                        warning("Setting remote service path not supported by protocol.");
                else if (r < 0)
                        warning("Invalid remote service path.");
        }
 
-       if (data->connect)
+       if (data->connect) {
                strbuf_addf(&cmdbuf, "connect %s\n", name);
-       else
-               goto exit;
-
-       sendline(data, &cmdbuf);
-       if (recvline_fh(input, &cmdbuf, name))
-               exit(128);
-
-       if (!strcmp(cmdbuf.buf, "")) {
-               data->no_disconnect_req = 1;
-               if (debug)
-                       fprintf(stderr, "Debug: Smart transport connection "
-                               "ready.\n");
-               ret = 1;
-       } else if (!strcmp(cmdbuf.buf, "fallback")) {
-               if (debug)
-                       fprintf(stderr, "Debug: Falling back to dumb "
-                               "transport.\n");
-       } else
-               die("Unknown response to connect: %s",
-                       cmdbuf.buf);
+               ret = run_connect(transport, &cmdbuf);
+       } else if (data->stateless_connect &&
+                  (get_protocol_version_config() == protocol_v2) &&
+                  !strcmp("git-upload-pack", name)) {
+               strbuf_addf(&cmdbuf, "stateless-connect %s\n", name);
+               ret = run_connect(transport, &cmdbuf);
+               if (ret)
+                       transport->stateless_rpc = 1;
+       }
 
-exit:
        strbuf_release(&cmdbuf);
-       fclose(input);
        return ret;
 }
 
@@ -1031,7 +1051,8 @@ static int has_attribute(const char *attrs, const char *attr) {
        }
 }
 
-static struct ref *get_refs_list(struct transport *transport, int for_push)
+static struct ref *get_refs_list(struct transport *transport, int for_push,
+                                const struct argv_array *ref_prefixes)
 {
        struct helper_data *data = transport->data;
        struct child_process *helper;
@@ -1044,7 +1065,7 @@ static struct ref *get_refs_list(struct transport *transport, int for_push)
 
        if (process_connect(transport, for_push)) {
                do_take_over(transport);
-               return transport->vtable->get_refs_list(transport, for_push);
+               return transport->vtable->get_refs_list(transport, for_push, ref_prefixes);
        }
 
        if (data->push && for_push)
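The new stateless-connect capability lets a remote helper drive protocol v2 over half-duplex transports, with transport->stateless_rpc set once the helper accepts the request. From the command line the new protocol is opted into through the protocol.version configuration, and the exchange can be inspected with the packet tracer; roughly (the URL is a placeholder):

    GIT_TRACE_PACKET=1 git -c protocol.version=2 \
        ls-remote https://example.com/repo.git refs/heads/master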
index 3c1a29d7274465b977d9285781673f235e66ad21..1cde6258a73bcf8582b0746d1c44a23b30115dc9 100644 (file)
@@ -3,6 +3,7 @@
 
 struct ref;
 struct transport;
+struct argv_array;
 
 struct transport_vtable {
        /**
@@ -17,11 +18,19 @@ struct transport_vtable {
         * the transport to try to share connections, for_push is a
         * hint as to whether the ultimate operation is a push or a fetch.
         *
+        * If communicating using protocol v2 a list of prefixes can be
+        * provided to be sent to the server to enable it to limit the ref
+        * advertisement.  Since ref filtering is done on the server's end, and
+        * only when using protocol v2, this list will be ignored when not
+        * using protocol v2 meaning this function can return refs which don't
+        * match the provided ref_prefixes.
+        *
         * If the transport is able to determine the remote hash for
         * the ref without a huge amount of effort, it should store it
         * in the ref's old_sha1 field; otherwise it should be all 0.
         **/
-       struct ref *(*get_refs_list)(struct transport *transport, int for_push);
+       struct ref *(*get_refs_list)(struct transport *transport, int for_push,
+                                    const struct argv_array *ref_prefixes);
 
        /**
         * Fetch the objects for the given refs. Note that this gets
index 00d48b5b565b0edfa826a8c426e76cfc60a2f0a1..2c4de32b3335312a4ba8c79e82606d1b8339570b 100644 (file)
 #include "sha1-array.h"
 #include "sigchain.h"
 #include "transport-internal.h"
+#include "protocol.h"
+#include "object-store.h"
+#include "color.h"
+
+static int transport_use_color = -1;
+static char transport_colors[][COLOR_MAXLEN] = {
+       GIT_COLOR_RESET,
+       GIT_COLOR_RED           /* REJECTED */
+};
+
+enum color_transport {
+       TRANSPORT_COLOR_RESET = 0,
+       TRANSPORT_COLOR_REJECTED = 1
+};
+
+static int transport_color_config(void)
+{
+       const char *keys[] = {
+               "color.transport.reset",
+               "color.transport.rejected"
+       }, *key = "color.transport";
+       char *value;
+       int i;
+       static int initialized;
+
+       if (initialized)
+               return 0;
+       initialized = 1;
+
+       if (!git_config_get_string(key, &value))
+               transport_use_color = git_config_colorbool(key, value);
+
+       if (!want_color_stderr(transport_use_color))
+               return 0;
+
+       for (i = 0; i < ARRAY_SIZE(keys); i++)
+               if (!git_config_get_string(keys[i], &value)) {
+                       if (!value)
+                               return config_error_nonbool(keys[i]);
+                       if (color_parse(value, transport_colors[i]) < 0)
+                               return -1;
+               }
+
+       return 0;
+}
+
+static const char *transport_get_color(enum color_transport ix)
+{
+       if (want_color_stderr(transport_use_color))
+               return transport_colors[ix];
+       return "";
+}
 
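transport_color_config() introduces color.transport with .rejected and .reset slots, used further down to highlight the status letter of rejected refs in push output on stderr. Enabling it would look like this (keys taken from the hunk above):

    git config color.transport auto
    git config color.transport.rejected "bold red"
    # a subsequent non-fast-forward push shows its " ! [rejected]" line in red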
 static void set_upstreams(struct transport *transport, struct ref *refs,
        int pretend)
@@ -71,7 +123,9 @@ struct bundle_transport_data {
        struct bundle_header header;
 };
 
-static struct ref *get_refs_from_bundle(struct transport *transport, int for_push)
+static struct ref *get_refs_from_bundle(struct transport *transport,
+                                       int for_push,
+                                       const struct argv_array *ref_prefixes)
 {
        struct bundle_transport_data *data = transport->data;
        struct ref *result = NULL;
@@ -117,6 +171,7 @@ struct git_transport_data {
        struct child_process *conn;
        int fd[2];
        unsigned got_remote_heads : 1;
+       enum protocol_version version;
        struct oid_array extra_have;
        struct oid_array shallow;
 };
@@ -196,16 +251,35 @@ static int connect_setup(struct transport *transport, int for_push)
        return 0;
 }
 
-static struct ref *get_refs_via_connect(struct transport *transport, int for_push)
+static struct ref *get_refs_via_connect(struct transport *transport, int for_push,
+                                       const struct argv_array *ref_prefixes)
 {
        struct git_transport_data *data = transport->data;
-       struct ref *refs;
+       struct ref *refs = NULL;
+       struct packet_reader reader;
 
        connect_setup(transport, for_push);
-       get_remote_heads(data->fd[0], NULL, 0, &refs,
-                        for_push ? REF_NORMAL : 0,
-                        &data->extra_have,
-                        &data->shallow);
+
+       packet_reader_init(&reader, data->fd[0], NULL, 0,
+                          PACKET_READ_CHOMP_NEWLINE |
+                          PACKET_READ_GENTLE_ON_EOF);
+
+       data->version = discover_version(&reader);
+       switch (data->version) {
+       case protocol_v2:
+               get_remote_refs(data->fd[1], &reader, &refs, for_push,
+                               ref_prefixes, transport->server_options);
+               break;
+       case protocol_v1:
+       case protocol_v0:
+               get_remote_heads(&reader, &refs,
+                                for_push ? REF_NORMAL : 0,
+                                &data->extra_have,
+                                &data->shallow);
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
+       }
        data->got_remote_heads = 1;
 
        return refs;
@@ -216,7 +290,7 @@ static int fetch_refs_via_pack(struct transport *transport,
 {
        int ret = 0;
        struct git_transport_data *data = transport->data;
-       struct ref *refs;
+       struct ref *refs = NULL;
        char *dest = xstrdup(transport->url);
        struct fetch_pack_args args;
        struct ref *refs_tmp = NULL;
@@ -241,18 +315,30 @@ static int fetch_refs_via_pack(struct transport *transport,
        args.from_promisor = data->options.from_promisor;
        args.no_dependents = data->options.no_dependents;
        args.filter_options = data->options.filter_options;
-
-       if (!data->got_remote_heads) {
-               connect_setup(transport, 0);
-               get_remote_heads(data->fd[0], NULL, 0, &refs_tmp, 0,
-                                NULL, &data->shallow);
-               data->got_remote_heads = 1;
+       args.stateless_rpc = transport->stateless_rpc;
+       args.server_options = transport->server_options;
+
+       if (!data->got_remote_heads)
+               refs_tmp = get_refs_via_connect(transport, 0, NULL);
+
+       switch (data->version) {
+       case protocol_v2:
+               refs = fetch_pack(&args, data->fd, data->conn,
+                                 refs_tmp ? refs_tmp : transport->remote_refs,
+                                 dest, to_fetch, nr_heads, &data->shallow,
+                                 &transport->pack_lockfile, data->version);
+               break;
+       case protocol_v1:
+       case protocol_v0:
+               refs = fetch_pack(&args, data->fd, data->conn,
+                                 refs_tmp ? refs_tmp : transport->remote_refs,
+                                 dest, to_fetch, nr_heads, &data->shallow,
+                                 &transport->pack_lockfile, data->version);
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
        }
 
-       refs = fetch_pack(&args, data->fd, data->conn,
-                         refs_tmp ? refs_tmp : transport->remote_refs,
-                         dest, to_fetch, nr_heads, &data->shallow,
-                         &transport->pack_lockfile);
        close(data->fd[0]);
        close(data->fd[1]);
        if (finish_connect(data->conn))
@@ -338,7 +424,13 @@ static void print_ref_status(char flag, const char *summary,
                else
                        fprintf(stdout, "%s\n", summary);
        } else {
-               fprintf(stderr, " %c %-*s ", flag, summary_width, summary);
+               const char *red = "", *reset = "";
+               if (push_had_errors(to)) {
+                       red = transport_get_color(TRANSPORT_COLOR_REJECTED);
+                       reset = transport_get_color(TRANSPORT_COLOR_RESET);
+               }
+               fprintf(stderr, " %s%c %-*s%s ", red, flag, summary_width,
+                       summary, reset);
                if (from)
                        fprintf(stderr, "%s -> %s", prettify_refname(from->name), prettify_refname(to->name));
                else
@@ -367,7 +459,7 @@ static void print_ok_ref_status(struct ref *ref, int porcelain, int summary_widt
                char type;
                const char *msg;
 
-               strbuf_add_unique_abbrev(&quickref, ref->old_oid.hash,
+               strbuf_add_unique_abbrev(&quickref, &ref->old_oid,
                                         DEFAULT_ABBREV);
                if (ref->forced_update) {
                        strbuf_addstr(&quickref, "...");
@@ -378,7 +470,7 @@ static void print_ok_ref_status(struct ref *ref, int porcelain, int summary_widt
                        type = ' ';
                        msg = NULL;
                }
-               strbuf_add_unique_abbrev(&quickref, ref->new_oid.hash,
+               strbuf_add_unique_abbrev(&quickref, &ref->new_oid,
                                         DEFAULT_ABBREV);
 
                print_ref_status(type, quickref.buf, ref, ref->peer_ref, msg,
@@ -461,7 +553,7 @@ static int print_one_push_status(struct ref *ref, const char *dest, int count,
 static int measure_abbrev(const struct object_id *oid, int sofar)
 {
        char hex[GIT_MAX_HEXSZ + 1];
-       int w = find_unique_abbrev_r(hex, oid->hash, DEFAULT_ABBREV);
+       int w = find_unique_abbrev_r(hex, oid, DEFAULT_ABBREV);
 
        return (w < sofar) ? sofar : w;
 }
@@ -487,6 +579,9 @@ void transport_print_push_status(const char *dest, struct ref *refs,
        char *head;
        int summary_width = transport_summary_width(refs);
 
+       if (transport_color_config() < 0)
+               warning(_("could not parse transport.color.* config"));
+
        head = resolve_refdup("HEAD", RESOLVE_REF_READING, NULL, NULL);
 
        if (verbose) {
@@ -551,16 +646,13 @@ static int git_transport_push(struct transport *transport, struct ref *remote_re
 {
        struct git_transport_data *data = transport->data;
        struct send_pack_args args;
-       int ret;
+       int ret = 0;
 
-       if (!data->got_remote_heads) {
-               struct ref *tmp_refs;
-               connect_setup(transport, 1);
+       if (transport_color_config() < 0)
+               return -1;
 
-               get_remote_heads(data->fd[0], NULL, 0, &tmp_refs, REF_NORMAL,
-                                NULL, &data->shallow);
-               data->got_remote_heads = 1;
-       }
+       if (!data->got_remote_heads)
+               get_refs_via_connect(transport, 1, NULL);
 
        memset(&args, 0, sizeof(args));
        args.send_mirror = !!(flags & TRANSPORT_PUSH_MIRROR);
@@ -582,8 +674,18 @@ static int git_transport_push(struct transport *transport, struct ref *remote_re
        else
                args.push_cert = SEND_PACK_PUSH_CERT_NEVER;
 
-       ret = send_pack(&args, data->fd, data->conn, remote_refs,
-                       &data->extra_have);
+       switch (data->version) {
+       case protocol_v2:
+               die("support for protocol v2 not implemented yet");
+               break;
+       case protocol_v1:
+       case protocol_v0:
+               ret = send_pack(&args, data->fd, data->conn, remote_refs,
+                               &data->extra_have);
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
+       }
 
        close(data->fd[1]);
        close(data->fd[0]);
@@ -997,6 +1099,9 @@ int transport_push(struct transport *transport,
        *reject_reasons = 0;
        transport_verify_remote_names(refspec_nr, refspec);
 
+       if (transport_color_config() < 0)
+               return -1;
+
        if (transport->vtable->push_refs) {
                struct ref *remote_refs;
                struct ref *local_refs = get_local_heads();
@@ -1006,11 +1111,38 @@ int transport_push(struct transport *transport,
                int porcelain = flags & TRANSPORT_PUSH_PORCELAIN;
                int pretend = flags & TRANSPORT_PUSH_DRY_RUN;
                int push_ret, ret, err;
+               struct refspec *tmp_rs;
+               struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
+               int i;
 
                if (check_push_refs(local_refs, refspec_nr, refspec) < 0)
                        return -1;
 
-               remote_refs = transport->vtable->get_refs_list(transport, 1);
+               tmp_rs = parse_push_refspec(refspec_nr, refspec);
+               for (i = 0; i < refspec_nr; i++) {
+                       const char *prefix = NULL;
+
+                       if (tmp_rs[i].dst)
+                               prefix = tmp_rs[i].dst;
+                       else if (tmp_rs[i].src && !tmp_rs[i].exact_sha1)
+                               prefix = tmp_rs[i].src;
+
+                       if (prefix) {
+                               const char *glob = strchr(prefix, '*');
+                               if (glob)
+                                       argv_array_pushf(&ref_prefixes, "%.*s",
+                                                        (int)(glob - prefix),
+                                                        prefix);
+                               else
+                                       expand_ref_prefix(&ref_prefixes, prefix);
+                       }
+               }
+
+               remote_refs = transport->vtable->get_refs_list(transport, 1,
+                                                              &ref_prefixes);
+
+               argv_array_clear(&ref_prefixes);
+               free_refspec(refspec_nr, tmp_rs);
 
                if (flags & TRANSPORT_PUSH_ALL)
                        match_flags |= MATCH_REFS_ALL;
@@ -1116,10 +1248,13 @@ int transport_push(struct transport *transport,
        return 1;
 }
 
-const struct ref *transport_get_remote_refs(struct transport *transport)
+const struct ref *transport_get_remote_refs(struct transport *transport,
+                                           const struct argv_array *ref_prefixes)
 {
        if (!transport->got_remote_refs) {
-               transport->remote_refs = transport->vtable->get_refs_list(transport, 0);
+               transport->remote_refs =
+                       transport->vtable->get_refs_list(transport, 0,
+                                                        ref_prefixes);
                transport->got_remote_refs = 1;
        }
 
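As a reading aid for the refspec handling added to transport_push() above: each push refspec contributes at most one ref prefix, and a globbing refspec is truncated at its '*'. A minimal sketch under that assumption (the refspec string below is illustrative, not taken from the patch):

        struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
        const char *prefix = "refs/heads/topic/*";   /* illustrative destination */
        const char *glob = strchr(prefix, '*');

        if (glob)
                /* "refs/heads/topic/*" contributes the prefix "refs/heads/topic/" */
                argv_array_pushf(&ref_prefixes, "%.*s",
                                 (int)(glob - prefix), prefix);
        else
                /* an exact name goes through expand_ref_prefix(), as in the hunk above */
                expand_ref_prefix(&ref_prefixes, prefix);

The resulting prefixes are only a hint to the server; matching the local refspecs against the returned refs still happens on the client side.
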
index 3c68d73b215bbabc81a75a810b1083697bfd6329..73a7be3c8a4d3b68838a3130599a5f8c628edece 100644 (file)
@@ -59,12 +59,24 @@ struct transport {
         */
        unsigned cloning : 1;
 
+       /*
+        * Indicates that the transport is connected via a half-duplex
+        * connection and should operate in stateless-rpc mode.
+        */
+       unsigned stateless_rpc : 1;
+
        /*
         * These strings will be passed to the {pre, post}-receive hook,
         * on the remote side, if both sides support the push options capability.
         */
        const struct string_list *push_options;
 
+       /*
+        * These strings will be passed to the remote side on each command
+        * request, if both sides support the server-option capability.
+        */
+       const struct string_list *server_options;
+
        char *pack_lockfile;
        signed verbose : 3;
        /**
@@ -194,7 +206,17 @@ int transport_push(struct transport *connection,
                   int refspec_nr, const char **refspec, int flags,
                   unsigned int * reject_reasons);
 
-const struct ref *transport_get_remote_refs(struct transport *transport);
+/*
+ * Retrieve refs from a remote.
+ *
+ * Optionally a list of ref prefixes can be provided which can be sent to the
+ * server (when communicating using protocol v2) to enable it to limit the ref
+ * advertisement.  Since ref filtering is done on the server's end (and only
+ * when using protocol v2), this can return refs which don't match the provided
+ * ref_prefixes.
+ */
+const struct ref *transport_get_remote_refs(struct transport *transport,
+                                           const struct argv_array *ref_prefixes);
 
 int transport_fetch_refs(struct transport *transport, struct ref *refs);
 void transport_unlock_pack(struct transport *transport);
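A hedged caller sketch for the signature documented above; the transport variable and the prefix are placeholders, and the prefix hint is only honored when protocol v2 is in use:

        struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
        const struct ref *refs;

        argv_array_push(&ref_prefixes, "refs/heads/");   /* placeholder prefix */
        refs = transport_get_remote_refs(transport, &ref_prefixes);
        /* The server may ignore the hint, so callers must still filter the refs. */
        argv_array_clear(&ref_prefixes);
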
index 63a87ed666bbb10cb3c2bd0e27117ac696e7d1b3..e11b3063afa610239162dc45c24528dd144c4759 100644 (file)
@@ -84,8 +84,7 @@ void *fill_tree_descriptor(struct tree_desc *desc, const struct object_id *oid)
        void *buf = NULL;
 
        if (oid) {
-               buf = read_object_with_reference(oid->hash, tree_type, &size,
-                                                NULL);
+               buf = read_object_with_reference(oid, tree_type, &size, NULL);
                if (!buf)
                        die("unable to read tree %s", oid_to_hex(oid));
        }
@@ -492,7 +491,7 @@ struct dir_state {
        unsigned char sha1[20];
 };
 
-static int find_tree_entry(struct tree_desc *t, const char *name, unsigned char *result, unsigned *mode)
+static int find_tree_entry(struct tree_desc *t, const char *name, struct object_id *result, unsigned *mode)
 {
        int namelen = strlen(name);
        while (t->size) {
@@ -511,7 +510,7 @@ static int find_tree_entry(struct tree_desc *t, const char *name, unsigned char
                if (cmp < 0)
                        break;
                if (entrylen == namelen) {
-                       hashcpy(result, oid->hash);
+                       oidcpy(result, oid);
                        return 0;
                }
                if (name[entrylen] != '/')
@@ -519,27 +518,27 @@ static int find_tree_entry(struct tree_desc *t, const char *name, unsigned char
                if (!S_ISDIR(*mode))
                        break;
                if (++entrylen == namelen) {
-                       hashcpy(result, oid->hash);
+                       oidcpy(result, oid);
                        return 0;
                }
-               return get_tree_entry(oid->hash, name + entrylen, result, mode);
+               return get_tree_entry(oid, name + entrylen, result, mode);
        }
        return -1;
 }
 
-int get_tree_entry(const unsigned char *tree_sha1, const char *name, unsigned char *sha1, unsigned *mode)
+int get_tree_entry(const struct object_id *tree_oid, const char *name, struct object_id *oid, unsigned *mode)
 {
        int retval;
        void *tree;
        unsigned long size;
-       unsigned char root[20];
+       struct object_id root;
 
-       tree = read_object_with_reference(tree_sha1, tree_type, &size, root);
+       tree = read_object_with_reference(tree_oid, tree_type, &size, &root);
        if (!tree)
                return -1;
 
        if (name[0] == '\0') {
-               hashcpy(sha1, root);
+               oidcpy(oid, &root);
                free(tree);
                return 0;
        }
@@ -549,7 +548,7 @@ int get_tree_entry(const unsigned char *tree_sha1, const char *name, unsigned ch
        } else {
                struct tree_desc t;
                init_tree_desc(&t, tree, size);
-               retval = find_tree_entry(&t, name, sha1, mode);
+               retval = find_tree_entry(&t, name, oid, mode);
        }
        free(tree);
        return retval;
@@ -583,14 +582,14 @@ enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_s
        struct dir_state *parents = NULL;
        size_t parents_alloc = 0;
        size_t i, parents_nr = 0;
-       unsigned char current_tree_sha1[20];
+       struct object_id current_tree_oid;
        struct strbuf namebuf = STRBUF_INIT;
        struct tree_desc t;
        int follows_remaining = GET_TREE_ENTRY_FOLLOW_SYMLINKS_MAX_LINKS;
 
        init_tree_desc(&t, NULL, 0UL);
        strbuf_addstr(&namebuf, name);
-       hashcpy(current_tree_sha1, tree_sha1);
+       hashcpy(current_tree_oid.hash, tree_sha1);
 
        while (1) {
                int find_result;
@@ -599,22 +598,22 @@ enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_s
 
                if (!t.buffer) {
                        void *tree;
-                       unsigned char root[20];
+                       struct object_id root;
                        unsigned long size;
-                       tree = read_object_with_reference(current_tree_sha1,
+                       tree = read_object_with_reference(&current_tree_oid,
                                                          tree_type, &size,
-                                                         root);
+                                                         &root);
                        if (!tree)
                                goto done;
 
                        ALLOC_GROW(parents, parents_nr + 1, parents_alloc);
                        parents[parents_nr].tree = tree;
                        parents[parents_nr].size = size;
-                       hashcpy(parents[parents_nr].sha1, root);
+                       hashcpy(parents[parents_nr].sha1, root.hash);
                        parents_nr++;
 
                        if (namebuf.buf[0] == '\0') {
-                               hashcpy(result, root);
+                               hashcpy(result, root.hash);
                                retval = FOUND;
                                goto done;
                        }
@@ -671,14 +670,14 @@ enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_s
 
                /* Look up the first (or only) path component in the tree. */
                find_result = find_tree_entry(&t, namebuf.buf,
-                                             current_tree_sha1, mode);
+                                             &current_tree_oid, mode);
                if (find_result) {
                        goto done;
                }
 
                if (S_ISDIR(*mode)) {
                        if (!remainder) {
-                               hashcpy(result, current_tree_sha1);
+                               hashcpy(result, current_tree_oid.hash);
                                retval = FOUND;
                                goto done;
                        }
@@ -688,7 +687,7 @@ enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_s
                                      1 + first_slash - namebuf.buf);
                } else if (S_ISREG(*mode)) {
                        if (!remainder) {
-                               hashcpy(result, current_tree_sha1);
+                               hashcpy(result, current_tree_oid.hash);
                                retval = FOUND;
                        } else {
                                retval = NOT_DIR;
@@ -714,8 +713,8 @@ enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_s
                         */
                        retval = DANGLING_SYMLINK;
 
-                       contents = read_sha1_file(current_tree_sha1, &type,
-                                                 &link_len);
+                       contents = read_object_file(&current_tree_oid, &type,
+                                                   &link_len);
 
                        if (!contents)
                                goto done;
index b6bd1b4ccfbb8bb69c464ea687c63a2058a424b8..4617deeb0e09e71c7ba7231192ece055eccb2dde 100644 (file)
@@ -79,7 +79,7 @@ struct traverse_info {
        int show_all_errors;
 };
 
-int get_tree_entry(const unsigned char *, const char *, unsigned char *, unsigned *);
+int get_tree_entry(const struct object_id *, const char *, struct object_id *, unsigned *);
 extern char *make_traverse_path(char *path, const struct traverse_info *info, const struct name_entry *n);
 extern void setup_traverse_info(struct traverse_info *info, const char *base);
 
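For orientation, a hedged usage sketch of the object_id-based get_tree_entry() declared above; tree_oid and the path are placeholders:

        struct object_id entry_oid;
        unsigned mode;

        /* Look up a path inside the tree named by tree_oid (placeholder). */
        if (get_tree_entry(&tree_oid, "Documentation/Makefile", &entry_oid, &mode) < 0)
                die("path not found in tree %s", oid_to_hex(&tree_oid));
        /* entry_oid and mode now describe the matching tree entry. */
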
diff --git a/tree.c b/tree.c
index b224115e0f4d61368560eba406a04f0259b7c4f0..244eb5e665e931a6b735366d74b5ed2bcbce4c9b 100644 (file)
--- a/tree.c
+++ b/tree.c
@@ -10,7 +10,7 @@
 const char *tree_type = "tree";
 
 static int read_one_entry_opt(struct index_state *istate,
-                             const unsigned char *sha1,
+                             const struct object_id *oid,
                              const char *base, int baselen,
                              const char *pathname,
                              unsigned mode, int stage, int opt)
@@ -31,16 +31,16 @@ static int read_one_entry_opt(struct index_state *istate,
        ce->ce_namelen = baselen + len;
        memcpy(ce->name, base, baselen);
        memcpy(ce->name + baselen, pathname, len+1);
-       hashcpy(ce->oid.hash, sha1);
+       oidcpy(&ce->oid, oid);
        return add_index_entry(istate, ce, opt);
 }
 
-static int read_one_entry(const unsigned char *sha1, struct strbuf *base,
+static int read_one_entry(const struct object_id *oid, struct strbuf *base,
                          const char *pathname, unsigned mode, int stage,
                          void *context)
 {
        struct index_state *istate = context;
-       return read_one_entry_opt(istate, sha1, base->buf, base->len, pathname,
+       return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
                                  mode, stage,
                                  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
 }
@@ -49,12 +49,12 @@ static int read_one_entry(const unsigned char *sha1, struct strbuf *base,
  * This is used when the caller knows there is no existing entries at
  * the stage that will conflict with the entry being added.
  */
-static int read_one_entry_quick(const unsigned char *sha1, struct strbuf *base,
+static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
                                const char *pathname, unsigned mode, int stage,
                                void *context)
 {
        struct index_state *istate = context;
-       return read_one_entry_opt(istate, sha1, base->buf, base->len, pathname,
+       return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
                                  mode, stage,
                                  ADD_CACHE_JUST_APPEND);
 }
@@ -83,7 +83,7 @@ static int read_tree_1(struct tree *tree, struct strbuf *base,
                                continue;
                }
 
-               switch (fn(entry.oid->hash, base,
+               switch (fn(entry.oid, base,
                           entry.path, entry.mode, stage, context)) {
                case 0:
                        continue;
@@ -109,7 +109,7 @@ static int read_tree_1(struct tree *tree, struct strbuf *base,
                                    oid_to_hex(entry.oid),
                                    base->buf, entry.path);
 
-                       oidcpy(&oid, &commit->tree->object.oid);
+                       oidcpy(&oid, get_commit_tree_oid(commit));
                }
                else
                        continue;
@@ -219,7 +219,7 @@ int parse_tree_gently(struct tree *item, int quiet_on_missing)
 
        if (item->object.parsed)
                return 0;
-       buffer = read_sha1_file(item->object.oid.hash, &type, &size);
+       buffer = read_object_file(&item->object.oid, &type, &size);
        if (!buffer)
                return quiet_on_missing ? -1 :
                        error("Could not read %s",
@@ -248,7 +248,7 @@ struct tree *parse_tree_indirect(const struct object_id *oid)
                if (obj->type == OBJ_TREE)
                        return (struct tree *) obj;
                else if (obj->type == OBJ_COMMIT)
-                       obj = &(((struct commit *) obj)->tree->object);
+                       obj = &(get_commit_tree(((struct commit *)obj))->object);
                else if (obj->type == OBJ_TAG)
                        obj = ((struct tag *) obj)->tagged;
                else
diff --git a/tree.h b/tree.h
index 744e6dc2ac883adfa0e079f5f84f45a45e22b59d..e2a80be4ef87e35d895e8591a0f8a75df347d347 100644 (file)
--- a/tree.h
+++ b/tree.h
@@ -27,7 +27,7 @@ void free_tree_buffer(struct tree *tree);
 struct tree *parse_tree_indirect(const struct object_id *oid);
 
 #define READ_TREE_RECURSIVE 1
-typedef int (*read_tree_fn_t)(const unsigned char *, struct strbuf *, const char *, unsigned int, int, void *);
+typedef int (*read_tree_fn_t)(const struct object_id *, struct strbuf *, const char *, unsigned int, int, void *);
 
 extern int read_tree_recursive(struct tree *tree,
                               const char *base, int baselen,
diff --git a/unicode-width.h b/unicode-width.h
new file mode 100644 (file)
index 0000000..6dee2c7
--- /dev/null
@@ -0,0 +1,422 @@
+static const struct interval zero_width[] = {
+{ 0x0300, 0x036F },
+{ 0x0483, 0x0489 },
+{ 0x0591, 0x05BD },
+{ 0x05BF, 0x05BF },
+{ 0x05C1, 0x05C2 },
+{ 0x05C4, 0x05C5 },
+{ 0x05C7, 0x05C7 },
+{ 0x0600, 0x0605 },
+{ 0x0610, 0x061A },
+{ 0x061C, 0x061C },
+{ 0x064B, 0x065F },
+{ 0x0670, 0x0670 },
+{ 0x06D6, 0x06DD },
+{ 0x06DF, 0x06E4 },
+{ 0x06E7, 0x06E8 },
+{ 0x06EA, 0x06ED },
+{ 0x070F, 0x070F },
+{ 0x0711, 0x0711 },
+{ 0x0730, 0x074A },
+{ 0x07A6, 0x07B0 },
+{ 0x07EB, 0x07F3 },
+{ 0x0816, 0x0819 },
+{ 0x081B, 0x0823 },
+{ 0x0825, 0x0827 },
+{ 0x0829, 0x082D },
+{ 0x0859, 0x085B },
+{ 0x08D4, 0x0902 },
+{ 0x093A, 0x093A },
+{ 0x093C, 0x093C },
+{ 0x0941, 0x0948 },
+{ 0x094D, 0x094D },
+{ 0x0951, 0x0957 },
+{ 0x0962, 0x0963 },
+{ 0x0981, 0x0981 },
+{ 0x09BC, 0x09BC },
+{ 0x09C1, 0x09C4 },
+{ 0x09CD, 0x09CD },
+{ 0x09E2, 0x09E3 },
+{ 0x0A01, 0x0A02 },
+{ 0x0A3C, 0x0A3C },
+{ 0x0A41, 0x0A42 },
+{ 0x0A47, 0x0A48 },
+{ 0x0A4B, 0x0A4D },
+{ 0x0A51, 0x0A51 },
+{ 0x0A70, 0x0A71 },
+{ 0x0A75, 0x0A75 },
+{ 0x0A81, 0x0A82 },
+{ 0x0ABC, 0x0ABC },
+{ 0x0AC1, 0x0AC5 },
+{ 0x0AC7, 0x0AC8 },
+{ 0x0ACD, 0x0ACD },
+{ 0x0AE2, 0x0AE3 },
+{ 0x0AFA, 0x0AFF },
+{ 0x0B01, 0x0B01 },
+{ 0x0B3C, 0x0B3C },
+{ 0x0B3F, 0x0B3F },
+{ 0x0B41, 0x0B44 },
+{ 0x0B4D, 0x0B4D },
+{ 0x0B56, 0x0B56 },
+{ 0x0B62, 0x0B63 },
+{ 0x0B82, 0x0B82 },
+{ 0x0BC0, 0x0BC0 },
+{ 0x0BCD, 0x0BCD },
+{ 0x0C00, 0x0C00 },
+{ 0x0C3E, 0x0C40 },
+{ 0x0C46, 0x0C48 },
+{ 0x0C4A, 0x0C4D },
+{ 0x0C55, 0x0C56 },
+{ 0x0C62, 0x0C63 },
+{ 0x0C81, 0x0C81 },
+{ 0x0CBC, 0x0CBC },
+{ 0x0CBF, 0x0CBF },
+{ 0x0CC6, 0x0CC6 },
+{ 0x0CCC, 0x0CCD },
+{ 0x0CE2, 0x0CE3 },
+{ 0x0D00, 0x0D01 },
+{ 0x0D3B, 0x0D3C },
+{ 0x0D41, 0x0D44 },
+{ 0x0D4D, 0x0D4D },
+{ 0x0D62, 0x0D63 },
+{ 0x0DCA, 0x0DCA },
+{ 0x0DD2, 0x0DD4 },
+{ 0x0DD6, 0x0DD6 },
+{ 0x0E31, 0x0E31 },
+{ 0x0E34, 0x0E3A },
+{ 0x0E47, 0x0E4E },
+{ 0x0EB1, 0x0EB1 },
+{ 0x0EB4, 0x0EB9 },
+{ 0x0EBB, 0x0EBC },
+{ 0x0EC8, 0x0ECD },
+{ 0x0F18, 0x0F19 },
+{ 0x0F35, 0x0F35 },
+{ 0x0F37, 0x0F37 },
+{ 0x0F39, 0x0F39 },
+{ 0x0F71, 0x0F7E },
+{ 0x0F80, 0x0F84 },
+{ 0x0F86, 0x0F87 },
+{ 0x0F8D, 0x0F97 },
+{ 0x0F99, 0x0FBC },
+{ 0x0FC6, 0x0FC6 },
+{ 0x102D, 0x1030 },
+{ 0x1032, 0x1037 },
+{ 0x1039, 0x103A },
+{ 0x103D, 0x103E },
+{ 0x1058, 0x1059 },
+{ 0x105E, 0x1060 },
+{ 0x1071, 0x1074 },
+{ 0x1082, 0x1082 },
+{ 0x1085, 0x1086 },
+{ 0x108D, 0x108D },
+{ 0x109D, 0x109D },
+{ 0x1160, 0x11FF },
+{ 0x135D, 0x135F },
+{ 0x1712, 0x1714 },
+{ 0x1732, 0x1734 },
+{ 0x1752, 0x1753 },
+{ 0x1772, 0x1773 },
+{ 0x17B4, 0x17B5 },
+{ 0x17B7, 0x17BD },
+{ 0x17C6, 0x17C6 },
+{ 0x17C9, 0x17D3 },
+{ 0x17DD, 0x17DD },
+{ 0x180B, 0x180E },
+{ 0x1885, 0x1886 },
+{ 0x18A9, 0x18A9 },
+{ 0x1920, 0x1922 },
+{ 0x1927, 0x1928 },
+{ 0x1932, 0x1932 },
+{ 0x1939, 0x193B },
+{ 0x1A17, 0x1A18 },
+{ 0x1A1B, 0x1A1B },
+{ 0x1A56, 0x1A56 },
+{ 0x1A58, 0x1A5E },
+{ 0x1A60, 0x1A60 },
+{ 0x1A62, 0x1A62 },
+{ 0x1A65, 0x1A6C },
+{ 0x1A73, 0x1A7C },
+{ 0x1A7F, 0x1A7F },
+{ 0x1AB0, 0x1ABE },
+{ 0x1B00, 0x1B03 },
+{ 0x1B34, 0x1B34 },
+{ 0x1B36, 0x1B3A },
+{ 0x1B3C, 0x1B3C },
+{ 0x1B42, 0x1B42 },
+{ 0x1B6B, 0x1B73 },
+{ 0x1B80, 0x1B81 },
+{ 0x1BA2, 0x1BA5 },
+{ 0x1BA8, 0x1BA9 },
+{ 0x1BAB, 0x1BAD },
+{ 0x1BE6, 0x1BE6 },
+{ 0x1BE8, 0x1BE9 },
+{ 0x1BED, 0x1BED },
+{ 0x1BEF, 0x1BF1 },
+{ 0x1C2C, 0x1C33 },
+{ 0x1C36, 0x1C37 },
+{ 0x1CD0, 0x1CD2 },
+{ 0x1CD4, 0x1CE0 },
+{ 0x1CE2, 0x1CE8 },
+{ 0x1CED, 0x1CED },
+{ 0x1CF4, 0x1CF4 },
+{ 0x1CF8, 0x1CF9 },
+{ 0x1DC0, 0x1DF9 },
+{ 0x1DFB, 0x1DFF },
+{ 0x200B, 0x200F },
+{ 0x202A, 0x202E },
+{ 0x2060, 0x2064 },
+{ 0x2066, 0x206F },
+{ 0x20D0, 0x20F0 },
+{ 0x2CEF, 0x2CF1 },
+{ 0x2D7F, 0x2D7F },
+{ 0x2DE0, 0x2DFF },
+{ 0x302A, 0x302D },
+{ 0x3099, 0x309A },
+{ 0xA66F, 0xA672 },
+{ 0xA674, 0xA67D },
+{ 0xA69E, 0xA69F },
+{ 0xA6F0, 0xA6F1 },
+{ 0xA802, 0xA802 },
+{ 0xA806, 0xA806 },
+{ 0xA80B, 0xA80B },
+{ 0xA825, 0xA826 },
+{ 0xA8C4, 0xA8C5 },
+{ 0xA8E0, 0xA8F1 },
+{ 0xA926, 0xA92D },
+{ 0xA947, 0xA951 },
+{ 0xA980, 0xA982 },
+{ 0xA9B3, 0xA9B3 },
+{ 0xA9B6, 0xA9B9 },
+{ 0xA9BC, 0xA9BC },
+{ 0xA9E5, 0xA9E5 },
+{ 0xAA29, 0xAA2E },
+{ 0xAA31, 0xAA32 },
+{ 0xAA35, 0xAA36 },
+{ 0xAA43, 0xAA43 },
+{ 0xAA4C, 0xAA4C },
+{ 0xAA7C, 0xAA7C },
+{ 0xAAB0, 0xAAB0 },
+{ 0xAAB2, 0xAAB4 },
+{ 0xAAB7, 0xAAB8 },
+{ 0xAABE, 0xAABF },
+{ 0xAAC1, 0xAAC1 },
+{ 0xAAEC, 0xAAED },
+{ 0xAAF6, 0xAAF6 },
+{ 0xABE5, 0xABE5 },
+{ 0xABE8, 0xABE8 },
+{ 0xABED, 0xABED },
+{ 0xFB1E, 0xFB1E },
+{ 0xFE00, 0xFE0F },
+{ 0xFE20, 0xFE2F },
+{ 0xFEFF, 0xFEFF },
+{ 0xFFF9, 0xFFFB },
+{ 0x101FD, 0x101FD },
+{ 0x102E0, 0x102E0 },
+{ 0x10376, 0x1037A },
+{ 0x10A01, 0x10A03 },
+{ 0x10A05, 0x10A06 },
+{ 0x10A0C, 0x10A0F },
+{ 0x10A38, 0x10A3A },
+{ 0x10A3F, 0x10A3F },
+{ 0x10AE5, 0x10AE6 },
+{ 0x11001, 0x11001 },
+{ 0x11038, 0x11046 },
+{ 0x1107F, 0x11081 },
+{ 0x110B3, 0x110B6 },
+{ 0x110B9, 0x110BA },
+{ 0x110BD, 0x110BD },
+{ 0x11100, 0x11102 },
+{ 0x11127, 0x1112B },
+{ 0x1112D, 0x11134 },
+{ 0x11173, 0x11173 },
+{ 0x11180, 0x11181 },
+{ 0x111B6, 0x111BE },
+{ 0x111CA, 0x111CC },
+{ 0x1122F, 0x11231 },
+{ 0x11234, 0x11234 },
+{ 0x11236, 0x11237 },
+{ 0x1123E, 0x1123E },
+{ 0x112DF, 0x112DF },
+{ 0x112E3, 0x112EA },
+{ 0x11300, 0x11301 },
+{ 0x1133C, 0x1133C },
+{ 0x11340, 0x11340 },
+{ 0x11366, 0x1136C },
+{ 0x11370, 0x11374 },
+{ 0x11438, 0x1143F },
+{ 0x11442, 0x11444 },
+{ 0x11446, 0x11446 },
+{ 0x114B3, 0x114B8 },
+{ 0x114BA, 0x114BA },
+{ 0x114BF, 0x114C0 },
+{ 0x114C2, 0x114C3 },
+{ 0x115B2, 0x115B5 },
+{ 0x115BC, 0x115BD },
+{ 0x115BF, 0x115C0 },
+{ 0x115DC, 0x115DD },
+{ 0x11633, 0x1163A },
+{ 0x1163D, 0x1163D },
+{ 0x1163F, 0x11640 },
+{ 0x116AB, 0x116AB },
+{ 0x116AD, 0x116AD },
+{ 0x116B0, 0x116B5 },
+{ 0x116B7, 0x116B7 },
+{ 0x1171D, 0x1171F },
+{ 0x11722, 0x11725 },
+{ 0x11727, 0x1172B },
+{ 0x11A01, 0x11A06 },
+{ 0x11A09, 0x11A0A },
+{ 0x11A33, 0x11A38 },
+{ 0x11A3B, 0x11A3E },
+{ 0x11A47, 0x11A47 },
+{ 0x11A51, 0x11A56 },
+{ 0x11A59, 0x11A5B },
+{ 0x11A8A, 0x11A96 },
+{ 0x11A98, 0x11A99 },
+{ 0x11C30, 0x11C36 },
+{ 0x11C38, 0x11C3D },
+{ 0x11C3F, 0x11C3F },
+{ 0x11C92, 0x11CA7 },
+{ 0x11CAA, 0x11CB0 },
+{ 0x11CB2, 0x11CB3 },
+{ 0x11CB5, 0x11CB6 },
+{ 0x11D31, 0x11D36 },
+{ 0x11D3A, 0x11D3A },
+{ 0x11D3C, 0x11D3D },
+{ 0x11D3F, 0x11D45 },
+{ 0x11D47, 0x11D47 },
+{ 0x16AF0, 0x16AF4 },
+{ 0x16B30, 0x16B36 },
+{ 0x16F8F, 0x16F92 },
+{ 0x1BC9D, 0x1BC9E },
+{ 0x1BCA0, 0x1BCA3 },
+{ 0x1D167, 0x1D169 },
+{ 0x1D173, 0x1D182 },
+{ 0x1D185, 0x1D18B },
+{ 0x1D1AA, 0x1D1AD },
+{ 0x1D242, 0x1D244 },
+{ 0x1DA00, 0x1DA36 },
+{ 0x1DA3B, 0x1DA6C },
+{ 0x1DA75, 0x1DA75 },
+{ 0x1DA84, 0x1DA84 },
+{ 0x1DA9B, 0x1DA9F },
+{ 0x1DAA1, 0x1DAAF },
+{ 0x1E000, 0x1E006 },
+{ 0x1E008, 0x1E018 },
+{ 0x1E01B, 0x1E021 },
+{ 0x1E023, 0x1E024 },
+{ 0x1E026, 0x1E02A },
+{ 0x1E8D0, 0x1E8D6 },
+{ 0x1E944, 0x1E94A },
+{ 0xE0001, 0xE0001 },
+{ 0xE0020, 0xE007F },
+{ 0xE0100, 0xE01EF }
+};
+static const struct interval double_width[] = {
+{ 0x1100, 0x115F },
+{ 0x231A, 0x231B },
+{ 0x2329, 0x232A },
+{ 0x23E9, 0x23EC },
+{ 0x23F0, 0x23F0 },
+{ 0x23F3, 0x23F3 },
+{ 0x25FD, 0x25FE },
+{ 0x2614, 0x2615 },
+{ 0x2648, 0x2653 },
+{ 0x267F, 0x267F },
+{ 0x2693, 0x2693 },
+{ 0x26A1, 0x26A1 },
+{ 0x26AA, 0x26AB },
+{ 0x26BD, 0x26BE },
+{ 0x26C4, 0x26C5 },
+{ 0x26CE, 0x26CE },
+{ 0x26D4, 0x26D4 },
+{ 0x26EA, 0x26EA },
+{ 0x26F2, 0x26F3 },
+{ 0x26F5, 0x26F5 },
+{ 0x26FA, 0x26FA },
+{ 0x26FD, 0x26FD },
+{ 0x2705, 0x2705 },
+{ 0x270A, 0x270B },
+{ 0x2728, 0x2728 },
+{ 0x274C, 0x274C },
+{ 0x274E, 0x274E },
+{ 0x2753, 0x2755 },
+{ 0x2757, 0x2757 },
+{ 0x2795, 0x2797 },
+{ 0x27B0, 0x27B0 },
+{ 0x27BF, 0x27BF },
+{ 0x2B1B, 0x2B1C },
+{ 0x2B50, 0x2B50 },
+{ 0x2B55, 0x2B55 },
+{ 0x2E80, 0x2E99 },
+{ 0x2E9B, 0x2EF3 },
+{ 0x2F00, 0x2FD5 },
+{ 0x2FF0, 0x2FFB },
+{ 0x3000, 0x303E },
+{ 0x3041, 0x3096 },
+{ 0x3099, 0x30FF },
+{ 0x3105, 0x312E },
+{ 0x3131, 0x318E },
+{ 0x3190, 0x31BA },
+{ 0x31C0, 0x31E3 },
+{ 0x31F0, 0x321E },
+{ 0x3220, 0x3247 },
+{ 0x3250, 0x32FE },
+{ 0x3300, 0x4DBF },
+{ 0x4E00, 0xA48C },
+{ 0xA490, 0xA4C6 },
+{ 0xA960, 0xA97C },
+{ 0xAC00, 0xD7A3 },
+{ 0xF900, 0xFAFF },
+{ 0xFE10, 0xFE19 },
+{ 0xFE30, 0xFE52 },
+{ 0xFE54, 0xFE66 },
+{ 0xFE68, 0xFE6B },
+{ 0xFF01, 0xFF60 },
+{ 0xFFE0, 0xFFE6 },
+{ 0x16FE0, 0x16FE1 },
+{ 0x17000, 0x187EC },
+{ 0x18800, 0x18AF2 },
+{ 0x1B000, 0x1B11E },
+{ 0x1B170, 0x1B2FB },
+{ 0x1F004, 0x1F004 },
+{ 0x1F0CF, 0x1F0CF },
+{ 0x1F18E, 0x1F18E },
+{ 0x1F191, 0x1F19A },
+{ 0x1F200, 0x1F202 },
+{ 0x1F210, 0x1F23B },
+{ 0x1F240, 0x1F248 },
+{ 0x1F250, 0x1F251 },
+{ 0x1F260, 0x1F265 },
+{ 0x1F300, 0x1F320 },
+{ 0x1F32D, 0x1F335 },
+{ 0x1F337, 0x1F37C },
+{ 0x1F37E, 0x1F393 },
+{ 0x1F3A0, 0x1F3CA },
+{ 0x1F3CF, 0x1F3D3 },
+{ 0x1F3E0, 0x1F3F0 },
+{ 0x1F3F4, 0x1F3F4 },
+{ 0x1F3F8, 0x1F43E },
+{ 0x1F440, 0x1F440 },
+{ 0x1F442, 0x1F4FC },
+{ 0x1F4FF, 0x1F53D },
+{ 0x1F54B, 0x1F54E },
+{ 0x1F550, 0x1F567 },
+{ 0x1F57A, 0x1F57A },
+{ 0x1F595, 0x1F596 },
+{ 0x1F5A4, 0x1F5A4 },
+{ 0x1F5FB, 0x1F64F },
+{ 0x1F680, 0x1F6C5 },
+{ 0x1F6CC, 0x1F6CC },
+{ 0x1F6D0, 0x1F6D2 },
+{ 0x1F6EB, 0x1F6EC },
+{ 0x1F6F4, 0x1F6F8 },
+{ 0x1F910, 0x1F93E },
+{ 0x1F940, 0x1F94C },
+{ 0x1F950, 0x1F96B },
+{ 0x1F980, 0x1F997 },
+{ 0x1F9C0, 0x1F9C0 },
+{ 0x1F9D0, 0x1F9E6 },
+{ 0x20000, 0x2FFFD },
+{ 0x30000, 0x3FFFD }
+};
diff --git a/unicode_width.h b/unicode_width.h
deleted file mode 100644 (file)
index 6dee2c7..0000000
+++ /dev/null
@@ -1,422 +0,0 @@
-static const struct interval zero_width[] = {
-{ 0x0300, 0x036F },
-{ 0x0483, 0x0489 },
-{ 0x0591, 0x05BD },
-{ 0x05BF, 0x05BF },
-{ 0x05C1, 0x05C2 },
-{ 0x05C4, 0x05C5 },
-{ 0x05C7, 0x05C7 },
-{ 0x0600, 0x0605 },
-{ 0x0610, 0x061A },
-{ 0x061C, 0x061C },
-{ 0x064B, 0x065F },
-{ 0x0670, 0x0670 },
-{ 0x06D6, 0x06DD },
-{ 0x06DF, 0x06E4 },
-{ 0x06E7, 0x06E8 },
-{ 0x06EA, 0x06ED },
-{ 0x070F, 0x070F },
-{ 0x0711, 0x0711 },
-{ 0x0730, 0x074A },
-{ 0x07A6, 0x07B0 },
-{ 0x07EB, 0x07F3 },
-{ 0x0816, 0x0819 },
-{ 0x081B, 0x0823 },
-{ 0x0825, 0x0827 },
-{ 0x0829, 0x082D },
-{ 0x0859, 0x085B },
-{ 0x08D4, 0x0902 },
-{ 0x093A, 0x093A },
-{ 0x093C, 0x093C },
-{ 0x0941, 0x0948 },
-{ 0x094D, 0x094D },
-{ 0x0951, 0x0957 },
-{ 0x0962, 0x0963 },
-{ 0x0981, 0x0981 },
-{ 0x09BC, 0x09BC },
-{ 0x09C1, 0x09C4 },
-{ 0x09CD, 0x09CD },
-{ 0x09E2, 0x09E3 },
-{ 0x0A01, 0x0A02 },
-{ 0x0A3C, 0x0A3C },
-{ 0x0A41, 0x0A42 },
-{ 0x0A47, 0x0A48 },
-{ 0x0A4B, 0x0A4D },
-{ 0x0A51, 0x0A51 },
-{ 0x0A70, 0x0A71 },
-{ 0x0A75, 0x0A75 },
-{ 0x0A81, 0x0A82 },
-{ 0x0ABC, 0x0ABC },
-{ 0x0AC1, 0x0AC5 },
-{ 0x0AC7, 0x0AC8 },
-{ 0x0ACD, 0x0ACD },
-{ 0x0AE2, 0x0AE3 },
-{ 0x0AFA, 0x0AFF },
-{ 0x0B01, 0x0B01 },
-{ 0x0B3C, 0x0B3C },
-{ 0x0B3F, 0x0B3F },
-{ 0x0B41, 0x0B44 },
-{ 0x0B4D, 0x0B4D },
-{ 0x0B56, 0x0B56 },
-{ 0x0B62, 0x0B63 },
-{ 0x0B82, 0x0B82 },
-{ 0x0BC0, 0x0BC0 },
-{ 0x0BCD, 0x0BCD },
-{ 0x0C00, 0x0C00 },
-{ 0x0C3E, 0x0C40 },
-{ 0x0C46, 0x0C48 },
-{ 0x0C4A, 0x0C4D },
-{ 0x0C55, 0x0C56 },
-{ 0x0C62, 0x0C63 },
-{ 0x0C81, 0x0C81 },
-{ 0x0CBC, 0x0CBC },
-{ 0x0CBF, 0x0CBF },
-{ 0x0CC6, 0x0CC6 },
-{ 0x0CCC, 0x0CCD },
-{ 0x0CE2, 0x0CE3 },
-{ 0x0D00, 0x0D01 },
-{ 0x0D3B, 0x0D3C },
-{ 0x0D41, 0x0D44 },
-{ 0x0D4D, 0x0D4D },
-{ 0x0D62, 0x0D63 },
-{ 0x0DCA, 0x0DCA },
-{ 0x0DD2, 0x0DD4 },
-{ 0x0DD6, 0x0DD6 },
-{ 0x0E31, 0x0E31 },
-{ 0x0E34, 0x0E3A },
-{ 0x0E47, 0x0E4E },
-{ 0x0EB1, 0x0EB1 },
-{ 0x0EB4, 0x0EB9 },
-{ 0x0EBB, 0x0EBC },
-{ 0x0EC8, 0x0ECD },
-{ 0x0F18, 0x0F19 },
-{ 0x0F35, 0x0F35 },
-{ 0x0F37, 0x0F37 },
-{ 0x0F39, 0x0F39 },
-{ 0x0F71, 0x0F7E },
-{ 0x0F80, 0x0F84 },
-{ 0x0F86, 0x0F87 },
-{ 0x0F8D, 0x0F97 },
-{ 0x0F99, 0x0FBC },
-{ 0x0FC6, 0x0FC6 },
-{ 0x102D, 0x1030 },
-{ 0x1032, 0x1037 },
-{ 0x1039, 0x103A },
-{ 0x103D, 0x103E },
-{ 0x1058, 0x1059 },
-{ 0x105E, 0x1060 },
-{ 0x1071, 0x1074 },
-{ 0x1082, 0x1082 },
-{ 0x1085, 0x1086 },
-{ 0x108D, 0x108D },
-{ 0x109D, 0x109D },
-{ 0x1160, 0x11FF },
-{ 0x135D, 0x135F },
-{ 0x1712, 0x1714 },
-{ 0x1732, 0x1734 },
-{ 0x1752, 0x1753 },
-{ 0x1772, 0x1773 },
-{ 0x17B4, 0x17B5 },
-{ 0x17B7, 0x17BD },
-{ 0x17C6, 0x17C6 },
-{ 0x17C9, 0x17D3 },
-{ 0x17DD, 0x17DD },
-{ 0x180B, 0x180E },
-{ 0x1885, 0x1886 },
-{ 0x18A9, 0x18A9 },
-{ 0x1920, 0x1922 },
-{ 0x1927, 0x1928 },
-{ 0x1932, 0x1932 },
-{ 0x1939, 0x193B },
-{ 0x1A17, 0x1A18 },
-{ 0x1A1B, 0x1A1B },
-{ 0x1A56, 0x1A56 },
-{ 0x1A58, 0x1A5E },
-{ 0x1A60, 0x1A60 },
-{ 0x1A62, 0x1A62 },
-{ 0x1A65, 0x1A6C },
-{ 0x1A73, 0x1A7C },
-{ 0x1A7F, 0x1A7F },
-{ 0x1AB0, 0x1ABE },
-{ 0x1B00, 0x1B03 },
-{ 0x1B34, 0x1B34 },
-{ 0x1B36, 0x1B3A },
-{ 0x1B3C, 0x1B3C },
-{ 0x1B42, 0x1B42 },
-{ 0x1B6B, 0x1B73 },
-{ 0x1B80, 0x1B81 },
-{ 0x1BA2, 0x1BA5 },
-{ 0x1BA8, 0x1BA9 },
-{ 0x1BAB, 0x1BAD },
-{ 0x1BE6, 0x1BE6 },
-{ 0x1BE8, 0x1BE9 },
-{ 0x1BED, 0x1BED },
-{ 0x1BEF, 0x1BF1 },
-{ 0x1C2C, 0x1C33 },
-{ 0x1C36, 0x1C37 },
-{ 0x1CD0, 0x1CD2 },
-{ 0x1CD4, 0x1CE0 },
-{ 0x1CE2, 0x1CE8 },
-{ 0x1CED, 0x1CED },
-{ 0x1CF4, 0x1CF4 },
-{ 0x1CF8, 0x1CF9 },
-{ 0x1DC0, 0x1DF9 },
-{ 0x1DFB, 0x1DFF },
-{ 0x200B, 0x200F },
-{ 0x202A, 0x202E },
-{ 0x2060, 0x2064 },
-{ 0x2066, 0x206F },
-{ 0x20D0, 0x20F0 },
-{ 0x2CEF, 0x2CF1 },
-{ 0x2D7F, 0x2D7F },
-{ 0x2DE0, 0x2DFF },
-{ 0x302A, 0x302D },
-{ 0x3099, 0x309A },
-{ 0xA66F, 0xA672 },
-{ 0xA674, 0xA67D },
-{ 0xA69E, 0xA69F },
-{ 0xA6F0, 0xA6F1 },
-{ 0xA802, 0xA802 },
-{ 0xA806, 0xA806 },
-{ 0xA80B, 0xA80B },
-{ 0xA825, 0xA826 },
-{ 0xA8C4, 0xA8C5 },
-{ 0xA8E0, 0xA8F1 },
-{ 0xA926, 0xA92D },
-{ 0xA947, 0xA951 },
-{ 0xA980, 0xA982 },
-{ 0xA9B3, 0xA9B3 },
-{ 0xA9B6, 0xA9B9 },
-{ 0xA9BC, 0xA9BC },
-{ 0xA9E5, 0xA9E5 },
-{ 0xAA29, 0xAA2E },
-{ 0xAA31, 0xAA32 },
-{ 0xAA35, 0xAA36 },
-{ 0xAA43, 0xAA43 },
-{ 0xAA4C, 0xAA4C },
-{ 0xAA7C, 0xAA7C },
-{ 0xAAB0, 0xAAB0 },
-{ 0xAAB2, 0xAAB4 },
-{ 0xAAB7, 0xAAB8 },
-{ 0xAABE, 0xAABF },
-{ 0xAAC1, 0xAAC1 },
-{ 0xAAEC, 0xAAED },
-{ 0xAAF6, 0xAAF6 },
-{ 0xABE5, 0xABE5 },
-{ 0xABE8, 0xABE8 },
-{ 0xABED, 0xABED },
-{ 0xFB1E, 0xFB1E },
-{ 0xFE00, 0xFE0F },
-{ 0xFE20, 0xFE2F },
-{ 0xFEFF, 0xFEFF },
-{ 0xFFF9, 0xFFFB },
-{ 0x101FD, 0x101FD },
-{ 0x102E0, 0x102E0 },
-{ 0x10376, 0x1037A },
-{ 0x10A01, 0x10A03 },
-{ 0x10A05, 0x10A06 },
-{ 0x10A0C, 0x10A0F },
-{ 0x10A38, 0x10A3A },
-{ 0x10A3F, 0x10A3F },
-{ 0x10AE5, 0x10AE6 },
-{ 0x11001, 0x11001 },
-{ 0x11038, 0x11046 },
-{ 0x1107F, 0x11081 },
-{ 0x110B3, 0x110B6 },
-{ 0x110B9, 0x110BA },
-{ 0x110BD, 0x110BD },
-{ 0x11100, 0x11102 },
-{ 0x11127, 0x1112B },
-{ 0x1112D, 0x11134 },
-{ 0x11173, 0x11173 },
-{ 0x11180, 0x11181 },
-{ 0x111B6, 0x111BE },
-{ 0x111CA, 0x111CC },
-{ 0x1122F, 0x11231 },
-{ 0x11234, 0x11234 },
-{ 0x11236, 0x11237 },
-{ 0x1123E, 0x1123E },
-{ 0x112DF, 0x112DF },
-{ 0x112E3, 0x112EA },
-{ 0x11300, 0x11301 },
-{ 0x1133C, 0x1133C },
-{ 0x11340, 0x11340 },
-{ 0x11366, 0x1136C },
-{ 0x11370, 0x11374 },
-{ 0x11438, 0x1143F },
-{ 0x11442, 0x11444 },
-{ 0x11446, 0x11446 },
-{ 0x114B3, 0x114B8 },
-{ 0x114BA, 0x114BA },
-{ 0x114BF, 0x114C0 },
-{ 0x114C2, 0x114C3 },
-{ 0x115B2, 0x115B5 },
-{ 0x115BC, 0x115BD },
-{ 0x115BF, 0x115C0 },
-{ 0x115DC, 0x115DD },
-{ 0x11633, 0x1163A },
-{ 0x1163D, 0x1163D },
-{ 0x1163F, 0x11640 },
-{ 0x116AB, 0x116AB },
-{ 0x116AD, 0x116AD },
-{ 0x116B0, 0x116B5 },
-{ 0x116B7, 0x116B7 },
-{ 0x1171D, 0x1171F },
-{ 0x11722, 0x11725 },
-{ 0x11727, 0x1172B },
-{ 0x11A01, 0x11A06 },
-{ 0x11A09, 0x11A0A },
-{ 0x11A33, 0x11A38 },
-{ 0x11A3B, 0x11A3E },
-{ 0x11A47, 0x11A47 },
-{ 0x11A51, 0x11A56 },
-{ 0x11A59, 0x11A5B },
-{ 0x11A8A, 0x11A96 },
-{ 0x11A98, 0x11A99 },
-{ 0x11C30, 0x11C36 },
-{ 0x11C38, 0x11C3D },
-{ 0x11C3F, 0x11C3F },
-{ 0x11C92, 0x11CA7 },
-{ 0x11CAA, 0x11CB0 },
-{ 0x11CB2, 0x11CB3 },
-{ 0x11CB5, 0x11CB6 },
-{ 0x11D31, 0x11D36 },
-{ 0x11D3A, 0x11D3A },
-{ 0x11D3C, 0x11D3D },
-{ 0x11D3F, 0x11D45 },
-{ 0x11D47, 0x11D47 },
-{ 0x16AF0, 0x16AF4 },
-{ 0x16B30, 0x16B36 },
-{ 0x16F8F, 0x16F92 },
-{ 0x1BC9D, 0x1BC9E },
-{ 0x1BCA0, 0x1BCA3 },
-{ 0x1D167, 0x1D169 },
-{ 0x1D173, 0x1D182 },
-{ 0x1D185, 0x1D18B },
-{ 0x1D1AA, 0x1D1AD },
-{ 0x1D242, 0x1D244 },
-{ 0x1DA00, 0x1DA36 },
-{ 0x1DA3B, 0x1DA6C },
-{ 0x1DA75, 0x1DA75 },
-{ 0x1DA84, 0x1DA84 },
-{ 0x1DA9B, 0x1DA9F },
-{ 0x1DAA1, 0x1DAAF },
-{ 0x1E000, 0x1E006 },
-{ 0x1E008, 0x1E018 },
-{ 0x1E01B, 0x1E021 },
-{ 0x1E023, 0x1E024 },
-{ 0x1E026, 0x1E02A },
-{ 0x1E8D0, 0x1E8D6 },
-{ 0x1E944, 0x1E94A },
-{ 0xE0001, 0xE0001 },
-{ 0xE0020, 0xE007F },
-{ 0xE0100, 0xE01EF }
-};
-static const struct interval double_width[] = {
-{ 0x1100, 0x115F },
-{ 0x231A, 0x231B },
-{ 0x2329, 0x232A },
-{ 0x23E9, 0x23EC },
-{ 0x23F0, 0x23F0 },
-{ 0x23F3, 0x23F3 },
-{ 0x25FD, 0x25FE },
-{ 0x2614, 0x2615 },
-{ 0x2648, 0x2653 },
-{ 0x267F, 0x267F },
-{ 0x2693, 0x2693 },
-{ 0x26A1, 0x26A1 },
-{ 0x26AA, 0x26AB },
-{ 0x26BD, 0x26BE },
-{ 0x26C4, 0x26C5 },
-{ 0x26CE, 0x26CE },
-{ 0x26D4, 0x26D4 },
-{ 0x26EA, 0x26EA },
-{ 0x26F2, 0x26F3 },
-{ 0x26F5, 0x26F5 },
-{ 0x26FA, 0x26FA },
-{ 0x26FD, 0x26FD },
-{ 0x2705, 0x2705 },
-{ 0x270A, 0x270B },
-{ 0x2728, 0x2728 },
-{ 0x274C, 0x274C },
-{ 0x274E, 0x274E },
-{ 0x2753, 0x2755 },
-{ 0x2757, 0x2757 },
-{ 0x2795, 0x2797 },
-{ 0x27B0, 0x27B0 },
-{ 0x27BF, 0x27BF },
-{ 0x2B1B, 0x2B1C },
-{ 0x2B50, 0x2B50 },
-{ 0x2B55, 0x2B55 },
-{ 0x2E80, 0x2E99 },
-{ 0x2E9B, 0x2EF3 },
-{ 0x2F00, 0x2FD5 },
-{ 0x2FF0, 0x2FFB },
-{ 0x3000, 0x303E },
-{ 0x3041, 0x3096 },
-{ 0x3099, 0x30FF },
-{ 0x3105, 0x312E },
-{ 0x3131, 0x318E },
-{ 0x3190, 0x31BA },
-{ 0x31C0, 0x31E3 },
-{ 0x31F0, 0x321E },
-{ 0x3220, 0x3247 },
-{ 0x3250, 0x32FE },
-{ 0x3300, 0x4DBF },
-{ 0x4E00, 0xA48C },
-{ 0xA490, 0xA4C6 },
-{ 0xA960, 0xA97C },
-{ 0xAC00, 0xD7A3 },
-{ 0xF900, 0xFAFF },
-{ 0xFE10, 0xFE19 },
-{ 0xFE30, 0xFE52 },
-{ 0xFE54, 0xFE66 },
-{ 0xFE68, 0xFE6B },
-{ 0xFF01, 0xFF60 },
-{ 0xFFE0, 0xFFE6 },
-{ 0x16FE0, 0x16FE1 },
-{ 0x17000, 0x187EC },
-{ 0x18800, 0x18AF2 },
-{ 0x1B000, 0x1B11E },
-{ 0x1B170, 0x1B2FB },
-{ 0x1F004, 0x1F004 },
-{ 0x1F0CF, 0x1F0CF },
-{ 0x1F18E, 0x1F18E },
-{ 0x1F191, 0x1F19A },
-{ 0x1F200, 0x1F202 },
-{ 0x1F210, 0x1F23B },
-{ 0x1F240, 0x1F248 },
-{ 0x1F250, 0x1F251 },
-{ 0x1F260, 0x1F265 },
-{ 0x1F300, 0x1F320 },
-{ 0x1F32D, 0x1F335 },
-{ 0x1F337, 0x1F37C },
-{ 0x1F37E, 0x1F393 },
-{ 0x1F3A0, 0x1F3CA },
-{ 0x1F3CF, 0x1F3D3 },
-{ 0x1F3E0, 0x1F3F0 },
-{ 0x1F3F4, 0x1F3F4 },
-{ 0x1F3F8, 0x1F43E },
-{ 0x1F440, 0x1F440 },
-{ 0x1F442, 0x1F4FC },
-{ 0x1F4FF, 0x1F53D },
-{ 0x1F54B, 0x1F54E },
-{ 0x1F550, 0x1F567 },
-{ 0x1F57A, 0x1F57A },
-{ 0x1F595, 0x1F596 },
-{ 0x1F5A4, 0x1F5A4 },
-{ 0x1F5FB, 0x1F64F },
-{ 0x1F680, 0x1F6C5 },
-{ 0x1F6CC, 0x1F6CC },
-{ 0x1F6D0, 0x1F6D2 },
-{ 0x1F6EB, 0x1F6EC },
-{ 0x1F6F4, 0x1F6F8 },
-{ 0x1F910, 0x1F93E },
-{ 0x1F940, 0x1F94C },
-{ 0x1F950, 0x1F96B },
-{ 0x1F980, 0x1F997 },
-{ 0x1F9C0, 0x1F9C0 },
-{ 0x1F9D0, 0x1F9E6 },
-{ 0x20000, 0x2FFFD },
-{ 0x30000, 0x3FFFD }
-};
index e73745051e505934b44be216b1d80a6c053c96de..0f01be60047fff034fed2280981e1ad70abf6ed9 100644 (file)
@@ -290,7 +290,7 @@ static void load_gitmodules_file(struct index_state *index,
                if (!state && ce->ce_flags & CE_WT_REMOVE) {
                        repo_read_gitmodules(the_repository);
                } else if (state && (ce->ce_flags & CE_UPDATE)) {
-                       submodule_free();
+                       submodule_free(the_repository);
                        checkout_entry(ce, state, NULL);
                        repo_read_gitmodules(the_repository);
                }
@@ -1284,9 +1284,20 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
        o->result.timestamp.sec = o->src_index->timestamp.sec;
        o->result.timestamp.nsec = o->src_index->timestamp.nsec;
        o->result.version = o->src_index->version;
-       o->result.split_index = o->src_index->split_index;
-       if (o->result.split_index)
+       if (!o->src_index->split_index) {
+               o->result.split_index = NULL;
+       } else if (o->src_index == o->dst_index) {
+               /*
+                * o->dst_index (and thus o->src_index) will be discarded
+                * and overwritten with o->result at the end of this function,
+                * so just use src_index's split_index to avoid having to
+                * create a new one.
+                */
+               o->result.split_index = o->src_index->split_index;
                o->result.split_index->refcount++;
+       } else {
+               o->result.split_index = init_split_index(&o->result);
+       }
        hashcpy(o->result.sha1, o->src_index->sha1);
        o->merge_size = len;
        mark_all_ce_unused(o->src_index);
@@ -1401,7 +1412,6 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
                }
        }
 
-       o->src_index = NULL;
        ret = check_updates(o) ? (-2) : 0;
        if (o->dst_index) {
                if (!ret) {
@@ -1412,12 +1422,13 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
                                                  WRITE_TREE_SILENT |
                                                  WRITE_TREE_REPAIR);
                }
-               move_index_extensions(&o->result, o->dst_index);
+               move_index_extensions(&o->result, o->src_index);
                discard_index(o->dst_index);
                *o->dst_index = o->result;
        } else {
                discard_index(&o->result);
        }
+       o->src_index = NULL;
 
 done:
        clear_exclude_list(&el);
@@ -1509,8 +1520,8 @@ static int verify_uptodate_1(const struct cache_entry *ce,
                add_rejected_path(o, error_type, ce->name);
 }
 
-static int verify_uptodate(const struct cache_entry *ce,
-                          struct unpack_trees_options *o)
+int verify_uptodate(const struct cache_entry *ce,
+                   struct unpack_trees_options *o)
 {
        if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
                return 0;
index 6c48117b845fbf7b983852be302e4472c5e6d651..41178ada94a4b7c5222cab7dd17d9eeb7a1956e4 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef UNPACK_TREES_H
 #define UNPACK_TREES_H
 
+#include "tree-walk.h"
 #include "string-list.h"
 
 #define MAX_UNPACK_TREES 8
@@ -78,6 +79,9 @@ struct unpack_trees_options {
 extern int unpack_trees(unsigned n, struct tree_desc *t,
                struct unpack_trees_options *options);
 
+int verify_uptodate(const struct cache_entry *ce,
+                   struct unpack_trees_options *o);
+
 int threeway_merge(const struct cache_entry * const *stages,
                   struct unpack_trees_options *o);
 int twoway_merge(const struct cache_entry * const *src,
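verify_uptodate() is exported here so callers outside unpack-trees.c can reuse the working-tree staleness check. A minimal, hypothetical caller sketch (ce and o stand for an existing cache entry and unpack options):

        /* Refuse to touch a path whose working-tree copy has local changes. */
        if (verify_uptodate(ce, o))
                return -1;
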
index 4a82602be5d0ab111a805a2f2456f382ae7a7364..87b4d32a6e23aed2416a9bb752dfa56b916b141c 100644 (file)
@@ -6,7 +6,6 @@
 #include "tag.h"
 #include "object.h"
 #include "commit.h"
-#include "exec_cmd.h"
 #include "diff.h"
 #include "revision.h"
 #include "list-objects.h"
 #include "sigchain.h"
 #include "version.h"
 #include "string-list.h"
-#include "parse-options.h"
 #include "argv-array.h"
 #include "prio-queue.h"
 #include "protocol.h"
 #include "quote.h"
-
-static const char * const upload_pack_usage[] = {
-       N_("git upload-pack [<options>] <dir>"),
-       NULL
-};
+#include "upload-pack.h"
+#include "serve.h"
 
 /* Remember to update object flag allocation in object.h */
 #define THEY_HAVE      (1u << 11)
@@ -64,7 +59,6 @@ static int keepalive = 5;
  * otherwise maximum packet size (up to 65520 bytes).
  */
 static int use_sideband;
-static int advertise_refs;
 static int stateless_rpc;
 static const char *pack_objects_hook;
 
@@ -734,7 +728,6 @@ static void deepen(int depth, int deepen_relative,
        }
 
        send_unshallow(shallows);
-       packet_flush(1);
 }
 
 static void deepen_by_rev_list(int ac, const char **av,
@@ -746,7 +739,122 @@ static void deepen_by_rev_list(int ac, const char **av,
        send_shallow(result);
        free_commit_list(result);
        send_unshallow(shallows);
-       packet_flush(1);
+}
+
+/* Returns 1 if a shallow list is sent or 0 otherwise */
+static int send_shallow_list(int depth, int deepen_rev_list,
+                            timestamp_t deepen_since,
+                            struct string_list *deepen_not,
+                            struct object_array *shallows)
+{
+       int ret = 0;
+
+       if (depth > 0 && deepen_rev_list)
+               die("git upload-pack: deepen and deepen-since (or deepen-not) cannot be used together");
+       if (depth > 0) {
+               deepen(depth, deepen_relative, shallows);
+               ret = 1;
+       } else if (deepen_rev_list) {
+               struct argv_array av = ARGV_ARRAY_INIT;
+               int i;
+
+               argv_array_push(&av, "rev-list");
+               if (deepen_since)
+                       argv_array_pushf(&av, "--max-age=%"PRItime, deepen_since);
+               if (deepen_not->nr) {
+                       argv_array_push(&av, "--not");
+                       for (i = 0; i < deepen_not->nr; i++) {
+                               struct string_list_item *s = deepen_not->items + i;
+                               argv_array_push(&av, s->string);
+                       }
+                       argv_array_push(&av, "--not");
+               }
+               for (i = 0; i < want_obj.nr; i++) {
+                       struct object *o = want_obj.objects[i].item;
+                       argv_array_push(&av, oid_to_hex(&o->oid));
+               }
+               deepen_by_rev_list(av.argc, av.argv, shallows);
+               argv_array_clear(&av);
+               ret = 1;
+       } else {
+               if (shallows->nr > 0) {
+                       int i;
+                       for (i = 0; i < shallows->nr; i++)
+                               register_shallow(&shallows->objects[i].item->oid);
+               }
+       }
+
+       shallow_nr += shallows->nr;
+       return ret;
+}
+
+static int process_shallow(const char *line, struct object_array *shallows)
+{
+       const char *arg;
+       if (skip_prefix(line, "shallow ", &arg)) {
+               struct object_id oid;
+               struct object *object;
+               if (get_oid_hex(arg, &oid))
+                       die("invalid shallow line: %s", line);
+               object = parse_object(&oid);
+               if (!object)
+                       return 1;
+               if (object->type != OBJ_COMMIT)
+                       die("invalid shallow object %s", oid_to_hex(&oid));
+               if (!(object->flags & CLIENT_SHALLOW)) {
+                       object->flags |= CLIENT_SHALLOW;
+                       add_object_array(object, NULL, shallows);
+               }
+               return 1;
+       }
+
+       return 0;
+}
+
+static int process_deepen(const char *line, int *depth)
+{
+       const char *arg;
+       if (skip_prefix(line, "deepen ", &arg)) {
+               char *end = NULL;
+               *depth = (int)strtol(arg, &end, 0);
+               if (!end || *end || *depth <= 0)
+                       die("Invalid deepen: %s", line);
+               return 1;
+       }
+
+       return 0;
+}
+
+static int process_deepen_since(const char *line, timestamp_t *deepen_since, int *deepen_rev_list)
+{
+       const char *arg;
+       if (skip_prefix(line, "deepen-since ", &arg)) {
+               char *end = NULL;
+               *deepen_since = parse_timestamp(arg, &end, 0);
+               if (!end || *end || !*deepen_since ||
+                   /* revisions.c's max_age -1 is special */
+                   *deepen_since == -1)
+                       die("Invalid deepen-since: %s", line);
+               *deepen_rev_list = 1;
+               return 1;
+       }
+       return 0;
+}
+
+static int process_deepen_not(const char *line, struct string_list *deepen_not, int *deepen_rev_list)
+{
+       const char *arg;
+       if (skip_prefix(line, "deepen-not ", &arg)) {
+               char *ref = NULL;
+               struct object_id oid;
+               if (expand_ref(arg, strlen(arg), &oid, &ref) != 1)
+                       die("git upload-pack: ambiguous deepen-not: %s", line);
+               string_list_append(deepen_not, ref);
+               free(ref);
+               *deepen_rev_list = 1;
+               return 1;
+       }
+       return 0;
 }
 
 static void receive_needs(void)
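The four helpers added above each recognize one kind of request line. A sketch of the payloads they accept, with placeholder values (the object name below is not a real commit and the ref may not exist):

        /* Inside upload-pack.c, after the helpers above; all values are placeholders. */
        int depth = 0, deepen_rev_list = 0;
        timestamp_t deepen_since = 0;
        struct string_list deepen_not = STRING_LIST_INIT_DUP;
        struct object_array shallows = OBJECT_ARRAY_INIT;

        process_shallow("shallow 1234567890123456789012345678901234567890", &shallows);
        process_deepen("deepen 3", &depth);                   /* depth becomes 3 */
        process_deepen_since("deepen-since 1520000000", &deepen_since, &deepen_rev_list);
        process_deepen_not("deepen-not refs/heads/maint", &deepen_not, &deepen_rev_list);
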
@@ -770,55 +878,22 @@ static void receive_needs(void)
                if (!line)
                        break;
 
-               if (skip_prefix(line, "shallow ", &arg)) {
-                       struct object_id oid;
-                       struct object *object;
-                       if (get_oid_hex(arg, &oid))
-                               die("invalid shallow line: %s", line);
-                       object = parse_object(&oid);
-                       if (!object)
-                               continue;
-                       if (object->type != OBJ_COMMIT)
-                               die("invalid shallow object %s", oid_to_hex(&oid));
-                       if (!(object->flags & CLIENT_SHALLOW)) {
-                               object->flags |= CLIENT_SHALLOW;
-                               add_object_array(object, NULL, &shallows);
-                       }
+               if (process_shallow(line, &shallows))
                        continue;
-               }
-               if (skip_prefix(line, "deepen ", &arg)) {
-                       char *end = NULL;
-                       depth = strtol(arg, &end, 0);
-                       if (!end || *end || depth <= 0)
-                               die("Invalid deepen: %s", line);
+               if (process_deepen(line, &depth))
                        continue;
-               }
-               if (skip_prefix(line, "deepen-since ", &arg)) {
-                       char *end = NULL;
-                       deepen_since = parse_timestamp(arg, &end, 0);
-                       if (!end || *end || !deepen_since ||
-                           /* revisions.c's max_age -1 is special */
-                           deepen_since == -1)
-                               die("Invalid deepen-since: %s", line);
-                       deepen_rev_list = 1;
+               if (process_deepen_since(line, &deepen_since, &deepen_rev_list))
                        continue;
-               }
-               if (skip_prefix(line, "deepen-not ", &arg)) {
-                       char *ref = NULL;
-                       struct object_id oid;
-                       if (expand_ref(arg, strlen(arg), &oid, &ref) != 1)
-                               die("git upload-pack: ambiguous deepen-not: %s", line);
-                       string_list_append(&deepen_not, ref);
-                       free(ref);
-                       deepen_rev_list = 1;
+               if (process_deepen_not(line, &deepen_not, &deepen_rev_list))
                        continue;
-               }
+
                if (skip_prefix(line, "filter ", &arg)) {
                        if (!filter_capability_requested)
                                die("git upload-pack: filtering capability not negotiated");
                        parse_list_objects_filter(&filter_options, arg);
                        continue;
                }
+
                if (!skip_prefix(line, "want ", &arg) ||
                    get_oid_hex(arg, &oid_buf))
                        die("git upload-pack: protocol error, "
@@ -881,40 +956,10 @@ static void receive_needs(void)
 
        if (depth == 0 && !deepen_rev_list && shallows.nr == 0)
                return;
-       if (depth > 0 && deepen_rev_list)
-               die("git upload-pack: deepen and deepen-since (or deepen-not) cannot be used together");
-       if (depth > 0)
-               deepen(depth, deepen_relative, &shallows);
-       else if (deepen_rev_list) {
-               struct argv_array av = ARGV_ARRAY_INIT;
-               int i;
 
-               argv_array_push(&av, "rev-list");
-               if (deepen_since)
-                       argv_array_pushf(&av, "--max-age=%"PRItime, deepen_since);
-               if (deepen_not.nr) {
-                       argv_array_push(&av, "--not");
-                       for (i = 0; i < deepen_not.nr; i++) {
-                               struct string_list_item *s = deepen_not.items + i;
-                               argv_array_push(&av, s->string);
-                       }
-                       argv_array_push(&av, "--not");
-               }
-               for (i = 0; i < want_obj.nr; i++) {
-                       struct object *o = want_obj.objects[i].item;
-                       argv_array_push(&av, oid_to_hex(&o->oid));
-               }
-               deepen_by_rev_list(av.argc, av.argv, &shallows);
-               argv_array_clear(&av);
-       }
-       else
-               if (shallows.nr > 0) {
-                       int i;
-                       for (i = 0; i < shallows.nr; i++)
-                               register_shallow(&shallows.objects[i].item->oid);
-               }
-
-       shallow_nr += shallows.nr;
+       if (send_shallow_list(depth, deepen_rev_list, deepen_since,
+                             &deepen_not, &shallows))
+               packet_flush(1);
        object_array_clear(&shallows);
 }
 
@@ -1004,33 +1049,6 @@ static int find_symref(const char *refname, const struct object_id *oid,
        return 0;
 }
 
-static void upload_pack(void)
-{
-       struct string_list symref = STRING_LIST_INIT_DUP;
-
-       head_ref_namespaced(find_symref, &symref);
-
-       if (advertise_refs || !stateless_rpc) {
-               reset_timeout();
-               head_ref_namespaced(send_ref, &symref);
-               for_each_namespaced_ref(send_ref, &symref);
-               advertise_shallow_grafts(1);
-               packet_flush(1);
-       } else {
-               head_ref_namespaced(check_ref, NULL);
-               for_each_namespaced_ref(check_ref, NULL);
-       }
-       string_list_clear(&symref, 1);
-       if (advertise_refs)
-               return;
-
-       receive_needs();
-       if (want_obj.nr) {
-               get_common_commits();
-               create_pack_file();
-       }
-}
-
 static int upload_pack_config(const char *var, const char *value, void *unused)
 {
        if (!strcmp("uploadpack.allowtipsha1inwant", var)) {
@@ -1061,58 +1079,356 @@ static int upload_pack_config(const char *var, const char *value, void *unused)
        return parse_hide_refs_config(var, value, "uploadpack");
 }
 
-int cmd_main(int argc, const char **argv)
+void upload_pack(struct upload_pack_options *options)
 {
-       const char *dir;
-       int strict = 0;
-       struct option options[] = {
-               OPT_BOOL(0, "stateless-rpc", &stateless_rpc,
-                        N_("quit after a single request/response exchange")),
-               OPT_BOOL(0, "advertise-refs", &advertise_refs,
-                        N_("exit immediately after initial ref advertisement")),
-               OPT_BOOL(0, "strict", &strict,
-                        N_("do not try <directory>/.git/ if <directory> is no Git directory")),
-               OPT_INTEGER(0, "timeout", &timeout,
-                           N_("interrupt transfer after <n> seconds of inactivity")),
-               OPT_END()
-       };
+       struct string_list symref = STRING_LIST_INIT_DUP;
 
-       packet_trace_identity("upload-pack");
-       check_replace_refs = 0;
+       stateless_rpc = options->stateless_rpc;
+       timeout = options->timeout;
+       daemon_mode = options->daemon_mode;
 
-       argc = parse_options(argc, argv, NULL, options, upload_pack_usage, 0);
+       git_config(upload_pack_config, NULL);
 
-       if (argc != 1)
-               usage_with_options(upload_pack_usage, options);
+       head_ref_namespaced(find_symref, &symref);
 
-       if (timeout)
-               daemon_mode = 1;
+       if (options->advertise_refs || !stateless_rpc) {
+               reset_timeout();
+               head_ref_namespaced(send_ref, &symref);
+               for_each_namespaced_ref(send_ref, &symref);
+               advertise_shallow_grafts(1);
+               packet_flush(1);
+       } else {
+               head_ref_namespaced(check_ref, NULL);
+               for_each_namespaced_ref(check_ref, NULL);
+       }
+       string_list_clear(&symref, 1);
+       if (options->advertise_refs)
+               return;
 
-       setup_path();
+       receive_needs();
+       if (want_obj.nr) {
+               get_common_commits();
+               create_pack_file();
+       }
+}
 
-       dir = argv[0];
+struct upload_pack_data {
+       struct object_array wants;
+       struct oid_array haves;
 
-       if (!enter_repo(dir, strict))
-               die("'%s' does not appear to be a git repository", dir);
+       struct object_array shallows;
+       struct string_list deepen_not;
+       int depth;
+       timestamp_t deepen_since;
+       int deepen_rev_list;
+       int deepen_relative;
 
-       git_config(upload_pack_config, NULL);
+       unsigned stateless_rpc : 1;
 
-       switch (determine_protocol_version_server()) {
-       case protocol_v1:
-               /*
-                * v1 is just the original protocol with a version string,
-                * so just fall through after writing the version string.
-                */
-               if (advertise_refs || !stateless_rpc)
-                       packet_write_fmt(1, "version 1\n");
-
-               /* fallthrough */
-       case protocol_v0:
-               upload_pack();
-               break;
-       case protocol_unknown_version:
-               BUG("unknown protocol version");
+       unsigned use_thin_pack : 1;
+       unsigned use_ofs_delta : 1;
+       unsigned no_progress : 1;
+       unsigned use_include_tag : 1;
+       unsigned done : 1;
+};
+
+static void upload_pack_data_init(struct upload_pack_data *data)
+{
+       struct object_array wants = OBJECT_ARRAY_INIT;
+       struct oid_array haves = OID_ARRAY_INIT;
+       struct object_array shallows = OBJECT_ARRAY_INIT;
+       struct string_list deepen_not = STRING_LIST_INIT_DUP;
+
+       memset(data, 0, sizeof(*data));
+       data->wants = wants;
+       data->haves = haves;
+       data->shallows = shallows;
+       data->deepen_not = deepen_not;
+}
+
+static void upload_pack_data_clear(struct upload_pack_data *data)
+{
+       object_array_clear(&data->wants);
+       oid_array_clear(&data->haves);
+       object_array_clear(&data->shallows);
+       string_list_clear(&data->deepen_not, 0);
+}
+
+static int parse_want(const char *line)
+{
+       const char *arg;
+       if (skip_prefix(line, "want ", &arg)) {
+               struct object_id oid;
+               struct object *o;
+
+               if (get_oid_hex(arg, &oid))
+                       die("git upload-pack: protocol error, "
+                           "expected to get oid, not '%s'", line);
+
+               o = parse_object(&oid);
+               if (!o) {
+                       packet_write_fmt(1,
+                                        "ERR upload-pack: not our ref %s",
+                                        oid_to_hex(&oid));
+                       die("git upload-pack: not our ref %s",
+                           oid_to_hex(&oid));
+               }
+
+               if (!(o->flags & WANTED)) {
+                       o->flags |= WANTED;
+                       add_object_array(o, NULL, &want_obj);
+               }
+
+               return 1;
+       }
+
+       return 0;
+}
+
+static int parse_have(const char *line, struct oid_array *haves)
+{
+       const char *arg;
+       if (skip_prefix(line, "have ", &arg)) {
+               struct object_id oid;
+
+               if (get_oid_hex(arg, &oid))
+                       die("git upload-pack: expected SHA1 object, got '%s'", arg);
+               oid_array_append(haves, &oid);
+               return 1;
        }
 
        return 0;
 }
+
+static void process_args(struct packet_reader *request,
+                        struct upload_pack_data *data)
+{
+       while (packet_reader_read(request) != PACKET_READ_FLUSH) {
+               const char *arg = request->line;
+
+               /* process want */
+               if (parse_want(arg))
+                       continue;
+               /* process have line */
+               if (parse_have(arg, &data->haves))
+                       continue;
+
+               /* process args like thin-pack */
+               if (!strcmp(arg, "thin-pack")) {
+                       use_thin_pack = 1;
+                       continue;
+               }
+               if (!strcmp(arg, "ofs-delta")) {
+                       use_ofs_delta = 1;
+                       continue;
+               }
+               if (!strcmp(arg, "no-progress")) {
+                       no_progress = 1;
+                       continue;
+               }
+               if (!strcmp(arg, "include-tag")) {
+                       use_include_tag = 1;
+                       continue;
+               }
+               if (!strcmp(arg, "done")) {
+                       data->done = 1;
+                       continue;
+               }
+
+               /* Shallow related arguments */
+               if (process_shallow(arg, &data->shallows))
+                       continue;
+               if (process_deepen(arg, &data->depth))
+                       continue;
+               if (process_deepen_since(arg, &data->deepen_since,
+                                        &data->deepen_rev_list))
+                       continue;
+               if (process_deepen_not(arg, &data->deepen_not,
+                                      &data->deepen_rev_list))
+                       continue;
+               if (!strcmp(arg, "deepen-relative")) {
+                       data->deepen_relative = 1;
+                       continue;
+               }
+
+               /* maybe these should be ignored, but for now treat unknown lines as errors */
+               die("unexpected line: '%s'", arg);
+       }
+}
+
+static int process_haves(struct oid_array *haves, struct oid_array *common)
+{
+       int i;
+
+       /* Process haves */
+       for (i = 0; i < haves->nr; i++) {
+               const struct object_id *oid = &haves->oid[i];
+               struct object *o;
+               int we_knew_they_have = 0;
+
+               if (!has_object_file(oid))
+                       continue;
+
+               oid_array_append(common, oid);
+
+               o = parse_object(oid);
+               if (!o)
+                       die("oops (%s)", oid_to_hex(oid));
+               if (o->type == OBJ_COMMIT) {
+                       struct commit_list *parents;
+                       struct commit *commit = (struct commit *)o;
+                       if (o->flags & THEY_HAVE)
+                               we_knew_they_have = 1;
+                       else
+                               o->flags |= THEY_HAVE;
+                       if (!oldest_have || (commit->date < oldest_have))
+                               oldest_have = commit->date;
+                       for (parents = commit->parents;
+                            parents;
+                            parents = parents->next)
+                               parents->item->object.flags |= THEY_HAVE;
+               }
+               if (!we_knew_they_have)
+                       add_object_array(o, NULL, &have_obj);
+       }
+
+       return 0;
+}
+
+static int send_acks(struct oid_array *acks, struct strbuf *response)
+{
+       int i;
+
+       packet_buf_write(response, "acknowledgments\n");
+
+       /* Send Acks */
+       if (!acks->nr)
+               packet_buf_write(response, "NAK\n");
+
+       for (i = 0; i < acks->nr; i++) {
+               packet_buf_write(response, "ACK %s\n",
+                                oid_to_hex(&acks->oid[i]));
+       }
+
+       if (ok_to_give_up()) {
+               /* Send Ready */
+               packet_buf_write(response, "ready\n");
+               return 1;
+       }
+
+       return 0;
+}
+
+static int process_haves_and_send_acks(struct upload_pack_data *data)
+{
+       struct oid_array common = OID_ARRAY_INIT;
+       struct strbuf response = STRBUF_INIT;
+       int ret = 0;
+
+       process_haves(&data->haves, &common);
+       if (data->done) {
+               ret = 1;
+       } else if (send_acks(&common, &response)) {
+               packet_buf_delim(&response);
+               ret = 1;
+       } else {
+               /* Add Flush */
+               packet_buf_flush(&response);
+               ret = 0;
+       }
+
+       /* Send response */
+       write_or_die(1, response.buf, response.len);
+       strbuf_release(&response);
+
+       oid_array_clear(&data->haves);
+       oid_array_clear(&common);
+       return ret;
+}
+
+static void send_shallow_info(struct upload_pack_data *data)
+{
+       /* No shallow info needs to be sent */
+       if (!data->depth && !data->deepen_rev_list && !data->shallows.nr &&
+           !is_repository_shallow())
+               return;
+
+       packet_write_fmt(1, "shallow-info\n");
+
+       if (!send_shallow_list(data->depth, data->deepen_rev_list,
+                              data->deepen_since, &data->deepen_not,
+                              &data->shallows) && is_repository_shallow())
+               deepen(INFINITE_DEPTH, data->deepen_relative, &data->shallows);
+
+       packet_delim(1);
+}
+
+enum fetch_state {
+       FETCH_PROCESS_ARGS = 0,
+       FETCH_SEND_ACKS,
+       FETCH_SEND_PACK,
+       FETCH_DONE,
+};
+
+int upload_pack_v2(struct repository *r, struct argv_array *keys,
+                  struct packet_reader *request)
+{
+       enum fetch_state state = FETCH_PROCESS_ARGS;
+       struct upload_pack_data data;
+
+       upload_pack_data_init(&data);
+       use_sideband = LARGE_PACKET_MAX;
+
+       while (state != FETCH_DONE) {
+               switch (state) {
+               case FETCH_PROCESS_ARGS:
+                       process_args(request, &data);
+
+                       if (!want_obj.nr) {
+                               /*
+                                * Request didn't contain any 'want' lines,
+                                * guess they didn't want anything.
+                                */
+                               state = FETCH_DONE;
+                       } else if (data.haves.nr) {
+                               /*
+                                * Request had 'have' lines, so let's ACK them.
+                                */
+                               state = FETCH_SEND_ACKS;
+                       } else {
+                               /*
+                                * Request had 'want's but no 'have's, so we can
+                                * immediately construct and send a pack.
+                                */
+                               state = FETCH_SEND_PACK;
+                       }
+                       break;
+               case FETCH_SEND_ACKS:
+                       if (process_haves_and_send_acks(&data))
+                               state = FETCH_SEND_PACK;
+                       else
+                               state = FETCH_DONE;
+                       break;
+               case FETCH_SEND_PACK:
+                       send_shallow_info(&data);
+
+                       packet_write_fmt(1, "packfile\n");
+                       create_pack_file();
+                       state = FETCH_DONE;
+                       break;
+               case FETCH_DONE:
+                       continue;
+               }
+       }
+
+       upload_pack_data_clear(&data);
+       return 0;
+}
+
+int upload_pack_advertise(struct repository *r,
+                         struct strbuf *value)
+{
+       if (value)
+               strbuf_addstr(value, "shallow");
+       return 1;
+}
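
The new upload_pack_v2() above drives the protocol v2 "fetch" command as a small state machine: arguments are parsed, 'have's are acknowledged until a common cut point is found (or the client says "done"), and only then is a pack streamed. Roughly, one round of the exchange looks like the sketch below (an illustrative pkt-line transcript, not literal program output; object ids are abbreviated and the optional shallow-info section is left out):

    client: command=fetch
    client: 0001                      (delimiter packet)
    client: thin-pack
    client: ofs-delta
    client: want <oid>
    client: have <oid>
    client: 0000                      (flush packet)
    server: acknowledgments
    server: ACK <oid>                 (or NAK when nothing is in common)
    server: ready                     (only once ok_to_give_up() is satisfied)
    server: 0001
    server: packfile
    server: <pack data on the sideband>
    server: 0000

When the request carried "done", the acknowledgments section is skipped and the server goes straight to the packfile section; when no cut point has been found yet, the acknowledgments section ends with a flush instead of "ready" and the client is expected to issue another request.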
diff --git a/upload-pack.h b/upload-pack.h
new file mode 100644 (file)
index 0000000..cab2178
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef UPLOAD_PACK_H
+#define UPLOAD_PACK_H
+
+struct upload_pack_options {
+       int stateless_rpc;
+       int advertise_refs;
+       unsigned int timeout;
+       int daemon_mode;
+};
+
+void upload_pack(struct upload_pack_options *options);
+
+struct repository;
+struct argv_array;
+struct packet_reader;
+extern int upload_pack_v2(struct repository *r, struct argv_array *keys,
+                         struct packet_reader *request);
+
+struct strbuf;
+extern int upload_pack_advertise(struct repository *r,
+                                struct strbuf *value);
+
+#endif /* UPLOAD_PACK_H */
diff --git a/utf8.c b/utf8.c
index 2c27ce0137f8a60ca2fadf855f2c67738931e2f8..0fcc6487e3d8b4a4af81c92148fb4edb9574f524 100644 (file)
--- a/utf8.c
+++ b/utf8.c
@@ -81,7 +81,7 @@ static int git_wcwidth(ucs_char_t ch)
        /*
         * Sorted list of non-overlapping intervals of non-spacing characters,
         */
-#include "unicode_width.h"
+#include "unicode-width.h"
 
        /* test for 8-bit control characters */
        if (ch == 0)
@@ -401,18 +401,40 @@ void strbuf_utf8_replace(struct strbuf *sb_src, int pos, int width,
        strbuf_release(&sb_dst);
 }
 
+/*
+ * Returns true (1) if the src encoding name matches the dst encoding
+ * name directly or one of its alternative names. E.g. UTF-16BE is the
+ * same as UTF16BE.
+ */
+static int same_utf_encoding(const char *src, const char *dst)
+{
+       if (istarts_with(src, "utf") && istarts_with(dst, "utf")) {
+               /* src[3] or dst[3] might be '\0' */
+               int i = (src[3] == '-' ? 4 : 3);
+               int j = (dst[3] == '-' ? 4 : 3);
+               return !strcasecmp(src+i, dst+j);
+       }
+       return 0;
+}
+
 int is_encoding_utf8(const char *name)
 {
        if (!name)
                return 1;
-       if (!strcasecmp(name, "utf-8") || !strcasecmp(name, "utf8"))
+       if (same_utf_encoding("utf-8", name))
                return 1;
        return 0;
 }
 
 int same_encoding(const char *src, const char *dst)
 {
-       if (is_encoding_utf8(src) && is_encoding_utf8(dst))
+       static const char utf8[] = "UTF-8";
+
+       if (!src)
+               src = utf8;
+       if (!dst)
+               dst = utf8;
+       if (same_utf_encoding(src, dst))
                return 1;
        return !strcasecmp(src, dst);
 }
@@ -538,6 +560,45 @@ char *reencode_string_len(const char *in, int insz,
 }
 #endif
 
+static int has_bom_prefix(const char *data, size_t len,
+                         const char *bom, size_t bom_len)
+{
+       return data && bom && (len >= bom_len) && !memcmp(data, bom, bom_len);
+}
+
+static const char utf16_be_bom[] = {0xFE, 0xFF};
+static const char utf16_le_bom[] = {0xFF, 0xFE};
+static const char utf32_be_bom[] = {0x00, 0x00, 0xFE, 0xFF};
+static const char utf32_le_bom[] = {0xFF, 0xFE, 0x00, 0x00};
+
+int has_prohibited_utf_bom(const char *enc, const char *data, size_t len)
+{
+       return (
+         (same_utf_encoding("UTF-16BE", enc) ||
+          same_utf_encoding("UTF-16LE", enc)) &&
+         (has_bom_prefix(data, len, utf16_be_bom, sizeof(utf16_be_bom)) ||
+          has_bom_prefix(data, len, utf16_le_bom, sizeof(utf16_le_bom)))
+       ) || (
+         (same_utf_encoding("UTF-32BE", enc) ||
+          same_utf_encoding("UTF-32LE", enc)) &&
+         (has_bom_prefix(data, len, utf32_be_bom, sizeof(utf32_be_bom)) ||
+          has_bom_prefix(data, len, utf32_le_bom, sizeof(utf32_le_bom)))
+       );
+}
+
+int is_missing_required_utf_bom(const char *enc, const char *data, size_t len)
+{
+       return (
+          (same_utf_encoding(enc, "UTF-16")) &&
+          !(has_bom_prefix(data, len, utf16_be_bom, sizeof(utf16_be_bom)) ||
+            has_bom_prefix(data, len, utf16_le_bom, sizeof(utf16_le_bom)))
+       ) || (
+          (same_utf_encoding(enc, "UTF-32")) &&
+          !(has_bom_prefix(data, len, utf32_be_bom, sizeof(utf32_be_bom)) ||
+            has_bom_prefix(data, len, utf32_le_bom, sizeof(utf32_le_bom)))
+       );
+}
+
 /*
  * Returns first character length in bytes for multi-byte `text` according to
  * `encoding`.
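
The practical effect of the new same_utf_encoding() helper is that the dash after "UTF" becomes optional and the comparison is case-insensitive, which in turn widens what is_encoding_utf8() and same_encoding() accept. A minimal sketch of the intended equivalences (hypothetical call sites, assuming the declarations from utf8.h in this tree):

    is_encoding_utf8("UTF8");                 /* 1: dash is optional            */
    same_encoding("UTF-16BE", "utf16be");     /* 1: case and dash are ignored   */
    same_encoding("UTF-16", "UTF-16LE");      /* 0: the suffix must still match */
    same_encoding("Shift_JIS", "shift_jis");  /* 1: plain strcasecmp() fallback */
    same_encoding(NULL, "utf-8");             /* 1: NULL still defaults to UTF-8 */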
diff --git a/utf8.h b/utf8.h
index 6bbcf31a831d60faf119fdc3f82f1eb10233e255..cce654a64a2012bd8a6c9d1ba2070500b5b3ba8a 100644 (file)
--- a/utf8.h
+++ b/utf8.h
@@ -70,4 +70,32 @@ typedef enum {
 void strbuf_utf8_align(struct strbuf *buf, align_type position, unsigned int width,
                       const char *s);
 
+/*
+ * If a data stream is declared as UTF-16BE or UTF-16LE, then a UTF-16
+ * BOM must not be used [1]. The same applies for the UTF-32 equivalents.
+ * The function returns true if this rule is violated.
+ *
+ * [1] http://unicode.org/faq/utf_bom.html#bom10
+ */
+int has_prohibited_utf_bom(const char *enc, const char *data, size_t len);
+
+/*
+ * If the endianness is not defined in the encoding name, then we
+ * require a BOM. The function returns true if a required BOM is missing.
+ *
+ * The Unicode standard instructs to assume big-endian if there is no
+ * BOM for UTF-16/32 [1][2]. However, the W3C/WHATWG encoding standard
+ * used in HTML5 recommends assuming little-endian to "deal with
+ * deployed content" [3].
+ *
+ * Therefore, strictly requiring a BOM seems to be the safest option for
+ * content in Git.
+ *
+ * [1] http://unicode.org/faq/utf_bom.html#gen6
+ * [2] http://www.unicode.org/versions/Unicode10.0.0/ch03.pdf
+ *     Section 3.10, D98, page 132
+ * [3] https://encoding.spec.whatwg.org/#utf-16le
+ */
+int is_missing_required_utf_bom(const char *enc, const char *data, size_t len);
+
 #endif
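
Taken together, the two helpers let a caller reject content whose byte order mark contradicts, or is missing from, the declared encoding before attempting to re-encode it. A minimal sketch of such a caller (check_declared_encoding() is a hypothetical name, not a function added by this patch):

    static void check_declared_encoding(const char *enc,
                                        const char *data, size_t len)
    {
            if (has_prohibited_utf_bom(enc, data, len))
                    die("BOM is prohibited for '%s'-encoded content", enc);
            if (is_missing_required_utf_bom(enc, data, len))
                    die("BOM is required for '%s'-encoded content", enc);
    }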
index dffb9c8e37c220e71e108060dc5a81bc21f8370c..0b162a09b95a3eadeef47f3e6d314d386215cc9f 100644 (file)
--- a/walker.c
+++ b/walker.c
@@ -72,6 +72,8 @@ static struct commit_list *complete = NULL;
 
 static int process_commit(struct walker *walker, struct commit *commit)
 {
+       struct commit_list *parents;
+
        if (parse_commit(commit))
                return -1;
 
@@ -86,19 +88,14 @@ static int process_commit(struct walker *walker, struct commit *commit)
 
        walker_say(walker, "walk %s\n", oid_to_hex(&commit->object.oid));
 
-       if (walker->get_tree) {
-               if (process(walker, &commit->tree->object))
+       if (process(walker, &get_commit_tree(commit)->object))
+               return -1;
+
+       for (parents = commit->parents; parents; parents = parents->next) {
+               if (process(walker, &parents->item->object))
                        return -1;
-               if (!walker->get_all)
-                       walker->get_tree = 0;
-       }
-       if (walker->get_history) {
-               struct commit_list *parents = commit->parents;
-               for (; parents; parents = parents->next) {
-                       if (process(walker, &parents->item->object))
-                               return -1;
-               }
        }
+
        return 0;
 }
 
index a869013e85110a0b64d8fe344c8bd67f5e8f7f09..6d8ae00e5b995f6565fab8b617e4629788c3c0c7 100644 (file)
--- a/walker.h
+++ b/walker.h
@@ -9,9 +9,6 @@ struct walker {
        void (*prefetch)(struct walker *, unsigned char *sha1);
        int (*fetch)(struct walker *, unsigned char *sha1);
        void (*cleanup)(struct walker *);
-       int get_tree;
-       int get_history;
-       int get_all;
        int get_verbosely;
        int get_recover;
 
index 5842408817aa7e5c584f244a7625cf458882d568..95851b85b6b7181130f0cd441c2bd7ac0bfb89da 100644 (file)
@@ -20,10 +20,17 @@ PATH='@@BUILD_DIR@@/bin-wrappers:'"$PATH"
 
 export GIT_EXEC_PATH GITPERLLIB PATH GIT_TEXTDOMAINDIR
 
-if test -n "$GIT_TEST_GDB"
-then
-       unset GIT_TEST_GDB
-       exec gdb --args "${GIT_EXEC_PATH}/@@PROG@@" "$@"
-else
+case "$GIT_DEBUGGER" in
+'')
        exec "${GIT_EXEC_PATH}/@@PROG@@" "$@"
-fi
+       ;;
+1)
+       unset GIT_DEBUGGER
+       exec gdb --args "${GIT_EXEC_PATH}/@@PROG@@" "$@"
+       ;;
+*)
+       GIT_DEBUGGER_ARGS="$GIT_DEBUGGER"
+       unset GIT_DEBUGGER
+       exec ${GIT_DEBUGGER_ARGS} "${GIT_EXEC_PATH}/@@PROG@@" "$@"
+       ;;
+esac
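
This generalizes the old GIT_TEST_GDB switch: GIT_DEBUGGER=1 keeps the previous behaviour of launching the wrapped program under gdb, while any other non-empty value, for instance GIT_DEBUGGER="valgrind --leak-check=full" (an illustrative choice, not something the script prescribes), is used verbatim as the command prefix in front of the real binary.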
diff --git a/write-or-die.c b/write-or-die.c
new file mode 100644 (file)
index 0000000..eab8c8d
--- /dev/null
@@ -0,0 +1,71 @@
+#include "cache.h"
+#include "run-command.h"
+
+/*
+ * Some cases use stdio, but want to flush after the write
+ * to get error handling (and to get better interactive
+ * behaviour - not buffering excessively).
+ *
+ * Of course, if the flush happened within the write itself,
+ * we've already lost the error code, and cannot report it any
+ * more. So we just ignore that case instead (and hope we get
+ * the right error code on the flush).
+ *
+ * If the file handle is stdout, and stdout is a file, then skip the
+ * flush entirely since it's not needed.
+ */
+void maybe_flush_or_die(FILE *f, const char *desc)
+{
+       static int skip_stdout_flush = -1;
+       struct stat st;
+       char *cp;
+
+       if (f == stdout) {
+               if (skip_stdout_flush < 0) {
+                       cp = getenv("GIT_FLUSH");
+                       if (cp)
+                               skip_stdout_flush = (atoi(cp) == 0);
+                       else if ((fstat(fileno(stdout), &st) == 0) &&
+                                S_ISREG(st.st_mode))
+                               skip_stdout_flush = 1;
+                       else
+                               skip_stdout_flush = 0;
+               }
+               if (skip_stdout_flush && !ferror(f))
+                       return;
+       }
+       if (fflush(f)) {
+               check_pipe(errno);
+               die_errno("write failure on '%s'", desc);
+       }
+}
+
+void fprintf_or_die(FILE *f, const char *fmt, ...)
+{
+       va_list ap;
+       int ret;
+
+       va_start(ap, fmt);
+       ret = vfprintf(f, fmt, ap);
+       va_end(ap);
+
+       if (ret < 0) {
+               check_pipe(errno);
+               die_errno("write error");
+       }
+}
+
+void fsync_or_die(int fd, const char *msg)
+{
+       if (fsync(fd) < 0) {
+               die_errno("fsync error on '%s'", msg);
+       }
+}
+
+void write_or_die(int fd, const void *buf, size_t count)
+{
+       if (write_in_full(fd, buf, count) < 0) {
+               check_pipe(errno);
+               die_errno("write error");
+       }
+}
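
For context, the typical calling pattern for the stdio helper is to print with ordinary stdio and then flush once through maybe_flush_or_die(), so that write errors are still detected; the GIT_FLUSH environment variable overrides the heuristic (GIT_FLUSH=1 forces the flush, GIT_FLUSH=0 skips it). A minimal sketch, with a hypothetical command output path:

    /* Hypothetical command output using the helpers above. */
    printf("%s\n", oid_to_hex(&oid));
    maybe_flush_or_die(stdout, "standard output");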
diff --git a/write_or_die.c b/write_or_die.c
deleted file mode 100644 (file)
index eab8c8d..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-#include "cache.h"
-#include "run-command.h"
-
-/*
- * Some cases use stdio, but want to flush after the write
- * to get error handling (and to get better interactive
- * behaviour - not buffering excessively).
- *
- * Of course, if the flush happened within the write itself,
- * we've already lost the error code, and cannot report it any
- * more. So we just ignore that case instead (and hope we get
- * the right error code on the flush).
- *
- * If the file handle is stdout, and stdout is a file, then skip the
- * flush entirely since it's not needed.
- */
-void maybe_flush_or_die(FILE *f, const char *desc)
-{
-       static int skip_stdout_flush = -1;
-       struct stat st;
-       char *cp;
-
-       if (f == stdout) {
-               if (skip_stdout_flush < 0) {
-                       cp = getenv("GIT_FLUSH");
-                       if (cp)
-                               skip_stdout_flush = (atoi(cp) == 0);
-                       else if ((fstat(fileno(stdout), &st) == 0) &&
-                                S_ISREG(st.st_mode))
-                               skip_stdout_flush = 1;
-                       else
-                               skip_stdout_flush = 0;
-               }
-               if (skip_stdout_flush && !ferror(f))
-                       return;
-       }
-       if (fflush(f)) {
-               check_pipe(errno);
-               die_errno("write failure on '%s'", desc);
-       }
-}
-
-void fprintf_or_die(FILE *f, const char *fmt, ...)
-{
-       va_list ap;
-       int ret;
-
-       va_start(ap, fmt);
-       ret = vfprintf(f, fmt, ap);
-       va_end(ap);
-
-       if (ret < 0) {
-               check_pipe(errno);
-               die_errno("write error");
-       }
-}
-
-void fsync_or_die(int fd, const char *msg)
-{
-       if (fsync(fd) < 0) {
-               die_errno("fsync error on '%s'", msg);
-       }
-}
-
-void write_or_die(int fd, const void *buf, size_t count)
-{
-       if (write_in_full(fd, buf, count) < 0) {
-               check_pipe(errno);
-               die_errno("write error");
-       }
-}
index 66f4234af1149618b47786f3b623916be7c02c74..32f3bcaebd43aab58aea440ff704ca5340c61963 100644 (file)
@@ -625,9 +625,6 @@ static void wt_status_collect_changes_index(struct wt_status *s)
        rev.diffopt.output_format |= DIFF_FORMAT_CALLBACK;
        rev.diffopt.format_callback = wt_status_collect_updated_cb;
        rev.diffopt.format_callback_data = s;
-       rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
-       rev.diffopt.rename_limit = 200;
-       rev.diffopt.break_opt = 0;
        copy_pathspec(&rev.prune_data, &s->pathspec);
        run_diff_index(&rev, 1);
 }
@@ -985,7 +982,6 @@ static void wt_longstatus_print_verbose(struct wt_status *s)
        setup_revisions(0, NULL, &rev, &opt);
 
        rev.diffopt.output_format |= DIFF_FORMAT_PATCH;
-       rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
        rev.diffopt.file = s->fp;
        rev.diffopt.close_file = 0;
        /*
@@ -1188,7 +1184,7 @@ static void abbrev_sha1_in_line(struct strbuf *line)
                strbuf_trim(split[1]);
                if (!get_oid(split[1]->buf, &oid)) {
                        strbuf_reset(split[1]);
-                       strbuf_add_unique_abbrev(split[1], oid.hash,
+                       strbuf_add_unique_abbrev(split[1], &oid,
                                                 DEFAULT_ABBREV);
                        strbuf_addch(split[1], ' ');
                        strbuf_reset(line);
@@ -1350,7 +1346,7 @@ static void show_cherry_pick_in_progress(struct wt_status *s,
                                        const char *color)
 {
        status_printf_ln(s, color, _("You are currently cherry-picking commit %s."),
-                       find_unique_abbrev(state->cherry_pick_head_sha1, DEFAULT_ABBREV));
+                       find_unique_abbrev(&state->cherry_pick_head_oid, DEFAULT_ABBREV));
        if (s->hints) {
                if (has_unmerged(s))
                        status_printf_ln(s, color,
@@ -1369,7 +1365,7 @@ static void show_revert_in_progress(struct wt_status *s,
                                        const char *color)
 {
        status_printf_ln(s, color, _("You are currently reverting commit %s."),
-                        find_unique_abbrev(state->revert_head_sha1, DEFAULT_ABBREV));
+                        find_unique_abbrev(&state->revert_head_oid, DEFAULT_ABBREV));
        if (s->hints) {
                if (has_unmerged(s))
                        status_printf_ln(s, color,
@@ -1422,7 +1418,7 @@ static char *get_branch(const struct worktree *wt, const char *path)
                ;
        else if (!get_oid_hex(sb.buf, &oid)) {
                strbuf_reset(&sb);
-               strbuf_add_unique_abbrev(&sb, oid.hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&sb, &oid, DEFAULT_ABBREV);
        } else if (!strcmp(sb.buf, "detached HEAD")) /* rebase */
                goto got_nothing;
        else                    /* bisect */
@@ -1459,7 +1455,7 @@ static int grab_1st_switch(struct object_id *ooid, struct object_id *noid,
        if (!strcmp(cb->buf.buf, "HEAD")) {
                /* HEAD is relative. Resolve it to the right reflog entry. */
                strbuf_reset(&cb->buf);
-               strbuf_add_unique_abbrev(&cb->buf, noid->hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&cb->buf, noid, DEFAULT_ABBREV);
        }
        return 1;
 }
@@ -1489,10 +1485,10 @@ static void wt_status_get_detached_from(struct wt_status_state *state)
                state->detached_from = xstrdup(from);
        } else
                state->detached_from =
-                       xstrdup(find_unique_abbrev(cb.noid.hash, DEFAULT_ABBREV));
-       hashcpy(state->detached_sha1, cb.noid.hash);
+                       xstrdup(find_unique_abbrev(&cb.noid, DEFAULT_ABBREV));
+       oidcpy(&state->detached_oid, &cb.noid);
        state->detached_at = !get_oid("HEAD", &oid) &&
-                            !hashcmp(oid.hash, state->detached_sha1);
+                            !oidcmp(&oid, &state->detached_oid);
 
        free(ref);
        strbuf_release(&cb.buf);
@@ -1551,13 +1547,13 @@ void wt_status_get_state(struct wt_status_state *state,
        } else if (!stat(git_path_cherry_pick_head(), &st) &&
                        !get_oid("CHERRY_PICK_HEAD", &oid)) {
                state->cherry_pick_in_progress = 1;
-               hashcpy(state->cherry_pick_head_sha1, oid.hash);
+               oidcpy(&state->cherry_pick_head_oid, &oid);
        }
        wt_status_check_bisect(NULL, state);
        if (!stat(git_path_revert_head(), &st) &&
            !get_oid("REVERT_HEAD", &oid)) {
                state->revert_in_progress = 1;
-               hashcpy(state->revert_head_sha1, oid.hash);
+               oidcpy(&state->revert_head_oid, &oid);
        }
 
        if (get_detached_from)
index ea2456daf24a4a74b3d85b041527ce5f99dc2695..430770b854c41b38b95dae6e8fa8943629a2309b 100644 (file)
@@ -118,9 +118,9 @@ struct wt_status_state {
        char *branch;
        char *onto;
        char *detached_from;
-       unsigned char detached_sha1[20];
-       unsigned char revert_head_sha1[20];
-       unsigned char cherry_pick_head_sha1[20];
+       struct object_id detached_oid;
+       struct object_id revert_head_oid;
+       struct object_id cherry_pick_head_oid;
 };
 
 size_t wt_status_locate_end(const char *s, size_t len);
index 770e1f7f8185e05f2618c261b70a5773041432fb..9315bc0ede11ba0377e27d711e37b6a0ae555c43 100644 (file)
@@ -191,7 +191,7 @@ void read_mmblob(mmfile_t *ptr, const struct object_id *oid)
                return;
        }
 
-       ptr->ptr = read_sha1_file(oid->hash, &type, &size);
+       ptr->ptr = read_object_file(oid, &type, &size);
        if (!ptr->ptr || type != OBJ_BLOB)
                die("unable to read blob object %s", oid_to_hex(oid));
        ptr->size = size;