Merge branch 'en/status-multiple-renames-to-the-same-target-fix'
author Junio C Hamano <gitster@pobox.com>
Tue, 16 Oct 2018 07:16:05 +0000 (16:16 +0900)
committer Junio C Hamano <gitster@pobox.com>
Tue, 16 Oct 2018 07:16:05 +0000 (16:16 +0900)
The code in "git status" sometimes hit an assertion failure. This
was caused by a structure that was reused without cleaning the data
used for the first run, which has been corrected.

* en/status-multiple-renames-to-the-same-target-fix:
commit: fix erroneous BUG, 'multiple renames on the same target? how?'

336 files changed:
.gitattributes
.gitignore
.mailmap
Documentation/CodingGuidelines
Documentation/Makefile
Documentation/RelNotes/2.14.5.txt [new file with mode: 0644]
Documentation/RelNotes/2.15.3.txt [new file with mode: 0644]
Documentation/RelNotes/2.16.5.txt [new file with mode: 0644]
Documentation/RelNotes/2.17.2.txt [new file with mode: 0644]
Documentation/RelNotes/2.18.1.txt [new file with mode: 0644]
Documentation/RelNotes/2.19.1.txt [new file with mode: 0644]
Documentation/RelNotes/2.20.0.txt [new file with mode: 0644]
Documentation/SubmittingPatches
Documentation/config.txt
Documentation/doc-diff
Documentation/fetch-config.txt [new file with mode: 0644]
Documentation/fetch-options.txt
Documentation/format-config.txt [new file with mode: 0644]
Documentation/git-archimport.txt
Documentation/git-branch.txt
Documentation/git-column.txt
Documentation/git-config.txt
Documentation/git-describe.txt
Documentation/git-format-patch.txt
Documentation/git-interpret-trailers.txt
Documentation/git-multi-pack-index.txt [new file with mode: 0644]
Documentation/git-pack-objects.txt
Documentation/git-push.txt
Documentation/git-repack.txt
Documentation/git-rerere.txt
Documentation/git-update-ref.txt
Documentation/git-worktree.txt
Documentation/git.txt
Documentation/gitcvs-config.txt [new file with mode: 0644]
Documentation/gitrevisions.txt
Documentation/gui-config.txt [new file with mode: 0644]
Documentation/pull-config.txt [new file with mode: 0644]
Documentation/pull-fetch-param.txt
Documentation/push-config.txt [new file with mode: 0644]
Documentation/receive-config.txt [new file with mode: 0644]
Documentation/sendemail-config.txt [new file with mode: 0644]
Documentation/submodule-config.txt [new file with mode: 0644]
Documentation/technical/commit-graph.txt
Documentation/technical/multi-pack-index.txt [new file with mode: 0644]
Documentation/technical/pack-format.txt
Documentation/technical/rerere.txt [new file with mode: 0644]
GIT-VERSION-GEN
Makefile
RelNotes
archive.c
attr.c
attr.h
bisect.c
blame.c
builtin.h
builtin/add.c
builtin/am.c
builtin/branch.c
builtin/check-attr.c
builtin/checkout.c
builtin/clone.c
builtin/commit-graph.c
builtin/commit.c
builtin/count-objects.c
builtin/describe.c
builtin/diff.c
builtin/difftool.c
builtin/fast-export.c
builtin/fetch.c
builtin/fmt-merge-msg.c
builtin/fsck.c
builtin/gc.c
builtin/index-pack.c
builtin/interpret-trailers.c
builtin/log.c
builtin/merge-base.c
builtin/merge-tree.c
builtin/merge.c
builtin/multi-pack-index.c [new file with mode: 0644]
builtin/pack-objects.c
builtin/pack-redundant.c
builtin/pull.c
builtin/range-diff.c
builtin/receive-pack.c
builtin/remote.c
builtin/repack.c
builtin/replace.c
builtin/rerere.c
builtin/rev-list.c
builtin/rev-parse.c
builtin/rm.c
builtin/show-branch.c
builtin/submodule--helper.c
builtin/tag.c
builtin/unpack-objects.c
builtin/update-index.c
builtin/update-ref.c
builtin/worktree.c
bulk-checkin.c
bundle.c
cache-tree.c
cache-tree.h
cache.h
combine-diff.c
command-list.txt
commit-graph.c
commit-graph.h
commit-reach.c [new file with mode: 0644]
commit-reach.h [new file with mode: 0644]
commit.c
commit.h
compat/mingw.c
config.mak.dev
connect.c
contrib/coccinelle/commit.cocci
contrib/coccinelle/object_id.cocci
convert.c
delta-islands.c [new file with mode: 0644]
delta-islands.h [new file with mode: 0644]
diff-lib.c
diff.c
diff.h
diffcore-break.c
diffcore-rename.c
dir.c
entry.c
ewah/ewok_rlw.h
fast-import.c
fetch-object.c
fetch-object.h
fetch-pack.c
fsck.c
fsck.h
git-submodule.sh
git.c
http-backend.c
http-push.c
http-walker.c
http.c
interdiff.c [new file with mode: 0644]
interdiff.h [new file with mode: 0644]
json-writer.h
linear-assignment.c
ll-merge.c
lockfile.h
log-tree.c
mailinfo.c
mailinfo.h
match-trees.c
merge-recursive.c
midx.c [new file with mode: 0644]
midx.h [new file with mode: 0644]
name-hash.c
notes-merge.c
notes.c
object-store.h
object.c
object.h
oidmap.c
pack-bitmap-write.c
pack-bitmap.c
pack-bitmap.h
pack-check.c
pack-objects.c
pack-objects.h
pack-write.c
packfile.c
packfile.h
patch-delta.c
patch-ids.c
preload-index.c
pretty.c
range-diff.c
range-diff.h
read-cache.c
ref-filter.c
refs.c
refs.h
refs/files-backend.c
refs/iterator.c
refs/packed-backend.c
refs/packed-backend.h
refs/ref-cache.c
refs/ref-cache.h
refs/refs-internal.h
remote-curl.c
remote.c
remote.h
replace-object.c
replace-object.h
rerere.c
revision.c
revision.h
sequencer.c
sequencer.h
server-info.c
sha1-array.c
sha1-file.c
sha1-name.c
shallow.c
string-list.c
string-list.h
submodule-config.c
submodule.c
t/README
t/helper/test-delta.c
t/helper/test-dump-cache-tree.c
t/helper/test-dump-fsmonitor.c
t/helper/test-dump-untracked-cache.c
t/helper/test-parse-options.c
t/helper/test-pkt-line.c
t/helper/test-reach.c [new file with mode: 0644]
t/helper/test-read-midx.c [new file with mode: 0644]
t/helper/test-repository.c
t/helper/test-tool.c
t/helper/test-tool.h
t/helper/test-windows-named-pipe.c [new file with mode: 0644]
t/lib-gpg.sh
t/oid-info/README [new file with mode: 0644]
t/oid-info/hash-info [new file with mode: 0644]
t/oid-info/oid [new file with mode: 0644]
t/perf/README
t/perf/aggregate.perl
t/perf/p1450-fsck.sh [new file with mode: 0755]
t/perf/p1451-fsck-skip-list.sh [new file with mode: 0755]
t/perf/p5311-pack-bitmaps-fetch.sh [new file with mode: 0755]
t/perf/perf-lib.sh
t/t0000-basic.sh
t/t0002-gitfile.sh
t/t0014-alias.sh [new file with mode: 0755]
t/t0021-conversion.sh
t/t0040-parse-options.sh
t/t0051-windows-named-pipe.sh [new file with mode: 0755]
t/t0064-sha1-array.sh
t/t0090-cache-tree.sh
t/t0410-partial-clone.sh
t/t1006-cat-file.sh
t/t1090-sparse-checkout-scope.sh
t/t1300-config.sh
t/t1303-wacky-config.sh
t/t1400-update-ref.sh
t/t1404-update-ref-errors.sh
t/t1405-main-ref-store.sh
t/t1406-submodule-ref-store.sh
t/t1407-worktree-ref-store.sh
t/t1700-split-index.sh
t/t2025-worktree-add.sh
t/t2028-worktree-move.sh
t/t2101-update-index-reupdate.sh
t/t3200-branch.sh
t/t3206-range-diff.sh
t/t3320-notes-merge-worktrees.sh
t/t3400-rebase.sh
t/t3404-rebase-interactive.sh
t/t3405-rebase-malformed.sh
t/t3415-rebase-autosquash.sh
t/t3417-rebase-whitespace-fix.sh
t/t3505-cherry-pick-empty.sh
t/t3701-add-interactive.sh
t/t3702-add-edit.sh
t/t3903-stash.sh
t/t3905-stash-include-untracked.sh
t/t4014-format-patch.sh
t/t4025-hunk-header.sh
t/t4117-apply-reject.sh
t/t4124-apply-ws-rule.sh
t/t4138-apply-ws-expansion.sh
t/t4200-rerere.sh
t/t4205-log-pretty-formats.sh
t/t4256-am-format-flowed.sh [new file with mode: 0755]
t/t4256/1/mailinfo.c [new file with mode: 0644]
t/t4256/1/mailinfo.c.orig [new file with mode: 0644]
t/t4256/1/patch [new file with mode: 0644]
t/t5303-pack-corruption-resilience.sh
t/t5307-pack-missing-commit.sh
t/t5310-pack-bitmaps.sh
t/t5317-pack-objects-filter-objects.sh
t/t5318-commit-graph.sh
t/t5319-multi-pack-index.sh [new file with mode: 0755]
t/t5320-delta-islands.sh [new file with mode: 0755]
t/t5500-fetch-pack.sh
t/t5504-fetch-receive-strict.sh
t/t5505-remote.sh
t/t5516-fetch-push.sh
t/t5551-http-fetch-smart.sh
t/t5562-http-backend-content-length.sh
t/t5601-clone.sh
t/t5612-clone-refspec.sh
t/t5616-partial-clone.sh
t/t5701-git-serve.sh
t/t5702-protocol-v2.sh
t/t5703-upload-pack-ref-in-want.sh
t/t6011-rev-list-with-bad-commit.sh
t/t6018-rev-list-glob.sh
t/t6023-merge-file.sh
t/t6024-recursive-merge.sh
t/t6027-merge-binary.sh
t/t6031-merge-filemode.sh
t/t6112-rev-list-filters-objects.sh
t/t6135-pathspec-with-attrs.sh
t/t6300-for-each-ref.sh
t/t6500-gc.sh
t/t6600-test-reach.sh [new file with mode: 0755]
t/t7063-status-untracked-cache.sh
t/t7201-co.sh
t/t7406-submodule-update.sh
t/t7416-submodule-dash-url.sh [new file with mode: 0755]
t/t7417-submodule-path-url.sh [new file with mode: 0755]
t/t7501-commit.sh
t/t7513-interpret-trailers.sh
t/t7519-status-fsmonitor.sh
t/t7800-difftool.sh
t/t9100-git-svn-basic.sh
t/t9101-git-svn-props.sh
t/t9133-git-svn-nested-git-repo.sh
t/t9600-cvsimport.sh
t/t9603-cvsimport-patchsets.sh
t/t9604-cvsimport-timestamps.sh
t/test-lib-functions.sh
t/test-lib.sh
tempfile.c
tempfile.h
trace.c
trace.h
trailer.c
trailer.h
transport.c
tree-diff.c
unpack-trees.c
unpack-trees.h
upload-pack.c
userdiff.c
worktree.c
ws.c
wt-status.c
xdiff-interface.c
diff --git a/.gitattributes b/.gitattributes
index 1bdc91e282c5393c527b3902a208227c19971b84..49b30516419c8dfe8c039ef368a3af984439ebcc 100644 (file)
@@ -9,3 +9,7 @@
 /command-list.txt eol=lf
 /GIT-VERSION-GEN eol=lf
 /mergetools/* eol=lf
+/Documentation/git-merge.txt conflict-marker-size=32
+/Documentation/gitk.txt conflict-marker-size=32
+/Documentation/user-manual.txt conflict-marker-size=32
+/t/t????-*.sh conflict-marker-size=32
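
These attribute lines lengthen the conflict markers Git writes for the listed paths, presumably because those files contain literal seven-character example markers of their own. A quick way to confirm the attribute took effect, run from the top of the work tree (purely illustrative, not part of the change):

        # Ask Git which conflict-marker-size applies to one of the paths above.
        git check-attr conflict-marker-size Documentation/git-merge.txt
        # Expected output once the attribute is in place:
        # Documentation/git-merge.txt: conflict-marker-size: 32
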
diff --git a/.gitignore b/.gitignore
index ffceea7d59fd21d5c5deed2c8d14f507bdbf1666..9d1363a1ebce8432c15f610aa7af9520e3e2bb12 100644 (file)
@@ -99,8 +99,9 @@
 /git-mergetool--lib
 /git-mktag
 /git-mktree
-/git-name-rev
+/git-multi-pack-index
 /git-mv
+/git-name-rev
 /git-notes
 /git-p4
 /git-pack-redundant
index f165222a7821e982174adc8e1c3678275ae80753..bef3352b0d3c6e84a33304b2fe44f6a68f6044a3 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -25,7 +25,7 @@ Ben Walton <bdwalton@gmail.com> <bwalton@artsci.utoronto.ca>
 Benoit Sigoure <tsunanet@gmail.com> <tsuna@lrde.epita.fr>
 Bernt Hansen <bernt@norang.ca> <bernt@alumni.uwaterloo.ca>
 Brandon Casey <drafnel@gmail.com> <casey@nrlssc.navy.mil>
-brian m. carlson <sandals@crustytoothpaste.net> Brian M. Carlson <sandals@crustytoothpaste.ath.cx>
+brian m. carlson <sandals@crustytoothpaste.net>
 brian m. carlson <sandals@crustytoothpaste.net> <sandals@crustytoothpaste.ath.cx>
 Bryan Larsen <bryan@larsen.st> <bryan.larsen@gmail.com>
 Bryan Larsen <bryan@larsen.st> <bryanlarsen@yahoo.com>
diff --git a/Documentation/CodingGuidelines b/Documentation/CodingGuidelines
index 48aa4edfbdd180e1c6d874b6bb61ea5fc8e32ef5..72967deb785814546830cf23a54eeeb93d147681 100644 (file)
@@ -118,6 +118,24 @@ For shell scripts specifically (not exhaustive):
                do this
        fi
 
+ - If a command sequence joined with && or || or | spans multiple
+   lines, put each command on a separate line and put && and || and |
+   operators at the end of each line, rather than the start. This
+   means you don't need to use \ to join lines, since the above
+   operators imply the sequence isn't finished.
+
+       (incorrect)
+       grep blob verify_pack_result \
+       | awk -f print_1.awk \
+       | sort >actual &&
+       ...
+
+       (correct)
+       grep blob verify_pack_result |
+       awk -f print_1.awk |
+       sort >actual &&
+       ...
+
  - We prefer "test" over "[ ... ]".
 
  - We do not write the noiseword "function" in front of shell
diff --git a/Documentation/Makefile b/Documentation/Makefile
index a42dcfc74599a29bce540a54191b84755c161e05..95f6a321f239cb2c43e9a556f5ef4219bc35ef0a 100644 (file)
@@ -344,7 +344,7 @@ $(OBSOLETE_HTML): %.html : %.txto asciidoc.conf
        mv $@+ $@
 
 manpage-base-url.xsl: manpage-base-url.xsl.in
-       sed "s|@@MAN_BASE_URL@@|$(MAN_BASE_URL)|" $< > $@
+       $(QUIET_GEN)sed "s|@@MAN_BASE_URL@@|$(MAN_BASE_URL)|" $< > $@
 
 %.1 %.5 %.7 : %.xml manpage-base-url.xsl
        $(QUIET_XMLTO)$(RM) $@ && \
diff --git a/Documentation/RelNotes/2.14.5.txt b/Documentation/RelNotes/2.14.5.txt
new file mode 100644 (file)
index 0000000..130645f
--- /dev/null
@@ -0,0 +1,16 @@
+Git v2.14.5 Release Notes
+=========================
+
+This release is to address the recently reported CVE-2018-17456.
+
+Fixes since v2.14.4
+-------------------
+
+ * Submodules' "URL"s come from the untrusted .gitmodules file, but
+   we blindly gave it to "git clone" to clone submodules when "git
+   clone --recurse-submodules" was used to clone a project that has
+   such a submodule.  The code has been hardened to reject such
+   malformed URLs (e.g. one that begins with a dash).
+
+Credit for finding and fixing this vulnerability goes to joernchen
+and Jeff King, respectively.
diff --git a/Documentation/RelNotes/2.15.3.txt b/Documentation/RelNotes/2.15.3.txt
new file mode 100644 (file)
index 0000000..fd2e6f8
--- /dev/null
@@ -0,0 +1,6 @@
+Git v2.15.3 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.14.5 to address
+the recently reported CVE-2018-17456; see the release notes for that
+version for details.
diff --git a/Documentation/RelNotes/2.16.5.txt b/Documentation/RelNotes/2.16.5.txt
new file mode 100644 (file)
index 0000000..cb8ee02
--- /dev/null
@@ -0,0 +1,6 @@
+Git v2.16.5 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.14.5 to address
+the recently reported CVE-2018-17456; see the release notes for that
+version for details.
diff --git a/Documentation/RelNotes/2.17.2.txt b/Documentation/RelNotes/2.17.2.txt
new file mode 100644 (file)
index 0000000..ef021be
--- /dev/null
@@ -0,0 +1,12 @@
+Git v2.17.2 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.14.5 to address
+the recently reported CVE-2018-17456; see the release notes for that
+version for details.
+
+In addition, this release also teaches "fsck" and the server side
+logic to reject pushes to repositories that attempt to create such a
+problematic ".gitmodules" file as tracked contents, to help hosting
+sites protect their customers by preventing malicious contents from
+spreading.
diff --git a/Documentation/RelNotes/2.18.1.txt b/Documentation/RelNotes/2.18.1.txt
new file mode 100644 (file)
index 0000000..2098cdd
--- /dev/null
@@ -0,0 +1,6 @@
+Git v2.18.1 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.14.5 and in
+v2.17.2 to address the recently reported CVE-2018-17456; see the
+release notes for those versions for details.
diff --git a/Documentation/RelNotes/2.19.1.txt b/Documentation/RelNotes/2.19.1.txt
new file mode 100644 (file)
index 0000000..da76726
--- /dev/null
@@ -0,0 +1,6 @@
+Git v2.19.1 Release Notes
+=========================
+
+This release merges up the fixes that appear in v2.14.5 and in
+v2.17.2 to address the recently reported CVE-2018-17456; see the
+release notes for those versions for details.
diff --git a/Documentation/RelNotes/2.20.0.txt b/Documentation/RelNotes/2.20.0.txt
new file mode 100644 (file)
index 0000000..2cc5fd7
--- /dev/null
@@ -0,0 +1,183 @@
+Git Release Notes
+=================
+
+Backward Compatibility Notes
+----------------------------
+
+ * "git branch -l <foo>" used to be a way to ask a reflog to be
+   created while creating a new branch, but that is no longer the
+   case.  It is a short-hand for "git branch --list <foo>" now.
+
+ * "git push" into refs/tags/* hierarchy is rejected without getting
+   forced, but "git fetch" (misguidedly) used the "fast forwarding"
+   rule used for the refs/heads/* hierarchy; this has been corrected,
+   which means some fetches of tags that did not fail with older
+   version of Git will fail without "--force" with this version.
+
+
+Updates since v2.19
+-------------------
+
+UI, Workflows & Features
+
+ * Running "git clone" against a project that contain two files with
+   pathnames that differ only in cases on a case insensitive
+   filesystem would result in one of the files lost because the
+   underlying filesystem is incapable of holding both at the same
+   time.  An attempt is made to detect such a case and warn.
+
+ * "git checkout -b newbranch [HEAD]" should not have to do as much as
+   checking out a commit different from HEAD.  An attempt is made to
+   optimize this special case.
+
+ * "git rev-list --stdin </dev/null" used to be an error; it now shows
+   no output without an error.  "git rev-list --stdin --default HEAD"
+   still falls back to the given default when nothing is given on the
+   standard input.
+
+ * Lift code from GitHub to restrict delta computation so that an
+   object that exists in one fork is not made into a delta against
+   another object that does not appear in the same forked repository.
+
+ * "git format-patch" learned new "--interdiff" and "--range-diff"
+   options to explain the difference between this version and the
+   previous attempt in the cover letter (or after the tree-dashes as
+   a comment).
+
+ * "git mailinfo" used in "git am" learned to make a best-effort
+   recovery of a patch corrupted by MUA that sends text/plain with
+   format=flowed option.
+   (merge 3aa4d81f88 rs/mailinfo-format-flowed later to maint).
+
+ * The rules used by "git push" and "git fetch" to determine if a ref
+   can or cannot be updated were inconsistent; specifically, fetching
+   to update existing tags were allowed even though tags are supposed
+   to be unmoving anchoring points.  "git fetch" was taught to forbid
+   updates to existing tags without the "--force" option.
+
+ * "git multi-pack-index" learned to detect corruption in the .midx
+   file it uses, and this feature has been integrated into "git fsck".
+
+
+Performance, Internal Implementation, Development Support etc.
+
+ * When there are too many packfiles in a repository (which is not
+   recommended), looking up an object in these would require
+   consulting many pack .idx files; a new mechanism to have a single
+   file that consolidates all of these .idx files is introduced.
+
+ * "git submodule update" is getting rewritten piece-by-piece into C.
+
+ * The code for computing history reachability has been shuffled,
+   obtained a bunch of new tests to cover them, and then being
+   improved.
+
+ * The unpack_trees() API used in checking out a branch and merging
+   walks one or more trees along with the index.  When the cache-tree
+   in the index tells us that we are walking a tree whose flattened
+   contents is known (i.e. matches a span in the index), as linearly
+   scanning a span in the index is much more efficient than having to
+   open tree objects recursively and listing their entries, the walk
+   can be optimized, which has been done.
+
+ * When creating a thin pack, which allows objects to be made into a
+   delta against another object that is not in the resulting pack but
+   is known to be present on the receiving end, the code learned to
+   take advantage of the reachability bitmap; this allows the server
+   to send a delta against a base beyond the "boundary" commit.
+
+ * spatch transformation to replace boolean uses of !hashcmp() to
+   newly introduced oideq() is added, and applied, to regain
+   performance lost due to support of multiple hash algorithms.
+
+ * Fix a bug in which the same path could be registered under multiple
+   worktree entries if the path was missing (for instance, was removed
+   manually).  Also, as a convenience, expand the number of cases in
+   which --force is applicable.
+
+ * Split Documentation/config.txt for easier maintenance.
+   (merge 6014363f0b nd/config-split later to maint).
+
+ * Test helper binaries clean-up.
+   (merge c9a1f4161f nd/test-tool later to maint).
+
+ * Various tests have been updated to make it easier to swap the
+   hash function used for object identification.
+   (merge ae0c89d41b bc/hash-independent-tests later to maint).
+
+ * Update fsck.skipList implementation and documentation.
+   (merge 371a655074 ab/fsck-skiplist later to maint).
+
+
+Fixes since v2.19
+-----------------
+
+ * "git interpret-trailers" and its underlying machinery had a buggy
+   code that attempted to ignore patch text after commit log message,
+   which triggered in various codepaths that will always get the log
+   message alone and never get such an input.
+   (merge 66e83d9b41 jk/trailer-fixes later to maint).
+
+ * Malformed or crafted data in packstream can make our code attempt
+   to read or write past the allocated buffer and abort, instead of
+   reporting an error, which has been fixed.
+
+ * "git rebase -i" did not clear the state files correctly when a run
+   of "squash/fixup" is aborted and then the user manually amended the
+   commit instead, which has been corrected.
+   (merge 10d2f35436 js/rebase-i-autosquash-fix later to maint).
+
+ * When fsmonitor is in use, after operation on submodules updates
+   .gitmodules, we lost track of the fact that we did so and relied on
+   stale fsmonitor data.
+   (merge 43f1180814 bp/mv-submodules-with-fsmonitor later to maint).
+
+ * Fix for a long-standing bug that leaves the index file corrupt when
+   it shrinks during a partial commit.
+   (merge 6c003d6ffb jk/reopen-tempfile-truncate later to maint).
+
+ * Further fix for O_APPEND emulation on Windows
+   (merge eeaf7ddac7 js/mingw-o-append later to maint).
+
+ * A corner case bugfix in "git rerere" code.
+   (merge ad2bf0d9b4 en/rerere-multi-stage-1-fix later to maint).
+
+ * "git add ':(attr:foo)'" is not supported and is supposed to be
+   rejected while the command line arguments are parsed, but we fail
+   to reject such a command line upfront.
+   (merge 84d938b732 nd/attr-pathspec-fix later to maint).
+
+ * Recent update broke the reachability algorithm when refs (e.g.
+   tags) that point at objects that are not commit were involved,
+   which has been fixed.
+
+ * "git rebase" etc. in Git 2.19 fails to abort when given an empty
+   commit log message as result of editing, which has been corrected.
+   (merge a3ec9eaf38 en/sequencer-empty-edit-result-aborts later to maint).
+
+ * The code to backfill objects in lazily cloned repository did not
+   work correctly, which has been corrected.
+   (merge e68302011c jt/lazy-object-fetch-fix later to maint).
+
+ * Update error messages given by "git remote" and make them consistent.
+   (merge 5025425dff ms/remote-error-message-update later to maint).
+
+ * "git update-ref" learned to make both "--no-deref" and "--stdin"
+   work at the same time.
+   (merge d345e9fbe7 en/update-ref-no-deref-stdin later to maint).
+
+ * Recently added "range-diff" had a corner-case bug to cause it
+   segfault, which has been corrected.
+   (merge e467a90c7a tg/range-diff-corner-case-fix later to maint).
+
+ * Code cleanup, docfix, build fix, etc.
+   (merge 96a7501aad ts/doc-build-manpage-xsl-quietly later to maint).
+   (merge b9b07efdb2 tg/conflict-marker-size later to maint).
+   (merge fa0aeea770 sg/doc-trace-appends later to maint).
+   (merge d64324cb60 tb/void-check-attr later to maint).
+   (merge c3b9bc94b9 en/double-semicolon-fix later to maint).
+   (merge 79336116f5 sg/t3701-tighten-trace later to maint).
+   (merge 801fa63a90 jk/dev-build-format-security later to maint).
+   (merge 0597dd62ba sb/string-list-remove-unused later to maint).
+   (merge db2d36fad8 bw/protocol-v2 later to maint).
+   (merge 456d7cd3a9 sg/split-index-test later to maint).
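
The tag-fetching change called out twice in the notes above means a fetch that used to silently move an existing local tag now needs an explicit force. A small illustration, with "origin" and "v1.0" standing in for a real remote and tag name:

        # Force a single tag refspec with the leading '+' ...
        git fetch origin '+refs/tags/v1.0:refs/tags/v1.0'
        # ... or force all tag updates at once.
        git fetch --force --tags origin
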
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index b44fd51f275b105b62a1f5c9d23ca616e2d87e7d..ec8b2051450abb90a77b1edf243d9dd01b16867e 100644 (file)
@@ -80,7 +80,9 @@ GitHub-Travis CI hints section for details.
 
 Do not forget to update the documentation to describe the updated
 behavior and make sure that the resulting documentation set formats
-well. It is currently a liberal mixture of US and UK English norms for
+well (try the Documentation/doc-diff script).
+
+We currently have a liberal mixture of US and UK English norms for
 spelling and grammar, which is somewhat unfortunate.  A huge patch that
 touches the files all over the place only to correct the inconsistency
 is not welcome, though.  Potential clashes with other changes that can
diff --git a/Documentation/config.txt b/Documentation/config.txt
index eb66a119753726b0260acd456f5728351dc05ba3..7d58253368ce5eed36d6ccf2d757c87a1343fc5b 100644 (file)
@@ -225,7 +225,7 @@ boolean::
        false;; Boolean false literals are `no`, `off`, `false`,
                `0` and the empty string.
 +
-When converting value to the canonical form using `--bool` type
+When converting a value to its canonical form using the `--type=bool` type
 specifier, 'git config' will ensure that the output is "true" or
 "false" (spelled in lowercase).
 
@@ -832,12 +832,6 @@ core.packedRefsTimeout::
        all; -1 means to try indefinitely. Default is 1000 (i.e.,
        retry for 1 second).
 
-sequence.editor::
-       Text editor used by `git rebase -i` for editing the rebase instruction file.
-       The value is meant to be interpreted by the shell when it is used.
-       It can be overridden by the `GIT_SEQUENCE_EDITOR` environment variable.
-       When not configured the default commit message editor is used instead.
-
 core.pager::
        Text viewer for use by Git commands (e.g., 'less').  The value
        is meant to be interpreted by the shell.  The order of preference
@@ -937,6 +931,11 @@ core.useReplaceRefs::
        option was given on the command line. See linkgit:git[1] and
        linkgit:git-replace[1] for more information.
 
+core.multiPackIndex::
+       Use the multi-pack-index file to track multiple packfiles using a
+       single index. See link:technical/multi-pack-index.html[the
+       multi-pack-index design document].
+
 core.sparseCheckout::
        Enable "sparse checkout" feature. See section "Sparse checkout" in
        linkgit:git-read-tree[1] for more information.
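
The core.multiPackIndex setting documented above pairs with the "git multi-pack-index" builtin added by this merge. A minimal sketch of trying it out in a repository that already has several packfiles:

        # Write the multi-pack-index file into the repository's pack directory ...
        git multi-pack-index write
        # ... and let object lookups consult it.
        git config core.multiPackIndex true
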
@@ -1154,6 +1153,14 @@ and by linkgit:git-worktree[1] when 'git worktree add' refers to a
 remote branch. This setting might be used for other checkout-like
 commands or functionality in the future.
 
+checkout.optimizeNewBranch::
+       Optimizes the performance of "git checkout -b <new_branch>" when
+       using sparse-checkout.  When set to true, git will not update the
+       repo based on the current sparse-checkout settings.  This means it
+       will not update the skip-worktree bit in the index nor add/remove
+       files in the working directory to reflect the current sparse checkout
+       settings nor will it show the local changes.
+
 clean.requireForce::
        A boolean to make git-clean do nothing unless given -f,
        -i or -n.   Defaults to true.
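
checkout.optimizeNewBranch only matters when sparse checkout is in use; a sketch of enabling it, assuming the sparse-checkout patterns are already set up and "topic" is merely a placeholder branch name:

        # Both settings are per-repository.
        git config core.sparseCheckout true
        git config checkout.optimizeNewBranch true
        # Creating a branch from HEAD can now skip re-applying the sparse rules.
        git checkout -b topic
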
@@ -1507,159 +1514,9 @@ fastimport.unpackLimit::
        operation complete faster, especially on slow filesystems.  If
        not set, the value of `transfer.unpackLimit` is used instead.
 
-fetch.recurseSubmodules::
-       This option can be either set to a boolean value or to 'on-demand'.
-       Setting it to a boolean changes the behavior of fetch and pull to
-       unconditionally recurse into submodules when set to true or to not
-       recurse at all when set to false. When set to 'on-demand' (the default
-       value), fetch and pull will only recurse into a populated submodule
-       when its superproject retrieves a commit that updates the submodule's
-       reference.
-
-fetch.fsckObjects::
-       If it is set to true, git-fetch-pack will check all fetched
-       objects. See `transfer.fsckObjects` for what's
-       checked. Defaults to false. If not set, the value of
-       `transfer.fsckObjects` is used instead.
-
-fetch.fsck.<msg-id>::
-       Acts like `fsck.<msg-id>`, but is used by
-       linkgit:git-fetch-pack[1] instead of linkgit:git-fsck[1]. See
-       the `fsck.<msg-id>` documentation for details.
-
-fetch.fsck.skipList::
-       Acts like `fsck.skipList`, but is used by
-       linkgit:git-fetch-pack[1] instead of linkgit:git-fsck[1]. See
-       the `fsck.skipList` documentation for details.
-
-fetch.unpackLimit::
-       If the number of objects fetched over the Git native
-       transfer is below this
-       limit, then the objects will be unpacked into loose object
-       files. However if the number of received objects equals or
-       exceeds this limit then the received pack will be stored as
-       a pack, after adding any missing delta bases.  Storing the
-       pack from a push can make the push operation complete faster,
-       especially on slow filesystems.  If not set, the value of
-       `transfer.unpackLimit` is used instead.
-
-fetch.prune::
-       If true, fetch will automatically behave as if the `--prune`
-       option was given on the command line.  See also `remote.<name>.prune`
-       and the PRUNING section of linkgit:git-fetch[1].
-
-fetch.pruneTags::
-       If true, fetch will automatically behave as if the
-       `refs/tags/*:refs/tags/*` refspec was provided when pruning,
-       if not set already. This allows for setting both this option
-       and `fetch.prune` to maintain a 1=1 mapping to upstream
-       refs. See also `remote.<name>.pruneTags` and the PRUNING
-       section of linkgit:git-fetch[1].
-
-fetch.output::
-       Control how ref update status is printed. Valid values are
-       `full` and `compact`. Default value is `full`. See section
-       OUTPUT in linkgit:git-fetch[1] for detail.
-
-fetch.negotiationAlgorithm::
-       Control how information about the commits in the local repository is
-       sent when negotiating the contents of the packfile to be sent by the
-       server. Set to "skipping" to use an algorithm that skips commits in an
-       effort to converge faster, but may result in a larger-than-necessary
-       packfile; The default is "default" which instructs Git to use the default algorithm
-       that never skips commits (unless the server has acknowledged it or one
-       of its descendants).
-       Unknown values will cause 'git fetch' to error out.
-+
-See also the `--negotiation-tip` option for linkgit:git-fetch[1].
-
-format.attach::
-       Enable multipart/mixed attachments as the default for
-       'format-patch'.  The value can also be a double quoted string
-       which will enable attachments as the default and set the
-       value as the boundary.  See the --attach option in
-       linkgit:git-format-patch[1].
-
-format.from::
-       Provides the default value for the `--from` option to format-patch.
-       Accepts a boolean value, or a name and email address.  If false,
-       format-patch defaults to `--no-from`, using commit authors directly in
-       the "From:" field of patch mails.  If true, format-patch defaults to
-       `--from`, using your committer identity in the "From:" field of patch
-       mails and including a "From:" field in the body of the patch mail if
-       different.  If set to a non-boolean value, format-patch uses that
-       value instead of your committer identity.  Defaults to false.
-
-format.numbered::
-       A boolean which can enable or disable sequence numbers in patch
-       subjects.  It defaults to "auto" which enables it only if there
-       is more than one patch.  It can be enabled or disabled for all
-       messages by setting it to "true" or "false".  See --numbered
-       option in linkgit:git-format-patch[1].
-
-format.headers::
-       Additional email headers to include in a patch to be submitted
-       by mail.  See linkgit:git-format-patch[1].
-
-format.to::
-format.cc::
-       Additional recipients to include in a patch to be submitted
-       by mail.  See the --to and --cc options in
-       linkgit:git-format-patch[1].
-
-format.subjectPrefix::
-       The default for format-patch is to output files with the '[PATCH]'
-       subject prefix. Use this variable to change that prefix.
-
-format.signature::
-       The default for format-patch is to output a signature containing
-       the Git version number. Use this variable to change that default.
-       Set this variable to the empty string ("") to suppress
-       signature generation.
-
-format.signatureFile::
-       Works just like format.signature except the contents of the
-       file specified by this variable will be used as the signature.
-
-format.suffix::
-       The default for format-patch is to output files with the suffix
-       `.patch`. Use this variable to change that suffix (make sure to
-       include the dot if you want it).
-
-format.pretty::
-       The default pretty format for log/show/whatchanged command,
-       See linkgit:git-log[1], linkgit:git-show[1],
-       linkgit:git-whatchanged[1].
-
-format.thread::
-       The default threading style for 'git format-patch'.  Can be
-       a boolean value, or `shallow` or `deep`.  `shallow` threading
-       makes every mail a reply to the head of the series,
-       where the head is chosen from the cover letter, the
-       `--in-reply-to`, and the first patch mail, in this order.
-       `deep` threading makes every mail a reply to the previous one.
-       A true boolean value is the same as `shallow`, and a false
-       value disables threading.
-
-format.signOff::
-       A boolean value which lets you enable the `-s/--signoff` option of
-       format-patch by default. *Note:* Adding the Signed-off-by: line to a
-       patch should be a conscious act and means that you certify you have
-       the rights to submit this work under the same open source license.
-       Please see the 'SubmittingPatches' document for further discussion.
-
-format.coverLetter::
-       A boolean that controls whether to generate a cover-letter when
-       format-patch is invoked, but in addition can be set to "auto", to
-       generate a cover-letter only when there's more than one patch.
-
-format.outputDirectory::
-       Set a custom directory to store the resulting files instead of the
-       current working directory.
-
-format.useAutoBase::
-       A boolean value which lets you enable the `--base=auto` option of
-       format-patch by default.
+include::fetch-config.txt[]
+
+include::format-config.txt[]
 
 filter.<driver>.clean::
        The command which is used to convert the content of a worktree
@@ -1710,12 +1567,16 @@ doing the same for `receive.fsck.<msg-id>` and `fetch.fsck.<msg-id>`
 will only cause git to warn.
 
 fsck.skipList::
-       The path to a sorted list of object names (i.e. one SHA-1 per
+       The path to a list of object names (i.e. one unabbreviated SHA-1 per
        line) that are known to be broken in a non-fatal way and should
-       be ignored. This feature is useful when an established project
-       should be accepted despite early commits containing errors that
-       can be safely ignored such as invalid committer email addresses.
-       Note: corrupt objects cannot be skipped with this setting.
+       be ignored. On versions of Git 2.20 and later comments ('#'), empty
+       lines, and any leading and trailing whitespace is ignored. Everything
+       but a SHA-1 per line will error out on older versions.
++
+This feature is useful when an established project should be accepted
+despite early commits containing errors that can be safely ignored
+such as invalid committer email addresses.  Note: corrupt objects
+cannot be skipped with this setting.
 +
 Like `fsck.<msg-id>` this variable has corresponding
 `receive.fsck.skipList` and `fetch.fsck.skipList` variants.
@@ -1725,6 +1586,15 @@ Unlike variables like `color.ui` and `core.editor` the
 fall back on the `fsck.skipList` configuration if they aren't set. To
 uniformly configure the same fsck settings in different circumstances
 all three of them they must all set to the same values.
++
+Older versions of Git (before 2.20) documented that the object names
+list should be sorted. This was never a requirement, the object names
+could appear in any order, but when reading the list we tracked whether
+the list was sorted for the purposes of an internal binary search
+implementation, which could save itself some work with an already sorted
+list. Unless you had a humongous list there was no reason to go out of
+your way to pre-sort the list. After Git version 2.20 a hash implementation
+is used instead, so there's now no reason to pre-sort the list.
 
 gc.aggressiveDepth::
        The depth parameter used in the delta compression
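
Given the relaxed format spelled out above (full object names, '#' comments and blank lines allowed from 2.20 on, no sorting required), a skip list and the settings pointing at it could look like the sketch below; the path and the object name are made-up placeholders:

        # .git/fsck-skip-list might contain, one unabbreviated SHA-1 per line:
        #   # early imports with malformed committer addresses
        #   0123456789abcdef0123456789abcdef01234567
        git config fsck.skipList .git/fsck-skip-list
        # The transfer-time checks read their own copies of the setting.
        git config fetch.fsck.skipList .git/fsck-skip-list
        git config receive.fsck.skipList .git/fsck-skip-list
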
@@ -1773,7 +1643,8 @@ gc.writeCommitGraph::
        for details.
 
 gc.logExpiry::
-       If the file gc.log exists, then `git gc --auto` won't run
+       If the file gc.log exists, then `git gc --auto` will print
+       its content and exit with status zero instead of running
        unless that file is more than 'gc.logExpiry' old.  Default is
        "1.day".  See `gc.pruneExpire` for more ways to specify its
        value.
@@ -1834,73 +1705,7 @@ gc.rerereUnresolved::
        You can also use more human-readable "1.month.ago", etc.
        The default is 15 days.  See linkgit:git-rerere[1].
 
-gitcvs.commitMsgAnnotation::
-       Append this string to each commit message. Set to empty string
-       to disable this feature. Defaults to "via git-CVS emulator".
-
-gitcvs.enabled::
-       Whether the CVS server interface is enabled for this repository.
-       See linkgit:git-cvsserver[1].
-
-gitcvs.logFile::
-       Path to a log file where the CVS server interface well... logs
-       various stuff. See linkgit:git-cvsserver[1].
-
-gitcvs.usecrlfattr::
-       If true, the server will look up the end-of-line conversion
-       attributes for files to determine the `-k` modes to use. If
-       the attributes force Git to treat a file as text,
-       the `-k` mode will be left blank so CVS clients will
-       treat it as text. If they suppress text conversion, the file
-       will be set with '-kb' mode, which suppresses any newline munging
-       the client might otherwise do. If the attributes do not allow
-       the file type to be determined, then `gitcvs.allBinary` is
-       used. See linkgit:gitattributes[5].
-
-gitcvs.allBinary::
-       This is used if `gitcvs.usecrlfattr` does not resolve
-       the correct '-kb' mode to use. If true, all
-       unresolved files are sent to the client in
-       mode '-kb'. This causes the client to treat them
-       as binary files, which suppresses any newline munging it
-       otherwise might do. Alternatively, if it is set to "guess",
-       then the contents of the file are examined to decide if
-       it is binary, similar to `core.autocrlf`.
-
-gitcvs.dbName::
-       Database used by git-cvsserver to cache revision information
-       derived from the Git repository. The exact meaning depends on the
-       used database driver, for SQLite (which is the default driver) this
-       is a filename. Supports variable substitution (see
-       linkgit:git-cvsserver[1] for details). May not contain semicolons (`;`).
-       Default: '%Ggitcvs.%m.sqlite'
-
-gitcvs.dbDriver::
-       Used Perl DBI driver. You can specify any available driver
-       for this here, but it might not work. git-cvsserver is tested
-       with 'DBD::SQLite', reported to work with 'DBD::Pg', and
-       reported *not* to work with 'DBD::mysql'. Experimental feature.
-       May not contain double colons (`:`). Default: 'SQLite'.
-       See linkgit:git-cvsserver[1].
-
-gitcvs.dbUser, gitcvs.dbPass::
-       Database user and password. Only useful if setting `gitcvs.dbDriver`,
-       since SQLite has no concept of database users and/or passwords.
-       'gitcvs.dbUser' supports variable substitution (see
-       linkgit:git-cvsserver[1] for details).
-
-gitcvs.dbTableNamePrefix::
-       Database table name prefix.  Prepended to the names of any
-       database tables used, allowing a single database to be used
-       for several repositories.  Supports variable substitution (see
-       linkgit:git-cvsserver[1] for details).  Any non-alphabetic
-       characters will be replaced with underscores.
-
-All gitcvs variables except for `gitcvs.usecrlfattr` and
-`gitcvs.allBinary` can also be specified as
-'gitcvs.<access_method>.<varname>' (where 'access_method'
-is one of "ext" and "pserver") to make them apply only for the given
-access method.
+include::gitcvs-config.txt[]
 
 gitweb.category::
 gitweb.description::
@@ -1965,63 +1770,7 @@ gpg.<format>.program::
        be used as a legacy synonym for `gpg.openpgp.program`. The default
        value for `gpg.x509.program` is "gpgsm".
 
-gui.commitMsgWidth::
-       Defines how wide the commit message window is in the
-       linkgit:git-gui[1]. "75" is the default.
-
-gui.diffContext::
-       Specifies how many context lines should be used in calls to diff
-       made by the linkgit:git-gui[1]. The default is "5".
-
-gui.displayUntracked::
-       Determines if linkgit:git-gui[1] shows untracked files
-       in the file list. The default is "true".
-
-gui.encoding::
-       Specifies the default encoding to use for displaying of
-       file contents in linkgit:git-gui[1] and linkgit:gitk[1].
-       It can be overridden by setting the 'encoding' attribute
-       for relevant files (see linkgit:gitattributes[5]).
-       If this option is not set, the tools default to the
-       locale encoding.
-
-gui.matchTrackingBranch::
-       Determines if new branches created with linkgit:git-gui[1] should
-       default to tracking remote branches with matching names or
-       not. Default: "false".
-
-gui.newBranchTemplate::
-       Is used as suggested name when creating new branches using the
-       linkgit:git-gui[1].
-
-gui.pruneDuringFetch::
-       "true" if linkgit:git-gui[1] should prune remote-tracking branches when
-       performing a fetch. The default value is "false".
-
-gui.trustmtime::
-       Determines if linkgit:git-gui[1] should trust the file modification
-       timestamp or not. By default the timestamps are not trusted.
-
-gui.spellingDictionary::
-       Specifies the dictionary used for spell checking commit messages in
-       the linkgit:git-gui[1]. When set to "none" spell checking is turned
-       off.
-
-gui.fastCopyBlame::
-       If true, 'git gui blame' uses `-C` instead of `-C -C` for original
-       location detection. It makes blame significantly faster on huge
-       repositories at the expense of less thorough copy detection.
-
-gui.copyBlameThreshold::
-       Specifies the threshold to use in 'git gui blame' original location
-       detection, measured in alphanumeric characters. See the
-       linkgit:git-blame[1] manual for more information on copy detection.
-
-gui.blamehistoryctx::
-       Specifies the radius of history context in days to show in
-       linkgit:gitk[1] for the selected commit, when the `Show History
-       Context` menu item is invoked from 'git gui blame'. If this
-       variable is set to zero, the whole history is shown.
+include::gui-config.txt[]
 
 guitool.<name>.cmd::
        Specifies the shell command line to execute when the corresponding item
@@ -2671,6 +2420,21 @@ Note that changing the compression level will not automatically recompress
 all existing objects. You can force recompression by passing the -F option
 to linkgit:git-repack[1].
 
+pack.island::
+       An extended regular expression configuring a set of delta
+       islands. See "DELTA ISLANDS" in linkgit:git-pack-objects[1]
+       for details.
+
+pack.islandCore::
+       Specify an island name which gets to have its objects be
+       packed first. This creates a kind of pseudo-pack at the front
+       of one pack, so that the objects from the specified island are
+       hopefully faster to copy into any pack that should be served
+       to a user requesting these objects. In practice this means
+       that the island specified should likely correspond to what is
+       the most commonly cloned in the repo. See also "DELTA ISLANDS"
+       in linkgit:git-pack-objects[1].
+
 pack.deltaCacheSize::
        The maximum memory in bytes used for caching deltas in
        linkgit:git-pack-objects[1] before writing them out to a pack.
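
The pack.island / pack.islandCore pair above targets fork-hosting setups. A rough sketch, assuming forks keep their refs under a refs/virtual/<fork-id>/ namespace; that layout, the fork id "1234", and the repack invocation are illustrative assumptions rather than anything this patch prescribes:

        # One capture group per pattern; each distinct fork id becomes an island.
        git config --add pack.island 'refs/virtual/([0-9]+)/heads/'
        git config --add pack.island 'refs/virtual/([0-9]+)/tags/'
        # Pack the objects of the main project's island first.
        git config pack.islandCore 1234
        # Repack with island partitioning applied.
        git repack -adf --delta-islands
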
@@ -2828,284 +2592,17 @@ protocol.version::
 * `1` - the original wire protocol with the addition of a version string
   in the initial response from the server.
 
---
-
-pull.ff::
-       By default, Git does not create an extra merge commit when merging
-       a commit that is a descendant of the current commit. Instead, the
-       tip of the current branch is fast-forwarded. When set to `false`,
-       this variable tells Git to create an extra merge commit in such
-       a case (equivalent to giving the `--no-ff` option from the command
-       line). When set to `only`, only such fast-forward merges are
-       allowed (equivalent to giving the `--ff-only` option from the
-       command line). This setting overrides `merge.ff` when pulling.
-
-pull.rebase::
-       When true, rebase branches on top of the fetched branch, instead
-       of merging the default branch from the default remote when "git
-       pull" is run. See "branch.<name>.rebase" for setting this on a
-       per-branch basis.
-+
-When `merges`, pass the `--rebase-merges` option to 'git rebase'
-so that the local merge commits are included in the rebase (see
-linkgit:git-rebase[1] for details).
-+
-When preserve, also pass `--preserve-merges` along to 'git rebase'
-so that locally committed merge commits will not be flattened
-by running 'git pull'.
-+
-When the value is `interactive`, the rebase is run in interactive mode.
-+
-*NOTE*: this is a possibly dangerous operation; do *not* use
-it unless you understand the implications (see linkgit:git-rebase[1]
-for details).
-
-pull.octopus::
-       The default merge strategy to use when pulling multiple branches
-       at once.
-
-pull.twohead::
-       The default merge strategy to use when pulling a single branch.
-
-push.default::
-       Defines the action `git push` should take if no refspec is
-       explicitly given.  Different values are well-suited for
-       specific workflows; for instance, in a purely central workflow
-       (i.e. the fetch source is equal to the push destination),
-       `upstream` is probably what you want.  Possible values are:
-+
---
-
-* `nothing` - do not push anything (error out) unless a refspec is
-  explicitly given. This is primarily meant for people who want to
-  avoid mistakes by always being explicit.
-
-* `current` - push the current branch to update a branch with the same
-  name on the receiving end.  Works in both central and non-central
-  workflows.
-
-* `upstream` - push the current branch back to the branch whose
-  changes are usually integrated into the current branch (which is
-  called `@{upstream}`).  This mode only makes sense if you are
-  pushing to the same repository you would normally pull from
-  (i.e. central workflow).
-
-* `tracking` - This is a deprecated synonym for `upstream`.
-
-* `simple` - in centralized workflow, work like `upstream` with an
-  added safety to refuse to push if the upstream branch's name is
-  different from the local one.
-+
-When pushing to a remote that is different from the remote you normally
-pull from, work as `current`.  This is the safest option and is suited
-for beginners.
-+
-This mode has become the default in Git 2.0.
-
-* `matching` - push all branches having the same name on both ends.
-  This makes the repository you are pushing to remember the set of
-  branches that will be pushed out (e.g. if you always push 'maint'
-  and 'master' there and no other branches, the repository you push
-  to will have these two branches, and your local 'maint' and
-  'master' will be pushed there).
-+
-To use this mode effectively, you have to make sure _all_ the
-branches you would push out are ready to be pushed out before
-running 'git push', as the whole point of this mode is to allow you
-to push all of the branches in one go.  If you usually finish work
-on only one branch and push out the result, while other branches are
-unfinished, this mode is not for you.  Also this mode is not
-suitable for pushing into a shared central repository, as other
-people may add new branches there, or update the tip of existing
-branches outside your control.
-+
-This used to be the default, but not since Git 2.0 (`simple` is the
-new default).
+* `2` - link:technical/protocol-v2.html[wire protocol version 2].
 
 --
 
-push.followTags::
-       If set to true enable `--follow-tags` option by default.  You
-       may override this configuration at time of push by specifying
-       `--no-follow-tags`.
-
-push.gpgSign::
-       May be set to a boolean value, or the string 'if-asked'. A true
-       value causes all pushes to be GPG signed, as if `--signed` is
-       passed to linkgit:git-push[1]. The string 'if-asked' causes
-       pushes to be signed if the server supports it, as if
-       `--signed=if-asked` is passed to 'git push'. A false value may
-       override a value from a lower-priority config file. An explicit
-       command-line flag always overrides this config option.
-
-push.pushOption::
-       When no `--push-option=<option>` argument is given from the
-       command line, `git push` behaves as if each <value> of
-       this variable is given as `--push-option=<value>`.
-+
-This is a multi-valued variable, and an empty value can be used in a
-higher priority configuration file (e.g. `.git/config` in a
-repository) to clear the values inherited from a lower priority
-configuration files (e.g. `$HOME/.gitconfig`).
-+
---
+include::pull-config.txt[]
 
-Example:
-
-/etc/gitconfig
-  push.pushoption = a
-  push.pushoption = b
-
-~/.gitconfig
-  push.pushoption = c
-
-repo/.git/config
-  push.pushoption =
-  push.pushoption = b
-
-This will result in only b (a and c are cleared).
-
---
-
-push.recurseSubmodules::
-       Make sure all submodule commits used by the revisions to be pushed
-       are available on a remote-tracking branch. If the value is 'check'
-       then Git will verify that all submodule commits that changed in the
-       revisions to be pushed are available on at least one remote of the
-       submodule. If any commits are missing, the push will be aborted and
-       exit with non-zero status. If the value is 'on-demand' then all
-       submodules that changed in the revisions to be pushed will be
-       pushed. If on-demand was not able to push all necessary revisions
-       it will also be aborted and exit with non-zero status. If the value
-       is 'no' then default behavior of ignoring submodules when pushing
-       is retained. You may override this configuration at time of push by
-       specifying '--recurse-submodules=check|on-demand|no'.
+include::push-config.txt[]
 
 include::rebase-config.txt[]
 
-receive.advertiseAtomic::
-       By default, git-receive-pack will advertise the atomic push
-       capability to its clients. If you don't want to advertise this
-       capability, set this variable to false.
-
-receive.advertisePushOptions::
-       When set to true, git-receive-pack will advertise the push options
-       capability to its clients. False by default.
-
-receive.autogc::
-       By default, git-receive-pack will run "git-gc --auto" after
-       receiving data from git-push and updating refs.  You can stop
-       it by setting this variable to false.
-
-receive.certNonceSeed::
-       By setting this variable to a string, `git receive-pack`
-       will accept a `git push --signed` and verifies it by using
-       a "nonce" protected by HMAC using this string as a secret
-       key.
-
-receive.certNonceSlop::
-       When a `git push --signed` sent a push certificate with a
-       "nonce" that was issued by a receive-pack serving the same
-       repository within this many seconds, export the "nonce"
-       found in the certificate to `GIT_PUSH_CERT_NONCE` to the
-       hooks (instead of what the receive-pack asked the sending
-       side to include).  This may allow writing checks in
-       `pre-receive` and `post-receive` a bit easier.  Instead of
-       checking `GIT_PUSH_CERT_NONCE_SLOP` environment variable
-       that records by how many seconds the nonce is stale to
-       decide if they want to accept the certificate, they only
-       can check `GIT_PUSH_CERT_NONCE_STATUS` is `OK`.
-
-receive.fsckObjects::
-       If it is set to true, git-receive-pack will check all received
-       objects. See `transfer.fsckObjects` for what's checked.
-       Defaults to false. If not set, the value of
-       `transfer.fsckObjects` is used instead.
-
-receive.fsck.<msg-id>::
-       Acts like `fsck.<msg-id>`, but is used by
-       linkgit:git-receive-pack[1] instead of
-       linkgit:git-fsck[1]. See the `fsck.<msg-id>` documentation for
-       details.
-
-receive.fsck.skipList::
-       Acts like `fsck.skipList`, but is used by
-       linkgit:git-receive-pack[1] instead of
-       linkgit:git-fsck[1]. See the `fsck.skipList` documentation for
-       details.
-
-receive.keepAlive::
-       After receiving the pack from the client, `receive-pack` may
-       produce no output (if `--quiet` was specified) while processing
-       the pack, causing some networks to drop the TCP connection.
-       With this option set, if `receive-pack` does not transmit
-       any data in this phase for `receive.keepAlive` seconds, it will
-       send a short keepalive packet.  The default is 5 seconds; set
-       to 0 to disable keepalives entirely.
-
-receive.unpackLimit::
-       If the number of objects received in a push is below this
-       limit then the objects will be unpacked into loose object
-       files. However if the number of received objects equals or
-       exceeds this limit then the received pack will be stored as
-       a pack, after adding any missing delta bases.  Storing the
-       pack from a push can make the push operation complete faster,
-       especially on slow filesystems.  If not set, the value of
-       `transfer.unpackLimit` is used instead.
-
-receive.maxInputSize::
-       If the size of the incoming pack stream is larger than this
-       limit, then git-receive-pack will error out, instead of
-       accepting the pack file. If not set or set to 0, then the size
-       is unlimited.
-
-receive.denyDeletes::
-       If set to true, git-receive-pack will deny a ref update that deletes
-       the ref. Use this to prevent such a ref deletion via a push.
-
-receive.denyDeleteCurrent::
-       If set to true, git-receive-pack will deny a ref update that
-       deletes the currently checked out branch of a non-bare repository.
-
-receive.denyCurrentBranch::
-       If set to true or "refuse", git-receive-pack will deny a ref update
-       to the currently checked out branch of a non-bare repository.
-       Such a push is potentially dangerous because it brings the HEAD
-       out of sync with the index and working tree. If set to "warn",
-       print a warning of such a push to stderr, but allow the push to
-       proceed. If set to false or "ignore", allow such pushes with no
-       message. Defaults to "refuse".
-+
-Another option is "updateInstead" which will update the working
-tree if pushing into the current branch.  This option is
-intended for synchronizing working directories when one side is not easily
-accessible via interactive ssh (e.g. a live web site, hence the requirement
-that the working directory be clean). This mode also comes in handy when
-developing inside a VM to test and fix code on different Operating Systems.
-+
-By default, "updateInstead" will refuse the push if the working tree or
-the index have any difference from the HEAD, but the `push-to-checkout`
-hook can be used to customize this.  See linkgit:githooks[5].
-
-receive.denyNonFastForwards::
-       If set to true, git-receive-pack will deny a ref update which is
-       not a fast-forward. Use this to prevent such an update via a push,
-       even if that push is forced. This configuration variable is
-       set when initializing a shared repository.
-
-receive.hideRefs::
-       This variable is the same as `transfer.hideRefs`, but applies
-       only to `receive-pack` (and so affects pushes, but not fetches).
-       An attempt to update or delete a hidden ref by `git push` is
-       rejected.
-
-receive.updateServerInfo::
-       If set to true, git-receive-pack will run git-update-server-info
-       after receiving data from git-push and updating refs.
-
-receive.shallowUpdate::
-       If set to true, .git/shallow can be updated when new refs
-       require new shallow roots. Otherwise those refs are rejected.
+include::receive-config.txt[]
 
 remote.pushDefault::
        The remote to push to by default.  Overrides
@@ -3205,6 +2702,10 @@ repack.packKeptObjects::
        index is being written (either via `--write-bitmap-index` or
        `repack.writeBitmaps`).
 
+repack.useDeltaIslands::
+       If set to true, makes `git repack` act as if `--delta-islands`
+       was passed. Defaults to `false`.
+
 repack.writeBitmaps::
        When true, git will write a bitmap index when packing all
        objects to disk (e.g., when `git repack -a` is run).  This
@@ -3227,71 +2728,15 @@ rerere.enabled::
        `$GIT_DIR`, e.g. if "rerere" was previously used in the
        repository.
 
-sendemail.identity::
-       A configuration identity. When given, causes values in the
-       'sendemail.<identity>' subsection to take precedence over
-       values in the 'sendemail' section. The default identity is
-       the value of `sendemail.identity`.
-
-sendemail.smtpEncryption::
-       See linkgit:git-send-email[1] for description.  Note that this
-       setting is not subject to the 'identity' mechanism.
-
-sendemail.smtpssl (deprecated)::
-       Deprecated alias for 'sendemail.smtpEncryption = ssl'.
-
-sendemail.smtpsslcertpath::
-       Path to ca-certificates (either a directory or a single file).
-       Set it to an empty string to disable certificate verification.
-
-sendemail.<identity>.*::
-       Identity-specific versions of the 'sendemail.*' parameters
-       found below, taking precedence over those when this
-       identity is selected, through either the command-line or
-       `sendemail.identity`.
-
-sendemail.aliasesFile::
-sendemail.aliasFileType::
-sendemail.annotate::
-sendemail.bcc::
-sendemail.cc::
-sendemail.ccCmd::
-sendemail.chainReplyTo::
-sendemail.confirm::
-sendemail.envelopeSender::
-sendemail.from::
-sendemail.multiEdit::
-sendemail.signedoffbycc::
-sendemail.smtpPass::
-sendemail.suppresscc::
-sendemail.suppressFrom::
-sendemail.to::
-sendemail.tocmd::
-sendemail.smtpDomain::
-sendemail.smtpServer::
-sendemail.smtpServerPort::
-sendemail.smtpServerOption::
-sendemail.smtpUser::
-sendemail.thread::
-sendemail.transferEncoding::
-sendemail.validate::
-sendemail.xmailer::
-       See linkgit:git-send-email[1] for description.
-
-sendemail.signedoffcc (deprecated)::
-       Deprecated alias for `sendemail.signedoffbycc`.
-
-sendemail.smtpBatchSize::
-       Number of messages to be sent per connection, after that a relogin
-       will happen.  If the value is 0 or undefined, send all messages in
-       one connection.
-       See also the `--batch-size` option of linkgit:git-send-email[1].
-
-sendemail.smtpReloginDelay::
-       Seconds wait before reconnecting to smtp server.
-       See also the `--relogin-delay` option of linkgit:git-send-email[1].
-
-showbranch.default::
+include::sendemail-config.txt[]
+
+sequence.editor::
+       Text editor used by `git rebase -i` for editing the rebase instruction file.
+       The value is meant to be interpreted by the shell when it is used.
+       It can be overridden by the `GIT_SEQUENCE_EDITOR` environment variable.
+       When not configured the default commit message editor is used instead.
+
+showBranch.default::
        The default set of branches for linkgit:git-show-branch[1].
        See linkgit:git-show-branch[1].
 
@@ -3403,88 +2848,7 @@ stash.showStat::
        option will show diffstat of the stash entry.  Defaults to true.
        See description of 'show' command in linkgit:git-stash[1].
 
-submodule.<name>.url::
-       The URL for a submodule. This variable is copied from the .gitmodules
-       file to the git config via 'git submodule init'. The user can change
-       the configured URL before obtaining the submodule via 'git submodule
-       update'. If neither submodule.<name>.active or submodule.active are
-       set, the presence of this variable is used as a fallback to indicate
-       whether the submodule is of interest to git commands.
-       See linkgit:git-submodule[1] and linkgit:gitmodules[5] for details.
-
-submodule.<name>.update::
-       The method by which a submodule is updated by 'git submodule update',
-       which is the only affected command, others such as
-       'git checkout --recurse-submodules' are unaffected. It exists for
-       historical reasons, when 'git submodule' was the only command to
-       interact with submodules; settings like `submodule.active`
-       and `pull.rebase` are more specific. It is populated by
-       `git submodule init` from the linkgit:gitmodules[5] file.
-       See description of 'update' command in linkgit:git-submodule[1].
-
-submodule.<name>.branch::
-       The remote branch name for a submodule, used by `git submodule
-       update --remote`.  Set this option to override the value found in
-       the `.gitmodules` file.  See linkgit:git-submodule[1] and
-       linkgit:gitmodules[5] for details.
-
-submodule.<name>.fetchRecurseSubmodules::
-       This option can be used to control recursive fetching of this
-       submodule. It can be overridden by using the --[no-]recurse-submodules
-       command-line option to "git fetch" and "git pull".
-       This setting will override that from in the linkgit:gitmodules[5]
-       file.
-
-submodule.<name>.ignore::
-       Defines under what circumstances "git status" and the diff family show
-       a submodule as modified. When set to "all", it will never be considered
-       modified (but it will nonetheless show up in the output of status and
-       commit when it has been staged), "dirty" will ignore all changes
-       to the submodules work tree and
-       takes only differences between the HEAD of the submodule and the commit
-       recorded in the superproject into account. "untracked" will additionally
-       let submodules with modified tracked files in their work tree show up.
-       Using "none" (the default when this option is not set) also shows
-       submodules that have untracked files in their work tree as changed.
-       This setting overrides any setting made in .gitmodules for this submodule,
-       both settings can be overridden on the command line by using the
-       "--ignore-submodules" option. The 'git submodule' commands are not
-       affected by this setting.
-
-submodule.<name>.active::
-       Boolean value indicating if the submodule is of interest to git
-       commands.  This config option takes precedence over the
-       submodule.active config option. See linkgit:gitsubmodules[7] for
-       details.
-
-submodule.active::
-       A repeated field which contains a pathspec used to match against a
-       submodule's path to determine if the submodule is of interest to git
-       commands. See linkgit:gitsubmodules[7] for details.
-
-submodule.recurse::
-       Specifies if commands recurse into submodules by default. This
-       applies to all commands that have a `--recurse-submodules` option,
-       except `clone`.
-       Defaults to false.
-
-submodule.fetchJobs::
-       Specifies how many submodules are fetched/cloned at the same time.
-       A positive integer allows up to that number of submodules fetched
-       in parallel. A value of 0 will give some reasonable default.
-       If unset, it defaults to 1.
-
-submodule.alternateLocation::
-       Specifies how the submodules obtain alternates when submodules are
-       cloned. Possible values are `no`, `superproject`.
-       By default `no` is assumed, which doesn't add references. When the
-       value is set to `superproject` the submodule to be cloned computes
-       its alternates location relative to the superprojects alternate.
-
-submodule.alternateErrorStrategy::
-       Specifies how to treat errors with the alternates for a submodule
-       as computed via `submodule.alternateLocation`. Possible values are
-       `ignore`, `info`, `die`. Default is `die`.
+include::submodule-config.txt[]
 
 tag.forceSignAnnotated::
        A boolean to specify whether annotated tags created should be GPG signed.
index f483fe427cea4c82ebe69fc67f3a8efd3d53ec9c..dfd9418778a80a209e8ceb43aa94dbb4259f91db 100755 (executable)
@@ -1,21 +1,34 @@
 #!/bin/sh
+#
+# Build two documentation trees and diff the resulting formatted output.
+# Compared to a source diff, this can reveal mistakes in the formatting.
+# For example:
+#
+#   ./doc-diff origin/master HEAD
+#
+# would show the differences introduced by a branch based on master.
 
 OPTIONS_SPEC="\
 doc-diff [options] <from> <to> [-- <diff-options>]
+doc-diff (-c|--clean)
 --
 j=n    parallel argument to pass to make
 f      force rebuild; do not rely on cached results
+c,clean        cleanup temporary working files
 "
 SUBDIRECTORY_OK=1
 . "$(git --exec-path)/git-sh-setup"
 
 parallel=
 force=
+clean=
 while test $# -gt 0
 do
        case "$1" in
        -j)
                parallel=$2; shift ;;
+       -c|--clean)
+               clean=t ;;
        -f)
                force=t ;;
        --)
@@ -26,6 +39,17 @@ do
        shift
 done
 
+cd_to_toplevel
+tmp=Documentation/tmp-doc-diff
+
+if test -n "$clean"
+then
+       test $# -eq 0 || usage
+       git worktree remove --force "$tmp/worktree" 2>/dev/null
+       rm -rf "$tmp"
+       exit 0
+fi
+
 if test -z "$parallel"
 then
        parallel=$(getconf _NPROCESSORS_ONLN 2>/dev/null)
@@ -42,9 +66,6 @@ to=$1; shift
 from_oid=$(git rev-parse --verify "$from") || exit 1
 to_oid=$(git rev-parse --verify "$to") || exit 1
 
-cd_to_toplevel
-tmp=Documentation/tmp-doc-diff
-
 if test -n "$force"
 then
        rm -rf "$tmp"
@@ -54,7 +75,7 @@ fi
 # results that don't differ between the two trees.
 if ! test -d "$tmp/worktree"
 then
-       git worktree add --detach "$tmp/worktree" "$from" &&
+       git worktree add -f --detach "$tmp/worktree" "$from" &&
        dots=$(echo "$tmp/worktree" | sed 's#[^/]*#..#g') &&
        ln -s "$dots/config.mak" "$tmp/worktree/config.mak"
 fi
@@ -69,12 +90,12 @@ generate_render_makefile () {
                printf '%s: %s\n' "$dst" "$src"
                printf '\t@echo >&2 "  RENDER $(notdir $@)" && \\\n'
                printf '\tmkdir -p $(dir $@) && \\\n'
-               printf '\tMANWIDTH=80 man -l $< >$@+ && \\\n'
+               printf '\tMANWIDTH=80 man $< >$@+ && \\\n'
                printf '\tmv $@+ $@\n'
        done
 }
 
-# render_tree <dirname> <committish>
+# render_tree <committish_oid>
 render_tree () {
        # Skip install-man entirely if we already have an installed directory.
        # We can't rely on make here, since "install-man" unconditionally
@@ -84,7 +105,7 @@ render_tree () {
        # through.
        if ! test -d "$tmp/installed/$1"
        then
-               git -C "$tmp/worktree" checkout "$2" &&
+               git -C "$tmp/worktree" checkout --detach "$1" &&
                make -j$parallel -C "$tmp/worktree" \
                        GIT_VERSION=omitted \
                        SOURCE_DATE_EPOCH=0 \
@@ -104,6 +125,6 @@ render_tree () {
        fi
 }
 
-render_tree $from_oid "$from" &&
-render_tree $to_oid "$to" &&
+render_tree $from_oid &&
+render_tree $to_oid &&
 git -C $tmp/rendered diff --no-index "$@" $from_oid $to_oid
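For illustration, a minimal usage sketch of the updated script, following its own header comment and OPTIONS_SPEC (branch names and the job count are placeholders):

------------
# show rendered-manpage differences introduced by a branch based on master
./doc-diff origin/master HEAD

# pass extra diff options through after the double dash
./doc-diff -j 8 origin/master HEAD -- --stat

# afterwards, remove the temporary worktree and rendered output
./doc-diff --clean
------------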
diff --git a/Documentation/fetch-config.txt b/Documentation/fetch-config.txt
new file mode 100644 (file)
index 0000000..cbfad6c
--- /dev/null
@@ -0,0 +1,65 @@
+fetch.recurseSubmodules::
+       This option can be either set to a boolean value or to 'on-demand'.
+       Setting it to a boolean changes the behavior of fetch and pull to
+       unconditionally recurse into submodules when set to true or to not
+       recurse at all when set to false. When set to 'on-demand' (the default
+       value), fetch and pull will only recurse into a populated submodule
+       when its superproject retrieves a commit that updates the submodule's
+       reference.
+
+fetch.fsckObjects::
+       If it is set to true, git-fetch-pack will check all fetched
+       objects. See `transfer.fsckObjects` for what's
+       checked. Defaults to false. If not set, the value of
+       `transfer.fsckObjects` is used instead.
+
+fetch.fsck.<msg-id>::
+       Acts like `fsck.<msg-id>`, but is used by
+       linkgit:git-fetch-pack[1] instead of linkgit:git-fsck[1]. See
+       the `fsck.<msg-id>` documentation for details.
+
+fetch.fsck.skipList::
+       Acts like `fsck.skipList`, but is used by
+       linkgit:git-fetch-pack[1] instead of linkgit:git-fsck[1]. See
+       the `fsck.skipList` documentation for details.
+
+fetch.unpackLimit::
+       If the number of objects fetched over the Git native
+       transfer is below this
+       limit, then the objects will be unpacked into loose object
+       files. However if the number of received objects equals or
+       exceeds this limit then the received pack will be stored as
+       a pack, after adding any missing delta bases.  Storing the
+       pack can make the fetch operation complete faster,
+       especially on slow filesystems.  If not set, the value of
+       `transfer.unpackLimit` is used instead.
+
+fetch.prune::
+       If true, fetch will automatically behave as if the `--prune`
+       option was given on the command line.  See also `remote.<name>.prune`
+       and the PRUNING section of linkgit:git-fetch[1].
+
+fetch.pruneTags::
+       If true, fetch will automatically behave as if the
+       `refs/tags/*:refs/tags/*` refspec was provided when pruning,
+       if not set already. This allows for setting both this option
+       and `fetch.prune` to maintain a 1=1 mapping to upstream
+       refs. See also `remote.<name>.pruneTags` and the PRUNING
+       section of linkgit:git-fetch[1].
+
+fetch.output::
+       Control how ref update status is printed. Valid values are
+       `full` and `compact`. Default value is `full`. See section
+       OUTPUT in linkgit:git-fetch[1] for detail.
+
+fetch.negotiationAlgorithm::
+       Control how information about the commits in the local repository is
+       sent when negotiating the contents of the packfile to be sent by the
+       server. Set to "skipping" to use an algorithm that skips commits in an
+       effort to converge faster, but may result in a larger-than-necessary
+       packfile. The default is "default", which instructs Git to use the default algorithm
+       that never skips commits (unless the server has acknowledged it or one
+       of its descendants).
+       Unknown values will cause 'git fetch' to error out.
++
+See also the `--negotiation-tip` option for linkgit:git-fetch[1].
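As a quick, illustrative sketch of how a few of the options above might be set (the values are examples, not recommendations):

------------
$ git config fetch.prune true
$ git config fetch.output compact
$ git config fetch.negotiationAlgorithm skipping
------------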
index 8bc36af4b1b10375c81c6f700ea300cee2295cc1..fa0a3151b3f7e96ee61e65669e057e48ee053aae 100644 (file)
@@ -68,11 +68,16 @@ endif::git-pull[]
 
 -f::
 --force::
-       When 'git fetch' is used with `<rbranch>:<lbranch>`
-       refspec, it refuses to update the local branch
-       `<lbranch>` unless the remote branch `<rbranch>` it
-       fetches is a descendant of `<lbranch>`.  This option
-       overrides that check.
+       When 'git fetch' is used with `<src>:<dst>` refspec it may
+       refuse to update the local branch as discussed
+ifdef::git-pull[]
+       in the `<refspec>` part of the linkgit:git-fetch[1]
+       documentation.
+endif::git-pull[]
+ifndef::git-pull[]
+       in the `<refspec>` part below.
+endif::git-pull[]
+       This option overrides that check.
 
 -k::
 --keep::
diff --git a/Documentation/format-config.txt b/Documentation/format-config.txt
new file mode 100644 (file)
index 0000000..dc77941
--- /dev/null
@@ -0,0 +1,87 @@
+format.attach::
+       Enable multipart/mixed attachments as the default for
+       'format-patch'.  The value can also be a double quoted string
+       which will enable attachments as the default and set the
+       value as the boundary.  See the --attach option in
+       linkgit:git-format-patch[1].
+
+format.from::
+       Provides the default value for the `--from` option to format-patch.
+       Accepts a boolean value, or a name and email address.  If false,
+       format-patch defaults to `--no-from`, using commit authors directly in
+       the "From:" field of patch mails.  If true, format-patch defaults to
+       `--from`, using your committer identity in the "From:" field of patch
+       mails and including a "From:" field in the body of the patch mail if
+       different.  If set to a non-boolean value, format-patch uses that
+       value instead of your committer identity.  Defaults to false.
+
+format.numbered::
+       A boolean which can enable or disable sequence numbers in patch
+       subjects.  It defaults to "auto" which enables it only if there
+       is more than one patch.  It can be enabled or disabled for all
+       messages by setting it to "true" or "false".  See --numbered
+       option in linkgit:git-format-patch[1].
+
+format.headers::
+       Additional email headers to include in a patch to be submitted
+       by mail.  See linkgit:git-format-patch[1].
+
+format.to::
+format.cc::
+       Additional recipients to include in a patch to be submitted
+       by mail.  See the --to and --cc options in
+       linkgit:git-format-patch[1].
+
+format.subjectPrefix::
+       The default for format-patch is to output files with the '[PATCH]'
+       subject prefix. Use this variable to change that prefix.
+
+format.signature::
+       The default for format-patch is to output a signature containing
+       the Git version number. Use this variable to change that default.
+       Set this variable to the empty string ("") to suppress
+       signature generation.
+
+format.signatureFile::
+       Works just like format.signature except the contents of the
+       file specified by this variable will be used as the signature.
+
+format.suffix::
+       The default for format-patch is to output files with the suffix
+       `.patch`. Use this variable to change that suffix (make sure to
+       include the dot if you want it).
+
+format.pretty::
+       The default pretty format for the log/show/whatchanged commands.
+       See linkgit:git-log[1], linkgit:git-show[1],
+       linkgit:git-whatchanged[1].
+
+format.thread::
+       The default threading style for 'git format-patch'.  Can be
+       a boolean value, or `shallow` or `deep`.  `shallow` threading
+       makes every mail a reply to the head of the series,
+       where the head is chosen from the cover letter, the
+       `--in-reply-to`, and the first patch mail, in this order.
+       `deep` threading makes every mail a reply to the previous one.
+       A true boolean value is the same as `shallow`, and a false
+       value disables threading.
+
+format.signOff::
+       A boolean value which lets you enable the `-s/--signoff` option of
+       format-patch by default. *Note:* Adding the Signed-off-by: line to a
+       patch should be a conscious act and means that you certify you have
+       the rights to submit this work under the same open source license.
+       Please see the 'SubmittingPatches' document for further discussion.
+
+format.coverLetter::
+       A boolean that controls whether to generate a cover-letter when
+       format-patch is invoked, but in addition can be set to "auto", to
+       generate a cover-letter only when there's more than one patch.
+
+format.outputDirectory::
+       Set a custom directory to store the resulting files instead of the
+       current working directory.
+
+format.useAutoBase::
+       A boolean value which lets you enable the `--base=auto` option of
+       format-patch by default.
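For illustration, a handful of the options above set from the command line (the output directory is a placeholder):

------------
$ git config format.numbered auto
$ git config format.signOff true
$ git config format.coverLetter auto
$ git config format.outputDirectory patches/
------------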
index ea7065336920756d03329475472016391cec83dc..a595a0ffeee56d8426907c9297078fac6977e5a3 100644 (file)
@@ -3,7 +3,7 @@ git-archimport(1)
 
 NAME
 ----
-git-archimport - Import an Arch repository into Git
+git-archimport - Import a GNU Arch repository into Git
 
 
 SYNOPSIS
@@ -14,7 +14,8 @@ SYNOPSIS
 
 DESCRIPTION
 -----------
-Imports a project from one or more Arch repositories. It will follow branches
+Imports a project from one or more GNU Arch repositories.
+It will follow branches
 and repositories within the namespaces defined by the <archive/branch>
 parameters supplied. If it cannot find the remote branch a merge comes from
 it will just import it as a regular commit. If it can find it, it will mark it
index 9767b2b483dbe634db1a700e4b3529593f8189ec..bf5316ffa929a88aa98f5b3c7a892ee43638a098 100644 (file)
@@ -14,7 +14,7 @@ SYNOPSIS
        [(--merged | --no-merged) [<commit>]]
        [--contains [<commit]] [--no-contains [<commit>]]
        [--points-at <object>] [--format=<format>] [<pattern>...]
-'git branch' [--track | --no-track] [-l] [-f] <branchname> [<start-point>]
+'git branch' [--track | --no-track] [-f] <branchname> [<start-point>]
 'git branch' (--set-upstream-to=<upstream> | -u <upstream>) [<branchname>]
 'git branch' --unset-upstream [<branchname>]
 'git branch' (-m | -M) [<oldbranch>] <newbranch>
@@ -100,8 +100,6 @@ OPTIONS
        The negated form `--no-create-reflog` only overrides an earlier
        `--create-reflog`, but currently does not negate the setting of
        `core.logAllRefUpdates`.
-+
-The `-l` option is a deprecated synonym for `--create-reflog`.
 
 -f::
 --force::
@@ -156,14 +154,11 @@ This option is only applicable in non-verbose mode.
 --all::
        List both remote-tracking branches and local branches.
 
+-l::
 --list::
        List branches.  With optional `<pattern>...`, e.g. `git
        branch --list 'maint-*'`, list only the branches that match
        the pattern(s).
-+
-This should not be confused with `git branch -l <branchname>`,
-which creates a branch named `<branchname>` with a reflog.
-See `--create-reflog` above for details.
 
 -v::
 -vv::
index 03d18465d4f2cc8327362917d1c808e523333d11..763afabb6dc7487009ab49f5bb258f9181f0da78 100644 (file)
@@ -13,7 +13,10 @@ SYNOPSIS
 
 DESCRIPTION
 -----------
-This command formats its input into multiple columns.
+This command formats the lines of its standard input into a table with
+multiple columns. Each input line occupies one cell of the table. It
+is used internally by other git commands to format output into
+columns.
 
 OPTIONS
 -------
@@ -23,7 +26,7 @@ OPTIONS
 
 --mode=<mode>::
        Specify layout mode. See configuration variable column.ui for option
-       syntax.
+       syntax in linkgit:git-config[1].
 
 --raw-mode=<n>::
        Same as --mode but take mode encoded as a number. This is mainly used
@@ -43,6 +46,34 @@ OPTIONS
 --padding=<N>::
        The number of spaces between columns. One space by default.
 
+EXAMPLES
+--------
+
+Format data by columns:
+------------
+$ seq 1 24 | git column --mode=column --padding=5
+1      4      7      10     13     16     19     22
+2      5      8      11     14     17     20     23
+3      6      9      12     15     18     21     24
+------------
+
+Format data by rows:
+------------
+$ seq 1 21 | git column --mode=row --padding=5
+1      2      3      4      5      6      7
+8      9      10     11     12     13     14
+15     16     17     18     19     20     21
+------------
+
+List some tags in a table with unequal column widths:
+------------
+$ git tag --list 'v2.4.*' --column=row,dense
+v2.4.0  v2.4.0-rc0  v2.4.0-rc1  v2.4.0-rc2  v2.4.0-rc3
+v2.4.1  v2.4.10     v2.4.11     v2.4.12     v2.4.2
+v2.4.3  v2.4.4      v2.4.5      v2.4.6      v2.4.7
+v2.4.8  v2.4.9
+------------
+
 GIT
 ---
 Part of the linkgit:git[1] suite
index 8e240435bee8fb9599fc979a5dab48742d7e1e1d..5e87d82933e77aec1e9f8a715b264d72521e8bc9 100644 (file)
@@ -188,8 +188,8 @@ Valid `<type>`'s include:
 --bool-or-int::
 --path::
 --expiry-date::
-  Historical options for selecting a type specifier. Prefer instead `--type`,
-  (see: above).
+  Historical options for selecting a type specifier. Prefer instead `--type`
+  (see above).
 
 --no-type::
   Un-sets the previously set type specifier (if one was previously set). This
@@ -442,9 +442,9 @@ For URLs in `https://weak.example.com`, `http.sslVerify` is set to
 false, while it is set to `true` for all others:
 
 ------------
-% git config --bool --get-urlmatch http.sslverify https://good.example.com
+% git config --type=bool --get-urlmatch http.sslverify https://good.example.com
 true
-% git config --bool --get-urlmatch http.sslverify https://weak.example.com
+% git config --type=bool --get-urlmatch http.sslverify https://weak.example.com
 false
 % git config --get-urlmatch http https://weak.example.com
 http.cookieFile /tmp/cookie.txt
index e027fb8c4b8269bb2266ca98f87c9c18ca45fad3..ccdc5f83d6dcd297f1e4922b27b904f00d6f4721 100644 (file)
@@ -18,7 +18,9 @@ The command finds the most recent tag that is reachable from a
 commit.  If the tag points to the commit, then only the tag is
 shown.  Otherwise, it suffixes the tag name with the number of
 additional commits on top of the tagged object and the
-abbreviated object name of the most recent commit.
+abbreviated object name of the most recent commit. The result
+is a "human-readable" object name which can also be used to
+identify the commit to other git commands.
 
 By default (without --all or --tags) `git describe` only shows
 annotated tags.  For more information about creating annotated tags
index b41e1329a7d8439762790e663ac52ec5f487bc8b..aba4c5febeb7ef4248d50a638d0a8bd58f7d553b 100644 (file)
@@ -23,6 +23,8 @@ SYNOPSIS
                   [(--reroll-count|-v) <n>]
                   [--to=<email>] [--cc=<email>]
                   [--[no-]cover-letter] [--quiet] [--notes[=<ref>]]
+                  [--interdiff=<previous>]
+                  [--range-diff=<previous> [--creation-factor=<percent>]]
                   [--progress]
                   [<common diff options>]
                   [ <since> | <revision range> ]
@@ -228,6 +230,33 @@ feeding the result to `git send-email`.
        containing the branch description, shortlog and the overall diffstat.  You can
        fill in a description in the file before sending it out.
 
+--interdiff=<previous>::
+       As a reviewer aid, insert an interdiff into the cover letter,
+       or as commentary of the lone patch of a 1-patch series, showing
+       the differences between the previous version of the patch series and
+       the series currently being formatted. `previous` is a single revision
+       naming the tip of the previous series which shares a common base with
+       the series being formatted (for example `git format-patch
+       --cover-letter --interdiff=feature/v1 -3 feature/v2`).
+
+--range-diff=<previous>::
+       As a reviewer aid, insert a range-diff (see linkgit:git-range-diff[1])
+       into the cover letter, or as commentary of the lone patch of a
+       1-patch series, showing the differences between the previous
+       version of the patch series and the series currently being formatted.
+       `previous` can be a single revision naming the tip of the previous
+       series if it shares a common base with the series being formatted (for
+       example `git format-patch --cover-letter --range-diff=feature/v1 -3
+       feature/v2`), or a revision range if the two versions of the series are
+       disjoint (for example `git format-patch --cover-letter
+       --range-diff=feature/v1~3..feature/v1 -3 feature/v2`).
+
+--creation-factor=<percent>::
+       Used with `--range-diff`, tweak the heuristic which matches up commits
+       between the previous and current series of patches by adjusting the
+       creation/deletion cost fudge factor. See linkgit:git-range-diff[1]
+       for details.
+
 --notes[=<ref>]::
        Append the notes (see linkgit:git-notes[1]) for the commit
        after the three-dash line.
index b8fafb1e8bdd6c3e061b88b227e1ef264bbfbd07..a5e8b36f62bcf5eeedfb5a04ac852c476d9f43f3 100644 (file)
@@ -56,8 +56,9 @@ least one Git-generated or user-configured trailer and consists of at
 least 25% trailers.
 The group must be preceded by one or more empty (or whitespace-only) lines.
 The group must either be at the end of the message or be the last
-non-whitespace lines before a line that starts with '---'. Such three
-minus signs start the patch part of the message.
+non-whitespace lines before a line that starts with '---' (followed by a
+space or the end of the line). Such three minus signs start the patch
+part of the message. See also `--no-divider` below.
 
 When reading trailers, there can be whitespaces after the
 token, the separator and the value. There can also be whitespaces
@@ -125,6 +126,11 @@ OPTIONS
        A convenience alias for `--only-trailers --only-input
        --unfold`.
 
+--no-divider::
+       Do not treat `---` as the end of the commit message. Use this
+       when you know your input contains just the commit message itself
+       (and not an email or the output of `git format-patch`).
+
 CONFIGURATION VARIABLES
 -----------------------
 
diff --git a/Documentation/git-multi-pack-index.txt b/Documentation/git-multi-pack-index.txt
new file mode 100644 (file)
index 0000000..f7778a2
--- /dev/null
@@ -0,0 +1,66 @@
+git-multi-pack-index(1)
+=======================
+
+NAME
+----
+git-multi-pack-index - Write and verify multi-pack-indexes
+
+
+SYNOPSIS
+--------
+[verse]
+'git multi-pack-index' [--object-dir=<dir>] <verb>
+
+DESCRIPTION
+-----------
+Write or verify a multi-pack-index (MIDX) file.
+
+OPTIONS
+-------
+
+--object-dir=<dir>::
+       Use given directory for the location of Git objects. We check
+       `<dir>/packs/multi-pack-index` for the current MIDX file, and
+       `<dir>/packs` for the pack-files to index.
+
+write::
+       When given as the verb, write a new MIDX file to
+       `<dir>/packs/multi-pack-index`.
+
+verify::
+       When given as the verb, verify the contents of the MIDX file
+       at `<dir>/packs/multi-pack-index`.
+
+
+EXAMPLES
+--------
+
+* Write a MIDX file for the packfiles in the current .git folder.
++
+-----------------------------------------------
+$ git multi-pack-index write
+-----------------------------------------------
+
+* Write a MIDX file for the packfiles in an alternate object store.
++
+-----------------------------------------------
+$ git multi-pack-index --object-dir <alt> write
+-----------------------------------------------
+
+* Verify the MIDX file for the packfiles in the current .git folder.
++
+-----------------------------------------------
+$ git multi-pack-index verify
+-----------------------------------------------
+
+
+SEE ALSO
+--------
+See link:technical/multi-pack-index.html[The Multi-Pack-Index Design
+Document] and link:technical/pack-format.html[The Multi-Pack-Index
+Format] for more information on the multi-pack-index feature.
+
+
+GIT
+---
+Part of the linkgit:git[1] suite
index d95b472d16828b2bea304727e7c5daaa2b75ae89..40c825c38197f4e335ebfb162415cdcc52bbdf1e 100644 (file)
@@ -289,6 +289,103 @@ Unexpected missing object will raise an error.
 --unpack-unreachable::
        Keep unreachable objects in loose form. This implies `--revs`.
 
+--delta-islands::
+       Restrict delta matches based on "islands". See DELTA ISLANDS
+       below.
+
+
+DELTA ISLANDS
+-------------
+
+When possible, `pack-objects` tries to reuse existing on-disk deltas to
+avoid having to search for new ones on the fly. This is an important
+optimization for serving fetches, because it means the server can avoid
+inflating most objects at all and just send the bytes directly from
+disk. This optimization can't work when an object is stored as a delta
+against a base which the receiver does not have (and which we are not
+already sending). In that case the server "breaks" the delta and has to
+find a new one, which has a high CPU cost. Therefore it's important for
+performance that the set of objects in on-disk delta relationships match
+what a client would fetch.
+
+In a normal repository, this tends to work automatically. The objects
+are mostly reachable from the branches and tags, and that's what clients
+fetch. Any deltas we find on the server are likely to be between objects
+the client has or will have.
+
+But in some repository setups, you may have several related but separate
+groups of ref tips, with clients tending to fetch those groups
+independently. For example, imagine that you are hosting several "forks"
+of a repository in a single shared object store, and letting clients
+view them as separate repositories through `GIT_NAMESPACE` or separate
+repos using the alternates mechanism. A naive repack may find that the
+optimal delta for an object is against a base that is only found in
+another fork. But when a client fetches, they will not have the base
+object, and we'll have to find a new delta on the fly.
+
+A similar situation may exist if you have many refs outside of
+`refs/heads/` and `refs/tags/` that point to related objects (e.g.,
+`refs/pull` or `refs/changes` used by some hosting providers). By
+default, clients fetch only heads and tags, and deltas against objects
+found only in those other groups cannot be sent as-is.
+
+Delta islands solve this problem by allowing you to group your refs into
+distinct "islands". Pack-objects computes which objects are reachable
+from which islands, and refuses to make a delta from an object `A`
+against a base which is not present in all of `A`'s islands. This
+results in slightly larger packs (because we miss some delta
+opportunities), but guarantees that a fetch of one island will not have
+to recompute deltas on the fly due to crossing island boundaries.
+
+When repacking with delta islands the delta window tends to get
+clogged with candidates that are forbidden by the config. Repacking
+with a big --window helps (and doesn't take as long as it otherwise
+might because we can reject some object pairs based on islands before
+doing any computation on the content).
+
+Islands are configured via the `pack.island` option, which can be
+specified multiple times. Each value is a left-anchored regular
+expression matching refnames. For example:
+
+-------------------------------------------
+[pack]
+island = refs/heads/
+island = refs/tags/
+-------------------------------------------
+
+puts heads and tags into an island (whose name is the empty string; see
+below for more on naming). Any refs which do not match those regular
+expressions (e.g., `refs/pull/123`) is not in any island. Any object
+which is reachable only from `refs/pull/` (but not heads or tags) is
+therefore not a candidate to be used as a base for `refs/heads/`.
+
+Refs are grouped into islands based on their "names", and two regexes
+that produce the same name are considered to be in the same
+island. The names are computed from the regexes by concatenating any
+capture groups from the regex, with a '-' dash in between. (And if
+there are no capture groups, then the name is the empty string, as in
+the above example.) This allows you to create arbitrary numbers of
+islands. Only up to 14 such capture groups are supported though.
+
+For example, imagine you store the refs for each fork in
+`refs/virtual/ID`, where `ID` is a numeric identifier. You might then
+configure:
+
+-------------------------------------------
+[pack]
+island = refs/virtual/([0-9]+)/heads/
+island = refs/virtual/([0-9]+)/tags/
+island = refs/virtual/([0-9]+)/(pull)/
+-------------------------------------------
+
+That puts the heads and tags for each fork in their own island (named
+"1234" or similar), and the pull refs for each go into their own
+"1234-pull".
+
+Note that we pick a single island for each regex to go into, using "last
+one wins" ordering (which allows repo-specific config to take precedence
+over user-wide config, and so forth).
+
 SEE ALSO
 --------
 linkgit:git-rev-list[1]
index 55277a97811fa6933c76c1bd2c96114672f6d5cd..a5fc54aeabccbeac32c73a610dbee45bfb5b0746 100644 (file)
@@ -74,22 +74,57 @@ without any `<refspec>` on the command line.  Otherwise, missing
 `:<dst>` means to update the same ref as the `<src>`.
 +
 The object referenced by <src> is used to update the <dst> reference
-on the remote side.  By default this is only allowed if <dst> is not
-a tag (annotated or lightweight), and then only if it can fast-forward
-<dst>.  By having the optional leading `+`, you can tell Git to update
-the <dst> ref even if it is not allowed by default (e.g., it is not a
-fast-forward.)  This does *not* attempt to merge <src> into <dst>.  See
-EXAMPLES below for details.
-+
-`tag <tag>` means the same as `refs/tags/<tag>:refs/tags/<tag>`.
-+
-Pushing an empty <src> allows you to delete the <dst> ref from
-the remote repository.
+on the remote side. Whether this is allowed depends on where in
+`refs/*` the <dst> reference lives, as described in detail below. In
+those sections, "update" means any modification other than a delete;
+deletes, as noted after the next few sections, are treated differently.
++
+The `refs/heads/*` namespace will only accept commit objects, and
+updates only if they can be fast-forwarded.
++
+The `refs/tags/*` namespace will accept any kind of object (as
+commits, trees and blobs can be tagged), and any updates to them will
+be rejected.
++
+It's possible to push any type of object to any namespace outside of
+`refs/{tags,heads}/*`. In the case of tags and commits, these will be
+treated as if they were the commits inside `refs/heads/*` for the
+purposes of whether the update is allowed.
++
+I.e. a fast-forward of commits and tags outside `refs/{tags,heads}/*`
+is allowed, even when what's being fast-forwarded is not a commit but
+a tag object that happens to point to a new commit which is a
+fast-forward of the commit the previous tag (or commit) pointed to.
+Replacing a tag with an entirely different tag is also allowed, as
+long as it points to the same commit, as is pushing a peeled tag,
+i.e. pushing the commit that an existing tag object points to, or a
+new tag object pointing to an existing commit.
++
+Tree and blob objects outside of `refs/{tags,heads}/*` will be treated
+the same way as if they were inside `refs/tags/*`, any update of them
+will be rejected.
++
+All of the rules described above about what's not allowed as an update
+can be overridden by adding the optional leading `+` to a refspec (or
+by using the `--force` command line option). The only exception to this
+is that no amount of forcing will make the `refs/heads/*` namespace
+accept a non-commit object. Hooks and configuration can also override
+or amend these rules, see e.g. `receive.denyNonFastForwards` in
+linkgit:git-config[1] and `pre-receive` and `update` in
+linkgit:githooks[5].
++
+Pushing an empty <src> allows you to delete the <dst> ref from the
+remote repository. Deletions are always accepted without a leading `+`
+in the refspec (or `--force`), except when forbidden by configuration
+or hooks. See `receive.denyDeletes` in linkgit:git-config[1] and
+`pre-receive` and `update` in linkgit:githooks[5].
 +
 The special refspec `:` (or `+:` to allow non-fast-forward updates)
 directs Git to push "matching" branches: for every branch that exists on
 the local side, the remote side is updated if a branch of the same name
 already exists on the remote side.
++
+`tag <tag>` means the same as `refs/tags/<tag>:refs/tags/<tag>`.
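For illustration, a few pushes exercising the rules above (remote, branch and tag names are placeholders):

------------
$ git push origin topic              # fast-forward refs/heads/topic
$ git push origin +topic:master      # forced (non-fast-forward) update
$ git push origin :experimental      # delete refs/heads/experimental
$ git push origin tag v1.0.0         # refs/tags/v1.0.0:refs/tags/v1.0.0
------------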
 
 --all::
        Push all branches (i.e. refs under `refs/heads/`); cannot be
index d056250968e13953bd8bc7dae71e745336788e38..aa0cc8bd445c99703d6ee17346d656104194dda8 100644 (file)
@@ -160,6 +160,11 @@ depth is 4095.
        being removed. In addition, any unreachable loose objects will
        be packed (and their loose counterparts removed).
 
+-i::
+--delta-islands::
+       Pass the `--delta-islands` option to `git-pack-objects`, see
+       linkgit:git-pack-objects[1].
+
 Configuration
 -------------
 
index 031f31fa471154f474b35448564aff6c8c2b3f51..df310d2a58cc6c30dfa6d2d8609149fbcf17ecca 100644 (file)
@@ -211,6 +211,12 @@ would conflict the same way as the test merge you resolved earlier.
 'git rerere' will be run by 'git rebase' to help you resolve this
 conflict.
 
+[NOTE] 'git rerere' relies on the conflict markers in the file to
+detect the conflict.  If the file already contains lines that look the
+same as lines with conflict markers, 'git rerere' may fail to record a
+conflict resolution.  To work around this, the `conflict-marker-size`
+setting in linkgit:gitattributes[5] can be used.
+
 GIT
 ---
 Part of the linkgit:git[1] suite
index bc8fdfd4691326b44e2cbdea3dc4925cf7440515..fda8516677237e9c0a8343f9ca96a1eb969b3492 100644 (file)
@@ -8,7 +8,7 @@ git-update-ref - Update the object name stored in a ref safely
 SYNOPSIS
 --------
 [verse]
-'git update-ref' [-m <reason>] (-d <ref> [<oldvalue>] | [--no-deref] [--create-reflog] <ref> <newvalue> [<oldvalue>] | --stdin [-z])
+'git update-ref' [-m <reason>] [--no-deref] (-d <ref> [<oldvalue>] | [--create-reflog] <ref> <newvalue> [<oldvalue>] | --stdin [-z])
 
 DESCRIPTION
 -----------
index 29a5b7e252a378b2f992bdeeb6c26cfe6f01f6b4..e2ee9fc21b400b01b006d5460698f0229e23ff9f 100644 (file)
@@ -120,8 +120,16 @@ OPTIONS
 --force::
        By default, `add` refuses to create a new working tree when
        `<commit-ish>` is a branch name and is already checked out by
-       another working tree and `remove` refuses to remove an unclean
-       working tree. This option overrides these safeguards.
+       another working tree, or if `<path>` is already assigned to some
+       working tree but is missing (for instance, if `<path>` was deleted
+       manually). This option overrides these safeguards. To add a missing but
+       locked working tree path, specify `--force` twice.
++
+`move` refuses to move a locked working tree unless `--force` is specified
+twice.
++
+`remove` refuses to remove an unclean working tree unless `--force` is used.
+To remove a locked working tree, specify `--force` twice.
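A brief sketch of the double `--force` behaviour described above (the paths are placeholders):

------------
$ git worktree remove --force ../wt-unclean          # unclean, not locked
$ git worktree remove --force --force ../wt-locked   # locked working tree
------------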
 
 -b <new-branch>::
 -B <new-branch>::
index dba7f0c18e33e7e26ca100992236e39b3d5b8a91..08e533d62b7f20a026b21d0dc8e9f35f4111b49e 100644 (file)
@@ -76,7 +76,7 @@ Note that omitting the `=` in `git -c foo.bar ...` is allowed and sets
 `foo.bar` to the boolean true value (just like `[foo]bar` would in a
 config file). Including the equals but with an empty value (like `git -c
 foo.bar= ...`) sets `foo.bar` to the empty string which `git config
---bool` will convert to `false`.
+--type=bool` will convert to `false`.
 
 --exec-path[=<path>]::
        Path to wherever your core Git programs are installed.
@@ -599,8 +599,8 @@ trace messages into this file descriptor.
 +
 Alternatively, if the variable is set to an absolute path
 (starting with a '/' character), Git will interpret this
-as a file path and will try to write the trace messages
-into it.
+as a file path and will try to append the trace messages
+to it.
 +
 Unsetting the variable, or setting it to empty, "0" or
 "false" (case insensitive) disables trace messages.
diff --git a/Documentation/gitcvs-config.txt b/Documentation/gitcvs-config.txt
new file mode 100644 (file)
index 0000000..02da427
--- /dev/null
@@ -0,0 +1,67 @@
+gitcvs.commitMsgAnnotation::
+       Append this string to each commit message. Set to empty string
+       to disable this feature. Defaults to "via git-CVS emulator".
+
+gitcvs.enabled::
+       Whether the CVS server interface is enabled for this repository.
+       See linkgit:git-cvsserver[1].
+
+gitcvs.logFile::
+       Path to a log file where the CVS server interface well... logs
+       various stuff. See linkgit:git-cvsserver[1].
+
+gitcvs.usecrlfattr::
+       If true, the server will look up the end-of-line conversion
+       attributes for files to determine the `-k` modes to use. If
+       the attributes force Git to treat a file as text,
+       the `-k` mode will be left blank so CVS clients will
+       treat it as text. If they suppress text conversion, the file
+       will be set with '-kb' mode, which suppresses any newline munging
+       the client might otherwise do. If the attributes do not allow
+       the file type to be determined, then `gitcvs.allBinary` is
+       used. See linkgit:gitattributes[5].
+
+gitcvs.allBinary::
+       This is used if `gitcvs.usecrlfattr` does not resolve
+       the correct '-kb' mode to use. If true, all
+       unresolved files are sent to the client in
+       mode '-kb'. This causes the client to treat them
+       as binary files, which suppresses any newline munging it
+       otherwise might do. Alternatively, if it is set to "guess",
+       then the contents of the file are examined to decide if
+       it is binary, similar to `core.autocrlf`.
+
+gitcvs.dbName::
+       Database used by git-cvsserver to cache revision information
+       derived from the Git repository. The exact meaning depends on the
+       used database driver, for SQLite (which is the default driver) this
+       is a filename. Supports variable substitution (see
+       linkgit:git-cvsserver[1] for details). May not contain semicolons (`;`).
+       Default: '%Ggitcvs.%m.sqlite'
+
+gitcvs.dbDriver::
+       The Perl DBI driver to use. You can specify any available driver
+       for this here, but it might not work. git-cvsserver is tested
+       with 'DBD::SQLite', reported to work with 'DBD::Pg', and
+       reported *not* to work with 'DBD::mysql'. Experimental feature.
+       May not contain double colons (`:`). Default: 'SQLite'.
+       See linkgit:git-cvsserver[1].
+
+gitcvs.dbUser, gitcvs.dbPass::
+       Database user and password. Only useful if setting `gitcvs.dbDriver`,
+       since SQLite has no concept of database users and/or passwords.
+       'gitcvs.dbUser' supports variable substitution (see
+       linkgit:git-cvsserver[1] for details).
+
+gitcvs.dbTableNamePrefix::
+       Database table name prefix.  Prepended to the names of any
+       database tables used, allowing a single database to be used
+       for several repositories.  Supports variable substitution (see
+       linkgit:git-cvsserver[1] for details).  Any non-alphabetic
+       characters will be replaced with underscores.
+
+All gitcvs variables except for `gitcvs.usecrlfattr` and
+`gitcvs.allBinary` can also be specified as
+'gitcvs.<access_method>.<varname>' (where 'access_method'
+is one of "ext" and "pserver") to make them apply only for the given
+access method.
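As an illustrative sketch of the per-access-method form described in the last paragraph (the log path is a placeholder):

------------
[gitcvs]
        enabled = false
        logFile = /var/log/git-cvsserver.log

[gitcvs "pserver"]
        enabled = true
------------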
index 1f6cceaefb028e5f96d802703a25a16e950a3e17..d407b7dee127b10a82c2b8e5101b44ce6996a24e 100644 (file)
@@ -19,9 +19,10 @@ walk the revision graph (such as linkgit:git-log[1]), all commits which are
 reachable from that commit. For commands that walk the revision graph one can
 also specify a range of revisions explicitly.
 
-In addition, some Git commands (such as linkgit:git-show[1]) also take
-revision parameters which denote other objects than commits, e.g. blobs
-("files") or trees ("directories of files").
+In addition, some Git commands (such as linkgit:git-show[1] and
+linkgit:git-push[1]) can also take revision parameters which denote
+other objects than commits, e.g. blobs ("files") or trees
+("directories of files").
 
 include::revisions.txt[]
 
diff --git a/Documentation/gui-config.txt b/Documentation/gui-config.txt
new file mode 100644 (file)
index 0000000..d30831a
--- /dev/null
@@ -0,0 +1,57 @@
+gui.commitMsgWidth::
+       Defines how wide the commit message window is in the
+       linkgit:git-gui[1]. "75" is the default.
+
+gui.diffContext::
+       Specifies how many context lines should be used in calls to diff
+       made by the linkgit:git-gui[1]. The default is "5".
+
+gui.displayUntracked::
+       Determines if linkgit:git-gui[1] shows untracked files
+       in the file list. The default is "true".
+
+gui.encoding::
+       Specifies the default encoding to use for displaying of
+       file contents in linkgit:git-gui[1] and linkgit:gitk[1].
+       It can be overridden by setting the 'encoding' attribute
+       for relevant files (see linkgit:gitattributes[5]).
+       If this option is not set, the tools default to the
+       locale encoding.
+
+gui.matchTrackingBranch::
+       Determines if new branches created with linkgit:git-gui[1] should
+       default to tracking remote branches with matching names or
+       not. Default: "false".
+
+gui.newBranchTemplate::

+       Is used as the suggested name when creating new branches using the
+       linkgit:git-gui[1].
+
+gui.pruneDuringFetch::
+       "true" if linkgit:git-gui[1] should prune remote-tracking branches when
+       performing a fetch. The default value is "false".
+
+gui.trustmtime::
+       Determines if linkgit:git-gui[1] should trust the file modification
+       timestamp or not. By default the timestamps are not trusted.
+
+gui.spellingDictionary::
+       Specifies the dictionary used for spell checking commit messages in
+       the linkgit:git-gui[1]. When set to "none" spell checking is turned
+       off.
+
+gui.fastCopyBlame::
+       If true, 'git gui blame' uses `-C` instead of `-C -C` for original
+       location detection. It makes blame significantly faster on huge
+       repositories at the expense of less thorough copy detection.
+
+gui.copyBlameThreshold::
+       Specifies the threshold to use in 'git gui blame' original location
+       detection, measured in alphanumeric characters. See the
+       linkgit:git-blame[1] manual for more information on copy detection.
+
+gui.blamehistoryctx::
+       Specifies the radius of history context in days to show in
+       linkgit:gitk[1] for the selected commit, when the `Show History
+       Context` menu item is invoked from 'git gui blame'. If this
+       variable is set to zero, the whole history is shown.
diff --git a/Documentation/pull-config.txt b/Documentation/pull-config.txt
new file mode 100644 (file)
index 0000000..bb23a99
--- /dev/null
@@ -0,0 +1,36 @@
+pull.ff::
+       By default, Git does not create an extra merge commit when merging
+       a commit that is a descendant of the current commit. Instead, the
+       tip of the current branch is fast-forwarded. When set to `false`,
+       this variable tells Git to create an extra merge commit in such
+       a case (equivalent to giving the `--no-ff` option from the command
+       line). When set to `only`, only such fast-forward merges are
+       allowed (equivalent to giving the `--ff-only` option from the
+       command line). This setting overrides `merge.ff` when pulling.
+
+pull.rebase::
+       When true, rebase branches on top of the fetched branch, instead
+       of merging the default branch from the default remote when "git
+       pull" is run. See "branch.<name>.rebase" for setting this on a
+       per-branch basis.
++
+When `merges`, pass the `--rebase-merges` option to 'git rebase'
+so that the local merge commits are included in the rebase (see
+linkgit:git-rebase[1] for details).
++
+When `preserve`, also pass `--preserve-merges` along to 'git rebase'
+so that locally committed merge commits will not be flattened
+by running 'git pull'.
++
+When the value is `interactive`, the rebase is run in interactive mode.
++
+*NOTE*: this is a possibly dangerous operation; do *not* use
+it unless you understand the implications (see linkgit:git-rebase[1]
+for details).
+
+pull.octopus::
+       The default merge strategy to use when pulling multiple branches
+       at once.
+
+pull.twohead::
+       The default merge strategy to use when pulling a single branch.
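Two independent, illustrative settings for the options above:

------------
$ git config pull.ff only        # refuse pulls that cannot fast-forward
$ git config pull.rebase merges  # rebase, keeping local merge commits
------------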
index f1fb08dc683778b2ebddd4a92c8d3d5d17ee4b7f..7d3a60f5b9361cd46adc4f005694a52ed06fd1c1 100644 (file)
@@ -33,11 +33,40 @@ name.
 it requests fetching everything up to the given tag.
 +
 The remote ref that matches <src>
-is fetched, and if <dst> is not an empty string, the local
-ref that matches it is fast-forwarded using <src>.
-If the optional plus `+` is used, the local ref
-is updated even if it does not result in a fast-forward
-update.
+is fetched, and if <dst> is not an empty string, an attempt
+is made to update the local ref that matches it.
++
+Whether that update is allowed without `--force` depends on the ref
+namespace it's being fetched to, the type of object being fetched, and
+whether the update is considered to be a fast-forward. Generally, the
+same rules apply for fetching as when pushing, see the `<refspec>...`
+section of linkgit:git-push[1] for what those are. Exceptions to those
+rules particular to 'git fetch' are noted below.
++
+Until Git version 2.20, and unlike when pushing with
+linkgit:git-push[1], any updates to `refs/tags/*` would be accepted
+without `+` in the refspec (or `--force`). When fetching, we promiscuously
+considered all tag updates from a remote to be forced fetches.  Since
+Git version 2.20, fetching to update `refs/tags/*` works the same way
+as when pushing. I.e. any updates will be rejected without `+` in the
+refspec (or `--force`).
++
+Unlike when pushing with linkgit:git-push[1], any updates outside of
+`refs/{tags,heads}/*` will be accepted without `+` in the refspec (or
+`--force`), whether that's swapping e.g. a tree object for a blob, or
+a commit for another commit that doesn't have the previous commit as
+an ancestor, etc.
++
+Unlike when pushing with linkgit:git-push[1], there is no
+configuration that will amend these rules, and nothing like a
+`pre-fetch` hook analogous to the `pre-receive` hook.
++
+As with pushing with linkgit:git-push[1], all of the rules described
+above about what's not allowed as an update can be overridden by
+adding the optional leading `+` to a refspec (or by using the `--force`
+command line option). The only exception to this is that no amount of
+forcing will make the `refs/heads/*` namespace accept a non-commit
+object.
 +
 [NOTE]
 When the remote branch you want to fetch is known to
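For illustration, forced fetch refspecs consistent with the rules above (remote, branch and tag names are placeholders):

------------
$ git fetch origin '+refs/heads/topic:refs/remotes/origin/topic'
$ git fetch --force origin refs/tags/v1.0.0:refs/tags/v1.0.0
------------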
diff --git a/Documentation/push-config.txt b/Documentation/push-config.txt
new file mode 100644 (file)
index 0000000..0a0e000
--- /dev/null
@@ -0,0 +1,113 @@
+push.default::
+       Defines the action `git push` should take if no refspec is
+       explicitly given.  Different values are well-suited for
+       specific workflows; for instance, in a purely central workflow
+       (i.e. the fetch source is equal to the push destination),
+       `upstream` is probably what you want.  Possible values are:
++
+--
+
+* `nothing` - do not push anything (error out) unless a refspec is
+  explicitly given. This is primarily meant for people who want to
+  avoid mistakes by always being explicit.
+
+* `current` - push the current branch to update a branch with the same
+  name on the receiving end.  Works in both central and non-central
+  workflows.
+
+* `upstream` - push the current branch back to the branch whose
+  changes are usually integrated into the current branch (which is
+  called `@{upstream}`).  This mode only makes sense if you are
+  pushing to the same repository you would normally pull from
+  (i.e. central workflow).
+
+* `tracking` - This is a deprecated synonym for `upstream`.
+
+* `simple` - in centralized workflow, work like `upstream` with an
+  added safety to refuse to push if the upstream branch's name is
+  different from the local one.
++
+When pushing to a remote that is different from the remote you normally
+pull from, work as `current`.  This is the safest option and is suited
+for beginners.
++
+This mode has become the default in Git 2.0.
+
+* `matching` - push all branches having the same name on both ends.
+  This makes the repository you are pushing to remember the set of
+  branches that will be pushed out (e.g. if you always push 'maint'
+  and 'master' there and no other branches, the repository you push
+  to will have these two branches, and your local 'maint' and
+  'master' will be pushed there).
++
+To use this mode effectively, you have to make sure _all_ the
+branches you would push out are ready to be pushed out before
+running 'git push', as the whole point of this mode is to allow you
+to push all of the branches in one go.  If you usually finish work
+on only one branch and push out the result, while other branches are
+unfinished, this mode is not for you.  Also this mode is not
+suitable for pushing into a shared central repository, as other
+people may add new branches there, or update the tip of existing
+branches outside your control.
++
+This used to be the default, but not since Git 2.0 (`simple` is the
+new default).
+
+--
+
+push.followTags::
+       If set to true, enable the `--follow-tags` option by default.  You
+       may override this configuration at time of push by specifying
+       `--no-follow-tags`.
+
+push.gpgSign::
+       May be set to a boolean value, or the string 'if-asked'. A true
+       value causes all pushes to be GPG signed, as if `--signed` is
+       passed to linkgit:git-push[1]. The string 'if-asked' causes
+       pushes to be signed if the server supports it, as if
+       `--signed=if-asked` is passed to 'git push'. A false value may
+       override a value from a lower-priority config file. An explicit
+       command-line flag always overrides this config option.
+
+push.pushOption::
+       When no `--push-option=<option>` argument is given from the
+       command line, `git push` behaves as if each <value> of
+       this variable is given as `--push-option=<value>`.
++
+This is a multi-valued variable, and an empty value can be used in a
+higher priority configuration file (e.g. `.git/config` in a
+repository) to clear the values inherited from lower priority
+configuration files (e.g. `$HOME/.gitconfig`).
++
+--
+
+Example:
+
+/etc/gitconfig
+  push.pushoption = a
+  push.pushoption = b
+
+~/.gitconfig
+  push.pushoption = c
+
+repo/.git/config
+  push.pushoption =
+  push.pushoption = b
+
+This will result in only b (a and c are cleared).
+
+--
+
+push.recurseSubmodules::
+       Make sure all submodule commits used by the revisions to be pushed
+       are available on a remote-tracking branch. If the value is 'check'
+       then Git will verify that all submodule commits that changed in the
+       revisions to be pushed are available on at least one remote of the
+       submodule. If any commits are missing, the push will be aborted and
+       exit with non-zero status. If the value is 'on-demand' then all
+       submodules that changed in the revisions to be pushed will be
+       pushed. If on-demand was not able to push all necessary revisions,
+       the push will also be aborted and exit with non-zero status. If the value
+       is 'no' then the default behavior of ignoring submodules when pushing
+       is retained. You may override this configuration at time of push by
+       specifying '--recurse-submodules=check|on-demand|no'.
diff --git a/Documentation/receive-config.txt b/Documentation/receive-config.txt
new file mode 100644 (file)
index 0000000..65f78aa
--- /dev/null
@@ -0,0 +1,123 @@
+receive.advertiseAtomic::
+       By default, git-receive-pack will advertise the atomic push
+       capability to its clients. If you don't want to advertise this
+       capability, set this variable to false.
+
+receive.advertisePushOptions::
+       When set to true, git-receive-pack will advertise the push options
+       capability to its clients. False by default.
+
+receive.autogc::
+       By default, git-receive-pack will run "git-gc --auto" after
+       receiving data from git-push and updating refs.  You can stop
+       it by setting this variable to false.
+
+receive.certNonceSeed::
+       By setting this variable to a string, `git receive-pack`
+       will accept a `git push --signed` and verify it by using
+       a "nonce" protected by HMAC using this string as a secret
+       key.
+
+receive.certNonceSlop::
+       When a `git push --signed` sent a push certificate with a
+       "nonce" that was issued by a receive-pack serving the same
+       repository within this many seconds, export the "nonce"
+       found in the certificate to `GIT_PUSH_CERT_NONCE` to the
+       hooks (instead of what the receive-pack asked the sending
+       side to include).  This makes it a bit easier to write checks in
+       `pre-receive` and `post-receive`: instead of checking the
+       `GIT_PUSH_CERT_NONCE_SLOP` environment variable, which records by
+       how many seconds the nonce is stale, to decide whether to accept
+       the certificate, the hooks can simply check that
+       `GIT_PUSH_CERT_NONCE_STATUS` is `OK`.
+
+receive.fsckObjects::
+       If it is set to true, git-receive-pack will check all received
+       objects. See `transfer.fsckObjects` for what's checked.
+       Defaults to false. If not set, the value of
+       `transfer.fsckObjects` is used instead.
+
+receive.fsck.<msg-id>::
+       Acts like `fsck.<msg-id>`, but is used by
+       linkgit:git-receive-pack[1] instead of
+       linkgit:git-fsck[1]. See the `fsck.<msg-id>` documentation for
+       details.
+
+receive.fsck.skipList::
+       Acts like `fsck.skipList`, but is used by
+       linkgit:git-receive-pack[1] instead of
+       linkgit:git-fsck[1]. See the `fsck.skipList` documentation for
+       details.
+
+receive.keepAlive::
+       After receiving the pack from the client, `receive-pack` may
+       produce no output (if `--quiet` was specified) while processing
+       the pack, causing some networks to drop the TCP connection.
+       With this option set, if `receive-pack` does not transmit
+       any data in this phase for `receive.keepAlive` seconds, it will
+       send a short keepalive packet.  The default is 5 seconds; set
+       to 0 to disable keepalives entirely.
+
+receive.unpackLimit::
+       If the number of objects received in a push is below this
+       limit then the objects will be unpacked into loose object
+       files. However if the number of received objects equals or
+       exceeds this limit then the received pack will be stored as
+       a pack, after adding any missing delta bases.  Storing the
+       pack from a push can make the push operation complete faster,
+       especially on slow filesystems.  If not set, the value of
+       `transfer.unpackLimit` is used instead.
+
+receive.maxInputSize::
+       If the size of the incoming pack stream is larger than this
+       limit, then git-receive-pack will error out, instead of
+       accepting the pack file. If not set or set to 0, then the size
+       is unlimited.
+
+receive.denyDeletes::
+       If set to true, git-receive-pack will deny a ref update that deletes
+       the ref. Use this to prevent such a ref deletion via a push.
+
+receive.denyDeleteCurrent::
+       If set to true, git-receive-pack will deny a ref update that
+       deletes the currently checked out branch of a non-bare repository.
+
+receive.denyCurrentBranch::
+       If set to true or "refuse", git-receive-pack will deny a ref update
+       to the currently checked out branch of a non-bare repository.
+       Such a push is potentially dangerous because it brings the HEAD
+       out of sync with the index and working tree. If set to "warn",
+       print a warning of such a push to stderr, but allow the push to
+       proceed. If set to false or "ignore", allow such pushes with no
+       message. Defaults to "refuse".
++
+Another option is "updateInstead" which will update the working
+tree if pushing into the current branch.  This option is
+intended for synchronizing working directories when one side is not easily
+accessible via interactive ssh (e.g. a live web site, hence the requirement
+that the working directory be clean). This mode also comes in handy when
+developing inside a VM to test and fix code on different Operating Systems.
++
+By default, "updateInstead" will refuse the push if the working tree or
+the index have any difference from the HEAD, but the `push-to-checkout`
+hook can be used to customize this.  See linkgit:githooks[5].
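++
+As an illustration (the repository path below is hypothetical), a
+deployment repository on a server could be configured like this:
+
+    /srv/www/site/.git/config
+      receive.denyCurrentBranch = updateInstead
+
+With this setting, a push to the checked-out branch also updates the
+working tree on the server, subject to the clean index and working tree
+requirement described above, or to a custom `push-to-checkout` hook.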
+
+receive.denyNonFastForwards::
+       If set to true, git-receive-pack will deny a ref update which is
+       not a fast-forward. Use this to prevent such an update via a push,
+       even if that push is forced. This configuration variable is
+       set when initializing a shared repository.
+
+receive.hideRefs::
+       This variable is the same as `transfer.hideRefs`, but applies
+       only to `receive-pack` (and so affects pushes, but not fetches).
+       An attempt to update or delete a hidden ref by `git push` is
+       rejected.
+
+receive.updateServerInfo::
+       If set to true, git-receive-pack will run git-update-server-info
+       after receiving data from git-push and updating refs.
+
+receive.shallowUpdate::
+       If set to true, .git/shallow can be updated when new refs
+       require new shallow roots. Otherwise those refs are rejected.
diff --git a/Documentation/sendemail-config.txt b/Documentation/sendemail-config.txt
new file mode 100644 (file)
index 0000000..0006faf
--- /dev/null
@@ -0,0 +1,63 @@
+sendemail.identity::
+       A configuration identity. When given, causes values in the
+       'sendemail.<identity>' subsection to take precedence over
+       values in the 'sendemail' section. The default identity is
+       the value of `sendemail.identity`.
+
+sendemail.smtpEncryption::
+       See linkgit:git-send-email[1] for description.  Note that this
+       setting is not subject to the 'identity' mechanism.
+
+sendemail.smtpssl (deprecated)::
+       Deprecated alias for 'sendemail.smtpEncryption = ssl'.
+
+sendemail.smtpsslcertpath::
+       Path to ca-certificates (either a directory or a single file).
+       Set it to an empty string to disable certificate verification.
+
+sendemail.<identity>.*::
+       Identity-specific versions of the 'sendemail.*' parameters
+       found below, taking precedence over those when this
+       identity is selected, through either the command-line or
+       `sendemail.identity`.
+
+sendemail.aliasesFile::
+sendemail.aliasFileType::
+sendemail.annotate::
+sendemail.bcc::
+sendemail.cc::
+sendemail.ccCmd::
+sendemail.chainReplyTo::
+sendemail.confirm::
+sendemail.envelopeSender::
+sendemail.from::
+sendemail.multiEdit::
+sendemail.signedoffbycc::
+sendemail.smtpPass::
+sendemail.suppresscc::
+sendemail.suppressFrom::
+sendemail.to::
+sendemail.tocmd::
+sendemail.smtpDomain::
+sendemail.smtpServer::
+sendemail.smtpServerPort::
+sendemail.smtpServerOption::
+sendemail.smtpUser::
+sendemail.thread::
+sendemail.transferEncoding::
+sendemail.validate::
+sendemail.xmailer::
+       See linkgit:git-send-email[1] for description.
+
+sendemail.signedoffcc (deprecated)::
+       Deprecated alias for `sendemail.signedoffbycc`.
+
+sendemail.smtpBatchSize::
+       Number of messages to be sent per connection; after that, a relogin
+       will happen.  If the value is 0 or undefined, send all messages in
+       one connection.
+       See also the `--batch-size` option of linkgit:git-send-email[1].
+
+sendemail.smtpReloginDelay::
+       Seconds to wait before reconnecting to the SMTP server.
+       See also the `--relogin-delay` option of linkgit:git-send-email[1].
diff --git a/Documentation/submodule-config.txt b/Documentation/submodule-config.txt
new file mode 100644 (file)
index 0000000..0a1293b
--- /dev/null
@@ -0,0 +1,82 @@
+submodule.<name>.url::
+       The URL for a submodule. This variable is copied from the .gitmodules
+       file to the git config via 'git submodule init'. The user can change
+       the configured URL before obtaining the submodule via 'git submodule
+       update'. If neither submodule.<name>.active nor submodule.active is
+       set, the presence of this variable is used as a fallback to indicate
+       whether the submodule is of interest to git commands.
+       See linkgit:git-submodule[1] and linkgit:gitmodules[5] for details.
+
+submodule.<name>.update::
+       The method by which a submodule is updated by 'git submodule update',
+       which is the only command affected by it; others, such as
+       'git checkout --recurse-submodules', are unaffected. It exists for
+       historical reasons, from when 'git submodule' was the only command to
+       interact with submodules; settings like `submodule.active`
+       and `pull.rebase` are more specific. It is populated by
+       `git submodule init` from the linkgit:gitmodules[5] file.
+       See description of 'update' command in linkgit:git-submodule[1].
+
+submodule.<name>.branch::
+       The remote branch name for a submodule, used by `git submodule
+       update --remote`.  Set this option to override the value found in
+       the `.gitmodules` file.  See linkgit:git-submodule[1] and
+       linkgit:gitmodules[5] for details.
+
+submodule.<name>.fetchRecurseSubmodules::
+       This option can be used to control recursive fetching of this
+       submodule. It can be overridden by using the --[no-]recurse-submodules
+       command-line option to "git fetch" and "git pull".
+       This setting will override the value found in the linkgit:gitmodules[5]
+       file.
+
+submodule.<name>.ignore::
+       Defines under what circumstances "git status" and the diff family show
+       a submodule as modified. When set to "all", it will never be considered
+       modified (but it will nonetheless show up in the output of status and
+       commit when it has been staged), "dirty" will ignore all changes
+       to the submodule's work tree and
+       take only differences between the HEAD of the submodule and the commit
+       recorded in the superproject into account. "untracked" will additionally
+       let submodules with modified tracked files in their work tree show up.
+       Using "none" (the default when this option is not set) also shows
+       submodules that have untracked files in their work tree as changed.
+       This setting overrides any setting made in .gitmodules for this submodule;
+       both settings can be overridden on the command line by using the
+       "--ignore-submodules" option. The 'git submodule' commands are not
+       affected by this setting.
+
+submodule.<name>.active::
+       Boolean value indicating if the submodule is of interest to git
+       commands.  This config option takes precedence over the
+       submodule.active config option. See linkgit:gitsubmodules[7] for
+       details.
+
+submodule.active::
+       A repeated field which contains a pathspec used to match against a
+       submodule's path to determine if the submodule is of interest to git
+       commands. See linkgit:gitsubmodules[7] for details.
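++
+As an illustration (the paths below are hypothetical), the following
+marks only the submodules whose paths match `lib/` or `doc/` as
+interesting to git commands:
+
+    ~/project/.git/config
+      submodule.active = lib/
+      submodule.active = doc/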
+
+submodule.recurse::
+       Specifies if commands recurse into submodules by default. This
+       applies to all commands that have a `--recurse-submodules` option,
+       except `clone`.
+       Defaults to false.
+
+submodule.fetchJobs::
+       Specifies how many submodules are fetched/cloned at the same time.
+       A positive integer allows up to that number of submodules fetched
+       in parallel. A value of 0 will give some reasonable default.
+       If unset, it defaults to 1.
+
+submodule.alternateLocation::
+       Specifies how the submodules obtain alternates when submodules are
+       cloned. Possible values are `no`, `superproject`.
+       By default `no` is assumed, which doesn't add references. When the
+       value is set to `superproject` the submodule to be cloned computes
+       its alternates location relative to the superproject's alternate.
+
+submodule.alternateErrorStrategy::
+       Specifies how to treat errors with the alternates for a submodule
+       as computed via `submodule.alternateLocation`. Possible values are
+       `ignore`, `info`, `die`. Default is `die`.
index c664acbd765d06f000b2feeef1d7e98f1616ff62..001395e95071ec50d2fef65ead09cf5c49558a6a 100644 (file)
@@ -112,12 +112,24 @@ Design Details
 - The file format includes parameters for the object ID hash function,
   so a future change of hash algorithm does not require a change in format.
 
+- Commit grafts and replace objects can change the shape of the commit
+  history. The latter can also be enabled/disabled on the fly using
+  `--no-replace-objects`. This makes it difficult to store both possible
+  interpretations of a commit id, especially when computing generation
+  numbers. The commit-graph will not be read or written when
+  replace-objects or grafts are present.
+
+- Shallow clones create grafts of commits by dropping their parents. This
+  leads the commit-graph to think those commits have generation number 1.
+  If and when those commits are made unshallow, those generation numbers
+  become invalid. Since shallow clones are intended to restrict the commit
+  history to a very small set of commits, the commit-graph feature is less
+  helpful for these clones, anyway. The commit-graph will not be read or
+  written when shallow commits are present.
+
 Future Work
 -----------
 
-- The commit graph feature currently does not honor commit grafts. This can
-  be remedied by duplicating or refactoring the current graft logic.
-
 - After computing and storing generation numbers, we must make graph
   walks aware of generation numbers to gain the performance benefits they
   enable. This will mostly be accomplished by swapping a commit-date-ordered
diff --git a/Documentation/technical/multi-pack-index.txt b/Documentation/technical/multi-pack-index.txt
new file mode 100644 (file)
index 0000000..d7e5763
--- /dev/null
@@ -0,0 +1,109 @@
+Multi-Pack-Index (MIDX) Design Notes
+====================================
+
+The Git object directory contains a 'pack' directory containing
+packfiles (with suffix ".pack") and pack-indexes (with suffix
+".idx"). The pack-indexes provide a way to look up objects and
+navigate to their offset within the pack, but these must come
+in pairs with the packfiles. This pairing depends on the file
+names, as the pack-index differs from its packfile only in
+suffix. While the pack-indexes provide fast lookup per packfile,
+this performance degrades as the number of packfiles increases,
+because abbreviations need to inspect every packfile and we are
+more likely to have a miss on our most-recently-used packfile.
+For some large repositories, repacking into a single packfile
+is not feasible due to storage space or excessive repack times.
+
+The multi-pack-index (MIDX for short) stores a list of objects
+and their offsets into multiple packfiles. It contains:
+
+- A list of packfile names.
+- A sorted list of object IDs.
+- A list of metadata for the ith object ID including:
+  - A value j referring to the jth packfile.
+  - An offset within the jth packfile for the object.
+- If large offsets are required, we use another list of large
+  offsets similar to version 2 pack-indexes.
+
+Thus, we can provide O(log N) lookup time for any number
+of packfiles.
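+
+As a concrete illustration of that claim, the lookup is a plain binary
+search over the sorted object ID list. The sketch below is not Git's
+implementation; the struct layout and the names are assumptions made
+for this document only.
+
+    #include <stdint.h>
+    #include <string.h>
+
+    #define OID_LEN 20
+
+    struct midx_view {
+            uint32_t nr_objects;       /* total number of object IDs */
+            const unsigned char *oids; /* sorted list, OID_LEN bytes per entry */
+    };
+
+    /* Return the position of `want` in the sorted list, or -1 if absent. */
+    long midx_find(const struct midx_view *m, const unsigned char *want)
+    {
+            uint32_t lo = 0, hi = m->nr_objects;
+
+            while (lo < hi) {       /* binary search: O(log N) comparisons */
+                    uint32_t mid = lo + (hi - lo) / 2;
+                    int cmp = memcmp(want, m->oids + (size_t)mid * OID_LEN,
+                                     OID_LEN);
+                    if (!cmp)
+                            return mid;
+                    if (cmp < 0)
+                            hi = mid;
+                    else
+                            lo = mid + 1;
+            }
+            return -1;
+    }
+
+The per-object metadata stored at the found position (pack-int-id and
+offset) then identifies which packfile to open and where the object's
+data begins.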
+
+Design Details
+--------------
+
+- The MIDX is stored in a file named 'multi-pack-index' in the
+  .git/objects/pack directory. This could be stored in the pack
+  directory of an alternate. It refers only to packfiles in that
+  same directory.
+
+- The pack.multiIndex config setting must be on to consume MIDX files.
+
+- The file format includes parameters for the object ID hash
+  function, so a future change of hash algorithm does not require
+  a change in format.
+
+- The MIDX keeps only one record per object ID. If an object appears
+  in multiple packfiles, then the MIDX selects the copy in the most-
+  recently modified packfile.
+
+- If there exist packfiles in the pack directory not registered in
+  the MIDX, then those packfiles are loaded into the `packed_git`
+  list and `packed_git_mru` cache.
+
+- The pack-indexes (.idx files) remain in the pack directory so we
+  can delete the MIDX file, set core.midx to false, or downgrade
+  without any loss of information.
+
+- The MIDX file format uses a chunk-based approach (similar to the
+  commit-graph file) that allows optional data to be added.
+
+Future Work
+-----------
+
+- Add a 'verify' subcommand to the 'git multi-pack-index' builtin to verify the
+  contents of the multi-pack-index file match the offsets listed in
+  the corresponding pack-indexes.
+
+- The multi-pack-index allows many packfiles, especially in a context
+  where repacking is expensive (such as a very large repo), or
+  unexpected maintenance time is unacceptable (such as a high-demand
+  build machine). However, the multi-pack-index needs to be rewritten
+  in full every time. We can extend the format to be incremental, so
+  writes are fast. By storing a small "tip" multi-pack-index that
+  points to large "base" MIDX files, we can keep writes fast while
+  still reducing the number of binary searches required for object
+  lookups.
+
+- The reachability bitmap is currently paired directly with a single
+  packfile, using the pack-order as the object order to hopefully
+  compress the bitmaps well using run-length encoding. This could be
+  extended to pair a reachability bitmap with a multi-pack-index. If
+  the multi-pack-index is extended to store a "stable object order"
+  (a function Order(hash) = integer that is constant for a given hash,
+  even as the multi-pack-index is updated) then a reachability bitmap
+  could point to a multi-pack-index and be updated independently.
+
+- Packfiles can be marked as "special" using empty files that share
+  the initial name but replace ".pack" with ".keep" or ".promisor".
+  We can add an optional chunk of data to the multi-pack-index that
+  records flags of information about the packfiles. This allows new
+  states, such as 'repacked' or 'redeltified', that can help with
+  pack maintenance in a multi-pack environment. It may also be
+  helpful to organize packfiles by object type (commit, tree, blob,
+  etc.) and use this metadata to help that maintenance.
+
+- The partial clone feature records special "promisor" packs that
+  may point to objects that are not stored locally, but available
+  on request to a server. The multi-pack-index does not currently
+  track these promisor packs.
+
+Related Links
+-------------
+[0] https://bugs.chromium.org/p/git/issues/detail?id=6
+    Chromium work item for: Multi-Pack Index (MIDX)
+
+[1] https://public-inbox.org/git/20180107181459.222909-1-dstolee@microsoft.com/
+    An earlier RFC for the multi-pack-index feature
+
+[2] https://public-inbox.org/git/alpine.DEB.2.20.1803091557510.23109@alexmv-linux/
+    Git Merge 2018 Contributor's summit notes (includes discussion of MIDX)
index 70a99fd1423894255f5e0e8cdbb345276620ffde..cab5bdd2ff0f887cb991e2dc9ba3cccec34f8a0a 100644 (file)
@@ -252,3 +252,80 @@ Pack file entry: <+
     corresponding packfile.
 
     20-byte SHA-1-checksum of all of the above.
+
+== multi-pack-index (MIDX) files have the following format:
+
+The multi-pack-index files refer to multiple pack-files and loose objects.
+
+In order to allow extensions that add extra data to the MIDX, we organize
+the body into "chunks" and provide a lookup table at the beginning of the
+body. The header includes certain length values, such as the number of packs,
+the number of base MIDX files, hash lengths and types.
+
+All 4-byte numbers are in network order.
+
+HEADER:
+
+       4-byte signature:
+           The signature is: {'M', 'I', 'D', 'X'}
+
+       1-byte version number:
+           Git only writes or recognizes version 1.
+
+       1-byte Object Id Version
+           Git only writes or recognizes version 1 (SHA1).
+
+       1-byte number of "chunks"
+
+       1-byte number of base multi-pack-index files:
+           This value is currently always zero.
+
+       4-byte number of pack files
+
+CHUNK LOOKUP:
+
+       (C + 1) * 12 bytes providing the chunk offsets:
+           First 4 bytes describe chunk id. Value 0 is a terminating label.
+           Other 8 bytes provide offset in current file for chunk to start.
+           (Chunks are provided in file-order, so you can infer the length
+           using the next chunk position if necessary.)
+
+       The remaining data in the body is described one chunk at a time, and
+       these chunks may be given in any order. Chunks are required unless
+       otherwise specified.
+
+CHUNK DATA:
+
+       Packfile Names (ID: {'P', 'N', 'A', 'M'})
+           Stores the packfile names as concatenated, null-terminated strings.
+           Packfiles must be listed in lexicographic order for fast lookups by
+           name. This is the only chunk not guaranteed to be a multiple of four
+           bytes in length, so should be the last chunk for alignment reasons.
+
+       OID Fanout (ID: {'O', 'I', 'D', 'F'})
+           The ith entry, F[i], stores the number of OIDs with first
+           byte at most i. Thus F[255] stores the total
+           number of objects.
+
+       OID Lookup (ID: {'O', 'I', 'D', 'L'})
+           The OIDs for all objects in the MIDX are stored in lexicographic
+           order in this chunk.
+
+       Object Offsets (ID: {'O', 'O', 'F', 'F'})
+           Stores two 4-byte values for every object.
+           1: The pack-int-id for the pack storing this object.
+           2: The offset within the pack.
+               If all offsets are less than 2^32, then the large offset chunk
+               will not exist and offsets are stored as in IDX v1.
+               If there is at least one offset value larger than 2^32-1, then
+               the large offset chunk must exist, and offsets larger than
+               2^31-1 must be stored in it instead.
+               If the large offset chunk exists and the 31st bit is on, then
+               removing that bit reveals the row in the large offsets
+               containing the 8-byte offset of this object. (A decoding
+               sketch follows the trailer description below.)
+
+       [Optional] Object Large Offsets (ID: {'L', 'O', 'F', 'F'})
+           8-byte offsets into large packfiles.
+
+TRAILER:
+
+       20-byte SHA1-checksum of the above contents.
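+
+As an illustration of the encodings described above, the following sketch
+shows how a reader could scan the chunk lookup table and decode an object
+offset. This is not Git's implementation; the function names and
+boundaries are assumptions made for this example only.
+
+    #include <stdint.h>
+    #include <stddef.h>
+
+    /* Read big-endian ("network order") integers from the mapped file. */
+    static uint32_t read_be32(const unsigned char *p)
+    {
+            return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
+    }
+
+    static uint64_t read_be64(const unsigned char *p)
+    {
+            return ((uint64_t)read_be32(p) << 32) | read_be32(p + 4);
+    }
+
+    /* Scan the (C + 1)-row chunk lookup table for the given chunk id. */
+    static uint64_t find_chunk(const unsigned char *table, int nchunks,
+                               uint32_t id)
+    {
+            int i;
+
+            for (i = 0; i <= nchunks; i++) {
+                    uint32_t chunk_id = read_be32(table + 12 * i);
+
+                    if (chunk_id == id)
+                            return read_be64(table + 12 * i + 4);
+                    if (!chunk_id)
+                            break;  /* terminating label */
+            }
+            return 0;       /* chunk not present */
+    }
+
+    /*
+     * Decode the pack offset of the nth object from the Object Offsets
+     * (OOFF) chunk, consulting Object Large Offsets (LOFF) when the
+     * 31st bit is set.
+     */
+    static uint64_t object_offset(const unsigned char *ooff,
+                                  const unsigned char *loff, uint32_t nth)
+    {
+            uint32_t raw = read_be32(ooff + (size_t)nth * 8 + 4);
+
+            if (raw & 0x80000000u)
+                    return read_be64(loff + (size_t)(raw & 0x7fffffffu) * 8);
+            return raw;
+    }
+
+The OID Fanout chunk is used in the same way as in pack-indexes: the
+number of objects whose OID starts with byte value i is F[i] - F[i - 1],
+with F[-1] taken to be zero.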
diff --git a/Documentation/technical/rerere.txt b/Documentation/technical/rerere.txt
new file mode 100644 (file)
index 0000000..aa22d7a
--- /dev/null
@@ -0,0 +1,186 @@
+Rerere
+======
+
+This document describes the rerere logic.
+
+Conflict normalization
+----------------------
+
+To ensure recorded conflict resolutions can be looked up in the rerere
+database even when branches are merged in a different order, when
+different branches that result in the same conflict are merged, or
+when different conflict style settings are used, rerere normalizes the
+conflicts before writing them to the rerere database.
+
+Different conflict styles and branch names are normalized by stripping
+the labels from the conflict markers, and removing the common ancestor
+version from the `diff3` conflict style. Branches that are merged
+in different order are normalized by sorting the conflict hunks.  More
+on each of those steps in the following sections.
+
+Once these two normalization operations are applied, a conflict ID is
+calculated based on the normalized conflict, which is later used by
+rerere to look up the conflict in the rerere database.
+
+Removing the common ancestor version
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Say we have three branches AB, AC and AC2.  The common ancestor of
+these branches has a file with a line containing the string "A" (for
+brevity this is called "line A" in the rest of the document).  In
+branch AB this line is changed to "B", in AC, this line is changed to
+"C", and branch AC2 is forked off of AC, after the line was changed to
+"C".
+
+Forking a branch ABAC off of branch AB and then merging AC into it, we
+get a conflict like the following:
+
+    <<<<<<< HEAD
+    B
+    =======
+    C
+    >>>>>>> AC
+
+Doing the analogous with AC2 (forking a branch ABAC2 off of branch AB
+and then merging branch AC2 into it), using the diff3 conflict style,
+we get a conflict like the following:
+
+    <<<<<<< HEAD
+    B
+    ||||||| merged common ancestors
+    A
+    =======
+    C
+    >>>>>>> AC2
+
+By resolving this conflict to leave line D, the user declares:
+
+    After examining what branches AB and AC did, I believe that making
+    line A into line D is the best thing to do that is compatible with
+    what AB and AC wanted to do.
+
+As branch AC2 refers to the same commit as AC, the above implies that
+this is also compatible with what AB and AC2 wanted to do.
+
+By extension, this means that rerere should recognize that the above
+conflicts are the same.  To do this, the labels on the conflict
+markers are stripped, and the common ancestor version is removed.  The above
+examples would both result in the following normalized conflict:
+
+    <<<<<<<
+    B
+    =======
+    C
+    >>>>>>>
+
+Sorting hunks
+~~~~~~~~~~~~~
+
+As before, let's imagine that a common ancestor had a file with line A in
+its early part, and line X in its late part.  And then four branches
+are forked that do these things:
+
+    - AB: changes A to B
+    - AC: changes A to C
+    - XY: changes X to Y
+    - XZ: changes X to Z
+
+Now, forking a branch ABAC off of branch AB and then merging AC into
+it, and forking a branch ACAB off of branch AC and then merging AB
+into it, would yield the conflict in a different order.  The former
+would say "A became B or C, what now?" while the latter would say "A
+became C or B, what now?"
+
+As a reminder, the act of merging AC into ABAC and resolving the
+conflict to leave line D means that the user declares:
+
+    After examining what branches AB and AC did, I believe that
+    making line A into line D is the best thing to do that is
+    compatible with what AB and AC wanted to do.
+
+So the conflict we would see when merging AB into ACAB should be
+resolved the same way---it is the resolution that is in line with that
+declaration.
+
+Imagine that, similarly, a branch XYXZ was previously forked from XY,
+and XZ was merged into it, and resolved "X became Y or Z" into "X
+became W".
+
+Now, if a branch ABXY was forked from AB and then merged XY, then ABXY
+would have line B in its early part and line Y in its later part.
+Such a merge would be quite clean.  We can construct 4 combinations
+using these four branches ((AB, AC) x (XY, XZ)).
+
+Merging ABXY and ACXZ would make "an early A became B or C, a late X
+became Y or Z" conflict, while merging ACXY and ABXZ would make "an
+early A became C or B, a late X became Y or Z".  We can see there are
+4 combinations of ("B or C", "C or B") x ("Y or Z", "Z or Y").
+
+By sorting, the conflict is given its canonical name, namely, "an
+early part became B or C, a late part became Y or Z", and whenever
+any of these four patterns appears, we can get to the same conflict
+and resolution that we saw earlier.
+
+Without the sorting, we'd have to somehow find a previous resolution
+among a combinatorial explosion of variants.
+
+Conflict ID calculation
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Once the conflict normalization is done, the conflict ID is calculated
+as the sha1 hash of the conflict hunks appended to each other,
+separated by <NUL> characters.  The conflict markers are stripped out
+before the sha1 is calculated.  So in the example above, where we
+merge branch AC which changes line A to line C, into branch AB, which
+changes line A to line B, the conflict ID would be
+SHA1('B<NUL>C<NUL>').
+
+If there are multiple conflicts in one file, the sha1 is calculated
+the same way with all hunks appended to each other, in the order in
+which they appear in the file, separated by a <NUL> character.
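+
+For a quick sanity check of the notation above, the conflict ID can be
+reproduced by hashing the bytes exactly as written in
+SHA1('B<NUL>C<NUL>'). The sketch below uses OpenSSL's SHA1() purely for
+illustration; rerere itself uses Git's internal hash routines, and the
+exact bytes of a real hunk (for example any trailing newlines) are
+determined by the implementation, not by this sketch.
+
+    /* Sketch only: compile with `cc conflict-id.c -lcrypto`. */
+    #include <stdio.h>
+    #include <openssl/sha.h>
+
+    int main(void)
+    {
+            /* Normalized hunk sides "B" and "C", each followed by <NUL>. */
+            static const unsigned char input[] = { 'B', '\0', 'C', '\0' };
+            unsigned char id[SHA_DIGEST_LENGTH];
+            int i;
+
+            SHA1(input, sizeof(input), id);
+            for (i = 0; i < SHA_DIGEST_LENGTH; i++)
+                    printf("%02x", id[i]);
+            putchar('\n');  /* this hex string is the conflict ID */
+            return 0;
+    }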
+
+Nested conflicts
+~~~~~~~~~~~~~~~~
+
+Nested conflicts are handled very similarly to "simple" conflicts.
+As with simple conflicts, the conflict is first normalized by
+stripping the labels from the conflict markers, stripping the common
+ancestor version, and sorting the conflict hunks, both for the outer and the
+inner conflict.  This is done recursively, so any number of nested
+conflicts can be handled.
+
+Note that this only works for conflict markers that "cleanly nest".  If
+there are any unmatched conflict markers, rerere will fail to handle
+the conflict and will not record a conflict resolution.
+
+The only difference is in how the conflict ID is calculated.  For the
+inner conflict, the conflict markers themselves are not stripped out
+before calculating the sha1.
+
+Say we have the following conflict for example:
+
+    <<<<<<< HEAD
+    1
+    =======
+    <<<<<<< HEAD
+    3
+    =======
+    2
+    >>>>>>> branch-2
+    >>>>>>> branch-3~
+
+After stripping out the labels of the conflict markers, and sorting
+the hunks, the conflict would look as follows:
+
+    <<<<<<<
+    1
+    =======
+    <<<<<<<
+    2
+    =======
+    3
+    >>>>>>>
+    >>>>>>>
+
+and finally the conflict ID would be calculated as:
+`sha1('1<NUL><<<<<<<\n3\n=======\n2\n>>>>>>><NUL>')`
index e9dc8f7a01d4a9a96e55757b0c5b1f369049aefc..498fce8b64af4af730bfe736b43c0ce311fc85a3 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 GVF=GIT-VERSION-FILE
-DEF_VER=v2.19.0
+DEF_VER=v2.19.GIT
 
 LF='
 '
index 5a969f5830a4105d3e3e6236eaa51e19880cc873..5bf1af369ec46421c68b9194bbb9938e2ba2933f 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -709,7 +709,9 @@ TEST_BUILTINS_OBJS += test-date.o
 TEST_BUILTINS_OBJS += test-delta.o
 TEST_BUILTINS_OBJS += test-drop-caches.o
 TEST_BUILTINS_OBJS += test-dump-cache-tree.o
+TEST_BUILTINS_OBJS += test-dump-fsmonitor.o
 TEST_BUILTINS_OBJS += test-dump-split-index.o
+TEST_BUILTINS_OBJS += test-dump-untracked-cache.o
 TEST_BUILTINS_OBJS += test-example-decorate.o
 TEST_BUILTINS_OBJS += test-genrandom.o
 TEST_BUILTINS_OBJS += test-hashmap.o
@@ -720,17 +722,21 @@ TEST_BUILTINS_OBJS += test-match-trees.o
 TEST_BUILTINS_OBJS += test-mergesort.o
 TEST_BUILTINS_OBJS += test-mktemp.o
 TEST_BUILTINS_OBJS += test-online-cpus.o
+TEST_BUILTINS_OBJS += test-parse-options.o
 TEST_BUILTINS_OBJS += test-path-utils.o
+TEST_BUILTINS_OBJS += test-pkt-line.o
 TEST_BUILTINS_OBJS += test-prio-queue.o
+TEST_BUILTINS_OBJS += test-reach.o
 TEST_BUILTINS_OBJS += test-read-cache.o
+TEST_BUILTINS_OBJS += test-read-midx.o
 TEST_BUILTINS_OBJS += test-ref-store.o
 TEST_BUILTINS_OBJS += test-regex.o
 TEST_BUILTINS_OBJS += test-repository.o
 TEST_BUILTINS_OBJS += test-revision-walking.o
 TEST_BUILTINS_OBJS += test-run-command.o
 TEST_BUILTINS_OBJS += test-scrap-cache-tree.o
-TEST_BUILTINS_OBJS += test-sha1-array.o
 TEST_BUILTINS_OBJS += test-sha1.o
+TEST_BUILTINS_OBJS += test-sha1-array.o
 TEST_BUILTINS_OBJS += test-sigchain.o
 TEST_BUILTINS_OBJS += test-strcmp-offset.o
 TEST_BUILTINS_OBJS += test-string-list.o
@@ -738,14 +744,13 @@ TEST_BUILTINS_OBJS += test-submodule-config.o
 TEST_BUILTINS_OBJS += test-subprocess.o
 TEST_BUILTINS_OBJS += test-urlmatch-normalization.o
 TEST_BUILTINS_OBJS += test-wildmatch.o
+TEST_BUILTINS_OBJS += test-windows-named-pipe.o
 TEST_BUILTINS_OBJS += test-write-cache.o
 
-TEST_PROGRAMS_NEED_X += test-dump-fsmonitor
-TEST_PROGRAMS_NEED_X += test-dump-untracked-cache
+# Do not add more tests here unless they have extra dependencies. Add
+# them in TEST_BUILTINS_OBJS above.
 TEST_PROGRAMS_NEED_X += test-fake-ssh
 TEST_PROGRAMS_NEED_X += test-line-buffer
-TEST_PROGRAMS_NEED_X += test-parse-options
-TEST_PROGRAMS_NEED_X += test-pkt-line
 TEST_PROGRAMS_NEED_X += test-svn-fe
 TEST_PROGRAMS_NEED_X += test-tool
 
@@ -835,6 +840,7 @@ LIB_OBJS += column.o
 LIB_OBJS += combine-diff.o
 LIB_OBJS += commit.o
 LIB_OBJS += commit-graph.o
+LIB_OBJS += commit-reach.o
 LIB_OBJS += compat/obstack.o
 LIB_OBJS += compat/terminal.o
 LIB_OBJS += config.o
@@ -847,6 +853,7 @@ LIB_OBJS += csum-file.o
 LIB_OBJS += ctype.o
 LIB_OBJS += date.o
 LIB_OBJS += decorate.o
+LIB_OBJS += delta-islands.o
 LIB_OBJS += diffcore-break.o
 LIB_OBJS += diffcore-delta.o
 LIB_OBJS += diffcore-order.o
@@ -880,6 +887,7 @@ LIB_OBJS += linear-assignment.o
 LIB_OBJS += help.o
 LIB_OBJS += hex.o
 LIB_OBJS += ident.o
+LIB_OBJS += interdiff.o
 LIB_OBJS += json-writer.o
 LIB_OBJS += kwset.o
 LIB_OBJS += levenshtein.o
@@ -900,6 +908,7 @@ LIB_OBJS += merge.o
 LIB_OBJS += merge-blobs.o
 LIB_OBJS += merge-recursive.o
 LIB_OBJS += mergesort.o
+LIB_OBJS += midx.o
 LIB_OBJS += name-hash.o
 LIB_OBJS += negotiator/default.o
 LIB_OBJS += negotiator/skipping.o
@@ -1060,6 +1069,7 @@ BUILTIN_OBJS += builtin/merge-recursive.o
 BUILTIN_OBJS += builtin/merge-tree.o
 BUILTIN_OBJS += builtin/mktag.o
 BUILTIN_OBJS += builtin/mktree.o
+BUILTIN_OBJS += builtin/multi-pack-index.o
 BUILTIN_OBJS += builtin/mv.o
 BUILTIN_OBJS += builtin/name-rev.o
 BUILTIN_OBJS += builtin/notes.o
@@ -1786,6 +1796,7 @@ ifndef V
        QUIET_MSGFMT   = @echo '   ' MSGFMT $@;
        QUIET_GCOV     = @echo '   ' GCOV $@;
        QUIET_SP       = @echo '   ' SP $<;
+       QUIET_HDR      = @echo '   ' HDR $<;
        QUIET_RC       = @echo '   ' RC $@;
        QUIET_SUBDIR0  = +@subdir=
        QUIET_SUBDIR1  = ;$(NO_SUBDIR) echo '   ' SUBDIR $$subdir; \
@@ -2668,6 +2679,17 @@ $(SP_OBJ): %.sp: %.c GIT-CFLAGS FORCE
 .PHONY: sparse $(SP_OBJ)
 sparse: $(SP_OBJ)
 
+GEN_HDRS := command-list.h unicode-width.h
+EXCEPT_HDRS := $(GEN_HDRS) compat% xdiff%
+CHK_HDRS = $(filter-out $(EXCEPT_HDRS),$(patsubst ./%,%,$(LIB_H)))
+HCO = $(patsubst %.h,%.hco,$(CHK_HDRS))
+
+$(HCO): %.hco: %.h FORCE
+       $(QUIET_HDR)$(CC) -include git-compat-util.h -I. -o /dev/null -c -xc $<
+
+.PHONY: hdr-check $(HCO)
+hdr-check: $(HCO)
+
 .PHONY: style
 style:
        git clang-format --style file --diff --extensions c,h
index 5d139ba7f1e20412d24b1397b33e2d4cde016d19..8d0b1654d25536f72abe885634c36412ae8b3246 120000 (symlink)
--- a/RelNotes
+++ b/RelNotes
@@ -1 +1 @@
-Documentation/RelNotes/2.19.0.txt
\ No newline at end of file
+Documentation/RelNotes/2.20.0.txt
\ No newline at end of file
index 0a07b140fedd8fb1f14dd844dae9844641842c57..c1870105eb453980ce421f937e9615d1d9dcd3b3 100644 (file)
--- a/archive.c
+++ b/archive.c
@@ -110,7 +110,8 @@ static const struct attr_check *get_archive_attrs(struct index_state *istate,
        static struct attr_check *check;
        if (!check)
                check = attr_check_initl("export-ignore", "export-subst", NULL);
-       return git_check_attr(istate, path, check) ? NULL : check;
+       git_check_attr(istate, path, check);
+       return check;
 }
 
 static int check_attr_export_ignore(const struct attr_check *check)
diff --git a/attr.c b/attr.c
index 98e4953f6e87f5f5ff777ee57cedf705086e109e..60d284796de728f59b839865eb035e1289a1a060 100644 (file)
--- a/attr.c
+++ b/attr.c
@@ -1143,9 +1143,9 @@ static void collect_some_attrs(const struct index_state *istate,
        fill(path, pathlen, basename_offset, check->stack, check->all_attrs, rem);
 }
 
-int git_check_attr(const struct index_state *istate,
-                  const char *path,
-                  struct attr_check *check)
+void git_check_attr(const struct index_state *istate,
+                   const char *path,
+                   struct attr_check *check)
 {
        int i;
 
@@ -1158,8 +1158,6 @@ int git_check_attr(const struct index_state *istate,
                        value = ATTR__UNSET;
                check->items[i].value = value;
        }
-
-       return 0;
 }
 
 void git_all_attrs(const struct index_state *istate,
diff --git a/attr.h b/attr.h
index 2be86db36e730938777066f1951360396ddc9676..b0378bfe5feadb91134535b339515e554622cfee 100644 (file)
--- a/attr.h
+++ b/attr.h
@@ -63,8 +63,8 @@ void attr_check_free(struct attr_check *check);
  */
 const char *git_attr_name(const struct git_attr *);
 
-int git_check_attr(const struct index_state *istate,
-                  const char *path, struct attr_check *check);
+void git_check_attr(const struct index_state *istate,
+                   const char *path, struct attr_check *check);
 
 /*
  * Retrieve all attributes that apply to the specified path.
index e1275ba79e8e7368f5448043e321db2b9376aa71..e8b17cf7e1d8d2bf7895e2fa33d0cf0cbc47ae63 100644 (file)
--- a/bisect.c
+++ b/bisect.c
@@ -13,6 +13,8 @@
 #include "sha1-array.h"
 #include "argv-array.h"
 #include "commit-slab.h"
+#include "commit-reach.h"
+#include "object-store.h"
 
 static struct oid_array good_revs;
 static struct oid_array skipped_revs;
@@ -120,14 +122,14 @@ static inline int halfway(struct commit_list *p, int nr)
        }
 }
 
-#if !DEBUG_BISECT
-#define show_list(a,b,c,d) do { ; } while (0)
-#else
 static void show_list(const char *debug, int counted, int nr,
                      struct commit_list *list)
 {
        struct commit_list *p;
 
+       if (!DEBUG_BISECT)
+               return;
+
        fprintf(stderr, "%s (%d/%d)\n", debug, counted, nr);
 
        for (p = list; p; p = p->next) {
@@ -145,7 +147,7 @@ static void show_list(const char *debug, int counted, int nr,
                        (flags & TREESAME) ? ' ' : 'T',
                        (flags & UNINTERESTING) ? 'U' : ' ',
                        (flags & COUNTED) ? 'C' : ' ');
-               if (commit->util)
+               if (*commit_weight_at(&commit_weight, p->item))
                        fprintf(stderr, "%3d", weight(p));
                else
                        fprintf(stderr, "---");
@@ -160,7 +162,6 @@ static void show_list(const char *debug, int counted, int nr,
                fprintf(stderr, "\n");
        }
 }
-#endif /* DEBUG_BISECT */
 
 static struct commit_list *best_bisection(struct commit_list *list, int nr)
 {
@@ -595,7 +596,7 @@ static struct commit_list *skip_away(struct commit_list *list, int count)
 
        for (i = 0; cur; cur = cur->next, i++) {
                if (i == index) {
-                       if (oidcmp(&cur->item->object.oid, current_bad_oid))
+                       if (!oideq(&cur->item->object.oid, current_bad_oid))
                                return cur;
                        if (previous)
                                return previous;
@@ -807,7 +808,7 @@ static void check_merge_bases(int rev_nr, struct commit **rev, int no_checkout)
 
        for (; result; result = result->next) {
                const struct object_id *mb = &result->item->object.oid;
-               if (!oidcmp(mb, current_bad_oid)) {
+               if (oideq(mb, current_bad_oid)) {
                        handle_bad_merge_base();
                } else if (0 <= oid_array_lookup(&good_revs, mb)) {
                        continue;
@@ -988,7 +989,7 @@ int bisect_next_all(const char *prefix, int no_checkout)
 
        bisect_rev = &revs.commits->item->object.oid;
 
-       if (!oidcmp(bisect_rev, current_bad_oid)) {
+       if (oideq(bisect_rev, current_bad_oid)) {
                exit_if_skipped_commits(tried, current_bad_oid);
                printf("%s is the first %s commit\n", oid_to_hex(bisect_rev),
                        term_bad);
diff --git a/blame.c b/blame.c
index aca06f4b1227a4930a2dbb13b87068a618a76f97..d5f7b7237c4e9f9dcb6ad37504a98a0fcff66f80 100644 (file)
--- a/blame.c
+++ b/blame.c
@@ -1457,14 +1457,14 @@ static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin,
                        porigin = find(p, origin);
                        if (!porigin)
                                continue;
-                       if (!oidcmp(&porigin->blob_oid, &origin->blob_oid)) {
+                       if (oideq(&porigin->blob_oid, &origin->blob_oid)) {
                                pass_whole_blame(sb, origin, porigin);
                                blame_origin_decref(porigin);
                                goto finish;
                        }
                        for (j = same = 0; j < i; j++)
                                if (sg_origin[j] &&
-                                   !oidcmp(&sg_origin[j]->blob_oid, &porigin->blob_oid)) {
+                                   oideq(&sg_origin[j]->blob_oid, &porigin->blob_oid)) {
                                        same = 1;
                                        break;
                                }
@@ -1832,7 +1832,7 @@ void setup_scoreboard(struct blame_scoreboard *sb,
 
                sb->revs->children.name = "children";
                while (c->parents &&
-                      oidcmp(&c->object.oid, &sb->final->object.oid)) {
+                      !oideq(&c->object.oid, &sb->final->object.oid)) {
                        struct commit_list *l = xcalloc(1, sizeof(*l));
 
                        l->item = c;
@@ -1842,7 +1842,7 @@ void setup_scoreboard(struct blame_scoreboard *sb,
                        c = c->parents->item;
                }
 
-               if (oidcmp(&c->object.oid, &sb->final->object.oid))
+               if (!oideq(&c->object.oid, &sb->final->object.oid))
                        die(_("--reverse --first-parent together require range along first-parent chain"));
        }
 
index 99206df4bd43fc0c4ff8db538912dec1c600c397..962f0489ab212cc613d98ac2577f3999278d099b 100644 (file)
--- a/builtin.h
+++ b/builtin.h
@@ -191,6 +191,7 @@ extern int cmd_merge_recursive(int argc, const char **argv, const char *prefix);
 extern int cmd_merge_tree(int argc, const char **argv, const char *prefix);
 extern int cmd_mktag(int argc, const char **argv, const char *prefix);
 extern int cmd_mktree(int argc, const char **argv, const char *prefix);
+extern int cmd_multi_pack_index(int argc, const char **argv, const char *prefix);
 extern int cmd_mv(int argc, const char **argv, const char *prefix);
 extern int cmd_name_rev(int argc, const char **argv, const char *prefix);
 extern int cmd_notes(int argc, const char **argv, const char *prefix);
index 9916498a29bbd8fa7c5c5d8e7bd32e1dc184909b..0b64bcdebe0f4581953adccf8addf1c3fd1eb61e 100644 (file)
@@ -454,7 +454,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
         * Check the "pathspec '%s' did not match any files" block
         * below before enabling new magic.
         */
-       parse_pathspec(&pathspec, 0,
+       parse_pathspec(&pathspec, PATHSPEC_ATTR,
                       PATHSPEC_PREFER_FULL |
                       PATHSPEC_SYMLINK_LEADING_PATH,
                       prefix, argv);
index 5e866d17c7c7b46ccf671916c853be228bf38ad6..5e643e2a3e3d70d9c674d3edab2eb1f2f08f3392 100644 (file)
@@ -1244,6 +1244,10 @@ static int parse_mail(struct am_state *state, const char *mail)
        fclose(mi.input);
        fclose(mi.output);
 
+       if (mi.format_flowed)
+               warning(_("Patch sent with format=flowed; "
+                         "space at the end of lines might be lost."));
+
        /* Extract message and author information */
        fp = xfopen(am_path(state, "info"), "r");
        while (!strbuf_getline_lf(&sb, fp)) {
@@ -2078,7 +2082,7 @@ static int safe_to_abort(const struct am_state *state)
        if (get_oid("HEAD", &head))
                oidclr(&head);
 
-       if (!oidcmp(&head, &abort_safety))
+       if (oideq(&head, &abort_safety))
                return 1;
 
        warning(_("You seem to have moved HEAD since the last 'am' failure.\n"
index bbd006aab4b4798b7e86530fe3562c753e879bc1..c396c41533c38630378db370f1db30e99aad849b 100644 (file)
@@ -23,6 +23,7 @@
 #include "ref-filter.h"
 #include "worktree.h"
 #include "help.h"
+#include "commit-reach.h"
 
 static const char * const builtin_branch_usage[] = {
        N_("git branch [<options>] [-r | -a] [--merged | --no-merged]"),
@@ -37,7 +38,6 @@ static const char * const builtin_branch_usage[] = {
 
 static const char *head;
 static struct object_id head_oid;
-static int used_deprecated_reflog_option;
 
 static int branch_use_color = -1;
 static char branch_colors[][COLOR_MAXLEN] = {
@@ -578,14 +578,6 @@ static int edit_branch_description(const char *branch_name)
        return 0;
 }
 
-static int deprecated_reflog_option_cb(const struct option *opt,
-                                      const char *arg, int unset)
-{
-       used_deprecated_reflog_option = 1;
-       *(int *)opt->value = !unset;
-       return 0;
-}
-
 int cmd_branch(int argc, const char **argv, const char *prefix)
 {
        int delete = 0, rename = 0, copy = 0, force = 0, list = 0;
@@ -627,14 +619,8 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
                OPT_BIT('M', NULL, &rename, N_("move/rename a branch, even if target exists"), 2),
                OPT_BIT('c', "copy", &copy, N_("copy a branch and its reflog"), 1),
                OPT_BIT('C', NULL, &copy, N_("copy a branch, even if target exists"), 2),
-               OPT_BOOL(0, "list", &list, N_("list branch names")),
+               OPT_BOOL('l', "list", &list, N_("list branch names")),
                OPT_BOOL(0, "create-reflog", &reflog, N_("create the branch's reflog")),
-               {
-                       OPTION_CALLBACK, 'l', NULL, &reflog, NULL,
-                       N_("deprecated synonym for --create-reflog"),
-                       PARSE_OPT_NOARG | PARSE_OPT_HIDDEN,
-                       deprecated_reflog_option_cb
-               },
                OPT_BOOL(0, "edit-description", &edit_description,
                         N_("edit the description for the branch")),
                OPT__FORCE(&force, N_("force creation, move/rename, deletion"), PARSE_OPT_NOCOMPLETE),
@@ -707,11 +693,6 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
        if (list)
                setup_auto_pager("branch", 1);
 
-       if (used_deprecated_reflog_option && !list) {
-               warning("the '-l' alias for '--create-reflog' is deprecated;");
-               warning("it will be removed in a future version of Git");
-       }
-
        if (delete) {
                if (!argc)
                        die(_("branch name required"));
index c05573ff9cd091cbe3b5ff1980fa7fb4815f6ebd..30a2f84274d32977769f778d5704accb599cf0e7 100644 (file)
@@ -65,8 +65,7 @@ static void check_attr(const char *prefix,
        if (collect_all) {
                git_all_attrs(&the_index, full_path, check);
        } else {
-               if (git_check_attr(&the_index, full_path, check))
-                       die("git_check_attr died");
+               git_check_attr(&the_index, full_path, check);
        }
        output_attr(check, file);
 
index 29ef50013dccbd118093af0b4dc08eb907953cc2..b30b48767e54a93f18ef0d341a04f14cc390e9a9 100644 (file)
@@ -25,6 +25,8 @@
 #include "submodule.h"
 #include "advice.h"
 
+static int checkout_optimize_new_branch;
+
 static const char * const checkout_usage[] = {
        N_("git checkout [<options>] <branch>"),
        N_("git checkout [<options>] [<branch>] -- <file>..."),
@@ -42,6 +44,10 @@ struct checkout_opts {
        int ignore_skipworktree;
        int ignore_other_worktrees;
        int show_progress;
+       /*
+        * If new checkout options are added, skip_merge_working_tree
+        * should be updated accordingly.
+        */
 
        const char *new_branch;
        const char *new_branch_force;
@@ -96,7 +102,7 @@ static int update_some(const struct object_id *oid, struct strbuf *base,
        if (pos >= 0) {
                struct cache_entry *old = active_cache[pos];
                if (ce->ce_mode == old->ce_mode &&
-                   !oidcmp(&ce->oid, &old->oid)) {
+                   oideq(&ce->oid, &old->oid)) {
                        old->ce_flags |= CE_UPDATE;
                        discard_cache_entry(ce);
                        return 0;
@@ -472,6 +478,98 @@ static void setup_branch_path(struct branch_info *branch)
        branch->path = strbuf_detach(&buf, NULL);
 }
 
+/*
+ * Skip merging the trees, updating the index and working directory if and
+ * only if we are creating a new branch via "git checkout -b <new_branch>."
+ */
+static int skip_merge_working_tree(const struct checkout_opts *opts,
+       const struct branch_info *old_branch_info,
+       const struct branch_info *new_branch_info)
+{
+       /*
+        * Do the merge if sparse checkout is on and the user has not opted in
+        * to the optimized behavior
+        */
+       if (core_apply_sparse_checkout && !checkout_optimize_new_branch)
+               return 0;
+
+       /*
+        * We must do the merge if we are actually moving to a new commit.
+        */
+       if (!old_branch_info->commit || !new_branch_info->commit ||
+               oidcmp(&old_branch_info->commit->object.oid, &new_branch_info->commit->object.oid))
+               return 0;
+
+       /*
+        * opts->patch_mode cannot be used with switching branches so is
+        * not tested here
+        */
+
+       /*
+        * opts->quiet only impacts output so doesn't require a merge
+        */
+
+       /*
+        * Honor the explicit request for a three-way merge or to throw away
+        * local changes
+        */
+       if (opts->merge || opts->force)
+               return 0;
+
+       /*
+        * --detach is documented as "updating the index and the files in the
+        * working tree" but this optimization skips those steps so fall through
+        * to the regular code path.
+        */
+       if (opts->force_detach)
+               return 0;
+
+       /*
+        * opts->writeout_stage cannot be used with switching branches so is
+        * not tested here
+        */
+
+       /*
+        * Honor the explicit ignore requests
+        */
+       if (!opts->overwrite_ignore || opts->ignore_skipworktree ||
+               opts->ignore_other_worktrees)
+               return 0;
+
+       /*
+        * opts->show_progress only impacts output so doesn't require a merge
+        */
+
+       /*
+        * If we aren't creating a new branch any changes or updates will
+        * happen in the existing branch.  Since that could only be updating
+        * the index and working directory, we don't want to skip those steps
+        * or we've defeated any purpose in running the command.
+        */
+       if (!opts->new_branch)
+               return 0;
+
+       /*
+        * new_branch_force is defined to "create/reset and checkout a branch"
+        * so needs to go through the merge to do the reset
+        */
+       if (opts->new_branch_force)
+               return 0;
+
+       /*
+        * A new orphaned branch requires the index and the working tree to be
+        * adjusted to <start_point>
+        */
+       if (opts->new_orphan_branch)
+               return 0;
+
+       /*
+        * Remaining variables are not checkout options but used to track state
+        */
+
+       return 1;
+}
+
 static int merge_working_tree(const struct checkout_opts *opts,
                              struct branch_info *old_branch_info,
                              struct branch_info *new_branch_info,
@@ -846,10 +944,19 @@ static int switch_branches(const struct checkout_opts *opts,
                parse_commit_or_die(new_branch_info->commit);
        }
 
-       ret = merge_working_tree(opts, &old_branch_info, new_branch_info, &writeout_error);
-       if (ret) {
-               free(path_to_free);
-               return ret;
+       /* optimize the "checkout -b <new_branch>" path */
+       if (skip_merge_working_tree(opts, &old_branch_info, new_branch_info)) {
+               if (!checkout_optimize_new_branch && !opts->quiet) {
+                       if (read_cache_preload(NULL) < 0)
+                               return error(_("index file corrupt"));
+                       show_local_changes(&new_branch_info->commit->object, &opts->diff_options);
+               }
+       } else {
+               ret = merge_working_tree(opts, &old_branch_info, new_branch_info, &writeout_error);
+               if (ret) {
+                       free(path_to_free);
+                       return ret;
+               }
        }
 
        if (!opts->quiet && !old_branch_info.path && old_branch_info.commit && new_branch_info->commit != old_branch_info.commit)
@@ -864,6 +971,11 @@ static int switch_branches(const struct checkout_opts *opts,
 
 static int git_checkout_config(const char *var, const char *value, void *cb)
 {
+       if (!strcmp(var, "checkout.optimizenewbranch")) {
+               checkout_optimize_new_branch = git_config_bool(var, value);
+               return 0;
+       }
+
        if (!strcmp(var, "diff.ignoresubmodules")) {
                struct checkout_opts *opts = cb;
                handle_ignore_submodules_arg(&opts->diff_options, value);
index fd2c3ef090146058651af5e411dd304153ee114d..15b142d64640e29c10e62d565ac21adbaaeebca4 100644 (file)
@@ -748,6 +748,7 @@ static int checkout(int submodule_progress)
        memset(&opts, 0, sizeof opts);
        opts.update = 1;
        opts.merge = 1;
+       opts.clone = 1;
        opts.fn = oneway_merge;
        opts.verbose_update = (option_verbosity >= 0);
        opts.src_index = &the_index;
index 0bf0c486575a8557b729658ce05be02d28a1a8b8..22b974f4b434913cf3ebe9e4073d2cb847f8a468 100644 (file)
@@ -120,6 +120,8 @@ static int graph_read(int argc, const char **argv)
        return 0;
 }
 
+extern int read_replace_refs;
+
 static int graph_write(int argc, const char **argv)
 {
        struct string_list *pack_indexes = NULL;
@@ -150,8 +152,10 @@ static int graph_write(int argc, const char **argv)
        if (!opts.obj_dir)
                opts.obj_dir = get_object_directory();
 
+       read_replace_refs = 0;
+
        if (opts.reachable) {
-               write_commit_graph_reachable(opts.obj_dir, opts.append);
+               write_commit_graph_reachable(opts.obj_dir, opts.append, 1);
                return 0;
        }
 
@@ -171,7 +175,8 @@ static int graph_write(int argc, const char **argv)
        write_commit_graph(opts.obj_dir,
                           pack_indexes,
                           commit_hex,
-                          opts.append);
+                          opts.append,
+                          1);
 
        string_list_clear(&lines, 0);
        return 0;
index 83233ca1a54aaf1d25c7084df90d5f1fd4f49a24..1d5292e4d827312d3a08ceb44f6a120cbd0c4576 100644 (file)
@@ -33,6 +33,8 @@
 #include "sequencer.h"
 #include "mailmap.h"
 #include "help.h"
+#include "commit-reach.h"
+#include "commit-graph.h"
 
 static const char * const builtin_commit_usage[] = {
        N_("git commit [<options>] [--] <pathspec>..."),
@@ -1652,6 +1654,9 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
                      "new_index file. Check that disk is not full and quota is\n"
                      "not exceeded, and then \"git reset HEAD\" to recover."));
 
+       if (git_env_bool(GIT_TEST_COMMIT_GRAPH, 0))
+               write_commit_graph_reachable(get_object_directory(), 0, 0);
+
        rerere(0);
        run_command_v_opt(argv_gc_auto, RUN_GIT_CMD);
        run_commit_hook(use_editor, get_index_file(), "post-commit", NULL);
index d51e2ce1ec016ab35f1781675ff70793f0b974d5..a7cad052c615807b819f32c999c161f0f905c1bc 100644 (file)
@@ -123,7 +123,7 @@ int cmd_count_objects(int argc, const char **argv, const char *prefix)
                struct strbuf pack_buf = STRBUF_INIT;
                struct strbuf garbage_buf = STRBUF_INIT;
 
-               for (p = get_packed_git(the_repository); p; p = p->next) {
+               for (p = get_all_packs(the_repository); p; p = p->next) {
                        if (!p->pack_local)
                                continue;
                        if (open_pack_index(p))
index 41606c8a90092bca3f782980baf37031744e0cd6..22c0541da552c8135e21a6f3c9d3f7829fc0d31a 100644 (file)
@@ -62,7 +62,7 @@ static const char *prio_names[] = {
        N_("head"), N_("lightweight"), N_("annotated"),
 };
 
-static int commit_name_cmp(const void *unused_cmp_data,
+static int commit_name_neq(const void *unused_cmp_data,
                           const void *entry,
                           const void *entry_or_key,
                           const void *peeled)
@@ -70,7 +70,7 @@ static int commit_name_cmp(const void *unused_cmp_data,
        const struct commit_name *cn1 = entry;
        const struct commit_name *cn2 = entry_or_key;
 
-       return oidcmp(&cn1->peeled, peeled ? peeled : &cn2->peeled);
+       return !oideq(&cn1->peeled, peeled ? peeled : &cn2->peeled);
 }
 
 static inline struct commit_name *find_commit_name(const struct object_id *peeled)
@@ -190,7 +190,7 @@ static int get_name(const char *path, const struct object_id *oid, int flag, voi
 
        /* Is it annotated? */
        if (!peel_ref(path, &peeled)) {
-               is_annotated = !!oidcmp(oid, &peeled);
+               is_annotated = !oideq(oid, &peeled);
        } else {
                oidcpy(&peeled, oid);
                is_annotated = 0;
@@ -469,7 +469,7 @@ static void process_object(struct object *obj, const char *path, void *data)
 {
        struct process_commit_data *pcd = data;
 
-       if (!oidcmp(&pcd->looking_for, &obj->oid) && !pcd->dst->len) {
+       if (oideq(&pcd->looking_for, &obj->oid) && !pcd->dst->len) {
                reset_revision_walk();
                describe_commit(&pcd->current_commit, pcd->dst);
                strbuf_addf(pcd->dst, ":%s", path);
@@ -596,7 +596,7 @@ int cmd_describe(int argc, const char **argv, const char *prefix)
                return cmd_name_rev(args.argc, args.argv, prefix);
        }
 
-       hashmap_init(&names, commit_name_cmp, NULL, 0);
+       hashmap_init(&names, commit_name_neq, NULL, 0);
        for_each_rawref(get_name, NULL);
        if (!hashmap_get_size(&names) && !always)
                die(_("No names found, cannot describe anything."));
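The hunks above (and many that follow) are part of a tree-wide switch from !oidcmp()/!hashcmp() to oideq()/hasheq(), and the hashmap comparator is renamed to commit_name_neq to reflect that it answers "are these entries different?" (zero means equal) rather than providing a three-way comparison. A minimal standalone sketch of the assumed equality helpers — the fixed 32-byte buffer is illustrative; the real struct object_id sizes its buffer for the active hash algorithm:

#include <string.h>

struct oid_sketch { unsigned char hash[32]; }; /* stand-in for struct object_id */

/* hasheq(): non-zero when two raw hashes match */
static inline int hasheq_sketch(const unsigned char *a, const unsigned char *b,
                                size_t rawsz)
{
        return !memcmp(a, b, rawsz);
}

/* oideq(): boolean equality, so callers write "if (oideq(a, b))"
 * instead of the double-negative "if (!oidcmp(a, b))" */
static inline int oideq_sketch(const struct oid_sketch *a,
                               const struct oid_sketch *b)
{
        return hasheq_sketch(a->hash, b->hash, sizeof(a->hash));
}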
index 361a3c3ed38769f798b83933755fed84724ce43c..b3a8ba488ff824e2feda6b24df362f662fe5ddde 100644 (file)
@@ -41,7 +41,7 @@ static void stuff_change(struct diff_options *opt,
        struct diff_filespec *one, *two;
 
        if (!is_null_oid(old_oid) && !is_null_oid(new_oid) &&
-           !oidcmp(old_oid, new_oid) && (old_mode == new_mode))
+           oideq(old_oid, new_oid) && (old_mode == new_mode))
                return;
 
        if (opt->flags.reverse_diff) {
index cdd585ca76d51f6d5dcca78a9e1dfa6f8102c476..b41a9199ff441579655c91fc692211257e4e91ed 100644 (file)
@@ -116,7 +116,7 @@ static int use_wt_file(const char *workdir, const char *name,
                        if (is_null_oid(oid)) {
                                oidcpy(oid, &wt_oid);
                                use = 1;
-                       } else if (!oidcmp(oid, &wt_oid))
+                       } else if (oideq(oid, &wt_oid))
                                use = 1;
                }
        }
@@ -438,7 +438,7 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
                        strbuf_reset(&buf);
                        strbuf_addf(&buf, "Subproject commit %s",
                                    oid_to_hex(&roid));
-                       if (!oidcmp(&loid, &roid))
+                       if (oideq(&loid, &roid))
                                strbuf_addstr(&buf, "-dirty");
                        add_left_or_right(&submodules, dst_path, buf.buf, 1);
                        continue;
index 9bd8a14b57b8e1b014503dffbcffa27e7242d67a..74f3bf5c96974a5f76c8b50c26915b582e780bb7 100644 (file)
@@ -384,7 +384,7 @@ static void show_filemodify(struct diff_queue_struct *q,
                                string_list_insert(changed, spec->path);
                                putchar('\n');
 
-                               if (!oidcmp(&ospec->oid, &spec->oid) &&
+                               if (oideq(&ospec->oid, &spec->oid) &&
                                    ospec->mode == spec->mode)
                                        break;
                        }
index 61bec5d213d47141d14162bd1a2ca88861c22ef7..0696abfc2a158baa3177fa9e71e2c16bc64655a4 100644 (file)
@@ -22,6 +22,7 @@
 #include "utf8.h"
 #include "packfile.h"
 #include "list-objects-filter-options.h"
+#include "commit-reach.h"
 
 static const char * const builtin_fetch_usage[] = {
        N_("git fetch [<options>] [<repository> [<refspec>...]]"),
@@ -114,7 +115,7 @@ static struct option builtin_fetch_options[] = {
                 N_("append to .git/FETCH_HEAD instead of overwriting")),
        OPT_STRING(0, "upload-pack", &upload_pack, N_("path"),
                   N_("path to upload pack on remote end")),
-       OPT__FORCE(&force, N_("force overwrite of local branch"), 0),
+       OPT__FORCE(&force, N_("force overwrite of local reference"), 0),
        OPT_BOOL('m', "multiple", &multiple,
                 N_("fetch from multiple remotes")),
        OPT_SET_INT('t', "tags", &tags,
@@ -238,7 +239,7 @@ static int will_fetch(struct ref **head, const unsigned char *sha1)
 {
        struct ref *rm = *head;
        while (rm) {
-               if (!hashcmp(rm->old_oid.hash, sha1))
+               if (hasheq(rm->old_oid.hash, sha1))
                        return 1;
                rm = rm->next;
        }
@@ -507,7 +508,7 @@ static void adjust_refcol_width(const struct ref *ref)
        int max, rlen, llen, len;
 
        /* uptodate lines are only shown on high verbosity level */
-       if (!verbosity && !oidcmp(&ref->peer_ref->old_oid, &ref->old_oid))
+       if (!verbosity && oideq(&ref->peer_ref->old_oid, &ref->old_oid))
                return;
 
        max    = term_columns();
@@ -644,7 +645,7 @@ static int update_local_ref(struct ref *ref,
        if (type < 0)
                die(_("object %s not found"), oid_to_hex(&ref->new_oid));
 
-       if (!oidcmp(&ref->old_oid, &ref->new_oid)) {
+       if (oideq(&ref->old_oid, &ref->new_oid)) {
                if (verbosity > 0)
                        format_display(display, '=', _("[up to date]"), NULL,
                                       remote, pretty_ref, summary_width);
@@ -667,12 +668,18 @@ static int update_local_ref(struct ref *ref,
 
        if (!is_null_oid(&ref->old_oid) &&
            starts_with(ref->name, "refs/tags/")) {
-               int r;
-               r = s_update_ref("updating tag", ref, 0);
-               format_display(display, r ? '!' : 't', _("[tag update]"),
-                              r ? _("unable to update local ref") : NULL,
-                              remote, pretty_ref, summary_width);
-               return r;
+               if (force || ref->force) {
+                       int r;
+                       r = s_update_ref("updating tag", ref, 0);
+                       format_display(display, r ? '!' : 't', _("[tag update]"),
+                                      r ? _("unable to update local ref") : NULL,
+                                      remote, pretty_ref, summary_width);
+                       return r;
+               } else {
+                       format_display(display, '!', _("[rejected]"), _("would clobber existing tag"),
+                                      remote, pretty_ref, summary_width);
+                       return 1;
+               }
        }
 
        current = lookup_commit_reference_gently(the_repository,
index f35ff1612bb563c819b611e82f8e3a7730d32461..59a40342b675130de6a2ddd76451bff8818e9623 100644 (file)
@@ -12,6 +12,7 @@
 #include "fmt-merge-msg.h"
 #include "gpg-interface.h"
 #include "repository.h"
+#include "commit-reach.h"
 
 static const char * const fmt_merge_msg_usage[] = {
        N_("git fmt-merge-msg [-m <message>] [--log[=<n>] | --no-log] [--file <file>]"),
@@ -78,9 +79,9 @@ static struct merge_parent *find_merge_parent(struct merge_parents *table,
 {
        int i;
        for (i = 0; i < table->nr; i++) {
-               if (given && oidcmp(&table->item[i].given, given))
+               if (given && !oideq(&table->item[i].given, given))
                        continue;
-               if (commit && oidcmp(&table->item[i].commit, commit))
+               if (commit && !oideq(&table->item[i].commit, commit))
                        continue;
                return &table->item[i];
        }
@@ -582,7 +583,7 @@ static void find_merge_parents(struct merge_parents *result,
        while (parents) {
                struct commit *cmit = pop_commit(&parents);
                for (i = 0; i < result->nr; i++)
-                       if (!oidcmp(&result->item[i].commit, &cmit->object.oid))
+                       if (oideq(&result->item[i].commit, &cmit->object.oid))
                                result->item[i].used = 1;
        }
 
index 250f5af1182ddc66ac0a729c1d39e94d8b19a878..06eb42172099a39e6f181d0dd7eab581595d9756 100644 (file)
@@ -740,7 +740,7 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
                        struct progress *progress = NULL;
 
                        if (show_progress) {
-                               for (p = get_packed_git(the_repository); p;
+                               for (p = get_all_packs(the_repository); p;
                                     p = p->next) {
                                        if (open_pack_index(p))
                                                continue;
@@ -749,7 +749,7 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
 
                                progress = start_progress(_("Checking objects"), total);
                        }
-                       for (p = get_packed_git(the_repository); p;
+                       for (p = get_all_packs(the_repository); p;
                             p = p->next) {
                                /* verify gives error messages itself */
                                if (verify_pack(p, fsck_obj_buffer,
@@ -848,5 +848,23 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
                }
        }
 
+       if (!git_config_get_bool("core.multipackindex", &i) && i) {
+               struct child_process midx_verify = CHILD_PROCESS_INIT;
+               const char *midx_argv[] = { "multi-pack-index", "verify", NULL, NULL, NULL };
+
+               midx_verify.argv = midx_argv;
+               midx_verify.git_cmd = 1;
+               if (run_command(&midx_verify))
+                       errors_found |= ERROR_COMMIT_GRAPH;
+
+               prepare_alt_odb(the_repository);
+               for (alt = the_repository->objects->alt_odb_list; alt; alt = alt->next) {
+                       midx_argv[2] = "--object-dir";
+                       midx_argv[3] = alt->path;
+                       if (run_command(&midx_verify))
+                               errors_found |= ERROR_COMMIT_GRAPH;
+               }
+       }
+
        return errors_found;
 }
index 57069442b0dc1297366e29b1fbf0e6ee70377146..871a56f1c5a5804db9363b117ae3891ea832bc5f 100644 (file)
@@ -183,7 +183,7 @@ static struct packed_git *find_base_packs(struct string_list *packs,
 {
        struct packed_git *p, *base = NULL;
 
-       for (p = get_packed_git(the_repository); p; p = p->next) {
+       for (p = get_all_packs(the_repository); p; p = p->next) {
                if (!p->pack_local)
                        continue;
                if (limit) {
@@ -208,7 +208,7 @@ static int too_many_packs(void)
        if (gc_auto_pack_limit <= 0)
                return 0;
 
-       for (cnt = 0, p = get_packed_git(the_repository); p; p = p->next) {
+       for (cnt = 0, p = get_all_packs(the_repository); p; p = p->next) {
                if (!p->pack_local)
                        continue;
                if (p->pack_keep)
@@ -441,10 +441,16 @@ static const char *lock_repo_for_gc(int force, pid_t* ret_pid)
        return NULL;
 }
 
+/*
+ * Returns 0 if there was no previous error and gc can proceed, 1 if
+ * gc should not proceed due to an error in the last run. Prints a
+ * message and returns -1 if an error occurred while reading gc.log
+ */
 static int report_last_gc_error(void)
 {
        struct strbuf sb = STRBUF_INIT;
        int ret = 0;
+       ssize_t len;
        struct stat st;
        char *gc_log_path = git_pathdup("gc.log");
 
@@ -452,39 +458,47 @@ static int report_last_gc_error(void)
                if (errno == ENOENT)
                        goto done;
 
-               ret = error_errno(_("Can't stat %s"), gc_log_path);
+               ret = error_errno(_("cannot stat '%s'"), gc_log_path);
                goto done;
        }
 
        if (st.st_mtime < gc_log_expire_time)
                goto done;
 
-       ret = strbuf_read_file(&sb, gc_log_path, 0);
-       if (ret > 0)
-               ret = error(_("The last gc run reported the following. "
+       len = strbuf_read_file(&sb, gc_log_path, 0);
+       if (len < 0)
+               ret = error_errno(_("cannot read '%s'"), gc_log_path);
+       else if (len > 0) {
+               /*
+                * A previous gc failed.  Report the error, and don't
+                * bother with an automatic gc run since it is likely
+                * to fail in the same way.
+                */
+               warning(_("The last gc run reported the following. "
                               "Please correct the root cause\n"
                               "and remove %s.\n"
                               "Automatic cleanup will not be performed "
                               "until the file is removed.\n\n"
                               "%s"),
                            gc_log_path, sb.buf);
+               ret = 1;
+       }
        strbuf_release(&sb);
 done:
        free(gc_log_path);
        return ret;
 }
 
-static int gc_before_repack(void)
+static void gc_before_repack(void)
 {
        if (pack_refs && run_command_v_opt(pack_refs_cmd.argv, RUN_GIT_CMD))
-               return error(FAILED_RUN, pack_refs_cmd.argv[0]);
+               die(FAILED_RUN, pack_refs_cmd.argv[0]);
 
        if (prune_reflogs && run_command_v_opt(reflog.argv, RUN_GIT_CMD))
-               return error(FAILED_RUN, reflog.argv[0]);
+               die(FAILED_RUN, reflog.argv[0]);
 
        pack_refs = 0;
        prune_reflogs = 0;
-       return 0;
 }
 
 int cmd_gc(int argc, const char **argv, const char *prefix)
@@ -565,13 +579,17 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
                        fprintf(stderr, _("See \"git help gc\" for manual housekeeping.\n"));
                }
                if (detach_auto) {
-                       if (report_last_gc_error())
-                               return -1;
+                       int ret = report_last_gc_error();
+                       if (ret < 0)
+                               /* an I/O error occurred, already reported */
+                               exit(128);
+                       if (ret == 1)
+                               /* Last gc --auto failed. Skip this one. */
+                               return 0;
 
                        if (lock_repo_for_gc(force, &pid))
                                return 0;
-                       if (gc_before_repack())
-                               return -1;
+                       gc_before_repack(); /* dies on failure */
                        delete_tempfile(&pidfile);
 
                        /*
@@ -611,13 +629,12 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
                atexit(process_log_file_at_exit);
        }
 
-       if (gc_before_repack())
-               return -1;
+       gc_before_repack();
 
        if (!repository_format_precious_objects) {
                close_all_packs(the_repository->objects);
                if (run_command_v_opt(repack.argv, RUN_GIT_CMD))
-                       return error(FAILED_RUN, repack.argv[0]);
+                       die(FAILED_RUN, repack.argv[0]);
 
                if (prune_expire) {
                        argv_array_push(&prune, prune_expire);
@@ -627,18 +644,18 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
                                argv_array_push(&prune,
                                                "--exclude-promisor-objects");
                        if (run_command_v_opt(prune.argv, RUN_GIT_CMD))
-                               return error(FAILED_RUN, prune.argv[0]);
+                               die(FAILED_RUN, prune.argv[0]);
                }
        }
 
        if (prune_worktrees_expire) {
                argv_array_push(&prune_worktrees, prune_worktrees_expire);
                if (run_command_v_opt(prune_worktrees.argv, RUN_GIT_CMD))
-                       return error(FAILED_RUN, prune_worktrees.argv[0]);
+                       die(FAILED_RUN, prune_worktrees.argv[0]);
        }
 
        if (run_command_v_opt(rerere.argv, RUN_GIT_CMD))
-               return error(FAILED_RUN, rerere.argv[0]);
+               die(FAILED_RUN, rerere.argv[0]);
 
        report_garbage = report_pack_garbage;
        reprepare_packed_git(the_repository);
@@ -646,7 +663,8 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
                clean_pack_garbage();
 
        if (gc_write_commit_graph)
-               write_commit_graph_reachable(get_object_directory(), 0);
+               write_commit_graph_reachable(get_object_directory(), 0,
+                                            !quiet && !daemonized);
 
        if (auto_gc && too_many_loose_objects())
                warning(_("There are too many unreachable loose objects; "
index 9582ead9507ee27f88386fcc55c044eba233c770..2004e25da230c7d9f0b1e951d8a59dca2d9d08b8 100644 (file)
@@ -719,9 +719,9 @@ static void find_ref_delta_children(const struct object_id *oid,
                *last_index = -1;
                return;
        }
-       while (first > 0 && !oidcmp(&ref_deltas[first - 1].oid, oid))
+       while (first > 0 && oideq(&ref_deltas[first - 1].oid, oid))
                --first;
-       while (last < end && !oidcmp(&ref_deltas[last + 1].oid, oid))
+       while (last < end && oideq(&ref_deltas[last + 1].oid, oid))
                ++last;
        *first_index = first;
        *last_index = last;
@@ -1166,7 +1166,7 @@ static void parse_pack_objects(unsigned char *hash)
        /* Check pack integrity */
        flush();
        the_hash_algo->final_fn(hash, &input_ctx);
-       if (hashcmp(fill(the_hash_algo->rawsz), hash))
+       if (!hasheq(fill(the_hash_algo->rawsz), hash))
                die(_("pack is corrupted (SHA1 mismatch)"));
        use(the_hash_algo->rawsz);
 
@@ -1280,7 +1280,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
                fixup_pack_header_footer(output_fd, pack_hash,
                                         curr_pack, nr_objects,
                                         read_hash, consumed_bytes-the_hash_algo->rawsz);
-               if (hashcmp(read_hash, tail_hash) != 0)
+               if (!hasheq(read_hash, tail_hash))
                        die(_("Unexpected tail checksum for %s "
                              "(disk corruption?)"), curr_pack);
        }
index b742539d4de20361bb43bcb4dc33cfc9165c42fb..4b87e0dd2e8854bf940e92e3020c5e4f66e8a6bd 100644 (file)
@@ -104,6 +104,7 @@ int cmd_interpret_trailers(int argc, const char **argv, const char *prefix)
                OPT_BOOL(0, "unfold", &opts.unfold, N_("join whitespace-continued values")),
                { OPTION_CALLBACK, 0, "parse", &opts, NULL, N_("set parsing options"),
                        PARSE_OPT_NOARG | PARSE_OPT_NONEG, parse_opt_parse },
+               OPT_BOOL(0, "no-divider", &opts.no_divider, N_("do not treat --- specially")),
                OPT_CALLBACK(0, "trailer", &trailers, N_("trailer"),
                                N_("trailer(s) to add"), option_parse_trailer),
                OPT_END()
index e094560d9abca6e819c24b845dc7c04d384850c3..1dbb9d829bffcb25739737783c4fd4fd734f5940 100644 (file)
@@ -31,6 +31,9 @@
 #include "progress.h"
 #include "commit-slab.h"
 #include "repository.h"
+#include "commit-reach.h"
+#include "interdiff.h"
+#include "range-diff.h"
 
 #define MAIL_DEFAULT_WRAP 72
 
@@ -992,12 +995,32 @@ static char *find_branch_name(struct rev_info *rev)
        tip_oid = &rev->cmdline.rev[positive].item->oid;
        if (dwim_ref(ref, strlen(ref), &branch_oid, &full_ref) &&
            skip_prefix(full_ref, "refs/heads/", &v) &&
-           !oidcmp(tip_oid, &branch_oid))
+           oideq(tip_oid, &branch_oid))
                branch = xstrdup(v);
        free(full_ref);
        return branch;
 }
 
+static void show_diffstat(struct rev_info *rev,
+                         struct commit *origin, struct commit *head)
+{
+       struct diff_options opts;
+
+       memcpy(&opts, &rev->diffopt, sizeof(opts));
+       opts.output_format = DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
+       opts.stat_width = MAIL_DEFAULT_WRAP;
+
+       diff_setup_done(&opts);
+
+       diff_tree_oid(get_commit_tree_oid(origin),
+                     get_commit_tree_oid(head),
+                     "", &opts);
+       diffcore_std(&opts);
+       diff_flush(&opts);
+
+       fprintf(rev->diffopt.file, "\n");
+}
+
 static void make_cover_letter(struct rev_info *rev, int use_stdout,
                              struct commit *origin,
                              int nr, struct commit **list,
@@ -1011,7 +1034,6 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout,
        struct strbuf sb = STRBUF_INIT;
        int i;
        const char *encoding = "UTF-8";
-       struct diff_options opts;
        int need_8bit_cte = 0;
        struct pretty_print_context pp = {0};
        struct commit *head = list[0];
@@ -1061,25 +1083,20 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout,
 
        shortlog_output(&log);
 
-       /*
-        * We can only do diffstat with a unique reference point
-        */
-       if (!origin)
-               return;
-
-       memcpy(&opts, &rev->diffopt, sizeof(opts));
-       opts.output_format = DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
-       opts.stat_width = MAIL_DEFAULT_WRAP;
+       /* We can only do diffstat with a unique reference point */
+       if (origin)
+               show_diffstat(rev, origin, head);
 
-       diff_setup_done(&opts);
-
-       diff_tree_oid(get_commit_tree_oid(origin),
-                     get_commit_tree_oid(head),
-                     "", &opts);
-       diffcore_std(&opts);
-       diff_flush(&opts);
+       if (rev->idiff_oid1) {
+               fprintf_ln(rev->diffopt.file, "%s", rev->idiff_title);
+               show_interdiff(rev, 0);
+       }
 
-       fprintf(rev->diffopt.file, "\n");
+       if (rev->rdiff1) {
+               fprintf_ln(rev->diffopt.file, "%s", rev->rdiff_title);
+               show_range_diff(rev->rdiff1, rev->rdiff2,
+                               rev->creation_factor, 1, &rev->diffopt);
+       }
 }
 
 static const char *clean_message_id(const char *msg_id)
@@ -1419,6 +1436,36 @@ static void print_bases(struct base_tree_info *bases, FILE *file)
        oidclr(&bases->base_commit);
 }
 
+static const char *diff_title(struct strbuf *sb, int reroll_count,
+                      const char *generic, const char *rerolled)
+{
+       if (reroll_count <= 0)
+               strbuf_addstr(sb, generic);
+       else /* RFC may be v0, so allow -v1 to diff against v0 */
+               strbuf_addf(sb, rerolled, reroll_count - 1);
+       return sb->buf;
+}
+
+static void infer_range_diff_ranges(struct strbuf *r1,
+                                   struct strbuf *r2,
+                                   const char *prev,
+                                   struct commit *origin,
+                                   struct commit *head)
+{
+       const char *head_oid = oid_to_hex(&head->object.oid);
+
+       if (!strstr(prev, "..")) {
+               strbuf_addf(r1, "%s..%s", head_oid, prev);
+               strbuf_addf(r2, "%s..%s", prev, head_oid);
+       } else if (!origin) {
+               die(_("failed to infer range-diff ranges"));
+       } else {
+               strbuf_addstr(r1, prev);
+               strbuf_addf(r2, "%s..%s",
+                           oid_to_hex(&origin->object.oid), head_oid);
+       }
+}
+
 int cmd_format_patch(int argc, const char **argv, const char *prefix)
 {
        struct commit *commit;
@@ -1446,6 +1493,13 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
        struct base_tree_info bases;
        int show_progress = 0;
        struct progress *progress = NULL;
+       struct oid_array idiff_prev = OID_ARRAY_INIT;
+       struct strbuf idiff_title = STRBUF_INIT;
+       const char *rdiff_prev = NULL;
+       struct strbuf rdiff1 = STRBUF_INIT;
+       struct strbuf rdiff2 = STRBUF_INIT;
+       struct strbuf rdiff_title = STRBUF_INIT;
+       int creation_factor = -1;
 
        const struct option builtin_format_patch_options[] = {
                { OPTION_CALLBACK, 'n', "numbered", &numbered, NULL,
@@ -1519,6 +1573,13 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
                OPT__QUIET(&quiet, N_("don't print the patch filenames")),
                OPT_BOOL(0, "progress", &show_progress,
                         N_("show progress while generating patches")),
+               OPT_CALLBACK(0, "interdiff", &idiff_prev, N_("rev"),
+                            N_("show changes against <rev> in cover letter or single patch"),
+                            parse_opt_object_name),
+               OPT_STRING(0, "range-diff", &rdiff_prev, N_("refspec"),
+                          N_("show changes against <refspec> in cover letter or single patch")),
+               OPT_INTEGER(0, "creation-factor", &creation_factor,
+                           N_("percentage by which creation is weighted")),
                OPT_END()
        };
 
@@ -1703,8 +1764,8 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
                /* Don't say anything if head and upstream are the same. */
                if (rev.pending.nr == 2) {
                        struct object_array_entry *o = rev.pending.objects;
-                       if (oidcmp(&o[0].item->oid, &o[1].item->oid) == 0)
-                               return 0;
+                       if (oideq(&o[0].item->oid, &o[1].item->oid))
+                               goto done;
                }
                get_patch_ids(&rev, &ids);
        }
@@ -1728,7 +1789,7 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
        }
        if (nr == 0)
                /* nothing to do */
-               return 0;
+               goto done;
        total = nr;
        if (cover_letter == -1) {
                if (config_cover_letter == COVER_AUTO)
@@ -1741,6 +1802,35 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
        if (numbered)
                rev.total = total + start_number - 1;
 
+       if (idiff_prev.nr) {
+               if (!cover_letter && total != 1)
+                       die(_("--interdiff requires --cover-letter or single patch"));
+               rev.idiff_oid1 = &idiff_prev.oid[idiff_prev.nr - 1];
+               rev.idiff_oid2 = get_commit_tree_oid(list[0]);
+               rev.idiff_title = diff_title(&idiff_title, reroll_count,
+                                            _("Interdiff:"),
+                                            _("Interdiff against v%d:"));
+       }
+
+       if (creation_factor < 0)
+               creation_factor = RANGE_DIFF_CREATION_FACTOR_DEFAULT;
+       else if (!rdiff_prev)
+               die(_("--creation-factor requires --range-diff"));
+
+       if (rdiff_prev) {
+               if (!cover_letter && total != 1)
+                       die(_("--range-diff requires --cover-letter or single patch"));
+
+               infer_range_diff_ranges(&rdiff1, &rdiff2, rdiff_prev,
+                                       origin, list[0]);
+               rev.rdiff1 = rdiff1.buf;
+               rev.rdiff2 = rdiff2.buf;
+               rev.creation_factor = creation_factor;
+               rev.rdiff_title = diff_title(&rdiff_title, reroll_count,
+                                            _("Range-diff:"),
+                                            _("Range-diff against v%d:"));
+       }
+
        if (!signature) {
                ; /* --no-signature inhibits all signatures */
        } else if (signature && signature != git_version_string) {
@@ -1778,6 +1868,9 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
                print_signature(rev.diffopt.file);
                total++;
                start_number--;
+               /* interdiff/range-diff in cover-letter; omit from patches */
+               rev.idiff_oid1 = NULL;
+               rev.rdiff1 = NULL;
        }
        rev.add_signoff = do_signoff;
 
@@ -1858,6 +1951,13 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
        string_list_clear(&extra_hdr, 0);
        if (ignore_if_in_upstream)
                free_patch_ids(&ids);
+
+done:
+       oid_array_clear(&idiff_prev);
+       strbuf_release(&idiff_title);
+       strbuf_release(&rdiff1);
+       strbuf_release(&rdiff2);
+       strbuf_release(&rdiff_title);
        return 0;
 }
 
@@ -1949,7 +2049,7 @@ int cmd_cherry(int argc, const char **argv, const char *prefix)
        /* Don't say anything if head and upstream are the same. */
        if (revs.pending.nr == 2) {
                struct object_array_entry *o = revs.pending.objects;
-               if (oidcmp(&o[0].item->oid, &o[1].item->oid) == 0)
+               if (oideq(&o[0].item->oid, &o[1].item->oid))
                        return 0;
        }
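For --range-diff, infer_range_diff_ranges() above turns a bare previous-series tip into the pair <head>..<prev> / <prev>..<head>, and keeps an explicit A..B range while pairing it with <merge-base>..<head> (dying when no merge base is available). A standalone sketch of the same rule using plain string formatting — git's strbuf API and the error path are deliberately left out:

#include <stdio.h>
#include <string.h>

/* Mirrors the inference above: a bare rev becomes a symmetric pair around HEAD;
 * an explicit range is kept and paired with a range anchored at the merge base. */
static void infer_ranges_sketch(char *r1, size_t n1, char *r2, size_t n2,
                                const char *prev, const char *origin_oid,
                                const char *head_oid)
{
        if (!strstr(prev, "..")) {
                snprintf(r1, n1, "%s..%s", head_oid, prev);
                snprintf(r2, n2, "%s..%s", prev, head_oid);
        } else {
                snprintf(r1, n1, "%s", prev);
                snprintf(r2, n2, "%s..%s", origin_oid, head_oid);
        }
}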
 
index 08d91b1f0c0172fef93f1a87bf0843a85f57ca37..1c920990701381452da910e4cee66be5e6fde5a0 100644 (file)
@@ -7,6 +7,7 @@
 #include "revision.h"
 #include "parse-options.h"
 #include "repository.h"
+#include "commit-reach.h"
 
 static int show_merge_base(struct commit **rev, int rev_nr, int show_all)
 {
index f8023bae1e2eceaa8ea086f5d990d8bc4c0e6121..8cea8a74f2b7e89dfedca3688f2b9d8863055010 100644 (file)
@@ -155,7 +155,7 @@ static int same_entry(struct name_entry *a, struct name_entry *b)
 {
        return  a->oid &&
                b->oid &&
-               !oidcmp(a->oid, b->oid) &&
+               oideq(a->oid, b->oid) &&
                a->mode == b->mode;
 }
 
index 8f4a5065c209b5b50e02271b48277fff90306338..e331ca6d481005f6122fa6fda6075728f893632d 100644 (file)
@@ -36,6 +36,7 @@
 #include "packfile.h"
 #include "tag.h"
 #include "alias.h"
+#include "commit-reach.h"
 
 #define DEFAULT_TWOHEAD (1<<0)
 #define DEFAULT_OCTOPUS (1<<1)
@@ -1189,7 +1190,7 @@ static int merging_a_throwaway_tag(struct commit *commit)
        tag_ref = xstrfmt("refs/tags/%s",
                          ((struct tag *)merge_remote_util(commit)->obj)->tag);
        if (!read_ref(tag_ref, &oid) &&
-           !oidcmp(&oid, &merge_remote_util(commit)->obj->oid))
+           oideq(&oid, &merge_remote_util(commit)->obj->oid))
                is_throwaway_tag = 0;
        else
                is_throwaway_tag = 1;
@@ -1448,7 +1449,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
                goto done;
        } else if (fast_forward != FF_NO && !remoteheads->next &&
                        !common->next &&
-                       !oidcmp(&common->item->object.oid, &head_commit->object.oid)) {
+                       oideq(&common->item->object.oid, &head_commit->object.oid)) {
                /* Again the most common case of merging one remote. */
                struct strbuf msg = STRBUF_INIT;
                struct commit *commit;
@@ -1521,7 +1522,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
                         * HEAD^^" would be missed.
                         */
                        common_one = get_merge_bases(head_commit, j->item);
-                       if (oidcmp(&common_one->item->object.oid, &j->item->object.oid)) {
+                       if (!oideq(&common_one->item->object.oid, &j->item->object.oid)) {
                                up_to_date = 0;
                                break;
                        }
diff --git a/builtin/multi-pack-index.c b/builtin/multi-pack-index.c
new file mode 100644 (file)
index 0000000..fca70f8
--- /dev/null
+++ b/builtin/multi-pack-index.c
@@ -0,0 +1,49 @@
+#include "builtin.h"
+#include "cache.h"
+#include "config.h"
+#include "parse-options.h"
+#include "midx.h"
+
+static char const * const builtin_multi_pack_index_usage[] = {
+       N_("git multi-pack-index [--object-dir=<dir>] (write|verify)"),
+       NULL
+};
+
+static struct opts_multi_pack_index {
+       const char *object_dir;
+} opts;
+
+int cmd_multi_pack_index(int argc, const char **argv,
+                        const char *prefix)
+{
+       static struct option builtin_multi_pack_index_options[] = {
+               OPT_FILENAME(0, "object-dir", &opts.object_dir,
+                 N_("object directory containing set of packfile and pack-index pairs")),
+               OPT_END(),
+       };
+
+       git_config(git_default_config, NULL);
+
+       argc = parse_options(argc, argv, prefix,
+                            builtin_multi_pack_index_options,
+                            builtin_multi_pack_index_usage, 0);
+
+       if (!opts.object_dir)
+               opts.object_dir = get_object_directory();
+
+       if (argc == 0)
+               usage_with_options(builtin_multi_pack_index_usage,
+                                  builtin_multi_pack_index_options);
+
+       if (argc > 1) {
+               die(_("too many arguments"));
+               return 1;
+       }
+
+       if (!strcmp(argv[0], "write"))
+               return write_midx_file(opts.object_dir);
+       if (!strcmp(argv[0], "verify"))
+               return verify_midx_file(opts.object_dir);
+
+       die(_("unrecognized verb: %s"), argv[0]);
+}
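Outside this excerpt the new builtin still needs a declaration and a command-table entry; a sketch of that wiring, where the struct shape and flag value are illustrative stand-ins rather than copies of git.c:

/* Declaration the builtin would need elsewhere (sketch) */
int cmd_multi_pack_index(int argc, const char **argv, const char *prefix);

/* Illustrative stand-in for a git.c commands[] entry; real flags differ. */
struct cmd_entry_sketch {
        const char *name;
        int (*fn)(int argc, const char **argv, const char *prefix);
        unsigned int flags;
};

static struct cmd_entry_sketch multi_pack_index_entry = {
        "multi-pack-index", cmd_multi_pack_index, 0 /* e.g. RUN_SETUP assumed */
};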
index d1144a8f7ef79f7efa5bf64141a9133cfeee66d1..c6370f2716a7d5a9f78ea6e6627a87b40ab32cc9 100644 (file)
@@ -24,6 +24,7 @@
 #include "streaming.h"
 #include "thread-utils.h"
 #include "pack-bitmap.h"
+#include "delta-islands.h"
 #include "reachable.h"
 #include "sha1-array.h"
 #include "argv-array.h"
@@ -31,6 +32,7 @@
 #include "packfile.h"
 #include "object-store.h"
 #include "dir.h"
+#include "midx.h"
 
 #define IN_PACK(obj) oe_in_pack(&to_pack, obj)
 #define SIZE(obj) oe_size(&to_pack, obj)
@@ -40,6 +42,7 @@
 #define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
 #define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
 #define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
+#define SET_DELTA_EXT(obj, oid) oe_set_delta_ext(&to_pack, obj, oid)
 #define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
 #define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
 #define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)
@@ -59,6 +62,8 @@ static struct packing_data to_pack;
 
 static struct pack_idx_entry **written_list;
 static uint32_t nr_result, nr_written, nr_seen;
+static struct bitmap_index *bitmap_git;
+static uint32_t write_layer;
 
 static int non_empty;
 static int reuse_delta = 1, reuse_object = 1;
@@ -79,6 +84,7 @@ static unsigned long pack_size_limit;
 static int depth = 50;
 static int delta_search_threads;
 static int pack_to_stdout;
+static int thin;
 static int num_preferred_base;
 static struct progress *progress_state;
 
@@ -93,6 +99,8 @@ static uint16_t write_bitmap_options;
 
 static int exclude_promisor_objects;
 
+static int use_delta_islands;
+
 static unsigned long delta_cache_size = 0;
 static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
 static unsigned long cache_max_small_delta_size = 1000;
@@ -612,7 +620,7 @@ static inline void add_to_write_order(struct object_entry **wo,
                               unsigned int *endp,
                               struct object_entry *e)
 {
-       if (e->filled)
+       if (e->filled || oe_layer(&to_pack, e) != write_layer)
                return;
        wo[(*endp)++] = e;
        e->filled = 1;
@@ -672,48 +680,15 @@ static void add_family_to_write_order(struct object_entry **wo,
        add_descendants_to_write_order(wo, endp, root);
 }
 
-static struct object_entry **compute_write_order(void)
+static void compute_layer_order(struct object_entry **wo, unsigned int *wo_end)
 {
-       unsigned int i, wo_end, last_untagged;
-
-       struct object_entry **wo;
+       unsigned int i, last_untagged;
        struct object_entry *objects = to_pack.objects;
 
        for (i = 0; i < to_pack.nr_objects; i++) {
-               objects[i].tagged = 0;
-               objects[i].filled = 0;
-               SET_DELTA_CHILD(&objects[i], NULL);
-               SET_DELTA_SIBLING(&objects[i], NULL);
-       }
-
-       /*
-        * Fully connect delta_child/delta_sibling network.
-        * Make sure delta_sibling is sorted in the original
-        * recency order.
-        */
-       for (i = to_pack.nr_objects; i > 0;) {
-               struct object_entry *e = &objects[--i];
-               if (!DELTA(e))
-                       continue;
-               /* Mark me as the first child */
-               e->delta_sibling_idx = DELTA(e)->delta_child_idx;
-               SET_DELTA_CHILD(DELTA(e), e);
-       }
-
-       /*
-        * Mark objects that are at the tip of tags.
-        */
-       for_each_tag_ref(mark_tagged, NULL);
-
-       /*
-        * Give the objects in the original recency order until
-        * we see a tagged tip.
-        */
-       ALLOC_ARRAY(wo, to_pack.nr_objects);
-       for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
                if (objects[i].tagged)
                        break;
-               add_to_write_order(wo, &wo_end, &objects[i]);
+               add_to_write_order(wo, wo_end, &objects[i]);
        }
        last_untagged = i;
 
@@ -722,7 +697,7 @@ static struct object_entry **compute_write_order(void)
         */
        for (; i < to_pack.nr_objects; i++) {
                if (objects[i].tagged)
-                       add_to_write_order(wo, &wo_end, &objects[i]);
+                       add_to_write_order(wo, wo_end, &objects[i]);
        }
 
        /*
@@ -732,7 +707,7 @@ static struct object_entry **compute_write_order(void)
                if (oe_type(&objects[i]) != OBJ_COMMIT &&
                    oe_type(&objects[i]) != OBJ_TAG)
                        continue;
-               add_to_write_order(wo, &wo_end, &objects[i]);
+               add_to_write_order(wo, wo_end, &objects[i]);
        }
 
        /*
@@ -741,17 +716,61 @@ static struct object_entry **compute_write_order(void)
        for (i = last_untagged; i < to_pack.nr_objects; i++) {
                if (oe_type(&objects[i]) != OBJ_TREE)
                        continue;
-               add_to_write_order(wo, &wo_end, &objects[i]);
+               add_to_write_order(wo, wo_end, &objects[i]);
        }
 
        /*
         * Finally all the rest in really tight order
         */
        for (i = last_untagged; i < to_pack.nr_objects; i++) {
-               if (!objects[i].filled)
-                       add_family_to_write_order(wo, &wo_end, &objects[i]);
+               if (!objects[i].filled && oe_layer(&to_pack, &objects[i]) == write_layer)
+                       add_family_to_write_order(wo, wo_end, &objects[i]);
+       }
+}
+
+static struct object_entry **compute_write_order(void)
+{
+       uint32_t max_layers = 1;
+       unsigned int i, wo_end;
+
+       struct object_entry **wo;
+       struct object_entry *objects = to_pack.objects;
+
+       for (i = 0; i < to_pack.nr_objects; i++) {
+               objects[i].tagged = 0;
+               objects[i].filled = 0;
+               SET_DELTA_CHILD(&objects[i], NULL);
+               SET_DELTA_SIBLING(&objects[i], NULL);
+       }
+
+       /*
+        * Fully connect delta_child/delta_sibling network.
+        * Make sure delta_sibling is sorted in the original
+        * recency order.
+        */
+       for (i = to_pack.nr_objects; i > 0;) {
+               struct object_entry *e = &objects[--i];
+               if (!DELTA(e))
+                       continue;
+               /* Mark me as the first child */
+               e->delta_sibling_idx = DELTA(e)->delta_child_idx;
+               SET_DELTA_CHILD(DELTA(e), e);
        }
 
+       /*
+        * Mark objects that are at the tip of tags.
+        */
+       for_each_tag_ref(mark_tagged, NULL);
+
+       if (use_delta_islands)
+               max_layers = compute_pack_layers(&to_pack);
+
+       ALLOC_ARRAY(wo, to_pack.nr_objects);
+       wo_end = 0;
+
+       for (; write_layer < max_layers; ++write_layer)
+               compute_layer_order(wo, &wo_end);
+
        if (wo_end != to_pack.nr_objects)
                die(_("ordered %u objects, expected %"PRIu32),
                    wo_end, to_pack.nr_objects);
@@ -951,8 +970,7 @@ static int no_try_delta(const char *path)
 
        if (!check)
                check = attr_check_initl("delta", NULL);
-       if (git_check_attr(&the_index, path, check))
-               return 0;
+       git_check_attr(&the_index, path, check);
        if (ATTR_FALSE(check->items[0].value))
                return 1;
        return 0;
@@ -1040,6 +1058,7 @@ static int want_object_in_pack(const struct object_id *oid,
 {
        int want;
        struct list_head *pos;
+       struct multi_pack_index *m;
 
        if (!exclude && local && has_loose_object_nonlocal(oid))
                return 0;
@@ -1054,6 +1073,32 @@ static int want_object_in_pack(const struct object_id *oid,
                if (want != -1)
                        return want;
        }
+
+       for (m = get_multi_pack_index(the_repository); m; m = m->next) {
+               struct pack_entry e;
+               if (fill_midx_entry(oid, &e, m)) {
+                       struct packed_git *p = e.p;
+                       off_t offset;
+
+                       if (p == *found_pack)
+                               offset = *found_offset;
+                       else
+                               offset = find_pack_entry_one(oid->hash, p);
+
+                       if (offset) {
+                               if (!*found_pack) {
+                                       if (!is_pack_valid(p))
+                                               continue;
+                                       *found_offset = offset;
+                                       *found_pack = p;
+                               }
+                               want = want_found_object(exclude, p);
+                               if (want != -1)
+                                       return want;
+                       }
+               }
+       }
+
        list_for_each(pos, get_packed_git_mru(the_repository)) {
                struct packed_git *p = list_entry(pos, struct packed_git, mru);
                off_t offset;
@@ -1202,7 +1247,7 @@ static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
         */
        for (neigh = 0; neigh < 8; neigh++) {
                ent = pbase_tree_cache[my_ix];
-               if (ent && !oidcmp(&ent->oid, oid)) {
+               if (ent && oideq(&ent->oid, oid)) {
                        ent->ref++;
                        return ent;
                }
@@ -1384,7 +1429,7 @@ static void add_preferred_base(struct object_id *oid)
                return;
 
        for (it = pbase_tree; it; it = it->next) {
-               if (!oidcmp(&it->pcache.oid, &tree_oid)) {
+               if (oideq(&it->pcache.oid, &tree_oid)) {
                        free(data);
                        return;
                }
@@ -1424,6 +1469,57 @@ static void cleanup_preferred_base(void)
        done_pbase_paths_num = done_pbase_paths_alloc = 0;
 }
 
+/*
+ * Return 1 iff the object specified by "delta" can be sent
+ * literally as a delta against the base in "base_sha1". If
+ * so, then *base_out will point to the entry in our packing
+ * list, or NULL if we must use the external-base list.
+ *
+ * Depth value does not matter - find_deltas() will
+ * never consider reused delta as the base object to
+ * deltify other objects against, in order to avoid
+ * circular deltas.
+ */
+static int can_reuse_delta(const unsigned char *base_sha1,
+                          struct object_entry *delta,
+                          struct object_entry **base_out)
+{
+       struct object_entry *base;
+
+       if (!base_sha1)
+               return 0;
+
+       /*
+        * First see if we're already sending the base (or it's explicitly in
+        * our "excluded" list).
+        */
+       base = packlist_find(&to_pack, base_sha1, NULL);
+       if (base) {
+               if (!in_same_island(&delta->idx.oid, &base->idx.oid))
+                       return 0;
+               *base_out = base;
+               return 1;
+       }
+
+       /*
+        * Otherwise, reachability bitmaps may tell us if the receiver has it,
+        * even if it was buried too deep in history to make it into the
+        * packing list.
+        */
+       if (thin && bitmap_has_sha1_in_uninteresting(bitmap_git, base_sha1)) {
+               if (use_delta_islands) {
+                       struct object_id base_oid;
+                       hashcpy(base_oid.hash, base_sha1);
+                       if (!in_same_island(&delta->idx.oid, &base_oid))
+                               return 0;
+               }
+               *base_out = NULL;
+               return 1;
+       }
+
+       return 0;
+}
+
 static void check_object(struct object_entry *entry)
 {
        unsigned long canonical_size;
@@ -1510,23 +1606,19 @@ static void check_object(struct object_entry *entry)
                        break;
                }
 
-               if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {
-                       /*
-                        * If base_ref was set above that means we wish to
-                        * reuse delta data, and we even found that base
-                        * in the list of objects we want to pack. Goodie!
-                        *
-                        * Depth value does not matter - find_deltas() will
-                        * never consider reused delta as the base object to
-                        * deltify other objects against, in order to avoid
-                        * circular deltas.
-                        */
+               if (can_reuse_delta(base_ref, entry, &base_entry)) {
                        oe_set_type(entry, entry->in_pack_type);
                        SET_SIZE(entry, in_pack_size); /* delta size */
-                       SET_DELTA(entry, base_entry);
                        SET_DELTA_SIZE(entry, in_pack_size);
-                       entry->delta_sibling_idx = base_entry->delta_child_idx;
-                       SET_DELTA_CHILD(base_entry, entry);
+
+                       if (base_entry) {
+                               SET_DELTA(entry, base_entry);
+                               entry->delta_sibling_idx = base_entry->delta_child_idx;
+                               SET_DELTA_CHILD(base_entry, entry);
+                       } else {
+                               SET_DELTA_EXT(entry, base_ref);
+                       }
+
                        unuse_pack(&w_curs);
                        return;
                }
@@ -1826,6 +1918,11 @@ static int type_size_sort(const void *_a, const void *_b)
                return -1;
        if (a->preferred_base < b->preferred_base)
                return 1;
+       if (use_delta_islands) {
+               int island_cmp = island_delta_cmp(&a->idx.oid, &b->idx.oid);
+               if (island_cmp)
+                       return island_cmp;
+       }
        if (a_size > b_size)
                return -1;
        if (a_size < b_size)
@@ -1986,6 +2083,9 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
        if (trg_size < src_size / 32)
                return 0;
 
+       if (!in_same_island(&trg->entry->idx.oid, &src->entry->idx.oid))
+               return 0;
+
        /* Load data if not already done */
        if (!trg->data) {
                read_lock();
@@ -2528,6 +2628,9 @@ static void prepare_pack(int window, int depth)
        uint32_t i, nr_deltas;
        unsigned n;
 
+       if (use_delta_islands)
+               resolve_tree_islands(progress, &to_pack);
+
        get_object_details();
 
        /*
@@ -2691,6 +2794,9 @@ static void show_commit(struct commit *commit, void *data)
 
        if (write_bitmap_index)
                index_commit_for_bitmap(commit);
+
+       if (use_delta_islands)
+               propagate_island_marks(commit);
 }
 
 static void show_object(struct object *obj, const char *name, void *data)
@@ -2698,6 +2804,19 @@ static void show_object(struct object *obj, const char *name, void *data)
        add_preferred_base_object(name);
        add_object_entry(&obj->oid, obj->type, name, 0);
        obj->flags |= OBJECT_ADDED;
+
+       if (use_delta_islands) {
+               const char *p;
+               unsigned depth = 0;
+               struct object_entry *ent;
+
+               for (p = strchr(name, '/'); p; p = strchr(p + 1, '/'))
+                       depth++;
+
+               ent = packlist_find(&to_pack, obj->oid.hash, NULL);
+               if (ent && depth > oe_tree_depth(&to_pack, ent))
+                       oe_set_tree_depth(&to_pack, ent, depth);
+       }
 }
 
 static void show_object__ma_allow_any(struct object *obj, const char *name, void *data)
@@ -2806,7 +2925,7 @@ static void add_objects_in_unpacked_packs(struct rev_info *revs)
 
        memset(&in_pack, 0, sizeof(in_pack));
 
-       for (p = get_packed_git(the_repository); p; p = p->next) {
+       for (p = get_all_packs(the_repository); p; p = p->next) {
                struct object_id oid;
                struct object *o;
 
@@ -2870,7 +2989,7 @@ static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
        struct packed_git *p;
 
        p = (last_found != (void *)1) ? last_found :
-                                       get_packed_git(the_repository);
+                                       get_all_packs(the_repository);
 
        while (p) {
                if ((!p->pack_local || p->pack_keep ||
@@ -2880,7 +2999,7 @@ static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
                        return 1;
                }
                if (p == last_found)
-                       p = get_packed_git(the_repository);
+                       p = get_all_packs(the_repository);
                else
                        p = p->next;
                if (p == last_found)
@@ -2916,7 +3035,7 @@ static void loosen_unused_packed_objects(struct rev_info *revs)
        uint32_t i;
        struct object_id oid;
 
-       for (p = get_packed_git(the_repository); p; p = p->next) {
+       for (p = get_all_packs(the_repository); p; p = p->next) {
                if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
                        continue;
 
@@ -2951,7 +3070,6 @@ static int pack_options_allow_reuse(void)
 
 static int get_object_list_from_bitmap(struct rev_info *revs)
 {
-       struct bitmap_index *bitmap_git;
        if (!(bitmap_git = prepare_bitmap_walk(revs)))
                return -1;
 
@@ -2967,7 +3085,6 @@ static int get_object_list_from_bitmap(struct rev_info *revs)
        }
 
        traverse_bitmap_commit_list(bitmap_git, &add_object_entry_from_bitmap);
-       free_bitmap_index(bitmap_git);
        return 0;
 }
 
@@ -3025,6 +3142,9 @@ static void get_object_list(int ac, const char **av)
        if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
                return;
 
+       if (use_delta_islands)
+               load_delta_islands();
+
        if (prepare_revision_walk(&revs))
                die(_("revision walk setup failed"));
        mark_edges_uninteresting(&revs, show_edge);
@@ -3063,7 +3183,7 @@ static void add_extra_kept_packs(const struct string_list *names)
        if (!names->nr)
                return;
 
-       for (p = get_packed_git(the_repository); p; p = p->next) {
+       for (p = get_all_packs(the_repository); p; p = p->next) {
                const char *name = basename(p->pack_name);
                int i;
 
@@ -3115,7 +3235,6 @@ static int option_parse_unpack_unreachable(const struct option *opt,
 int cmd_pack_objects(int argc, const char **argv, const char *prefix)
 {
        int use_internal_rev_list = 0;
-       int thin = 0;
        int shallow = 0;
        int all_progress_implied = 0;
        struct argv_array rp = ARGV_ARRAY_INIT;
@@ -3204,6 +3323,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
                  option_parse_missing_action },
                OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
                         N_("do not pack objects in promisor packfiles")),
+               OPT_BOOL(0, "delta-islands", &use_delta_islands,
+                        N_("respect islands during delta compression")),
                OPT_END(),
        };
 
@@ -3330,13 +3451,16 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
        if (pack_to_stdout || !rev_list_all)
                write_bitmap_index = 0;
 
+       if (use_delta_islands)
+               argv_array_push(&rp, "--topo-order");
+
        if (progress && all_progress_implied)
                progress = 2;
 
        add_extra_kept_packs(&keep_pack_list);
        if (ignore_packed_keep_on_disk) {
                struct packed_git *p;
-               for (p = get_packed_git(the_repository); p; p = p->next)
+               for (p = get_all_packs(the_repository); p; p = p->next)
                        if (p->pack_local && p->pack_keep)
                                break;
                if (!p) /* no keep-able packs found */
@@ -3349,7 +3473,7 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
                 * it also covers non-local objects
                 */
                struct packed_git *p;
-               for (p = get_packed_git(the_repository); p; p = p->next) {
+               for (p = get_all_packs(the_repository); p; p = p->next) {
                        if (!p->pack_local) {
                                have_non_local_packs = 1;
                                break;
index 0494dceff73d672ee430f1c5bd0ce9f8b70c83f6..cf9a9aabd4eb2e834c08f85240bcb3d0154c3320 100644 (file)
@@ -577,7 +577,7 @@ static struct pack_list * add_pack(struct packed_git *p)
 
 static struct pack_list * add_pack_file(const char *filename)
 {
-       struct packed_git *p = get_packed_git(the_repository);
+       struct packed_git *p = get_all_packs(the_repository);
 
        if (strlen(filename) < 40)
                die("Bad pack filename: %s", filename);
@@ -592,7 +592,7 @@ static struct pack_list * add_pack_file(const char *filename)
 
 static void load_all(void)
 {
-       struct packed_git *p = get_packed_git(the_repository);
+       struct packed_git *p = get_all_packs(the_repository);
 
        while (p) {
                add_pack(p);
index 681c127a07071c98641972227a28dbe7f77eaf70..b2055d1dd6f180ec2383ee6f569045e5a55cb4b4 100644 (file)
@@ -22,6 +22,7 @@
 #include "tempfile.h"
 #include "lockfile.h"
 #include "wt-status.h"
+#include "commit-reach.h"
 
 enum rebase_type {
        REBASE_INVALID = -1,
@@ -799,7 +800,7 @@ static int run_rebase(const struct object_id *curr_head,
        struct argv_array args = ARGV_ARRAY_INIT;
 
        if (!get_octopus_merge_base(&oct_merge_base, curr_head, merge_head, fork_point))
-               if (!is_null_oid(fork_point) && !oidcmp(&oct_merge_base, fork_point))
+               if (!is_null_oid(fork_point) && oideq(&oct_merge_base, fork_point))
                        fork_point = NULL;
 
        argv_array_push(&args, "rebase");
@@ -902,7 +903,7 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
                oidclr(&curr_head);
 
        if (!is_null_oid(&orig_head) && !is_null_oid(&curr_head) &&
-                       oidcmp(&orig_head, &curr_head)) {
+                       !oideq(&orig_head, &curr_head)) {
                /*
                 * The fetch involved updating the current branch.
                 *
index 0aa9bed41f35bf99784acd4abab52df415c5fb00..96af5374937e5b8b99343e8d324e54e98fb11743 100644 (file)
@@ -11,14 +11,9 @@ N_("git range-diff [<options>] <base> <old-tip> <new-tip>"),
 NULL
 };
 
-static struct strbuf *output_prefix_cb(struct diff_options *opt, void *data)
-{
-       return data;
-}
-
 int cmd_range_diff(int argc, const char **argv, const char *prefix)
 {
-       int creation_factor = 60;
+       int creation_factor = RANGE_DIFF_CREATION_FACTOR_DEFAULT;
        struct diff_options diffopt = { NULL };
        int simple_color = -1;
        struct option options[] = {
@@ -29,17 +24,11 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix)
                OPT_END()
        };
        int i, j, res = 0;
-       struct strbuf four_spaces = STRBUF_INIT;
        struct strbuf range1 = STRBUF_INIT, range2 = STRBUF_INIT;
 
        git_config(git_diff_ui_config, NULL);
 
        diff_setup(&diffopt);
-       diffopt.output_format = DIFF_FORMAT_PATCH;
-       diffopt.flags.suppress_diff_headers = 1;
-       diffopt.output_prefix = output_prefix_cb;
-       strbuf_addstr(&four_spaces, "    ");
-       diffopt.output_prefix_data = &four_spaces;
 
        argc = parse_options(argc, argv, NULL, options,
                             builtin_range_diff_usage, PARSE_OPT_KEEP_UNKNOWN |
@@ -63,12 +52,9 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix)
                             options + ARRAY_SIZE(options) - 1, /* OPT_END */
                             builtin_range_diff_usage, 0);
 
-       if (simple_color < 1) {
-               if (!simple_color)
-                       /* force color when --dual-color was used */
-                       diffopt.use_color = 1;
-               diffopt.flags.dual_color_diffed_diffs = 1;
-       }
+       /* force color when --dual-color was used */
+       if (!simple_color)
+               diffopt.use_color = 1;
 
        if (argc == 2) {
                if (!strstr(argv[0], ".."))
@@ -106,11 +92,10 @@ int cmd_range_diff(int argc, const char **argv, const char *prefix)
        }
 
        res = show_range_diff(range1.buf, range2.buf, creation_factor,
-                             &diffopt);
+                             simple_color < 1, &diffopt);
 
        strbuf_release(&range1);
        strbuf_release(&range2);
-       strbuf_release(&four_spaces);
 
        return res;
 }
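
With the four-space output prefix and diff-header suppression now handled inside show_range_diff(), the caller only decides whether dual-color is in effect and passes that as a flag. A hedged sketch of calling the updated entry point (signature taken from the hunk above; RANGE_DIFF_CREATION_FACTOR_DEFAULT is assumed to come from range-diff.h, and the wrapper name is hypothetical):

#include "cache.h"
#include "diff.h"
#include "range-diff.h"

/* Compare two commit ranges, letting show_range_diff() apply the
 * "    " prefix and dual-color highlighting internally. */
static int compare_ranges(const char *range1, const char *range2)
{
        struct diff_options diffopt = { NULL };
        int dual_color = 1;     /* i.e. --dual-color, the default in cmd_range_diff() */

        diff_setup(&diffopt);
        diffopt.use_color = 1;  /* dual-color implies colored output */

        return show_range_diff(range1, range2,
                               RANGE_DIFF_CREATION_FACTOR_DEFAULT,
                               dual_color, &diffopt);
}
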
diff --git a/builtin/receive-pack.c b/builtin/receive-pack.c
index c17ce94e12ee34c5b822b0e09fcd6d7264e759ad..95740f4f0e71de95484edf53a2b86dc71952c5a7 100644
--- a/builtin/receive-pack.c
+++ b/builtin/receive-pack.c
@@ -27,6 +27,7 @@
 #include "packfile.h"
 #include "object-store.h"
 #include "protocol.h"
+#include "commit-reach.h"
 
 static const char * const receive_pack_usage[] = {
        N_("git receive-pack <git-dir>"),
@@ -465,7 +466,7 @@ static char *prepare_push_cert_nonce(const char *path, timestamp_t stamp)
        unsigned char sha1[GIT_SHA1_RAWSZ];
 
        strbuf_addf(&buf, "%s:%"PRItime, path, stamp);
-       hmac_sha1(sha1, buf.buf, buf.len, cert_nonce_seed, strlen(cert_nonce_seed));;
+       hmac_sha1(sha1, buf.buf, buf.len, cert_nonce_seed, strlen(cert_nonce_seed));
        strbuf_release(&buf);
 
        /* RFC 2104 5. HMAC-SHA1-80 */
@@ -1222,8 +1223,8 @@ static void check_aliased_update(struct command *cmd, struct string_list *list)
 
        dst_cmd = (struct command *) item->util;
 
-       if (!oidcmp(&cmd->old_oid, &dst_cmd->old_oid) &&
-           !oidcmp(&cmd->new_oid, &dst_cmd->new_oid))
+       if (oideq(&cmd->old_oid, &dst_cmd->old_oid) &&
+           oideq(&cmd->new_oid, &dst_cmd->new_oid))
                return;
 
        dst_cmd->skip_update = 1;
@@ -1833,7 +1834,7 @@ static void prepare_shallow_update(struct command *commands,
        /*
         * keep hooks happy by forcing a temporary shallow file via
         * env variable because we can't add --shallow-file to every
-        * command. check_everything_connected() will be done with
+        * command. check_connected() will be done with
         * true .git/shallow though.
         */
        setenv(GIT_SHALLOW_FILE_ENVIRONMENT, alt_shallow_file, 1);
diff --git a/builtin/remote.c b/builtin/remote.c
index 7876db1c20d317e28e4a84880197c6a690bf9ef6..f7edf7f2cb1f5880e5674f0bf5d41672602e52e4 100644
--- a/builtin/remote.c
+++ b/builtin/remote.c
@@ -10,6 +10,7 @@
 #include "refspec.h"
 #include "object-store.h"
 #include "argv-array.h"
+#include "commit-reach.h"
 
 static const char * const builtin_remote_usage[] = {
        N_("git remote [-v | --verbose]"),
@@ -412,7 +413,7 @@ static int get_push_ref_states(const struct ref *remote_refs,
 
                if (is_null_oid(&ref->new_oid)) {
                        info->status = PUSH_STATUS_DELETE;
-               } else if (!oidcmp(&ref->old_oid, &ref->new_oid))
+               } else if (oideq(&ref->old_oid, &ref->new_oid))
                        info->status = PUSH_STATUS_UPTODATE;
                else if (is_null_oid(&ref->old_oid))
                        info->status = PUSH_STATUS_CREATE;
@@ -625,7 +626,7 @@ static int mv(int argc, const char **argv)
 
        oldremote = remote_get(rename.old_name);
        if (!remote_is_configured(oldremote, 1))
-               die(_("No such remote: %s"), rename.old_name);
+               die(_("No such remote: '%s'"), rename.old_name);
 
        if (!strcmp(rename.old_name, rename.new_name) && oldremote->origin != REMOTE_CONFIG)
                return migrate_file(oldremote);
@@ -761,7 +762,7 @@ static int rm(int argc, const char **argv)
 
        remote = remote_get(argv[1]);
        if (!remote_is_configured(remote, 1))
-               die(_("No such remote: %s"), argv[1]);
+               die(_("No such remote: '%s'"), argv[1]);
 
        known_remotes.to_delete = remote;
        for_each_remote(add_known_remote, &known_remotes);
@@ -860,7 +861,7 @@ static int get_remote_ref_states(const char *name,
 
        states->remote = remote_get(name);
        if (!states->remote)
-               return error(_("No such remote: %s"), name);
+               return error(_("No such remote: '%s'"), name);
 
        read_branches();
 
diff --git a/builtin/repack.c b/builtin/repack.c
index d5886039cc6656609962fd522a27f61eda6cd0ec..c6a7943d5cb108dbccdfd502d799ab71a9e7e146 100644
--- a/builtin/repack.c
+++ b/builtin/repack.c
@@ -8,12 +8,14 @@
 #include "strbuf.h"
 #include "string-list.h"
 #include "argv-array.h"
+#include "midx.h"
 #include "packfile.h"
 #include "object-store.h"
 
 static int delta_base_offset = 1;
 static int pack_kept_objects = -1;
 static int write_bitmaps;
+static int use_delta_islands;
 static char *packdir, *packtmp;
 
 static const char *const git_repack_usage[] = {
@@ -42,6 +44,10 @@ static int repack_config(const char *var, const char *value, void *cb)
                write_bitmaps = git_config_bool(var, value);
                return 0;
        }
+       if (!strcmp(var, "repack.usedeltaislands")) {
+               use_delta_islands = git_config_bool(var, value);
+               return 0;
+       }
        return git_default_config(var, value, cb);
 }
 
@@ -280,6 +286,7 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
        int keep_unreachable = 0;
        struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
        int no_update_server_info = 0;
+       int midx_cleared = 0;
        struct pack_objects_args po_args = {NULL};
 
        struct option builtin_repack_options[] = {
@@ -301,6 +308,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
                                N_("pass --local to git-pack-objects")),
                OPT_BOOL('b', "write-bitmap-index", &write_bitmaps,
                                N_("write bitmap index")),
+               OPT_BOOL('i', "delta-islands", &use_delta_islands,
+                               N_("pass --delta-islands to git-pack-objects")),
                OPT_STRING(0, "unpack-unreachable", &unpack_unreachable, N_("approxidate"),
                                N_("with -A, do not loosen objects older than this")),
                OPT_BOOL('k', "keep-unreachable", &keep_unreachable,
@@ -361,6 +370,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
                argv_array_push(&cmd.args, "--exclude-promisor-objects");
        if (write_bitmaps)
                argv_array_push(&cmd.args, "--write-bitmap-index");
+       if (use_delta_islands)
+               argv_array_push(&cmd.args, "--delta-islands");
 
        if (pack_everything & ALL_INTO_ONE) {
                get_non_kept_pack_filenames(&existing_packs, &keep_pack_list);
@@ -418,6 +429,13 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
        for_each_string_list_item(item, &names) {
                for (ext = 0; ext < ARRAY_SIZE(exts); ext++) {
                        char *fname, *fname_old;
+
+                       if (!midx_cleared) {
+                               /* if we move a packfile, it will invalidate the midx */
+                               clear_midx_file(get_object_directory());
+                               midx_cleared = 1;
+                       }
+
                        fname = mkpathdup("%s/pack-%s%s", packdir,
                                                item->string, exts[ext].name);
                        if (!file_exists(fname)) {
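
Two independent changes meet in this file: a repack.useDeltaIslands knob (and -i/--delta-islands option) that is simply forwarded to git-pack-objects, and a one-time clear_midx_file() call, because moving any packfile invalidates the multi-pack-index. A sketch of that clear-once-inside-the-loop pattern, with hypothetical helper names:

#include "cache.h"
#include "midx.h"

/* hypothetical: move one freshly written pack into its final place */
static void rename_pack_into_place(const char *pack_name);

/*
 * The multi-pack-index only needs to be cleared if at least one pack
 * actually moves, so the cleanup is guarded by a flag inside the loop
 * rather than performed unconditionally up front.
 */
static void move_packs(const char **pack_names, int nr, const char *object_dir)
{
        int midx_cleared = 0;
        int i;

        for (i = 0; i < nr; i++) {
                if (!midx_cleared) {
                        clear_midx_file(object_dir);
                        midx_cleared = 1;
                }
                rename_pack_into_place(pack_names[i]);
        }
}
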
diff --git a/builtin/replace.c b/builtin/replace.c
index 4f05791f3e895f78ba511dd6571bd09abab9269c..30a661ea0c71f058cdb8bc7626334cb03747fa7c 100644
--- a/builtin/replace.c
+++ b/builtin/replace.c
@@ -39,7 +39,8 @@ struct show_data {
        enum replace_format format;
 };
 
-static int show_reference(const char *refname, const struct object_id *oid,
+static int show_reference(struct repository *r, const char *refname,
+                         const struct object_id *oid,
                          int flag, void *cb_data)
 {
        struct show_data *data = cb_data;
@@ -56,9 +57,8 @@ static int show_reference(const char *refname, const struct object_id *oid,
                        if (get_oid(refname, &object))
                                return error(_("failed to resolve '%s' as a valid ref"), refname);
 
-                       obj_type = oid_object_info(the_repository, &object,
-                                                  NULL);
-                       repl_type = oid_object_info(the_repository, oid, NULL);
+                       obj_type = oid_object_info(r, &object, NULL);
+                       repl_type = oid_object_info(r, oid, NULL);
 
                        printf("%s (%s) -> %s (%s)\n", refname, type_name(obj_type),
                               oid_to_hex(oid), type_name(repl_type));
@@ -343,7 +343,7 @@ static int edit_and_replace(const char *object_ref, int force, int raw)
        }
        free(tmpfile);
 
-       if (!oidcmp(&old_oid, &new_oid))
+       if (oideq(&old_oid, &new_oid))
                return error(_("new object is the same as the old one: '%s'"), oid_to_hex(&old_oid));
 
        return replace_object_oid(object_ref, &old_oid, "replacement", &new_oid, force);
@@ -414,7 +414,7 @@ static int check_one_mergetag(struct commit *commit,
                if (get_oid(mergetag_data->argv[i], &oid) < 0)
                        return error(_("not a valid object name: '%s'"),
                                     mergetag_data->argv[i]);
-               if (!oidcmp(&tag->tagged->oid, &oid))
+               if (oideq(&tag->tagged->oid, &oid))
                        return 0; /* found */
        }
 
@@ -474,7 +474,7 @@ static int create_graft(int argc, const char **argv, int force, int gentle)
 
        strbuf_release(&buf);
 
-       if (!oidcmp(&old_oid, &new_oid)) {
+       if (oideq(&old_oid, &new_oid)) {
                if (gentle) {
                        warning(_("graft for '%s' unnecessary"), oid_to_hex(&old_oid));
                        return 0;
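
show_reference() now receives the repository from the replace-ref iteration callback instead of reaching for the_repository. A minimal sketch of that repository-aware callback style, mirroring the signature in the hunk above (the callback body is illustrative only):

#include "cache.h"
#include "object-store.h"

/* Print one replace ref, using only the repository handed to the callback. */
static int show_one(struct repository *r, const char *refname,
                    const struct object_id *oid, int flag, void *cb_data)
{
        enum object_type type = oid_object_info(r, oid, NULL);

        printf("%s -> %s (%s)\n", refname, oid_to_hex(oid), type_name(type));
        return 0;
}
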
diff --git a/builtin/rerere.c b/builtin/rerere.c
index 0bc40298c2417a6d1f25aafb4b3577243e985c57..5ed941b91f2c1498a3f28f0c6415437f79133432 100644
--- a/builtin/rerere.c
+++ b/builtin/rerere.c
@@ -75,7 +75,7 @@ int cmd_rerere(int argc, const char **argv, const char *prefix)
        if (!strcmp(argv[0], "forget")) {
                struct pathspec pathspec;
                if (argc < 2)
-                       warning("'git rerere forget' without paths is deprecated");
+                       warning(_("'git rerere forget' without paths is deprecated"));
                parse_pathspec(&pathspec, 0, PATHSPEC_PREFER_CWD,
                               prefix, argv + 1);
                return rerere_forget(&pathspec);
@@ -107,7 +107,7 @@ int cmd_rerere(int argc, const char **argv, const char *prefix)
                        const char *path = merge_rr.items[i].string;
                        const struct rerere_id *id = merge_rr.items[i].util;
                        if (diff_two(rerere_path(id, "preimage"), path, path, path))
-                               die("unable to generate diff for %s", rerere_path(id, NULL));
+                               die(_("unable to generate diff for '%s'"), rerere_path(id, NULL));
                }
        } else
                usage_with_options(rerere_usage, options);
diff --git a/builtin/rev-list.c b/builtin/rev-list.c
index 5b07f3f4a2cf6cd5f73f20a930934ca2277c8391..ed0ea7dc5b5bed0c3cb926725b2f70cbf9b93d35 100644
--- a/builtin/rev-list.c
+++ b/builtin/rev-list.c
@@ -493,7 +493,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
        if ((!revs.commits && reflog_walk_empty(revs.reflog_info) &&
             (!(revs.tag_objects || revs.tree_objects || revs.blob_objects) &&
              !revs.pending.nr) &&
-            !revs.rev_input_given) ||
+            !revs.rev_input_given && !revs.read_from_stdin) ||
            revs.diff)
                usage(rev_list_usage);
 
diff --git a/builtin/rev-parse.c b/builtin/rev-parse.c
index 0f09bbbf65a4bc8d6d064a985181ac547d9ee411..455f62246d69ad0468ffde58e568732bd3339c63 100644
--- a/builtin/rev-parse.c
+++ b/builtin/rev-parse.c
@@ -14,6 +14,7 @@
 #include "revision.h"
 #include "split-index.h"
 #include "submodule.h"
+#include "commit-reach.h"
 
 #define DO_REVS                1
 #define DO_NOREV       2
diff --git a/builtin/rm.c b/builtin/rm.c
index 2cbe89e0ae3b7a4801ac27d25fd37306813104ba..17086d3d97c536522720c589ea8f35d366536ea8 100644
--- a/builtin/rm.c
+++ b/builtin/rm.c
@@ -180,7 +180,7 @@ static int check_local_mod(struct object_id *head, int index_only)
                if (no_head
                     || get_tree_entry(head, name, &oid, &mode)
                     || ce->ce_mode != create_ce_mode(mode)
-                    || oidcmp(&ce->oid, &oid))
+                    || !oideq(&ce->oid, &oid))
                        staged_changes = 1;
 
                /*
diff --git a/builtin/show-branch.c b/builtin/show-branch.c
index 363cf8509af5bf640da60c36118b841da06918fe..65f4a4c83c42ec33d5ffe3f1d83d7b5465c0796d 100644
--- a/builtin/show-branch.c
+++ b/builtin/show-branch.c
@@ -412,7 +412,7 @@ static int append_head_ref(const char *refname, const struct object_id *oid,
        /* If both heads/foo and tags/foo exists, get_sha1 would
         * get confused.
         */
-       if (get_oid(refname + ofs, &tmp) || oidcmp(&tmp, oid))
+       if (get_oid(refname + ofs, &tmp) || !oideq(&tmp, oid))
                ofs = 5;
        return append_ref(refname + ofs, oid, 0);
 }
@@ -427,7 +427,7 @@ static int append_remote_ref(const char *refname, const struct object_id *oid,
        /* If both heads/foo and tags/foo exists, get_sha1 would
         * get confused.
         */
-       if (get_oid(refname + ofs, &tmp) || oidcmp(&tmp, oid))
+       if (get_oid(refname + ofs, &tmp) || !oideq(&tmp, oid))
                ofs = 5;
        return append_ref(refname + ofs, oid, 0);
 }
@@ -485,7 +485,7 @@ static void snarf_refs(int head, int remotes)
 static int rev_is_head(const char *head, const char *name,
                       unsigned char *head_sha1, unsigned char *sha1)
 {
-       if (!head || (head_sha1 && sha1 && hashcmp(head_sha1, sha1)))
+       if (!head || (head_sha1 && sha1 && !hasheq(head_sha1, sha1)))
                return 0;
        skip_prefix(head, "refs/heads/", &head);
        if (!skip_prefix(name, "refs/heads/", &name))
diff --git a/builtin/submodule--helper.c b/builtin/submodule--helper.c
index f6fb8991f3a81b0b2dd895c1e8550b5bda5ea99a..247881189fde2a910bbd993342bb74fe10e06b0e 100644
--- a/builtin/submodule--helper.c
+++ b/builtin/submodule--helper.c
@@ -1233,6 +1233,7 @@ static int clone_submodule(const char *path, const char *gitdir, const char *url
        if (gitdir && *gitdir)
                argv_array_pushl(&cp.args, "--separate-git-dir", gitdir, NULL);
 
+       argv_array_push(&cp.args, "--");
        argv_array_push(&cp.args, url);
        argv_array_push(&cp.args, path);
 
@@ -1443,6 +1444,72 @@ static int module_clone(int argc, const char **argv, const char *prefix)
        return 0;
 }
 
+static void determine_submodule_update_strategy(struct repository *r,
+                                               int just_cloned,
+                                               const char *path,
+                                               const char *update,
+                                               struct submodule_update_strategy *out)
+{
+       const struct submodule *sub = submodule_from_path(r, &null_oid, path);
+       char *key;
+       const char *val;
+
+       key = xstrfmt("submodule.%s.update", sub->name);
+
+       if (update) {
+               trace_printf("parsing update");
+               if (parse_submodule_update_strategy(update, out) < 0)
+                       die(_("Invalid update mode '%s' for submodule path '%s'"),
+                               update, path);
+       } else if (!repo_config_get_string_const(r, key, &val)) {
+               if (parse_submodule_update_strategy(val, out) < 0)
+                       die(_("Invalid update mode '%s' configured for submodule path '%s'"),
+                               val, path);
+       } else if (sub->update_strategy.type != SM_UPDATE_UNSPECIFIED) {
+               trace_printf("loaded thing");
+               out->type = sub->update_strategy.type;
+               out->command = sub->update_strategy.command;
+       } else
+               out->type = SM_UPDATE_CHECKOUT;
+
+       if (just_cloned &&
+           (out->type == SM_UPDATE_MERGE ||
+            out->type == SM_UPDATE_REBASE ||
+            out->type == SM_UPDATE_NONE))
+               out->type = SM_UPDATE_CHECKOUT;
+
+       free(key);
+}
+
+static int module_update_module_mode(int argc, const char **argv, const char *prefix)
+{
+       const char *path, *update = NULL;
+       int just_cloned;
+       struct submodule_update_strategy update_strategy = { .type = SM_UPDATE_CHECKOUT };
+
+       if (argc < 3 || argc > 4)
+               die("submodule--helper update-module-mode expects <just-cloned> <path> [<update>]");
+
+       just_cloned = git_config_int("just_cloned", argv[1]);
+       path = argv[2];
+
+       if (argc == 4)
+               update = argv[3];
+
+       determine_submodule_update_strategy(the_repository,
+                                           just_cloned, path, update,
+                                           &update_strategy);
+       fputs(submodule_strategy_to_string(&update_strategy), stdout);
+
+       return 0;
+}
+
+struct update_clone_data {
+       const struct submodule *sub;
+       struct object_id oid;
+       unsigned just_cloned;
+};
+
 struct submodule_update_clone {
        /* index into 'list', the list of submodules to look into for cloning */
        int current;
@@ -1462,8 +1529,9 @@ struct submodule_update_clone {
        const char *recursive_prefix;
        const char *prefix;
 
-       /* Machine-readable status lines to be consumed by git-submodule.sh */
-       struct string_list projectlines;
+       /* to be consumed by git-submodule.sh */
+       struct update_clone_data *update_clone;
+       int update_clone_nr; int update_clone_alloc;
 
        /* If we want to stop as fast as possible and return an error */
        unsigned quickstop : 1;
@@ -1471,11 +1539,13 @@ struct submodule_update_clone {
        /* failed clones to be retried again */
        const struct cache_entry **failed_clones;
        int failed_clones_nr, failed_clones_alloc;
+
+       int max_jobs;
 };
 #define SUBMODULE_UPDATE_CLONE_INIT {0, MODULE_LIST_INIT, 0, \
        SUBMODULE_UPDATE_STRATEGY_INIT, 0, 0, -1, STRING_LIST_INIT_DUP, 0, \
        NULL, NULL, NULL, \
-       STRING_LIST_INIT_DUP, 0, NULL, 0, 0}
+       NULL, 0, 0, 0, NULL, 0, 0, 0}
 
 
 static void next_submodule_warn_missing(struct submodule_update_clone *suc,
@@ -1569,11 +1639,12 @@ static int prepare_to_clone_next_submodule(const struct cache_entry *ce,
        strbuf_addf(&sb, "%s/.git", ce->name);
        needs_cloning = !file_exists(sb.buf);
 
-       strbuf_reset(&sb);
-       strbuf_addf(&sb, "%06o %s %d %d\t%s\n", ce->ce_mode,
-                       oid_to_hex(&ce->oid), ce_stage(ce),
-                       needs_cloning, ce->name);
-       string_list_append(&suc->projectlines, sb.buf);
+       ALLOC_GROW(suc->update_clone, suc->update_clone_nr + 1,
+                  suc->update_clone_alloc);
+       oidcpy(&suc->update_clone[suc->update_clone_nr].oid, &ce->oid);
+       suc->update_clone[suc->update_clone_nr].just_cloned = needs_cloning;
+       suc->update_clone[suc->update_clone_nr].sub = sub;
+       suc->update_clone_nr++;
 
        if (!needs_cloning)
                goto cleanup;
@@ -1714,11 +1785,44 @@ static int git_update_clone_config(const char *var, const char *value,
        return 0;
 }
 
+static void update_submodule(struct update_clone_data *ucd)
+{
+       fprintf(stdout, "dummy %s %d\t%s\n",
+               oid_to_hex(&ucd->oid),
+               ucd->just_cloned,
+               ucd->sub->path);
+}
+
+static int update_submodules(struct submodule_update_clone *suc)
+{
+       int i;
+
+       run_processes_parallel(suc->max_jobs,
+                              update_clone_get_next_task,
+                              update_clone_start_failure,
+                              update_clone_task_finished,
+                              suc);
+
+       /*
+        * We saved the output and put it out all at once now.
+        * That means:
+        * - the listener does not have to interleave their (checkout)
+        *   work with our fetching.  The writes involved in a
+        *   checkout involve more straightforward sequential I/O.
+        * - the listener can avoid doing any work if fetching failed.
+        */
+       if (suc->quickstop)
+               return 1;
+
+       for (i = 0; i < suc->update_clone_nr; i++)
+               update_submodule(&suc->update_clone[i]);
+
+       return 0;
+}
+
 static int update_clone(int argc, const char **argv, const char *prefix)
 {
        const char *update = NULL;
-       int max_jobs = 1;
-       struct string_list_item *item;
        struct pathspec pathspec;
        struct submodule_update_clone suc = SUBMODULE_UPDATE_CLONE_INIT;
 
@@ -1740,7 +1844,7 @@ static int update_clone(int argc, const char **argv, const char *prefix)
                OPT_STRING(0, "depth", &suc.depth, "<depth>",
                           N_("Create a shallow clone truncated to the "
                              "specified number of revisions")),
-               OPT_INTEGER('j', "jobs", &max_jobs,
+               OPT_INTEGER('j', "jobs", &suc.max_jobs,
                            N_("parallel jobs")),
                OPT_BOOL(0, "recommend-shallow", &suc.recommend_shallow,
                            N_("whether the initial clone should follow the shallow recommendation")),
@@ -1756,8 +1860,8 @@ static int update_clone(int argc, const char **argv, const char *prefix)
        };
        suc.prefix = prefix;
 
-       update_clone_config_from_gitmodules(&max_jobs);
-       git_config(git_update_clone_config, &max_jobs);
+       update_clone_config_from_gitmodules(&suc.max_jobs);
+       git_config(git_update_clone_config, &suc.max_jobs);
 
        argc = parse_options(argc, argv, prefix, module_update_clone_options,
                             git_submodule_helper_usage, 0);
@@ -1772,27 +1876,7 @@ static int update_clone(int argc, const char **argv, const char *prefix)
        if (pathspec.nr)
                suc.warn_if_uninitialized = 1;
 
-       run_processes_parallel(max_jobs,
-                              update_clone_get_next_task,
-                              update_clone_start_failure,
-                              update_clone_task_finished,
-                              &suc);
-
-       /*
-        * We saved the output and put it out all at once now.
-        * That means:
-        * - the listener does not have to interleave their (checkout)
-        *   work with our fetching.  The writes involved in a
-        *   checkout involve more straightforward sequential I/O.
-        * - the listener can avoid doing any work if fetching failed.
-        */
-       if (suc.quickstop)
-               return 1;
-
-       for_each_string_list_item(item, &suc.projectlines)
-               fprintf(stdout, "%s", item->string);
-
-       return 0;
+       return update_submodules(&suc);
 }
 
 static int resolve_relative_path(int argc, const char **argv, const char *prefix)
@@ -1938,6 +2022,45 @@ static int push_check(int argc, const char **argv, const char *prefix)
        return 0;
 }
 
+static int ensure_core_worktree(int argc, const char **argv, const char *prefix)
+{
+       const struct submodule *sub;
+       const char *path;
+       char *cw;
+       struct repository subrepo;
+
+       if (argc != 2)
+               BUG("submodule--helper ensure-core-worktree <path>");
+
+       path = argv[1];
+
+       sub = submodule_from_path(the_repository, &null_oid, path);
+       if (!sub)
+               BUG("We could get the submodule handle before?");
+
+       if (repo_submodule_init(&subrepo, the_repository, path))
+               die(_("could not get a repository handle for submodule '%s'"), path);
+
+       if (!repo_config_get_string(&subrepo, "core.worktree", &cw)) {
+               char *cfg_file, *abs_path;
+               const char *rel_path;
+               struct strbuf sb = STRBUF_INIT;
+
+               cfg_file = repo_git_path(&subrepo, "config");
+
+               abs_path = absolute_pathdup(path);
+               rel_path = relative_path(abs_path, subrepo.gitdir, &sb);
+
+               git_config_set_in_file(cfg_file, "core.worktree", rel_path);
+
+               free(cfg_file);
+               free(abs_path);
+               strbuf_release(&sb);
+       }
+
+       return 0;
+}
+
 static int absorb_git_dirs(int argc, const char **argv, const char *prefix)
 {
        int i;
@@ -2015,7 +2138,9 @@ static struct cmd_struct commands[] = {
        {"list", module_list, 0},
        {"name", module_name, 0},
        {"clone", module_clone, 0},
+       {"update-module-mode", module_update_module_mode, 0},
        {"update-clone", update_clone, 0},
+       {"ensure-core-worktree", ensure_core_worktree, 0},
        {"relative-path", resolve_relative_path, 0},
        {"resolve-relative-url", resolve_relative_url, 0},
        {"resolve-relative-url-test", resolve_relative_url_test, 0},
diff --git a/builtin/tag.c b/builtin/tag.c
index 9a19ffb49f68d7c0318f650d34d0edbcd6db81d3..f6236321865773cb60907bfe8b56d96bbaf3e83a 100644
--- a/builtin/tag.c
+++ b/builtin/tag.c
@@ -559,7 +559,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix)
            ref_transaction_commit(transaction, &err))
                die("%s", err.buf);
        ref_transaction_free(transaction);
-       if (force && !is_null_oid(&prev) && oidcmp(&prev, &object))
+       if (force && !is_null_oid(&prev) && !oideq(&prev, &object))
                printf(_("Updated tag '%s' (was %s)\n"), tag,
                       find_unique_abbrev(&prev, DEFAULT_ABBREV));
 
diff --git a/builtin/unpack-objects.c b/builtin/unpack-objects.c
index 30d9413b4bfb95ad1ee774430b8f954699e46d74..80478808b3dcc7f98e5989140de1451783454c01 100644
--- a/builtin/unpack-objects.c
+++ b/builtin/unpack-objects.c
@@ -303,7 +303,7 @@ static void added_object(unsigned nr, enum object_type type,
        struct delta_info *info;
 
        while ((info = *p) != NULL) {
-               if (!oidcmp(&info->base_oid, &obj_list[nr].oid) ||
+               if (oideq(&info->base_oid, &obj_list[nr].oid) ||
                    info->base_offset == obj_list[nr].offset) {
                        *p = info->next;
                        p = &delta_list;
@@ -579,7 +579,7 @@ int cmd_unpack_objects(int argc, const char **argv, const char *prefix)
                if (fsck_finish(&fsck_options))
                        die(_("fsck error in pack objects"));
        }
-       if (hashcmp(fill(the_hash_algo->rawsz), oid.hash))
+       if (!hasheq(fill(the_hash_algo->rawsz), oid.hash))
                die("final sha1 did not match");
        use(the_hash_algo->rawsz);
 
diff --git a/builtin/update-index.c b/builtin/update-index.c
index fe84003b4fa05c377bb4ab1de04a7cd1c9ae4d5d..e7fab78b3b9cf2433342e195a206d8ff3087f9b9 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -669,7 +669,7 @@ static int unresolve_one(const char *path)
                ret = -1;
                goto free_return;
        }
-       if (!oidcmp(&ce_2->oid, &ce_3->oid) &&
+       if (oideq(&ce_2->oid, &ce_3->oid) &&
            ce_2->ce_mode == ce_3->ce_mode) {
                fprintf(stderr, "%s: identical in both, skipping.\n",
                        path);
@@ -754,7 +754,7 @@ static int do_reupdate(int ac, const char **av,
                        old = read_one_ent(NULL, &head_oid,
                                           ce->name, ce_namelen(ce), 0);
                if (old && ce->ce_mode == old->ce_mode &&
-                   !oidcmp(&ce->oid, &old->oid)) {
+                   oideq(&ce->oid, &old->oid)) {
                        discard_cache_entry(old);
                        continue; /* unchanged */
                }
diff --git a/builtin/update-ref.c b/builtin/update-ref.c
index 4fa3c0a86fd7cbeb5214223e06aa10a2ecfa2d85..2d8f7f05785dca5a083ee6d003d16b4d008dc4d2 100644
--- a/builtin/update-ref.c
+++ b/builtin/update-ref.c
@@ -14,7 +14,8 @@ static const char * const git_update_ref_usage[] = {
 };
 
 static char line_termination = '\n';
-static int update_flags;
+static unsigned int update_flags;
+static unsigned int default_flags;
 static unsigned create_reflog_flag;
 static const char *msg;
 
@@ -205,7 +206,7 @@ static const char *parse_cmd_update(struct ref_transaction *transaction,
                                   msg, &err))
                die("%s", err.buf);
 
-       update_flags = 0;
+       update_flags = default_flags;
        free(refname);
        strbuf_release(&err);
 
@@ -237,7 +238,7 @@ static const char *parse_cmd_create(struct ref_transaction *transaction,
                                   msg, &err))
                die("%s", err.buf);
 
-       update_flags = 0;
+       update_flags = default_flags;
        free(refname);
        strbuf_release(&err);
 
@@ -273,7 +274,7 @@ static const char *parse_cmd_delete(struct ref_transaction *transaction,
                                   update_flags, msg, &err))
                die("%s", err.buf);
 
-       update_flags = 0;
+       update_flags = default_flags;
        free(refname);
        strbuf_release(&err);
 
@@ -302,7 +303,7 @@ static const char *parse_cmd_verify(struct ref_transaction *transaction,
                                   update_flags, &err))
                die("%s", err.buf);
 
-       update_flags = 0;
+       update_flags = default_flags;
        free(refname);
        strbuf_release(&err);
 
@@ -357,7 +358,6 @@ int cmd_update_ref(int argc, const char **argv, const char *prefix)
        const char *refname, *oldval;
        struct object_id oid, oldoid;
        int delete = 0, no_deref = 0, read_stdin = 0, end_null = 0;
-       unsigned int flags = 0;
        int create_reflog = 0;
        struct option options[] = {
                OPT_STRING( 'm', NULL, &msg, N_("reason"), N_("reason of the update")),
@@ -378,6 +378,11 @@ int cmd_update_ref(int argc, const char **argv, const char *prefix)
 
        create_reflog_flag = create_reflog ? REF_FORCE_CREATE_REFLOG : 0;
 
+       if (no_deref) {
+               default_flags = REF_NO_DEREF;
+               update_flags = default_flags;
+       }
+
        if (read_stdin) {
                struct strbuf err = STRBUF_INIT;
                struct ref_transaction *transaction;
@@ -385,7 +390,7 @@ int cmd_update_ref(int argc, const char **argv, const char *prefix)
                transaction = ref_transaction_begin(&err);
                if (!transaction)
                        die("%s", err.buf);
-               if (delete || no_deref || argc > 0)
+               if (delete || argc > 0)
                        usage_with_options(git_update_ref_usage, options);
                if (end_null)
                        line_termination = '\0';
@@ -427,8 +432,6 @@ int cmd_update_ref(int argc, const char **argv, const char *prefix)
                        die("%s: not a valid old SHA1", oldval);
        }
 
-       if (no_deref)
-               flags = REF_NO_DEREF;
        if (delete)
                /*
                 * For purposes of backwards compatibility, we treat
@@ -436,9 +439,9 @@ int cmd_update_ref(int argc, const char **argv, const char *prefix)
                 */
                return delete_ref(msg, refname,
                                  (oldval && !is_null_oid(&oldoid)) ? &oldoid : NULL,
-                                 flags);
+                                 default_flags);
        else
                return update_ref(msg, refname, &oid, oldval ? &oldoid : NULL,
-                                 flags | create_reflog_flag,
+                                 default_flags | create_reflog_flag,
                                  UPDATE_REFS_DIE_ON_ERR);
 }
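
Previously --no-deref was rejected when combined with --stdin; it now seeds default_flags, and each parsed command resets update_flags back to that default instead of to zero, so the option applies to every command in the transaction. A tiny sketch of the reset-to-default pattern (the function name is hypothetical):

#include "cache.h"
#include "refs.h"

static unsigned int default_flags;      /* REF_NO_DEREF when --no-deref was given */
static unsigned int update_flags;       /* flags of the command currently being parsed */

/* Per-command epilogue, mirroring the parse_cmd_* helpers above. */
static void finish_command(void)
{
        /*
         * Reset to the session-wide default rather than to 0, so a
         * global --no-deref keeps applying to the next stdin command.
         */
        update_flags = default_flags;
}
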
diff --git a/builtin/worktree.c b/builtin/worktree.c
index 41e7714396dc3334bcb29f2454d210d63a649243..c4abbde2b87e00944eb86fe2393daf477da976b6 100644
--- a/builtin/worktree.c
+++ b/builtin/worktree.c
@@ -47,6 +47,26 @@ static int git_worktree_config(const char *var, const char *value, void *cb)
        return git_default_config(var, value, cb);
 }
 
+static int delete_git_dir(const char *id)
+{
+       struct strbuf sb = STRBUF_INIT;
+       int ret;
+
+       strbuf_addstr(&sb, git_common_path("worktrees/%s", id));
+       ret = remove_dir_recursively(&sb, 0);
+       if (ret < 0 && errno == ENOTDIR)
+               ret = unlink(sb.buf);
+       if (ret)
+               error_errno(_("failed to delete '%s'"), sb.buf);
+       strbuf_release(&sb);
+       return ret;
+}
+
+static void delete_worktrees_dir_if_empty(void)
+{
+       rmdir(git_path("worktrees")); /* ignore failed removal */
+}
+
 static int prune_worktree(const char *id, struct strbuf *reason)
 {
        struct stat st;
@@ -116,10 +136,8 @@ static int prune_worktree(const char *id, struct strbuf *reason)
 static void prune_worktrees(void)
 {
        struct strbuf reason = STRBUF_INIT;
-       struct strbuf path = STRBUF_INIT;
        DIR *dir = opendir(git_path("worktrees"));
        struct dirent *d;
-       int ret;
        if (!dir)
                return;
        while ((d = readdir(dir)) != NULL) {
@@ -132,18 +150,12 @@ static void prune_worktrees(void)
                        printf("%s\n", reason.buf);
                if (show_only)
                        continue;
-               git_path_buf(&path, "worktrees/%s", d->d_name);
-               ret = remove_dir_recursively(&path, 0);
-               if (ret < 0 && errno == ENOTDIR)
-                       ret = unlink(path.buf);
-               if (ret)
-                       error_errno(_("failed to remove '%s'"), path.buf);
+               delete_git_dir(d->d_name);
        }
        closedir(dir);
        if (!show_only)
-               rmdir(git_path("worktrees"));
+               delete_worktrees_dir_if_empty();
        strbuf_release(&reason);
-       strbuf_release(&path);
 }
 
 static int prune(int ac, const char **av, const char *prefix)
@@ -212,6 +224,43 @@ static const char *worktree_basename(const char *path, int *olen)
        return name;
 }
 
+static void validate_worktree_add(const char *path, const struct add_opts *opts)
+{
+       struct worktree **worktrees;
+       struct worktree *wt;
+       int locked;
+
+       if (file_exists(path) && !is_empty_dir(path))
+               die(_("'%s' already exists"), path);
+
+       worktrees = get_worktrees(0);
+       /*
+        * find_worktree()'s suffix matching may undesirably find the main
+        * rather than a linked worktree (for instance, when the basenames
+        * of the main worktree and the one being created are the same).
+        * We're only interested in linked worktrees, so skip the main
+        * worktree with +1.
+        */
+       wt = find_worktree(worktrees + 1, NULL, path);
+       if (!wt)
+               goto done;
+
+       locked = !!is_worktree_locked(wt);
+       if ((!locked && opts->force) || (locked && opts->force > 1)) {
+               if (delete_git_dir(wt->id))
+                   die(_("unable to re-add worktree '%s'"), path);
+               goto done;
+       }
+
+       if (locked)
+               die(_("'%s' is a missing but locked worktree;\nuse 'add -f -f' to override, or 'unlock' and 'prune' or 'remove' to clear"), path);
+       else
+               die(_("'%s' is a missing but already registered worktree;\nuse 'add -f' to override, or 'prune' or 'remove' to clear"), path);
+
+done:
+       free_worktrees(worktrees);
+}
+
 static int add_worktree(const char *path, const char *refname,
                        const struct add_opts *opts)
 {
@@ -226,8 +275,7 @@ static int add_worktree(const char *path, const char *refname,
        struct commit *commit = NULL;
        int is_branch = 0;
 
-       if (file_exists(path) && !is_empty_dir(path))
-               die(_("'%s' already exists"), path);
+       validate_worktree_add(path, opts);
 
        /* is 'refname' a branch or commit? */
        if (!opts->detach && !strbuf_check_branch_ref(&symref, refname) &&
@@ -697,13 +745,17 @@ static void validate_no_submodules(const struct worktree *wt)
 
 static int move_worktree(int ac, const char **av, const char *prefix)
 {
+       int force = 0;
        struct option options[] = {
+               OPT__FORCE(&force,
+                        N_("force move even if worktree is dirty or locked"),
+                        PARSE_OPT_NOCOMPLETE),
                OPT_END()
        };
        struct worktree **worktrees, *wt;
        struct strbuf dst = STRBUF_INIT;
        struct strbuf errmsg = STRBUF_INIT;
-       const char *reason;
+       const char *reason = NULL;
        char *path;
 
        ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
@@ -734,12 +786,13 @@ static int move_worktree(int ac, const char **av, const char *prefix)
 
        validate_no_submodules(wt);
 
-       reason = is_worktree_locked(wt);
+       if (force < 2)
+               reason = is_worktree_locked(wt);
        if (reason) {
                if (*reason)
-                       die(_("cannot move a locked working tree, lock reason: %s"),
+                       die(_("cannot move a locked working tree, lock reason: %s\nuse 'move -f -f' to override or unlock first"),
                            reason);
-               die(_("cannot move a locked working tree"));
+               die(_("cannot move a locked working tree;\nuse 'move -f -f' to override or unlock first"));
        }
        if (validate_worktree(wt, &errmsg, 0))
                die(_("validation failed, cannot move working tree: %s"),
@@ -822,32 +875,18 @@ static int delete_git_work_tree(struct worktree *wt)
        return ret;
 }
 
-static int delete_git_dir(struct worktree *wt)
-{
-       struct strbuf sb = STRBUF_INIT;
-       int ret = 0;
-
-       strbuf_addstr(&sb, git_common_path("worktrees/%s", wt->id));
-       if (remove_dir_recursively(&sb, 0)) {
-               error_errno(_("failed to delete '%s'"), sb.buf);
-               ret = -1;
-       }
-       strbuf_release(&sb);
-       return ret;
-}
-
 static int remove_worktree(int ac, const char **av, const char *prefix)
 {
        int force = 0;
        struct option options[] = {
                OPT__FORCE(&force,
-                        N_("force removing even if the worktree is dirty"),
+                        N_("force removal even if worktree is dirty or locked"),
                         PARSE_OPT_NOCOMPLETE),
                OPT_END()
        };
        struct worktree **worktrees, *wt;
        struct strbuf errmsg = STRBUF_INIT;
-       const char *reason;
+       const char *reason = NULL;
        int ret = 0;
 
        ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
@@ -860,12 +899,13 @@ static int remove_worktree(int ac, const char **av, const char *prefix)
                die(_("'%s' is not a working tree"), av[0]);
        if (is_main_worktree(wt))
                die(_("'%s' is a main working tree"), av[0]);
-       reason = is_worktree_locked(wt);
+       if (force < 2)
+               reason = is_worktree_locked(wt);
        if (reason) {
                if (*reason)
-                       die(_("cannot remove a locked working tree, lock reason: %s"),
+                       die(_("cannot remove a locked working tree, lock reason: %s\nuse 'remove -f -f' to override or unlock first"),
                            reason);
-               die(_("cannot remove a locked working tree"));
+               die(_("cannot remove a locked working tree;\nuse 'remove -f -f' to override or unlock first"));
        }
        if (validate_worktree(wt, &errmsg, WT_VALIDATE_WORKTREE_MISSING_OK))
                die(_("validation failed, cannot remove working tree: %s"),
@@ -882,7 +922,8 @@ static int remove_worktree(int ac, const char **av, const char *prefix)
         * continue on even if ret is non-zero, there's no going back
         * from here.
         */
-       ret |= delete_git_dir(wt);
+       ret |= delete_git_dir(wt->id);
+       delete_worktrees_dir_if_empty();
 
        free_worktrees(worktrees);
        return ret;
diff --git a/bulk-checkin.c b/bulk-checkin.c
index 9f3b644811cd79b2c93ebf1dcebd711640c18419..409ecb566b3c863e453d10650d1bb21955e2dee3 100644
--- a/bulk-checkin.c
+++ b/bulk-checkin.c
@@ -72,7 +72,7 @@ static int already_written(struct bulk_checkin_state *state, struct object_id *o
 
        /* Might want to keep the list sorted */
        for (i = 0; i < state->nr_written; i++)
-               if (!oidcmp(&state->written[i]->oid, oid))
+               if (oideq(&state->written[i]->oid, oid))
                        return 1;
 
        /* This is a new object we need to keep */
diff --git a/bundle.c b/bundle.c
index 24cbe409863a83e8bda453af637c2237504a531d..14f2cfc24836b4a526df935e83d8e5899055f483 100644
--- a/bundle.c
+++ b/bundle.c
@@ -369,7 +369,7 @@ static int write_bundle_refs(int bundle_fd, struct rev_info *revs)
                 * commit that is referenced by the tag, and not the tag
                 * itself.
                 */
-               if (oidcmp(&oid, &e->item->oid)) {
+               if (!oideq(&oid, &e->item->oid)) {
                        /*
                         * Is this the positive end of a range expressed
                         * in terms of a tag (e.g. v2.0 from the range
diff --git a/cache-tree.c b/cache-tree.c
index 16ea022c46d3b281d04a3956f865d8a886a5b714..5ce51468f0b402fd2121e2c8b51ff12703d431ba 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -4,6 +4,7 @@
 #include "tree-walk.h"
 #include "cache-tree.h"
 #include "object-store.h"
+#include "replace-object.h"
 
 #ifndef DEBUG
 #define DEBUG 0
@@ -433,7 +434,9 @@ int cache_tree_update(struct index_state *istate, int flags)
 
        if (i)
                return i;
+       trace_performance_enter();
        i = update_one(it, cache, entries, "", 0, &skip, flags);
+       trace_performance_leave("cache_tree_update");
        if (i < 0)
                return i;
        istate->cache_changed |= CACHE_TREE_CHANGED;
@@ -714,7 +717,84 @@ int cache_tree_matches_traversal(struct cache_tree *root,
 
        it = find_cache_tree_from_traversal(root, info);
        it = cache_tree_find(it, ent->path);
-       if (it && it->entry_count > 0 && !oidcmp(ent->oid, &it->oid))
+       if (it && it->entry_count > 0 && oideq(ent->oid, &it->oid))
                return it->entry_count;
        return 0;
 }
+
+static void verify_one(struct index_state *istate,
+                      struct cache_tree *it,
+                      struct strbuf *path)
+{
+       int i, pos, len = path->len;
+       struct strbuf tree_buf = STRBUF_INIT;
+       struct object_id new_oid;
+
+       for (i = 0; i < it->subtree_nr; i++) {
+               strbuf_addf(path, "%s/", it->down[i]->name);
+               verify_one(istate, it->down[i]->cache_tree, path);
+               strbuf_setlen(path, len);
+       }
+
+       if (it->entry_count < 0 ||
+           /* no verification on tests (t7003) that replace trees */
+           lookup_replace_object(the_repository, &it->oid) != &it->oid)
+               return;
+
+       if (path->len) {
+               pos = index_name_pos(istate, path->buf, path->len);
+               pos = -pos - 1;
+       } else {
+               pos = 0;
+       }
+
+       i = 0;
+       while (i < it->entry_count) {
+               struct cache_entry *ce = istate->cache[pos + i];
+               const char *slash;
+               struct cache_tree_sub *sub = NULL;
+               const struct object_id *oid;
+               const char *name;
+               unsigned mode;
+               int entlen;
+
+               if (ce->ce_flags & (CE_STAGEMASK | CE_INTENT_TO_ADD | CE_REMOVE))
+                       BUG("%s with flags 0x%x should not be in cache-tree",
+                           ce->name, ce->ce_flags);
+               name = ce->name + path->len;
+               slash = strchr(name, '/');
+               if (slash) {
+                       entlen = slash - name;
+                       sub = find_subtree(it, ce->name + path->len, entlen, 0);
+                       if (!sub || sub->cache_tree->entry_count < 0)
+                               BUG("bad subtree '%.*s'", entlen, name);
+                       oid = &sub->cache_tree->oid;
+                       mode = S_IFDIR;
+                       i += sub->cache_tree->entry_count;
+               } else {
+                       oid = &ce->oid;
+                       mode = ce->ce_mode;
+                       entlen = ce_namelen(ce) - path->len;
+                       i++;
+               }
+               strbuf_addf(&tree_buf, "%o %.*s%c", mode, entlen, name, '\0');
+               strbuf_add(&tree_buf, oid->hash, the_hash_algo->rawsz);
+       }
+       hash_object_file(tree_buf.buf, tree_buf.len, tree_type, &new_oid);
+       if (oidcmp(&new_oid, &it->oid))
+               BUG("cache-tree for path %.*s does not match. "
+                   "Expected %s got %s", len, path->buf,
+                   oid_to_hex(&new_oid), oid_to_hex(&it->oid));
+       strbuf_setlen(path, len);
+       strbuf_release(&tree_buf);
+}
+
+void cache_tree_verify(struct index_state *istate)
+{
+       struct strbuf path = STRBUF_INIT;
+
+       if (!istate->cache_tree)
+               return;
+       verify_one(istate, istate->cache_tree, &path);
+       strbuf_release(&path);
+}
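
The new verify_one() walks every cache-tree level, re-serializes its entries in the canonical tree-object encoding, hashes the buffer, and insists the result matches the cached OID. A minimal sketch of that per-entry encoding, matching the strbuf calls in the hunk above (the helper name is an assumption):

#include "cache.h"

/*
 * Append one tree entry in the canonical tree-object encoding:
 * "<octal mode> <name>\0" followed by the raw hash bytes.
 */
static void add_tree_entry(struct strbuf *buf, unsigned mode,
                           const char *name, int namelen,
                           const struct object_id *oid)
{
        strbuf_addf(buf, "%o %.*s%c", mode, namelen, name, '\0');
        strbuf_add(buf, oid->hash, the_hash_algo->rawsz);
}

Hashing the assembled buffer with hash_object_file(..., tree_type, ...) and comparing against it->oid is then exactly the BUG() check performed above.
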
diff --git a/cache-tree.h b/cache-tree.h
index fc0c842e773f648d64858d1891f64d222633fc3a..0ab6784ffea5da0581c905738aaa66215266b5f9 100644
--- a/cache-tree.h
+++ b/cache-tree.h
@@ -32,6 +32,7 @@ struct cache_tree *cache_tree_read(const char *buffer, unsigned long size);
 
 int cache_tree_fully_valid(struct cache_tree *);
 int cache_tree_update(struct index_state *, int);
+void cache_tree_verify(struct index_state *);
 
 /* bitmasks to write_cache_as_tree flags */
 #define WRITE_TREE_MISSING_OK 1
diff --git a/cache.h b/cache.h
index 4d014541ab7bc7692919c871a5306543bbf361c5..d508f3d4f8837caef469389c71950ee96eea709b 100644
--- a/cache.h
+++ b/cache.h
@@ -1041,14 +1041,24 @@ static inline int oidcmp(const struct object_id *oid1, const struct object_id *o
        return hashcmp(oid1->hash, oid2->hash);
 }
 
+static inline int hasheq(const unsigned char *sha1, const unsigned char *sha2)
+{
+       return !hashcmp(sha1, sha2);
+}
+
+static inline int oideq(const struct object_id *oid1, const struct object_id *oid2)
+{
+       return hasheq(oid1->hash, oid2->hash);
+}
+
 static inline int is_null_sha1(const unsigned char *sha1)
 {
-       return !hashcmp(sha1, null_sha1);
+       return hasheq(sha1, null_sha1);
 }
 
 static inline int is_null_oid(const struct object_id *oid)
 {
-       return !hashcmp(oid->hash, null_sha1);
+       return hasheq(oid->hash, null_sha1);
 }
 
 static inline void hashcpy(unsigned char *sha_dst, const unsigned char *sha_src)
@@ -1085,22 +1095,22 @@ static inline void oidread(struct object_id *oid, const unsigned char *hash)
 
 static inline int is_empty_blob_sha1(const unsigned char *sha1)
 {
-       return !hashcmp(sha1, the_hash_algo->empty_blob->hash);
+       return hasheq(sha1, the_hash_algo->empty_blob->hash);
 }
 
 static inline int is_empty_blob_oid(const struct object_id *oid)
 {
-       return !oidcmp(oid, the_hash_algo->empty_blob);
+       return oideq(oid, the_hash_algo->empty_blob);
 }
 
 static inline int is_empty_tree_sha1(const unsigned char *sha1)
 {
-       return !hashcmp(sha1, the_hash_algo->empty_tree->hash);
+       return hasheq(sha1, the_hash_algo->empty_tree->hash);
 }
 
 static inline int is_empty_tree_oid(const struct object_id *oid)
 {
-       return !oidcmp(oid, the_hash_algo->empty_tree);
+       return oideq(oid, the_hash_algo->empty_tree);
 }
 
 const char *empty_tree_oid_hex(void);
@@ -1518,6 +1528,7 @@ struct checkout {
        unsigned force:1,
                 quiet:1,
                 not_new:1,
+                clone:1,
                 refresh_cache:1;
 };
 #define CHECKOUT_INIT { NULL, "" }
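
These helpers give the negated comparisons a name: hasheq(a, b) is !hashcmp(a, b) and oideq(a, b) is !oidcmp(a, b), which is the form nearly every call site touched by this merge was spelling by hand. A short before/after sketch of the mechanical conversion (function names are illustrative):

#include "cache.h"

/* Before: equality expressed through the tri-state comparison helper. */
static int same_commit_old(const struct object_id *a, const struct object_id *b)
{
        return !oidcmp(a, b);
}

/* After: the intent is stated directly. */
static int same_commit_new(const struct object_id *a, const struct object_id *b)
{
        return oideq(a, b);
}

The wrappers are thin today, but naming the equality-only case leaves room to optimize it separately from the ordering comparison later.
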
diff --git a/combine-diff.c b/combine-diff.c
index de7695e72824f6f041e0979e29302ded6e50d611..0fed4ca529c7da81cde2b84bbe276e55ad87d0e4 100644
--- a/combine-diff.c
+++ b/combine-diff.c
@@ -1138,8 +1138,8 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent,
        for (i = 0; i < num_parent; i++) {
                int j;
                for (j = 0; j < i; j++) {
-                       if (!oidcmp(&elem->parent[i].oid,
-                                    &elem->parent[j].oid)) {
+                       if (oideq(&elem->parent[i].oid,
+                                 &elem->parent[j].oid)) {
                                reuse_combine_diff(sline, cnt, i, j);
                                break;
                        }
diff --git a/command-list.txt b/command-list.txt
index a9dda3b8af6a754564f8f840f0ca63d93f6c88dc..c36ea3c18226cb6212eb8dcbf5b6e5df5886c922 100644
--- a/command-list.txt
+++ b/command-list.txt
@@ -123,6 +123,7 @@ git-merge-index                         plumbingmanipulators
 git-merge-one-file                      purehelpers
 git-mergetool                           ancillarymanipulators           complete
 git-merge-tree                          ancillaryinterrogators
+git-multi-pack-index                    plumbingmanipulators
 git-mktag                               plumbingmanipulators
 git-mktree                              plumbingmanipulators
 git-mv                                  mainporcelain           worktree
diff --git a/commit-graph.c b/commit-graph.c
index 8a1bec7b8aa420dd3d4ecadc95dee31029533c07..a6867586039b507e7802a469a1a3bed82e44bddb 100644
--- a/commit-graph.c
+++ b/commit-graph.c
@@ -13,6 +13,9 @@
 #include "commit-graph.h"
 #include "object-store.h"
 #include "alloc.h"
+#include "hashmap.h"
+#include "replace-object.h"
+#include "progress.h"
 
 #define GRAPH_SIGNATURE 0x43475048 /* "CGPH" */
 #define GRAPH_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
@@ -56,6 +59,28 @@ static struct commit_graph *alloc_commit_graph(void)
        return g;
 }
 
+extern int read_replace_refs;
+
+static int commit_graph_compatible(struct repository *r)
+{
+       if (!r->gitdir)
+               return 0;
+
+       if (read_replace_refs) {
+               prepare_replace_object(r);
+               if (hashmap_get_size(&r->objects->replace_map->map))
+                       return 0;
+       }
+
+       prepare_commit_graft(r);
+       if (r->parsed_objects && r->parsed_objects->grafts_nr)
+               return 0;
+       if (is_repository_shallow(r))
+               return 0;
+
+       return 1;
+}
+
 struct commit_graph *load_commit_graph_one(const char *graph_file)
 {
        void *graph_map;
@@ -213,8 +238,9 @@ static int prepare_commit_graph(struct repository *r)
                return !!r->objects->commit_graph;
        r->objects->commit_graph_attempted = 1;
 
-       if (repo_config_get_bool(r, "core.commitgraph", &config_value) ||
-           !config_value)
+       if (!git_env_bool(GIT_TEST_COMMIT_GRAPH, 0) &&
+           (repo_config_get_bool(r, "core.commitgraph", &config_value) ||
+           !config_value))
                /*
                 * This repository is not configured to use commit graphs, so
                 * do not load one. (But report commit_graph_attempted anyway
@@ -223,6 +249,9 @@ static int prepare_commit_graph(struct repository *r)
                 */
                return 0;
 
+       if (!commit_graph_compatible(r))
+               return 0;
+
        obj_dir = r->objects->objectdir;
        prepare_commit_graph_one(r, obj_dir);
        prepare_alt_odb(r);
@@ -233,10 +262,28 @@ static int prepare_commit_graph(struct repository *r)
        return !!r->objects->commit_graph;
 }
 
-static void close_commit_graph(void)
+int generation_numbers_enabled(struct repository *r)
 {
-       free_commit_graph(the_repository->objects->commit_graph);
-       the_repository->objects->commit_graph = NULL;
+       uint32_t first_generation;
+       struct commit_graph *g;
+       if (!prepare_commit_graph(r))
+              return 0;
+
+       g = r->objects->commit_graph;
+
+       if (!g->num_commits)
+               return 0;
+
+       first_generation = get_be32(g->chunk_commit_data +
+                                   g->hash_len + 8) >> 2;
+
+       return !!first_generation;
+}
+
+void close_commit_graph(struct repository *r)
+{
+       free_commit_graph(r->objects->commit_graph);
+       r->objects->commit_graph = NULL;
 }
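
generation_numbers_enabled() peeks at the first record of the commit-data chunk: after the root-tree hash and two 4-byte parent positions come 8 bytes packing a 30-bit generation number and a 34-bit commit date, which is why the code reads get_be32(... + hash_len + 8) >> 2. A hedged sketch of decoding that pair, based on the commit-graph format notes (the helper name is hypothetical):

#include "cache.h"

/* Decode the generation/date word pair of one commit-data record. */
static void decode_gen_and_date(const unsigned char *record, size_t hash_len,
                                uint32_t *generation, timestamp_t *date)
{
        uint32_t hi = get_be32(record + hash_len + 8);
        uint32_t lo = get_be32(record + hash_len + 12);

        *generation = hi >> 2;                          /* upper 30 bits */
        *date = ((timestamp_t)(hi & 0x3) << 32) | lo;   /* 34-bit date */
}
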
 
 static int bsearch_graph(struct commit_graph *g, struct object_id *oid, uint32_t *pos)
@@ -548,6 +595,8 @@ struct packed_oid_list {
        struct object_id *list;
        int nr;
        int alloc;
+       struct progress *progress;
+       int progress_done;
 };
 
 static int add_packed_commits(const struct object_id *oid,
@@ -560,6 +609,9 @@ static int add_packed_commits(const struct object_id *oid,
        off_t offset = nth_packed_object_offset(pack, pos);
        struct object_info oi = OBJECT_INFO_INIT;
 
+       if (list->progress)
+               display_progress(list->progress, ++list->progress_done);
+
        oi.typep = &type;
        if (packed_object_info(the_repository, pack, offset, &oi) < 0)
                die(_("unable to get type of object %s"), oid_to_hex(oid));
@@ -587,12 +639,18 @@ static void add_missing_parents(struct packed_oid_list *oids, struct commit *com
        }
 }
 
-static void close_reachable(struct packed_oid_list *oids)
+static void close_reachable(struct packed_oid_list *oids, int report_progress)
 {
        int i;
        struct commit *commit;
+       struct progress *progress = NULL;
+       int j = 0;
 
+       if (report_progress)
+               progress = start_delayed_progress(
+                       _("Annotating commits in commit graph"), 0);
        for (i = 0; i < oids->nr; i++) {
+               display_progress(progress, ++j);
                commit = lookup_commit(the_repository, &oids->list[i]);
                if (commit)
                        commit->object.flags |= UNINTERESTING;
@@ -604,6 +662,7 @@ static void close_reachable(struct packed_oid_list *oids)
         * closure.
         */
        for (i = 0; i < oids->nr; i++) {
+               display_progress(progress, ++j);
                commit = lookup_commit(the_repository, &oids->list[i]);
 
                if (commit && !parse_commit(commit))
@@ -611,19 +670,28 @@ static void close_reachable(struct packed_oid_list *oids)
        }
 
        for (i = 0; i < oids->nr; i++) {
+               display_progress(progress, ++j);
                commit = lookup_commit(the_repository, &oids->list[i]);
 
                if (commit)
                        commit->object.flags &= ~UNINTERESTING;
        }
+       stop_progress(&progress);
 }
 
-static void compute_generation_numbers(struct packed_commit_list* commits)
+static void compute_generation_numbers(struct packed_commit_list* commits,
+                                      int report_progress)
 {
        int i;
        struct commit_list *list = NULL;
+       struct progress *progress = NULL;
 
+       if (report_progress)
+               progress = start_progress(
+                       _("Computing commit graph generation numbers"),
+                       commits->nr);
        for (i = 0; i < commits->nr; i++) {
+               display_progress(progress, i + 1);
                if (commits->list[i]->generation != GENERATION_NUMBER_INFINITY &&
                    commits->list[i]->generation != GENERATION_NUMBER_ZERO)
                        continue;
@@ -655,6 +723,7 @@ static void compute_generation_numbers(struct packed_commit_list* commits)
                        }
                }
        }
+       stop_progress(&progress);
 }
 
 static int add_ref_to_list(const char *refname,
@@ -667,19 +736,20 @@ static int add_ref_to_list(const char *refname,
        return 0;
 }
 
-void write_commit_graph_reachable(const char *obj_dir, int append)
+void write_commit_graph_reachable(const char *obj_dir, int append,
+                                 int report_progress)
 {
        struct string_list list;
 
        string_list_init(&list, 1);
        for_each_ref(add_ref_to_list, &list);
-       write_commit_graph(obj_dir, NULL, &list, append);
+       write_commit_graph(obj_dir, NULL, &list, append, report_progress);
 }
 
 void write_commit_graph(const char *obj_dir,
                        struct string_list *pack_indexes,
                        struct string_list *commit_hex,
-                       int append)
+                       int append, int report_progress)
 {
        struct packed_oid_list oids;
        struct packed_commit_list commits;
@@ -692,9 +762,15 @@ void write_commit_graph(const char *obj_dir,
        int num_chunks;
        int num_extra_edges;
        struct commit_list *parent;
+       struct progress *progress = NULL;
+
+       if (!commit_graph_compatible(the_repository))
+               return;
 
        oids.nr = 0;
        oids.alloc = approximate_object_count() / 4;
+       oids.progress = NULL;
+       oids.progress_done = 0;
 
        if (append) {
                prepare_commit_graph_one(the_repository, obj_dir);
@@ -721,6 +797,11 @@ void write_commit_graph(const char *obj_dir,
                int dirlen;
                strbuf_addf(&packname, "%s/pack/", obj_dir);
                dirlen = packname.len;
+               if (report_progress) {
+                       oids.progress = start_delayed_progress(
+                               _("Finding commits for commit graph"), 0);
+                       oids.progress_done = 0;
+               }
                for (i = 0; i < pack_indexes->nr; i++) {
                        struct packed_git *p;
                        strbuf_setlen(&packname, dirlen);
@@ -733,15 +814,21 @@ void write_commit_graph(const char *obj_dir,
                        for_each_object_in_pack(p, add_packed_commits, &oids, 0);
                        close_pack(p);
                }
+               stop_progress(&oids.progress);
                strbuf_release(&packname);
        }
 
        if (commit_hex) {
+               if (report_progress)
+                       progress = start_delayed_progress(
+                               _("Finding commits for commit graph"),
+                               commit_hex->nr);
                for (i = 0; i < commit_hex->nr; i++) {
                        const char *end;
                        struct object_id oid;
                        struct commit *result;
 
+                       display_progress(progress, i + 1);
                        if (commit_hex->items[i].string &&
                            parse_oid_hex(commit_hex->items[i].string, &oid, &end))
                                continue;
@@ -754,18 +841,24 @@ void write_commit_graph(const char *obj_dir,
                                oids.nr++;
                        }
                }
+               stop_progress(&progress);
        }
 
-       if (!pack_indexes && !commit_hex)
+       if (!pack_indexes && !commit_hex) {
+               if (report_progress)
+                       oids.progress = start_delayed_progress(
+                               _("Finding commits for commit graph"), 0);
                for_each_packed_object(add_packed_commits, &oids, 0);
+               stop_progress(&oids.progress);
+       }
 
-       close_reachable(&oids);
+       close_reachable(&oids, report_progress);
 
        QSORT(oids.list, oids.nr, commit_compare);
 
        count_distinct = 1;
        for (i = 1; i < oids.nr; i++) {
-               if (oidcmp(&oids.list[i-1], &oids.list[i]))
+               if (!oideq(&oids.list[i - 1], &oids.list[i]))
                        count_distinct++;
        }
 
@@ -779,7 +872,7 @@ void write_commit_graph(const char *obj_dir,
        num_extra_edges = 0;
        for (i = 0; i < oids.nr; i++) {
                int num_parents = 0;
-               if (i > 0 && !oidcmp(&oids.list[i-1], &oids.list[i]))
+               if (i > 0 && oideq(&oids.list[i - 1], &oids.list[i]))
                        continue;
 
                commits.list[commits.nr] = lookup_commit(the_repository, &oids.list[i]);
@@ -799,7 +892,7 @@ void write_commit_graph(const char *obj_dir,
        if (commits.nr >= GRAPH_PARENT_MISSING)
                die(_("too many commits to write graph"));
 
-       compute_generation_numbers(&commits);
+       compute_generation_numbers(&commits, report_progress);
 
        graph_name = get_commit_graph_filename(obj_dir);
        if (safe_create_leading_directories(graph_name))
@@ -845,7 +938,7 @@ void write_commit_graph(const char *obj_dir,
        write_graph_chunk_data(f, GRAPH_OID_LEN, commits.list, commits.nr);
        write_graph_chunk_large_edges(f, commits.list, commits.nr);
 
-       close_commit_graph();
+       close_commit_graph(the_repository);
        finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_FSYNC);
        commit_lock_file(&lk);
 
@@ -878,6 +971,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g)
        int generation_zero = 0;
        struct hashfile *f;
        int devnull;
+       struct progress *progress = NULL;
 
        if (!g) {
                graph_report("no commit-graph file loaded");
@@ -900,7 +994,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g)
        f = hashfd(devnull, NULL);
        hashwrite(f, g->data, g->data_len - g->hash_len);
        finalize_hashfile(f, checksum.hash, CSUM_CLOSE);
-       if (hashcmp(checksum.hash, g->data + g->data_len - g->hash_len)) {
+       if (!hasheq(checksum.hash, g->data + g->data_len - g->hash_len)) {
                graph_report(_("the commit-graph file has incorrect checksum and is likely corrupt"));
                verify_commit_graph_error = VERIFY_COMMIT_GRAPH_ERROR_HASH;
        }
@@ -945,11 +1039,14 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g)
        if (verify_commit_graph_error & ~VERIFY_COMMIT_GRAPH_ERROR_HASH)
                return verify_commit_graph_error;
 
+       progress = start_progress(_("Verifying commits in commit graph"),
+                                 g->num_commits);
        for (i = 0; i < g->num_commits; i++) {
                struct commit *graph_commit, *odb_commit;
                struct commit_list *graph_parents, *odb_parents;
                uint32_t max_generation = 0;
 
+               display_progress(progress, i + 1);
                hashcpy(cur_oid.hash, g->chunk_oid_lookup + g->hash_len * i);
 
                graph_commit = lookup_commit(r, &cur_oid);
@@ -960,7 +1057,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g)
                        continue;
                }
 
-               if (oidcmp(&get_commit_tree_in_graph_one(g, graph_commit)->object.oid,
+               if (!oideq(&get_commit_tree_in_graph_one(g, graph_commit)->object.oid,
                           get_commit_tree_oid(odb_commit)))
                        graph_report("root tree OID for commit %s in commit-graph is %s != %s",
                                     oid_to_hex(&cur_oid),
@@ -977,7 +1074,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g)
                                break;
                        }
 
-                       if (oidcmp(&graph_parents->item->object.oid, &odb_parents->item->object.oid))
+                       if (!oideq(&graph_parents->item->object.oid, &odb_parents->item->object.oid))
                                graph_report("commit-graph parent for %s is %s != %s",
                                             oid_to_hex(&cur_oid),
                                             oid_to_hex(&graph_parents->item->object.oid),
@@ -1026,6 +1123,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g)
                                     graph_commit->date,
                                     odb_commit->date);
        }
+       stop_progress(&progress);
 
        return verify_commit_graph_error;
 }
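
The progress reporting added in these hunks follows one pattern throughout: allocate a progress meter (delayed when the total is unknown), tick it once per loop iteration, and stop it afterwards; the unconditional display_progress()/stop_progress() calls are safe because both tolerate a NULL meter, which is what the non-reporting callers end up passing. A minimal sketch of that pattern, assuming git's internal progress.h API, with a placeholder title, item count and loop body:

    struct progress *progress = NULL;
    int i;

    if (report_progress)
            progress = start_delayed_progress(_("Doing some work"), 0);
    for (i = 0; i < nr_items; i++) {
            display_progress(progress, i + 1);
            /* ... process item i ... */
    }
    stop_progress(&progress);       /* no-op when progress is NULL */
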
index eea62f8c0ee53b56630a1a2b0c2c716b4cd63670..9db40b4d3aadb75b4dcea5027d07d373bdf169e1 100644 (file)
@@ -6,6 +6,8 @@
 #include "string-list.h"
 #include "cache.h"
 
+#define GIT_TEST_COMMIT_GRAPH "GIT_TEST_COMMIT_GRAPH"
+
 struct commit;
 
 char *get_commit_graph_filename(const char *obj_dir);
@@ -52,14 +54,22 @@ struct commit_graph {
 
 struct commit_graph *load_commit_graph_one(const char *graph_file);
 
-void write_commit_graph_reachable(const char *obj_dir, int append);
+/*
+ * Return 1 if and only if the repository has a commit-graph
+ * file and generation numbers are computed in that file.
+ */
+int generation_numbers_enabled(struct repository *r);
+
+void write_commit_graph_reachable(const char *obj_dir, int append,
+                                 int report_progress);
 void write_commit_graph(const char *obj_dir,
                        struct string_list *pack_indexes,
                        struct string_list *commit_hex,
-                       int append);
+                       int append, int report_progress);
 
 int verify_commit_graph(struct repository *r, struct commit_graph *g);
 
+void close_commit_graph(struct repository *);
 void free_commit_graph(struct commit_graph *);
 
 #endif
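
With the new report_progress flag, whether to show progress is decided by the callers of write_commit_graph() and write_commit_graph_reachable(); those call sites are not part of this excerpt, so the following is only a hypothetical caller sketch, assuming get_object_directory() from cache.h and isatty() from the C library:

    #include "cache.h"
    #include "commit-graph.h"

    /* hypothetical: regenerate the commit-graph for everything reachable
     * from refs, showing progress only on an interactive terminal */
    static void update_commit_graph(void)
    {
            int show_progress = isatty(2);

            write_commit_graph_reachable(get_object_directory(), 0, show_progress);
    }
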
diff --git a/commit-reach.c b/commit-reach.c
new file mode 100644 (file)
index 0000000..67e1792
--- /dev/null
@@ -0,0 +1,690 @@
+#include "cache.h"
+#include "commit.h"
+#include "commit-graph.h"
+#include "decorate.h"
+#include "prio-queue.h"
+#include "tree.h"
+#include "ref-filter.h"
+#include "revision.h"
+#include "tag.h"
+#include "commit-reach.h"
+
+/* Remember to update object flag allocation in object.h */
+#define REACHABLE       (1u<<15)
+#define PARENT1                (1u<<16)
+#define PARENT2                (1u<<17)
+#define STALE          (1u<<18)
+#define RESULT         (1u<<19)
+
+static const unsigned all_flags = (PARENT1 | PARENT2 | STALE | RESULT);
+
+static int queue_has_nonstale(struct prio_queue *queue)
+{
+       int i;
+       for (i = 0; i < queue->nr; i++) {
+               struct commit *commit = queue->array[i].data;
+               if (!(commit->object.flags & STALE))
+                       return 1;
+       }
+       return 0;
+}
+
+/* all input commits in one and twos[] must have been parsed! */
+static struct commit_list *paint_down_to_common(struct commit *one, int n,
+                                               struct commit **twos,
+                                               int min_generation)
+{
+       struct prio_queue queue = { compare_commits_by_gen_then_commit_date };
+       struct commit_list *result = NULL;
+       int i;
+       uint32_t last_gen = GENERATION_NUMBER_INFINITY;
+
+       if (!min_generation)
+               queue.compare = compare_commits_by_commit_date;
+
+       one->object.flags |= PARENT1;
+       if (!n) {
+               commit_list_append(one, &result);
+               return result;
+       }
+       prio_queue_put(&queue, one);
+
+       for (i = 0; i < n; i++) {
+               twos[i]->object.flags |= PARENT2;
+               prio_queue_put(&queue, twos[i]);
+       }
+
+       while (queue_has_nonstale(&queue)) {
+               struct commit *commit = prio_queue_get(&queue);
+               struct commit_list *parents;
+               int flags;
+
+               if (min_generation && commit->generation > last_gen)
+                       BUG("bad generation skip %8x > %8x at %s",
+                           commit->generation, last_gen,
+                           oid_to_hex(&commit->object.oid));
+               last_gen = commit->generation;
+
+               if (commit->generation < min_generation)
+                       break;
+
+               flags = commit->object.flags & (PARENT1 | PARENT2 | STALE);
+               if (flags == (PARENT1 | PARENT2)) {
+                       if (!(commit->object.flags & RESULT)) {
+                               commit->object.flags |= RESULT;
+                               commit_list_insert_by_date(commit, &result);
+                       }
+                       /* Mark parents of a found merge stale */
+                       flags |= STALE;
+               }
+               parents = commit->parents;
+               while (parents) {
+                       struct commit *p = parents->item;
+                       parents = parents->next;
+                       if ((p->object.flags & flags) == flags)
+                               continue;
+                       if (parse_commit(p))
+                               return NULL;
+                       p->object.flags |= flags;
+                       prio_queue_put(&queue, p);
+               }
+       }
+
+       clear_prio_queue(&queue);
+       return result;
+}
+
+static struct commit_list *merge_bases_many(struct commit *one, int n, struct commit **twos)
+{
+       struct commit_list *list = NULL;
+       struct commit_list *result = NULL;
+       int i;
+
+       for (i = 0; i < n; i++) {
+               if (one == twos[i])
+                       /*
+                        * We do not mark this even with RESULT so we do not
+                        * have to clean it up.
+                        */
+                       return commit_list_insert(one, &result);
+       }
+
+       if (parse_commit(one))
+               return NULL;
+       for (i = 0; i < n; i++) {
+               if (parse_commit(twos[i]))
+                       return NULL;
+       }
+
+       list = paint_down_to_common(one, n, twos, 0);
+
+       while (list) {
+               struct commit *commit = pop_commit(&list);
+               if (!(commit->object.flags & STALE))
+                       commit_list_insert_by_date(commit, &result);
+       }
+       return result;
+}
+
+struct commit_list *get_octopus_merge_bases(struct commit_list *in)
+{
+       struct commit_list *i, *j, *k, *ret = NULL;
+
+       if (!in)
+               return ret;
+
+       commit_list_insert(in->item, &ret);
+
+       for (i = in->next; i; i = i->next) {
+               struct commit_list *new_commits = NULL, *end = NULL;
+
+               for (j = ret; j; j = j->next) {
+                       struct commit_list *bases;
+                       bases = get_merge_bases(i->item, j->item);
+                       if (!new_commits)
+                               new_commits = bases;
+                       else
+                               end->next = bases;
+                       for (k = bases; k; k = k->next)
+                               end = k;
+               }
+               ret = new_commits;
+       }
+       return ret;
+}
+
+static int remove_redundant(struct commit **array, int cnt)
+{
+       /*
+        * Some commit in the array may be an ancestor of
+        * another commit.  Move such commit to the end of
+        * the array, and return the number of commits that
+        * are independent from each other.
+        */
+       struct commit **work;
+       unsigned char *redundant;
+       int *filled_index;
+       int i, j, filled;
+
+       work = xcalloc(cnt, sizeof(*work));
+       redundant = xcalloc(cnt, 1);
+       ALLOC_ARRAY(filled_index, cnt - 1);
+
+       for (i = 0; i < cnt; i++)
+               parse_commit(array[i]);
+       for (i = 0; i < cnt; i++) {
+               struct commit_list *common;
+               uint32_t min_generation = array[i]->generation;
+
+               if (redundant[i])
+                       continue;
+               for (j = filled = 0; j < cnt; j++) {
+                       if (i == j || redundant[j])
+                               continue;
+                       filled_index[filled] = j;
+                       work[filled++] = array[j];
+
+                       if (array[j]->generation < min_generation)
+                               min_generation = array[j]->generation;
+               }
+               common = paint_down_to_common(array[i], filled, work,
+                                             min_generation);
+               if (array[i]->object.flags & PARENT2)
+                       redundant[i] = 1;
+               for (j = 0; j < filled; j++)
+                       if (work[j]->object.flags & PARENT1)
+                               redundant[filled_index[j]] = 1;
+               clear_commit_marks(array[i], all_flags);
+               clear_commit_marks_many(filled, work, all_flags);
+               free_commit_list(common);
+       }
+
+       /* Now collect the result */
+       COPY_ARRAY(work, array, cnt);
+       for (i = filled = 0; i < cnt; i++)
+               if (!redundant[i])
+                       array[filled++] = work[i];
+       for (j = filled, i = 0; i < cnt; i++)
+               if (redundant[i])
+                       array[j++] = work[i];
+       free(work);
+       free(redundant);
+       free(filled_index);
+       return filled;
+}
+
+static struct commit_list *get_merge_bases_many_0(struct commit *one,
+                                                 int n,
+                                                 struct commit **twos,
+                                                 int cleanup)
+{
+       struct commit_list *list;
+       struct commit **rslt;
+       struct commit_list *result;
+       int cnt, i;
+
+       result = merge_bases_many(one, n, twos);
+       for (i = 0; i < n; i++) {
+               if (one == twos[i])
+                       return result;
+       }
+       if (!result || !result->next) {
+               if (cleanup) {
+                       clear_commit_marks(one, all_flags);
+                       clear_commit_marks_many(n, twos, all_flags);
+               }
+               return result;
+       }
+
+       /* There are more than one */
+       cnt = commit_list_count(result);
+       rslt = xcalloc(cnt, sizeof(*rslt));
+       for (list = result, i = 0; list; list = list->next)
+               rslt[i++] = list->item;
+       free_commit_list(result);
+
+       clear_commit_marks(one, all_flags);
+       clear_commit_marks_many(n, twos, all_flags);
+
+       cnt = remove_redundant(rslt, cnt);
+       result = NULL;
+       for (i = 0; i < cnt; i++)
+               commit_list_insert_by_date(rslt[i], &result);
+       free(rslt);
+       return result;
+}
+
+struct commit_list *get_merge_bases_many(struct commit *one,
+                                        int n,
+                                        struct commit **twos)
+{
+       return get_merge_bases_many_0(one, n, twos, 1);
+}
+
+struct commit_list *get_merge_bases_many_dirty(struct commit *one,
+                                              int n,
+                                              struct commit **twos)
+{
+       return get_merge_bases_many_0(one, n, twos, 0);
+}
+
+struct commit_list *get_merge_bases(struct commit *one, struct commit *two)
+{
+       return get_merge_bases_many_0(one, 1, &two, 1);
+}
+
+/*
+ * Is "commit" a descendant of one of the elements on the "with_commit" list?
+ */
+int is_descendant_of(struct commit *commit, struct commit_list *with_commit)
+{
+       if (!with_commit)
+               return 1;
+
+       if (generation_numbers_enabled(the_repository)) {
+               struct commit_list *from_list = NULL;
+               int result;
+               commit_list_insert(commit, &from_list);
+               result = can_all_from_reach(from_list, with_commit, 0);
+               free_commit_list(from_list);
+               return result;
+       } else {
+               while (with_commit) {
+                       struct commit *other;
+
+                       other = with_commit->item;
+                       with_commit = with_commit->next;
+                       if (in_merge_bases(other, commit))
+                               return 1;
+               }
+               return 0;
+       }
+}
+
+/*
+ * Is "commit" an ancestor of one of the "references"?
+ */
+int in_merge_bases_many(struct commit *commit, int nr_reference, struct commit **reference)
+{
+       struct commit_list *bases;
+       int ret = 0, i;
+       uint32_t min_generation = GENERATION_NUMBER_INFINITY;
+
+       if (parse_commit(commit))
+               return ret;
+       for (i = 0; i < nr_reference; i++) {
+               if (parse_commit(reference[i]))
+                       return ret;
+               if (reference[i]->generation < min_generation)
+                       min_generation = reference[i]->generation;
+       }
+
+       if (commit->generation > min_generation)
+               return ret;
+
+       bases = paint_down_to_common(commit, nr_reference, reference, commit->generation);
+       if (commit->object.flags & PARENT2)
+               ret = 1;
+       clear_commit_marks(commit, all_flags);
+       clear_commit_marks_many(nr_reference, reference, all_flags);
+       free_commit_list(bases);
+       return ret;
+}
+
+/*
+ * Is "commit" an ancestor of (i.e. reachable from) the "reference"?
+ */
+int in_merge_bases(struct commit *commit, struct commit *reference)
+{
+       return in_merge_bases_many(commit, 1, &reference);
+}
+
+struct commit_list *reduce_heads(struct commit_list *heads)
+{
+       struct commit_list *p;
+       struct commit_list *result = NULL, **tail = &result;
+       struct commit **array;
+       int num_head, i;
+
+       if (!heads)
+               return NULL;
+
+       /* Uniquify */
+       for (p = heads; p; p = p->next)
+               p->item->object.flags &= ~STALE;
+       for (p = heads, num_head = 0; p; p = p->next) {
+               if (p->item->object.flags & STALE)
+                       continue;
+               p->item->object.flags |= STALE;
+               num_head++;
+       }
+       array = xcalloc(num_head, sizeof(*array));
+       for (p = heads, i = 0; p; p = p->next) {
+               if (p->item->object.flags & STALE) {
+                       array[i++] = p->item;
+                       p->item->object.flags &= ~STALE;
+               }
+       }
+       num_head = remove_redundant(array, num_head);
+       for (i = 0; i < num_head; i++)
+               tail = &commit_list_insert(array[i], tail)->next;
+       free(array);
+       return result;
+}
+
+void reduce_heads_replace(struct commit_list **heads)
+{
+       struct commit_list *result = reduce_heads(*heads);
+       free_commit_list(*heads);
+       *heads = result;
+}
+
+int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid)
+{
+       struct object *o;
+       struct commit *old_commit, *new_commit;
+       struct commit_list *old_commit_list = NULL;
+
+       /*
+        * Both new_commit and old_commit must be commit-ish and new_commit is a descendant of
+        * old_commit.  Otherwise we require --force.
+        */
+       o = deref_tag(the_repository, parse_object(the_repository, old_oid),
+                     NULL, 0);
+       if (!o || o->type != OBJ_COMMIT)
+               return 0;
+       old_commit = (struct commit *) o;
+
+       o = deref_tag(the_repository, parse_object(the_repository, new_oid),
+                     NULL, 0);
+       if (!o || o->type != OBJ_COMMIT)
+               return 0;
+       new_commit = (struct commit *) o;
+
+       if (parse_commit(new_commit) < 0)
+               return 0;
+
+       commit_list_insert(old_commit, &old_commit_list);
+       return is_descendant_of(new_commit, old_commit_list);
+}
+
+/*
+ * Mimicking the real stack, this stack lives on the heap, avoiding stack
+ * overflows.
+ *
+ * At each recursion step, the stack items point to the commits whose
+ * ancestors are to be inspected.
+ */
+struct contains_stack {
+       int nr, alloc;
+       struct contains_stack_entry {
+               struct commit *commit;
+               struct commit_list *parents;
+       } *contains_stack;
+};
+
+static int in_commit_list(const struct commit_list *want, struct commit *c)
+{
+       for (; want; want = want->next)
+               if (!oidcmp(&want->item->object.oid, &c->object.oid))
+                       return 1;
+       return 0;
+}
+
+/*
+ * Test whether the candidate is contained in the list.
+ * Do not recurse to find out, though, but return CONTAINS_UNKNOWN if inconclusive.
+ */
+static enum contains_result contains_test(struct commit *candidate,
+                                         const struct commit_list *want,
+                                         struct contains_cache *cache,
+                                         uint32_t cutoff)
+{
+       enum contains_result *cached = contains_cache_at(cache, candidate);
+
+       /* If we already have the answer cached, return that. */
+       if (*cached)
+               return *cached;
+
+       /* or are we it? */
+       if (in_commit_list(want, candidate)) {
+               *cached = CONTAINS_YES;
+               return CONTAINS_YES;
+       }
+
+       /* Otherwise, we don't know; prepare to recurse */
+       parse_commit_or_die(candidate);
+
+       if (candidate->generation < cutoff)
+               return CONTAINS_NO;
+
+       return CONTAINS_UNKNOWN;
+}
+
+static void push_to_contains_stack(struct commit *candidate, struct contains_stack *contains_stack)
+{
+       ALLOC_GROW(contains_stack->contains_stack, contains_stack->nr + 1, contains_stack->alloc);
+       contains_stack->contains_stack[contains_stack->nr].commit = candidate;
+       contains_stack->contains_stack[contains_stack->nr++].parents = candidate->parents;
+}
+
+static enum contains_result contains_tag_algo(struct commit *candidate,
+                                             const struct commit_list *want,
+                                             struct contains_cache *cache)
+{
+       struct contains_stack contains_stack = { 0, 0, NULL };
+       enum contains_result result;
+       uint32_t cutoff = GENERATION_NUMBER_INFINITY;
+       const struct commit_list *p;
+
+       for (p = want; p; p = p->next) {
+               struct commit *c = p->item;
+               load_commit_graph_info(the_repository, c);
+               if (c->generation < cutoff)
+                       cutoff = c->generation;
+       }
+
+       result = contains_test(candidate, want, cache, cutoff);
+       if (result != CONTAINS_UNKNOWN)
+               return result;
+
+       push_to_contains_stack(candidate, &contains_stack);
+       while (contains_stack.nr) {
+               struct contains_stack_entry *entry = &contains_stack.contains_stack[contains_stack.nr - 1];
+               struct commit *commit = entry->commit;
+               struct commit_list *parents = entry->parents;
+
+               if (!parents) {
+                       *contains_cache_at(cache, commit) = CONTAINS_NO;
+                       contains_stack.nr--;
+               }
+               /*
+                * If we just popped the stack, parents->item has been marked,
+                * therefore contains_test will return a meaningful yes/no.
+                */
+               else switch (contains_test(parents->item, want, cache, cutoff)) {
+               case CONTAINS_YES:
+                       *contains_cache_at(cache, commit) = CONTAINS_YES;
+                       contains_stack.nr--;
+                       break;
+               case CONTAINS_NO:
+                       entry->parents = parents->next;
+                       break;
+               case CONTAINS_UNKNOWN:
+                       push_to_contains_stack(parents->item, &contains_stack);
+                       break;
+               }
+       }
+       free(contains_stack.contains_stack);
+       return contains_test(candidate, want, cache, cutoff);
+}
+
+int commit_contains(struct ref_filter *filter, struct commit *commit,
+                   struct commit_list *list, struct contains_cache *cache)
+{
+       if (filter->with_commit_tag_algo)
+               return contains_tag_algo(commit, list, cache) == CONTAINS_YES;
+       return is_descendant_of(commit, list);
+}
+
+static int compare_commits_by_gen(const void *_a, const void *_b)
+{
+       const struct commit *a = (const struct commit *)_a;
+       const struct commit *b = (const struct commit *)_b;
+
+       if (a->generation < b->generation)
+               return -1;
+       if (a->generation > b->generation)
+               return 1;
+       return 0;
+}
+
+int can_all_from_reach_with_flag(struct object_array *from,
+                                unsigned int with_flag,
+                                unsigned int assign_flag,
+                                time_t min_commit_date,
+                                uint32_t min_generation)
+{
+       struct commit **list = NULL;
+       int i;
+       int nr_commits;
+       int result = 1;
+
+       ALLOC_ARRAY(list, from->nr);
+       nr_commits = 0;
+       for (i = 0; i < from->nr; i++) {
+               struct object *from_one = from->objects[i].item;
+
+               if (!from_one || from_one->flags & assign_flag)
+                       continue;
+
+               from_one = deref_tag(the_repository, from_one,
+                                    "a from object", 0);
+               if (!from_one || from_one->type != OBJ_COMMIT) {
+                       /*
+                        * no way to tell if this is reachable by
+                        * looking at the ancestry chain alone, so
+                        * leave a note to ourselves not to worry about
+                        * this object anymore.
+                        */
+                       from->objects[i].item->flags |= assign_flag;
+                       continue;
+               }
+
+               list[nr_commits] = (struct commit *)from_one;
+               if (parse_commit(list[nr_commits]) ||
+                   list[nr_commits]->generation < min_generation) {
+                       result = 0;
+                       goto cleanup;
+               }
+
+               nr_commits++;
+       }
+
+       QSORT(list, nr_commits, compare_commits_by_gen);
+
+       for (i = 0; i < nr_commits; i++) {
+               /* DFS from list[i] */
+               struct commit_list *stack = NULL;
+
+               list[i]->object.flags |= assign_flag;
+               commit_list_insert(list[i], &stack);
+
+               while (stack) {
+                       struct commit_list *parent;
+
+                       if (stack->item->object.flags & with_flag) {
+                               pop_commit(&stack);
+                               continue;
+                       }
+
+                       for (parent = stack->item->parents; parent; parent = parent->next) {
+                               if (parent->item->object.flags & (with_flag | RESULT))
+                                       stack->item->object.flags |= RESULT;
+
+                               if (!(parent->item->object.flags & assign_flag)) {
+                                       parent->item->object.flags |= assign_flag;
+
+                                       if (parse_commit(parent->item) ||
+                                           parent->item->date < min_commit_date ||
+                                           parent->item->generation < min_generation)
+                                               continue;
+
+                                       commit_list_insert(parent->item, &stack);
+                                       break;
+                               }
+                       }
+
+                       if (!parent)
+                               pop_commit(&stack);
+               }
+
+               if (!(list[i]->object.flags & (with_flag | RESULT))) {
+                       result = 0;
+                       goto cleanup;
+               }
+       }
+
+cleanup:
+       clear_commit_marks_many(nr_commits, list, RESULT | assign_flag);
+       free(list);
+
+       for (i = 0; i < from->nr; i++)
+               from->objects[i].item->flags &= ~assign_flag;
+
+       return result;
+}
+
+int can_all_from_reach(struct commit_list *from, struct commit_list *to,
+                      int cutoff_by_min_date)
+{
+       struct object_array from_objs = OBJECT_ARRAY_INIT;
+       time_t min_commit_date = cutoff_by_min_date ? from->item->date : 0;
+       struct commit_list *from_iter = from, *to_iter = to;
+       int result;
+       uint32_t min_generation = GENERATION_NUMBER_INFINITY;
+
+       while (from_iter) {
+               add_object_array(&from_iter->item->object, NULL, &from_objs);
+
+               if (!parse_commit(from_iter->item)) {
+                       if (from_iter->item->date < min_commit_date)
+                               min_commit_date = from_iter->item->date;
+
+                       if (from_iter->item->generation < min_generation)
+                               min_generation = from_iter->item->generation;
+               }
+
+               from_iter = from_iter->next;
+       }
+
+       while (to_iter) {
+               if (!parse_commit(to_iter->item)) {
+                       if (to_iter->item->date < min_commit_date)
+                               min_commit_date = to_iter->item->date;
+
+                       if (to_iter->item->generation < min_generation)
+                               min_generation = to_iter->item->generation;
+               }
+
+               to_iter->item->object.flags |= PARENT2;
+
+               to_iter = to_iter->next;
+       }
+
+       result = can_all_from_reach_with_flag(&from_objs, PARENT2, PARENT1,
+                                             min_commit_date, min_generation);
+
+       while (from) {
+               clear_commit_marks(from->item, PARENT1);
+               from = from->next;
+       }
+
+       while (to) {
+               clear_commit_marks(to->item, PARENT2);
+               to = to->next;
+       }
+
+       object_array_clear(&from_objs);
+       return result;
+}
diff --git a/commit-reach.h b/commit-reach.h
new file mode 100644 (file)
index 0000000..7d313e2
--- /dev/null
@@ -0,0 +1,77 @@
+#ifndef __COMMIT_REACH_H__
+#define __COMMIT_REACH_H__
+
+#include "commit-slab.h"
+
+struct commit;
+struct commit_list;
+struct contains_cache;
+struct ref_filter;
+
+struct commit_list *get_merge_bases_many(struct commit *one,
+                                        int n,
+                                        struct commit **twos);
+struct commit_list *get_merge_bases_many_dirty(struct commit *one,
+                                              int n,
+                                              struct commit **twos);
+struct commit_list *get_merge_bases(struct commit *one, struct commit *two);
+struct commit_list *get_octopus_merge_bases(struct commit_list *in);
+
+/* To be used only when object flags after this call no longer matter */
+struct commit_list *get_merge_bases_many_dirty(struct commit *one, int n, struct commit **twos);
+
+int is_descendant_of(struct commit *commit, struct commit_list *with_commit);
+int in_merge_bases_many(struct commit *commit, int nr_reference, struct commit **reference);
+int in_merge_bases(struct commit *commit, struct commit *reference);
+
+/*
+ * Takes a list of commits and returns a new list where those
+ * have been removed that can be reached from other commits in
+ * the list. It is useful for, e.g., reducing the commits
+ * randomly thrown at the git-merge command and removing
+ * redundant commits that the user shouldn't have given to it.
+ *
+ * This function destroys the STALE bit of the commit objects'
+ * flags.
+ */
+struct commit_list *reduce_heads(struct commit_list *heads);
+
+/*
+ * Like `reduce_heads()`, except it replaces the list. Use this
+ * instead of `foo = reduce_heads(foo);` to avoid memory leaks.
+ */
+void reduce_heads_replace(struct commit_list **heads);
+
+int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid);
+
+/*
+ * Unknown has to be "0" here, because that's the default value for
+ * contains_cache slab entries that have not yet been assigned.
+ */
+enum contains_result {
+       CONTAINS_UNKNOWN = 0,
+       CONTAINS_NO,
+       CONTAINS_YES
+};
+
+define_commit_slab(contains_cache, enum contains_result);
+
+int commit_contains(struct ref_filter *filter, struct commit *commit,
+                   struct commit_list *list, struct contains_cache *cache);
+
+/*
+ * Determine if every commit in 'from' can reach at least one commit
+ * that is marked with 'with_flag'. As we traverse, use 'assign_flag'
+ * as a marker for commits that are already visited. Do not walk
+ * commits with date below 'min_commit_date' or generation below
+ * 'min_generation'.
+ */
+int can_all_from_reach_with_flag(struct object_array *from,
+                                unsigned int with_flag,
+                                unsigned int assign_flag,
+                                time_t min_commit_date,
+                                uint32_t min_generation);
+int can_all_from_reach(struct commit_list *from, struct commit_list *to,
+                      int commit_date_cutoff);
+
+#endif
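
commit-reach.h collects the reachability helpers that the hunks below drop from commit.c and commit.h; the prototypes are unchanged, callers merely include the new header. A hedged usage sketch (tip_a, tip_b and commit stand for already-looked-up commits and are hypothetical):

    #include "cache.h"
    #include "commit.h"
    #include "commit-reach.h"

    /* does "commit" descend from either tip? */
    static int reaches_either_tip(struct commit *commit,
                                  struct commit *tip_a, struct commit *tip_b)
    {
            struct commit_list *tips = NULL;
            int found;

            commit_list_insert(tip_a, &tips);
            commit_list_insert(tip_b, &tips);
            found = is_descendant_of(commit, tips);
            free_commit_list(tips);
            return found;
    }
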
index 449c1f4920cff631f5cf2479772c350c9b8b7325..dc8a39d52a1c31f979068a34bbdeeca14f3a7547 100644 (file)
--- a/commit.c
+++ b/commit.c
@@ -46,7 +46,7 @@ struct commit *lookup_commit_or_die(const struct object_id *oid, const char *ref
        struct commit *c = lookup_commit_reference(the_repository, oid);
        if (!c)
                die(_("could not parse %s"), ref_name);
-       if (oidcmp(oid, &c->object.oid)) {
+       if (!oideq(oid, &c->object.oid)) {
                warning(_("%s %s is not a commit!"),
                        ref_name, oid_to_hex(oid));
        }
@@ -209,7 +209,7 @@ static int read_graft_file(struct repository *r, const char *graft_file)
        return 0;
 }
 
-static void prepare_commit_graft(struct repository *r)
+void prepare_commit_graft(struct repository *r)
 {
        char *graft_file;
 
@@ -843,367 +843,6 @@ void sort_in_topological_order(struct commit_list **list, enum rev_sort_order so
                clear_author_date_slab(&author_date);
 }
 
-/* merge-base stuff */
-
-/* Remember to update object flag allocation in object.h */
-#define PARENT1                (1u<<16)
-#define PARENT2                (1u<<17)
-#define STALE          (1u<<18)
-#define RESULT         (1u<<19)
-
-static const unsigned all_flags = (PARENT1 | PARENT2 | STALE | RESULT);
-
-static int queue_has_nonstale(struct prio_queue *queue)
-{
-       int i;
-       for (i = 0; i < queue->nr; i++) {
-               struct commit *commit = queue->array[i].data;
-               if (!(commit->object.flags & STALE))
-                       return 1;
-       }
-       return 0;
-}
-
-/* all input commits in one and twos[] must have been parsed! */
-static struct commit_list *paint_down_to_common(struct commit *one, int n,
-                                               struct commit **twos,
-                                               int min_generation)
-{
-       struct prio_queue queue = { compare_commits_by_gen_then_commit_date };
-       struct commit_list *result = NULL;
-       int i;
-       uint32_t last_gen = GENERATION_NUMBER_INFINITY;
-
-       if (!min_generation)
-               queue.compare = compare_commits_by_commit_date;
-
-       one->object.flags |= PARENT1;
-       if (!n) {
-               commit_list_append(one, &result);
-               return result;
-       }
-       prio_queue_put(&queue, one);
-
-       for (i = 0; i < n; i++) {
-               twos[i]->object.flags |= PARENT2;
-               prio_queue_put(&queue, twos[i]);
-       }
-
-       while (queue_has_nonstale(&queue)) {
-               struct commit *commit = prio_queue_get(&queue);
-               struct commit_list *parents;
-               int flags;
-
-               if (min_generation && commit->generation > last_gen)
-                       BUG("bad generation skip %8x > %8x at %s",
-                           commit->generation, last_gen,
-                           oid_to_hex(&commit->object.oid));
-               last_gen = commit->generation;
-
-               if (commit->generation < min_generation)
-                       break;
-
-               flags = commit->object.flags & (PARENT1 | PARENT2 | STALE);
-               if (flags == (PARENT1 | PARENT2)) {
-                       if (!(commit->object.flags & RESULT)) {
-                               commit->object.flags |= RESULT;
-                               commit_list_insert_by_date(commit, &result);
-                       }
-                       /* Mark parents of a found merge stale */
-                       flags |= STALE;
-               }
-               parents = commit->parents;
-               while (parents) {
-                       struct commit *p = parents->item;
-                       parents = parents->next;
-                       if ((p->object.flags & flags) == flags)
-                               continue;
-                       if (parse_commit(p))
-                               return NULL;
-                       p->object.flags |= flags;
-                       prio_queue_put(&queue, p);
-               }
-       }
-
-       clear_prio_queue(&queue);
-       return result;
-}
-
-static struct commit_list *merge_bases_many(struct commit *one, int n, struct commit **twos)
-{
-       struct commit_list *list = NULL;
-       struct commit_list *result = NULL;
-       int i;
-
-       for (i = 0; i < n; i++) {
-               if (one == twos[i])
-                       /*
-                        * We do not mark this even with RESULT so we do not
-                        * have to clean it up.
-                        */
-                       return commit_list_insert(one, &result);
-       }
-
-       if (parse_commit(one))
-               return NULL;
-       for (i = 0; i < n; i++) {
-               if (parse_commit(twos[i]))
-                       return NULL;
-       }
-
-       list = paint_down_to_common(one, n, twos, 0);
-
-       while (list) {
-               struct commit *commit = pop_commit(&list);
-               if (!(commit->object.flags & STALE))
-                       commit_list_insert_by_date(commit, &result);
-       }
-       return result;
-}
-
-struct commit_list *get_octopus_merge_bases(struct commit_list *in)
-{
-       struct commit_list *i, *j, *k, *ret = NULL;
-
-       if (!in)
-               return ret;
-
-       commit_list_insert(in->item, &ret);
-
-       for (i = in->next; i; i = i->next) {
-               struct commit_list *new_commits = NULL, *end = NULL;
-
-               for (j = ret; j; j = j->next) {
-                       struct commit_list *bases;
-                       bases = get_merge_bases(i->item, j->item);
-                       if (!new_commits)
-                               new_commits = bases;
-                       else
-                               end->next = bases;
-                       for (k = bases; k; k = k->next)
-                               end = k;
-               }
-               ret = new_commits;
-       }
-       return ret;
-}
-
-static int remove_redundant(struct commit **array, int cnt)
-{
-       /*
-        * Some commit in the array may be an ancestor of
-        * another commit.  Move such commit to the end of
-        * the array, and return the number of commits that
-        * are independent from each other.
-        */
-       struct commit **work;
-       unsigned char *redundant;
-       int *filled_index;
-       int i, j, filled;
-
-       work = xcalloc(cnt, sizeof(*work));
-       redundant = xcalloc(cnt, 1);
-       ALLOC_ARRAY(filled_index, cnt - 1);
-
-       for (i = 0; i < cnt; i++)
-               parse_commit(array[i]);
-       for (i = 0; i < cnt; i++) {
-               struct commit_list *common;
-               uint32_t min_generation = array[i]->generation;
-
-               if (redundant[i])
-                       continue;
-               for (j = filled = 0; j < cnt; j++) {
-                       if (i == j || redundant[j])
-                               continue;
-                       filled_index[filled] = j;
-                       work[filled++] = array[j];
-
-                       if (array[j]->generation < min_generation)
-                               min_generation = array[j]->generation;
-               }
-               common = paint_down_to_common(array[i], filled, work,
-                                             min_generation);
-               if (array[i]->object.flags & PARENT2)
-                       redundant[i] = 1;
-               for (j = 0; j < filled; j++)
-                       if (work[j]->object.flags & PARENT1)
-                               redundant[filled_index[j]] = 1;
-               clear_commit_marks(array[i], all_flags);
-               clear_commit_marks_many(filled, work, all_flags);
-               free_commit_list(common);
-       }
-
-       /* Now collect the result */
-       COPY_ARRAY(work, array, cnt);
-       for (i = filled = 0; i < cnt; i++)
-               if (!redundant[i])
-                       array[filled++] = work[i];
-       for (j = filled, i = 0; i < cnt; i++)
-               if (redundant[i])
-                       array[j++] = work[i];
-       free(work);
-       free(redundant);
-       free(filled_index);
-       return filled;
-}
-
-static struct commit_list *get_merge_bases_many_0(struct commit *one,
-                                                 int n,
-                                                 struct commit **twos,
-                                                 int cleanup)
-{
-       struct commit_list *list;
-       struct commit **rslt;
-       struct commit_list *result;
-       int cnt, i;
-
-       result = merge_bases_many(one, n, twos);
-       for (i = 0; i < n; i++) {
-               if (one == twos[i])
-                       return result;
-       }
-       if (!result || !result->next) {
-               if (cleanup) {
-                       clear_commit_marks(one, all_flags);
-                       clear_commit_marks_many(n, twos, all_flags);
-               }
-               return result;
-       }
-
-       /* There are more than one */
-       cnt = commit_list_count(result);
-       rslt = xcalloc(cnt, sizeof(*rslt));
-       for (list = result, i = 0; list; list = list->next)
-               rslt[i++] = list->item;
-       free_commit_list(result);
-
-       clear_commit_marks(one, all_flags);
-       clear_commit_marks_many(n, twos, all_flags);
-
-       cnt = remove_redundant(rslt, cnt);
-       result = NULL;
-       for (i = 0; i < cnt; i++)
-               commit_list_insert_by_date(rslt[i], &result);
-       free(rslt);
-       return result;
-}
-
-struct commit_list *get_merge_bases_many(struct commit *one,
-                                        int n,
-                                        struct commit **twos)
-{
-       return get_merge_bases_many_0(one, n, twos, 1);
-}
-
-struct commit_list *get_merge_bases_many_dirty(struct commit *one,
-                                              int n,
-                                              struct commit **twos)
-{
-       return get_merge_bases_many_0(one, n, twos, 0);
-}
-
-struct commit_list *get_merge_bases(struct commit *one, struct commit *two)
-{
-       return get_merge_bases_many_0(one, 1, &two, 1);
-}
-
-/*
- * Is "commit" a descendant of one of the elements on the "with_commit" list?
- */
-int is_descendant_of(struct commit *commit, struct commit_list *with_commit)
-{
-       if (!with_commit)
-               return 1;
-       while (with_commit) {
-               struct commit *other;
-
-               other = with_commit->item;
-               with_commit = with_commit->next;
-               if (in_merge_bases(other, commit))
-                       return 1;
-       }
-       return 0;
-}
-
-/*
- * Is "commit" an ancestor of one of the "references"?
- */
-int in_merge_bases_many(struct commit *commit, int nr_reference, struct commit **reference)
-{
-       struct commit_list *bases;
-       int ret = 0, i;
-       uint32_t min_generation = GENERATION_NUMBER_INFINITY;
-
-       if (parse_commit(commit))
-               return ret;
-       for (i = 0; i < nr_reference; i++) {
-               if (parse_commit(reference[i]))
-                       return ret;
-               if (reference[i]->generation < min_generation)
-                       min_generation = reference[i]->generation;
-       }
-
-       if (commit->generation > min_generation)
-               return ret;
-
-       bases = paint_down_to_common(commit, nr_reference, reference, commit->generation);
-       if (commit->object.flags & PARENT2)
-               ret = 1;
-       clear_commit_marks(commit, all_flags);
-       clear_commit_marks_many(nr_reference, reference, all_flags);
-       free_commit_list(bases);
-       return ret;
-}
-
-/*
- * Is "commit" an ancestor of (i.e. reachable from) the "reference"?
- */
-int in_merge_bases(struct commit *commit, struct commit *reference)
-{
-       return in_merge_bases_many(commit, 1, &reference);
-}
-
-struct commit_list *reduce_heads(struct commit_list *heads)
-{
-       struct commit_list *p;
-       struct commit_list *result = NULL, **tail = &result;
-       struct commit **array;
-       int num_head, i;
-
-       if (!heads)
-               return NULL;
-
-       /* Uniquify */
-       for (p = heads; p; p = p->next)
-               p->item->object.flags &= ~STALE;
-       for (p = heads, num_head = 0; p; p = p->next) {
-               if (p->item->object.flags & STALE)
-                       continue;
-               p->item->object.flags |= STALE;
-               num_head++;
-       }
-       array = xcalloc(num_head, sizeof(*array));
-       for (p = heads, i = 0; p; p = p->next) {
-               if (p->item->object.flags & STALE) {
-                       array[i++] = p->item;
-                       p->item->object.flags &= ~STALE;
-               }
-       }
-       num_head = remove_redundant(array, num_head);
-       for (i = 0; i < num_head; i++)
-               tail = &commit_list_insert(array[i], tail)->next;
-       free(array);
-       return result;
-}
-
-void reduce_heads_replace(struct commit_list **heads)
-{
-       struct commit_list *result = reduce_heads(*heads);
-       free_commit_list(*heads);
-       *heads = result;
-}
-
 static const char gpg_sig_header[] = "gpgsig";
 static const int gpg_sig_header_len = sizeof(gpg_sig_header) - 1;
 
@@ -1787,10 +1426,10 @@ const char *find_commit_header(const char *msg, const char *key, size_t *out_len
  * Returns the number of bytes from the tail to ignore, to be fed as
  * the second parameter to append_signoff().
  */
-int ignore_non_trailer(const char *buf, size_t len)
+size_t ignore_non_trailer(const char *buf, size_t len)
 {
-       int boc = 0;
-       int bol = 0;
+       size_t boc = 0;
+       size_t bol = 0;
        int in_old_conflicts_block = 0;
        size_t cutoff = wt_status_locate_end(buf, len);
 
index da0db36eba2bf16277dbb7aadcb3384e29191ecc..1d260d62f57a24864986252892faa89c17572210 100644 (file)
--- a/commit.h
+++ b/commit.h
@@ -202,15 +202,9 @@ typedef int (*each_commit_graft_fn)(const struct commit_graft *, void *);
 
 struct commit_graft *read_graft_line(struct strbuf *line);
 int register_commit_graft(struct repository *r, struct commit_graft *, int);
+void prepare_commit_graft(struct repository *r);
 struct commit_graft *lookup_commit_graft(struct repository *r, const struct object_id *oid);
 
-extern struct commit_list *get_merge_bases(struct commit *rev1, struct commit *rev2);
-extern struct commit_list *get_merge_bases_many(struct commit *one, int n, struct commit **twos);
-extern struct commit_list *get_octopus_merge_bases(struct commit_list *in);
-
-/* To be used only when object flags after this call no longer matter */
-extern struct commit_list *get_merge_bases_many_dirty(struct commit *one, int n, struct commit **twos);
-
 /* largest positive number a signed 32-bit integer can contain */
 #define INFINITE_DEPTH 0x7fffffff
 
@@ -258,32 +252,10 @@ extern int delayed_reachability_test(struct shallow_info *si, int c);
 extern void prune_shallow(int show_only);
 extern struct trace_key trace_shallow;
 
-int is_descendant_of(struct commit *, struct commit_list *);
-int in_merge_bases(struct commit *, struct commit *);
-int in_merge_bases_many(struct commit *, int, struct commit **);
-
 extern int interactive_add(int argc, const char **argv, const char *prefix, int patch);
 extern int run_add_interactive(const char *revision, const char *patch_mode,
                               const struct pathspec *pathspec);
 
-/*
- * Takes a list of commits and returns a new list where those
- * have been removed that can be reached from other commits in
- * the list. It is useful for, e.g., reducing the commits
- * randomly thrown at the git-merge command and removing
- * redundant commits that the user shouldn't have given to it.
- *
- * This function destroys the STALE bit of the commit objects'
- * flags.
- */
-extern struct commit_list *reduce_heads(struct commit_list *heads);
-
-/*
- * Like `reduce_heads()`, except it replaces the list. Use this
- * instead of `foo = reduce_heads(foo);` to avoid memory leaks.
- */
-extern void reduce_heads_replace(struct commit_list **heads);
-
 struct commit_extra_header {
        struct commit_extra_header *next;
        char *key;
@@ -322,7 +294,7 @@ extern const char *find_commit_header(const char *msg, const char *key,
                                      size_t *out_len);
 
 /* Find the end of the log message, the right place for a new trailer. */
-extern int ignore_non_trailer(const char *buf, size_t len);
+extern size_t ignore_non_trailer(const char *buf, size_t len);
 
 typedef int (*each_mergetag_fn)(struct commit *commit, struct commit_extra_header *extra,
                                 void *cb_data);
index 858ca14a57351062d2bba147c72ed460fb0aa533..18caf21969a5917e8e6c6d754beffbba2741e6c2 100644 (file)
@@ -341,6 +341,19 @@ int mingw_mkdir(const char *path, int mode)
        return ret;
 }
 
+/*
+ * Calling CreateFile() using FILE_APPEND_DATA and without FILE_WRITE_DATA
+ * is documented in [1] as opening a writable file handle in append mode.
+ * (It is believed that) this is atomic since it is maintained by the
+ * kernel unlike the O_APPEND flag which is racily maintained by the CRT.
+ *
+ * [1] https://docs.microsoft.com/en-us/windows/desktop/fileio/file-access-rights-constants
+ *
+ * This trick does not appear to work for named pipes.  Instead it creates
+ * a named pipe client handle that cannot be written to.  Callers should
+ * just use the regular _wopen() for them.  (And since the client handle
+ * gets bound to a unique server handle, it isn't really an issue.)
+ */
 static int mingw_open_append(wchar_t const *wfilename, int oflags, ...)
 {
        HANDLE handle;
@@ -360,10 +373,12 @@ static int mingw_open_append(wchar_t const *wfilename, int oflags, ...)
                        NULL, create, FILE_ATTRIBUTE_NORMAL, NULL);
        if (handle == INVALID_HANDLE_VALUE)
                return errno = err_win_to_posix(GetLastError()), -1;
+
        /*
         * No O_APPEND here, because the CRT uses it only to reset the
-        * file pointer to EOF on write(); but that is not necessary
-        * for a file created with FILE_APPEND_DATA.
+        * file pointer to EOF before each write(); but that is not
+        * necessary (and may lead to races) for a file created with
+        * FILE_APPEND_DATA.
         */
        fd = _open_osfhandle((intptr_t)handle, O_BINARY);
        if (fd < 0)
@@ -371,6 +386,21 @@ static int mingw_open_append(wchar_t const *wfilename, int oflags, ...)
        return fd;
 }
 
+/*
+ * Does the pathname map to the local named pipe filesystem?
+ * That is, does it have a "//./pipe/" prefix?
+ */
+static int is_local_named_pipe_path(const char *filename)
+{
+       return (is_dir_sep(filename[0]) &&
+               is_dir_sep(filename[1]) &&
+               filename[2] == '.'  &&
+               is_dir_sep(filename[3]) &&
+               !strncasecmp(filename+4, "pipe", 4) &&
+               is_dir_sep(filename[8]) &&
+               filename[9]);
+}
+
 int mingw_open (const char *filename, int oflags, ...)
 {
        typedef int (*open_fn_t)(wchar_t const *wfilename, int oflags, ...);
@@ -387,7 +417,7 @@ int mingw_open (const char *filename, int oflags, ...)
        if (filename && !strcmp(filename, "/dev/null"))
                filename = "nul";
 
-       if (oflags & O_APPEND)
+       if ((oflags & O_APPEND) && !is_local_named_pipe_path(filename))
                open_fn = mingw_open_append;
        else
                open_fn = _wopen;
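
is_local_named_pipe_path() only needs to recognize the "//./pipe/<name>" form, with either slash direction and "pipe" matched case-insensitively, so that O_APPEND opens of local named pipes fall back to _wopen(). Illustrative calls, with the expected results as comments (not part of the patch):

    is_local_named_pipe_path("//./pipe/my-pipe");     /* matches */
    is_local_named_pipe_path("\\\\.\\pipe\\my-pipe"); /* matches; backslashes are directory separators here */
    is_local_named_pipe_path("//./PIPE/x");           /* matches; "pipe" is compared case-insensitively */
    is_local_named_pipe_path("//./pipe/");            /* no match; the pipe name is missing */
    is_local_named_pipe_path("/tmp/some-file");       /* no match */
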
index 9a998149d99a8eeebf311d5e84d43f146ed0f90d..92d268137f61e825c1529e5eedca9e39c539dce2 100644 (file)
@@ -7,6 +7,7 @@ CFLAGS += -pedantic
 CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=0
 endif
 CFLAGS += -Wdeclaration-after-statement
+CFLAGS += -Wformat-security
 CFLAGS += -Wno-format-zero-length
 CFLAGS += -Wold-style-definition
 CFLAGS += -Woverflow
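
-Wformat-security makes the compiler warn when a format function receives a non-literal format string and no format arguments, the classic printf(msg) mistake. A small illustration of the kind of call it flags (hypothetical code, not from this series):

    #include <stdio.h>

    void report(const char *msg)
    {
            printf(msg);        /* warning: format not a string literal and no format arguments */
            printf("%s", msg);  /* fine: constant format, data passed as an argument */
    }
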
index 94547e5056b008d9cd9d00680a795d833e1f0528..24281b608284ee74b262237c467ff054874d8a8e 100644 (file)
--- a/connect.c
+++ b/connect.c
@@ -224,7 +224,7 @@ static int process_dummy_ref(const char *line)
                return 0;
        name++;
 
-       return !oidcmp(&null_oid, &oid) && !strcmp(name, "capabilities^{}");
+       return oideq(&null_oid, &oid) && !strcmp(name, "capabilities^{}");
 }
 
 static void check_no_capabilities(const char *line, int len)
index aec3345adb4f0fb83b511335e0727f1097f97e29..c49aa558f0fe6b74b9d4d4a7779f0a9f32747388 100644 (file)
@@ -15,10 +15,10 @@ expression c;
 identifier f !~ "^(get_commit_tree|get_commit_tree_in_graph_one|load_tree_for_commit)$";
 expression c;
 @@
-  f(...) {...
+  f(...) {<...
 - c->maybe_tree
 + get_commit_tree(c)
-  ...}
+  ...>}
 
 @@
 expression c;
index 09afdbf99462166e45ed623fe70a849e59045a3b..d8bdb4871273da92d937d30a787d8b149fbe1552 100644 (file)
@@ -20,10 +20,10 @@ expression E1;
 identifier f != oid_to_hex;
 expression E1;
 @@
-  f(...) {...
+  f(...) {<...
 - sha1_to_hex(E1->hash)
 + oid_to_hex(E1)
-  ...}
+  ...>}
 
 @@
 expression E1, E2;
@@ -35,10 +35,10 @@ expression E1, E2;
 identifier f != oid_to_hex_r;
 expression E1, E2;
 @@
-   f(...) {...
+   f(...) {<...
 - sha1_to_hex_r(E1, E2->hash)
 + oid_to_hex_r(E1, E2)
-  ...}
+  ...>}
 
 @@
 expression E1;
@@ -50,10 +50,10 @@ expression E1;
 identifier f != oidclr;
 expression E1;
 @@
-  f(...) {...
+  f(...) {<...
 - hashclr(E1->hash)
 + oidclr(E1)
-  ...}
+  ...>}
 
 @@
 expression E1, E2;
@@ -65,10 +65,10 @@ expression E1, E2;
 identifier f != oidcmp;
 expression E1, E2;
 @@
-  f(...) {...
+  f(...) {<...
 - hashcmp(E1->hash, E2->hash)
 + oidcmp(E1, E2)
-  ...}
+  ...>}
 
 @@
 expression E1, E2;
@@ -92,10 +92,10 @@ expression E1, E2;
 identifier f != oidcpy;
 expression E1, E2;
 @@
-  f(...) {...
+  f(...) {<...
 - hashcpy(E1->hash, E2->hash)
 + oidcpy(E1, E2)
-  ...}
+  ...>}
 
 @@
 expression E1, E2;
@@ -108,3 +108,33 @@ expression E1, E2;
 @@
 - hashcpy(E1.hash, E2->hash)
 + oidcpy(&E1, E2)
+
+@@
+expression E1, E2;
+@@
+- oidcmp(E1, E2) == 0
++ oideq(E1, E2)
+
+@@
+identifier f != hasheq;
+expression E1, E2;
+@@
+  f(...) {<...
+- hashcmp(E1, E2) == 0
++ hasheq(E1, E2)
+  ...>}
+
+@@
+expression E1, E2;
+@@
+- oidcmp(E1, E2) != 0
++ !oideq(E1, E2)
+
+@@
+identifier f != hasheq;
+expression E1, E2;
+@@
+  f(...) {<...
+- hashcmp(E1, E2) != 0
++ !hasheq(E1, E2)
+  ...>}
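The new rules rewrite equality tests that were spelled via the comparison helpers into the dedicated equality helpers. The sketch below is standalone and uses simplified stand-ins for git's oidcmp()/oideq() (the real definitions live in git's headers and honor the active hash size); it only illustrates the before/after spelling the semantic patch produces.

    #include <stdio.h>
    #include <string.h>

    #define RAWSZ 20
    struct object_id { unsigned char hash[RAWSZ]; };

    /* Simplified stand-ins with the same semantics as git's helpers. */
    static int oidcmp(const struct object_id *a, const struct object_id *b)
    {
            return memcmp(a->hash, b->hash, RAWSZ);
    }
    static int oideq(const struct object_id *a, const struct object_id *b)
    {
            return !memcmp(a->hash, b->hash, RAWSZ);
    }

    int main(void)
    {
            struct object_id a = { { 0 } }, b = { { 0 } };

            if (!oidcmp(&a, &b))    /* old spelling: equality via a compare result */
                    puts("equal (old spelling)");
            if (oideq(&a, &b))      /* new spelling produced by the rule above */
                    puts("equal (new spelling)");
            return 0;
    }

The "f != oideq" / "f != hasheq" constraints keep the rules from rewriting the helpers' own definitions, and, as I read Coccinelle's semantics, the "{<... ...>}" nest matches any number of occurrences anywhere in a function body, where the earlier "{... ...}" form was stricter about where the match could sit.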
index 6057f1f58015ad7cae9792309dd635a1e7933586..e0848226d21bbbeb26d9ac6217fe1e61ccc71fca 100644 (file)
--- a/convert.c
+++ b/convert.c
@@ -1297,6 +1297,7 @@ static void convert_attrs(const struct index_state *istate,
                          struct conv_attrs *ca, const char *path)
 {
        static struct attr_check *check;
+       struct attr_check_item *ccheck = NULL;
 
        if (!check) {
                check = attr_check_initl("crlf", "ident", "filter",
@@ -1306,30 +1307,25 @@ static void convert_attrs(const struct index_state *istate,
                git_config(read_convert_config, NULL);
        }
 
-       if (!git_check_attr(istate, path, check)) {
-               struct attr_check_item *ccheck = check->items;
-               ca->crlf_action = git_path_check_crlf(ccheck + 4);
-               if (ca->crlf_action == CRLF_UNDEFINED)
-                       ca->crlf_action = git_path_check_crlf(ccheck + 0);
-               ca->ident = git_path_check_ident(ccheck + 1);
-               ca->drv = git_path_check_convert(ccheck + 2);
-               if (ca->crlf_action != CRLF_BINARY) {
-                       enum eol eol_attr = git_path_check_eol(ccheck + 3);
-                       if (ca->crlf_action == CRLF_AUTO && eol_attr == EOL_LF)
-                               ca->crlf_action = CRLF_AUTO_INPUT;
-                       else if (ca->crlf_action == CRLF_AUTO && eol_attr == EOL_CRLF)
-                               ca->crlf_action = CRLF_AUTO_CRLF;
-                       else if (eol_attr == EOL_LF)
-                               ca->crlf_action = CRLF_TEXT_INPUT;
-                       else if (eol_attr == EOL_CRLF)
-                               ca->crlf_action = CRLF_TEXT_CRLF;
-               }
-               ca->working_tree_encoding = git_path_check_encoding(ccheck + 5);
-       } else {
-               ca->drv = NULL;
-               ca->crlf_action = CRLF_UNDEFINED;
-               ca->ident = 0;
+       git_check_attr(istate, path, check);
+       ccheck = check->items;
+       ca->crlf_action = git_path_check_crlf(ccheck + 4);
+       if (ca->crlf_action == CRLF_UNDEFINED)
+               ca->crlf_action = git_path_check_crlf(ccheck + 0);
+       ca->ident = git_path_check_ident(ccheck + 1);
+       ca->drv = git_path_check_convert(ccheck + 2);
+       if (ca->crlf_action != CRLF_BINARY) {
+               enum eol eol_attr = git_path_check_eol(ccheck + 3);
+               if (ca->crlf_action == CRLF_AUTO && eol_attr == EOL_LF)
+                       ca->crlf_action = CRLF_AUTO_INPUT;
+               else if (ca->crlf_action == CRLF_AUTO && eol_attr == EOL_CRLF)
+                       ca->crlf_action = CRLF_AUTO_CRLF;
+               else if (eol_attr == EOL_LF)
+                       ca->crlf_action = CRLF_TEXT_INPUT;
+               else if (eol_attr == EOL_CRLF)
+                       ca->crlf_action = CRLF_TEXT_CRLF;
        }
+       ca->working_tree_encoding = git_path_check_encoding(ccheck + 5);
 
        /* Save attr and make a decision for action */
        ca->attr_action = ca->crlf_action;
diff --git a/delta-islands.c b/delta-islands.c
new file mode 100644 (file)
index 0000000..8e5018e
--- /dev/null
@@ -0,0 +1,502 @@
+#include "cache.h"
+#include "attr.h"
+#include "object.h"
+#include "blob.h"
+#include "commit.h"
+#include "tag.h"
+#include "tree.h"
+#include "delta.h"
+#include "pack.h"
+#include "tree-walk.h"
+#include "diff.h"
+#include "revision.h"
+#include "list-objects.h"
+#include "progress.h"
+#include "refs.h"
+#include "khash.h"
+#include "pack-bitmap.h"
+#include "pack-objects.h"
+#include "delta-islands.h"
+#include "sha1-array.h"
+#include "config.h"
+
+KHASH_INIT(str, const char *, void *, 1, kh_str_hash_func, kh_str_hash_equal)
+
+static khash_sha1 *island_marks;
+static unsigned island_counter;
+static unsigned island_counter_core;
+
+static kh_str_t *remote_islands;
+
+struct remote_island {
+       uint64_t hash;
+       struct oid_array oids;
+};
+
+struct island_bitmap {
+       uint32_t refcount;
+       uint32_t bits[FLEX_ARRAY];
+};
+
+static uint32_t island_bitmap_size;
+
+/*
+ * Allocate a new bitmap; if "old" is not NULL, the new bitmap will be a copy
+ * of "old". Otherwise, the new bitmap is empty.
+ */
+static struct island_bitmap *island_bitmap_new(const struct island_bitmap *old)
+{
+       size_t size = sizeof(struct island_bitmap) + (island_bitmap_size * 4);
+       struct island_bitmap *b = xcalloc(1, size);
+
+       if (old)
+               memcpy(b, old, size);
+
+       b->refcount = 1;
+       return b;
+}
+
+static void island_bitmap_or(struct island_bitmap *a, const struct island_bitmap *b)
+{
+       uint32_t i;
+
+       for (i = 0; i < island_bitmap_size; ++i)
+               a->bits[i] |= b->bits[i];
+}
+
+static int island_bitmap_is_subset(struct island_bitmap *self,
+               struct island_bitmap *super)
+{
+       uint32_t i;
+
+       if (self == super)
+               return 1;
+
+       for (i = 0; i < island_bitmap_size; ++i) {
+               if ((self->bits[i] & super->bits[i]) != self->bits[i])
+                       return 0;
+       }
+
+       return 1;
+}
+
+#define ISLAND_BITMAP_BLOCK(x) (x / 32)
+#define ISLAND_BITMAP_MASK(x) (1 << (x % 32))
+
+static void island_bitmap_set(struct island_bitmap *self, uint32_t i)
+{
+       self->bits[ISLAND_BITMAP_BLOCK(i)] |= ISLAND_BITMAP_MASK(i);
+}
+
+static int island_bitmap_get(struct island_bitmap *self, uint32_t i)
+{
+       return (self->bits[ISLAND_BITMAP_BLOCK(i)] & ISLAND_BITMAP_MASK(i)) != 0;
+}
+
+int in_same_island(const struct object_id *trg_oid, const struct object_id *src_oid)
+{
+       khiter_t trg_pos, src_pos;
+
+       /* If we aren't using islands, assume everything goes together. */
+       if (!island_marks)
+               return 1;
+
+       /*
+        * If we don't have a bitmap for the target, we can delta it
+        * against anything -- it's not an important object
+        */
+       trg_pos = kh_get_sha1(island_marks, trg_oid->hash);
+       if (trg_pos >= kh_end(island_marks))
+               return 1;
+
+       /*
+        * if the source (our delta base) doesn't have a bitmap,
+        * we don't want to base any deltas on it!
+        */
+       src_pos = kh_get_sha1(island_marks, src_oid->hash);
+       if (src_pos >= kh_end(island_marks))
+               return 0;
+
+       return island_bitmap_is_subset(kh_value(island_marks, trg_pos),
+                               kh_value(island_marks, src_pos));
+}
+
+int island_delta_cmp(const struct object_id *a, const struct object_id *b)
+{
+       khiter_t a_pos, b_pos;
+       struct island_bitmap *a_bitmap = NULL, *b_bitmap = NULL;
+
+       if (!island_marks)
+               return 0;
+
+       a_pos = kh_get_sha1(island_marks, a->hash);
+       if (a_pos < kh_end(island_marks))
+               a_bitmap = kh_value(island_marks, a_pos);
+
+       b_pos = kh_get_sha1(island_marks, b->hash);
+       if (b_pos < kh_end(island_marks))
+               b_bitmap = kh_value(island_marks, b_pos);
+
+       if (a_bitmap) {
+               if (!b_bitmap || !island_bitmap_is_subset(a_bitmap, b_bitmap))
+                       return -1;
+       }
+       if (b_bitmap) {
+               if (!a_bitmap || !island_bitmap_is_subset(b_bitmap, a_bitmap))
+                       return 1;
+       }
+
+       return 0;
+}
+
+static struct island_bitmap *create_or_get_island_marks(struct object *obj)
+{
+       khiter_t pos;
+       int hash_ret;
+
+       pos = kh_put_sha1(island_marks, obj->oid.hash, &hash_ret);
+       if (hash_ret)
+               kh_value(island_marks, pos) = island_bitmap_new(NULL);
+
+       return kh_value(island_marks, pos);
+}
+
+static void set_island_marks(struct object *obj, struct island_bitmap *marks)
+{
+       struct island_bitmap *b;
+       khiter_t pos;
+       int hash_ret;
+
+       pos = kh_put_sha1(island_marks, obj->oid.hash, &hash_ret);
+       if (hash_ret) {
+               /*
+                * We don't have one yet; make a copy-on-write of the
+                * parent.
+                */
+               marks->refcount++;
+               kh_value(island_marks, pos) = marks;
+               return;
+       }
+
+       /*
+        * We do have it. Make sure we split any copy-on-write before
+        * updating.
+        */
+       b = kh_value(island_marks, pos);
+       if (b->refcount > 1) {
+               b->refcount--;
+               b = kh_value(island_marks, pos) = island_bitmap_new(b);
+       }
+       island_bitmap_or(b, marks);
+}
+
+static void mark_remote_island_1(struct remote_island *rl, int is_core_island)
+{
+       uint32_t i;
+
+       for (i = 0; i < rl->oids.nr; ++i) {
+               struct island_bitmap *marks;
+               struct object *obj = parse_object(the_repository, &rl->oids.oid[i]);
+
+               if (!obj)
+                       continue;
+
+               marks = create_or_get_island_marks(obj);
+               island_bitmap_set(marks, island_counter);
+
+               if (is_core_island && obj->type == OBJ_COMMIT)
+                       obj->flags |= NEEDS_BITMAP;
+
+               /* If it was a tag, also make sure we hit the underlying object. */
+               while (obj && obj->type == OBJ_TAG) {
+                       obj = ((struct tag *)obj)->tagged;
+                       if (obj) {
+                               parse_object(the_repository, &obj->oid);
+                               marks = create_or_get_island_marks(obj);
+                               island_bitmap_set(marks, island_counter);
+                       }
+               }
+       }
+
+       if (is_core_island)
+               island_counter_core = island_counter;
+
+       island_counter++;
+}
+
+struct tree_islands_todo {
+       struct object_entry *entry;
+       unsigned int depth;
+};
+
+static int tree_depth_compare(const void *a, const void *b)
+{
+       const struct tree_islands_todo *todo_a = a;
+       const struct tree_islands_todo *todo_b = b;
+
+       return todo_a->depth - todo_b->depth;
+}
+
+void resolve_tree_islands(int progress, struct packing_data *to_pack)
+{
+       struct progress *progress_state = NULL;
+       struct tree_islands_todo *todo;
+       int nr = 0;
+       int i;
+
+       if (!island_marks)
+               return;
+
+       /*
+        * We process only trees, as commits and tags have already been handled
+        * (and passed their marks on to root trees, as well). We must make sure
+        * to process them in descending tree-depth order so that marks
+        * propagate down the tree properly, even if a sub-tree is found in
+        * multiple parent trees.
+        */
+       ALLOC_ARRAY(todo, to_pack->nr_objects);
+       for (i = 0; i < to_pack->nr_objects; i++) {
+               if (oe_type(&to_pack->objects[i]) == OBJ_TREE) {
+                       todo[nr].entry = &to_pack->objects[i];
+                       todo[nr].depth = oe_tree_depth(to_pack, &to_pack->objects[i]);
+                       nr++;
+               }
+       }
+       QSORT(todo, nr, tree_depth_compare);
+
+       if (progress)
+               progress_state = start_progress(_("Propagating island marks"), nr);
+
+       for (i = 0; i < nr; i++) {
+               struct object_entry *ent = todo[i].entry;
+               struct island_bitmap *root_marks;
+               struct tree *tree;
+               struct tree_desc desc;
+               struct name_entry entry;
+               khiter_t pos;
+
+               pos = kh_get_sha1(island_marks, ent->idx.oid.hash);
+               if (pos >= kh_end(island_marks))
+                       continue;
+
+               root_marks = kh_value(island_marks, pos);
+
+               tree = lookup_tree(the_repository, &ent->idx.oid);
+               if (!tree || parse_tree(tree) < 0)
+                       die(_("bad tree object %s"), oid_to_hex(&ent->idx.oid));
+
+               init_tree_desc(&desc, tree->buffer, tree->size);
+               while (tree_entry(&desc, &entry)) {
+                       struct object *obj;
+
+                       if (S_ISGITLINK(entry.mode))
+                               continue;
+
+                       obj = lookup_object(the_repository, entry.oid->hash);
+                       if (!obj)
+                               continue;
+
+                       set_island_marks(obj, root_marks);
+               }
+
+               free_tree_buffer(tree);
+
+               display_progress(progress_state, i+1);
+       }
+
+       stop_progress(&progress_state);
+       free(todo);
+}
+
+static regex_t *island_regexes;
+static unsigned int island_regexes_alloc, island_regexes_nr;
+static const char *core_island_name;
+
+static int island_config_callback(const char *k, const char *v, void *cb)
+{
+       if (!strcmp(k, "pack.island")) {
+               struct strbuf re = STRBUF_INIT;
+
+               if (!v)
+                       return config_error_nonbool(k);
+
+               ALLOC_GROW(island_regexes, island_regexes_nr + 1, island_regexes_alloc);
+
+               if (*v != '^')
+                       strbuf_addch(&re, '^');
+               strbuf_addstr(&re, v);
+
+               if (regcomp(&island_regexes[island_regexes_nr], re.buf, REG_EXTENDED))
+                       die(_("failed to load island regex for '%s': %s"), k, re.buf);
+
+               strbuf_release(&re);
+               island_regexes_nr++;
+               return 0;
+       }
+
+       if (!strcmp(k, "pack.islandcore"))
+               return git_config_string(&core_island_name, k, v);
+
+       return 0;
+}
+
+static void add_ref_to_island(const char *island_name, const struct object_id *oid)
+{
+       uint64_t sha_core;
+       struct remote_island *rl = NULL;
+
+       int hash_ret;
+       khiter_t pos = kh_put_str(remote_islands, island_name, &hash_ret);
+
+       if (hash_ret) {
+               kh_key(remote_islands, pos) = xstrdup(island_name);
+               kh_value(remote_islands, pos) = xcalloc(1, sizeof(struct remote_island));
+       }
+
+       rl = kh_value(remote_islands, pos);
+       oid_array_append(&rl->oids, oid);
+
+       memcpy(&sha_core, oid->hash, sizeof(uint64_t));
+       rl->hash += sha_core;
+}
+
+static int find_island_for_ref(const char *refname, const struct object_id *oid,
+                              int flags, void *data)
+{
+       /*
+        * We should advertise 'ARRAY_SIZE(matches) - 2' as the max,
+        * so that the check below can diagnose a config with more
+        * capture groups than we support.
+        */
+       regmatch_t matches[16];
+       int i, m;
+       struct strbuf island_name = STRBUF_INIT;
+
+       /* walk backwards to get last-one-wins ordering */
+       for (i = island_regexes_nr - 1; i >= 0; i--) {
+               if (!regexec(&island_regexes[i], refname,
+                            ARRAY_SIZE(matches), matches, 0))
+                       break;
+       }
+
+       if (i < 0)
+               return 0;
+
+       if (matches[ARRAY_SIZE(matches) - 1].rm_so != -1)
+               warning(_("island regex from config has "
+                         "too many capture groups (max=%d)"),
+                       (int)ARRAY_SIZE(matches) - 2);
+
+       for (m = 1; m < ARRAY_SIZE(matches); m++) {
+               regmatch_t *match = &matches[m];
+
+               if (match->rm_so == -1)
+                       continue;
+
+               if (island_name.len)
+                       strbuf_addch(&island_name, '-');
+
+               strbuf_add(&island_name, refname + match->rm_so, match->rm_eo - match->rm_so);
+       }
+
+       add_ref_to_island(island_name.buf, oid);
+       strbuf_release(&island_name);
+       return 0;
+}
+
+static struct remote_island *get_core_island(void)
+{
+       if (core_island_name) {
+               khiter_t pos = kh_get_str(remote_islands, core_island_name);
+               if (pos < kh_end(remote_islands))
+                       return kh_value(remote_islands, pos);
+       }
+
+       return NULL;
+}
+
+static void deduplicate_islands(void)
+{
+       struct remote_island *island, *core = NULL, **list;
+       unsigned int island_count, dst, src, ref, i = 0;
+
+       island_count = kh_size(remote_islands);
+       ALLOC_ARRAY(list, island_count);
+
+       kh_foreach_value(remote_islands, island, {
+               list[i++] = island;
+       });
+
+       for (ref = 0; ref + 1 < island_count; ref++) {
+               for (src = ref + 1, dst = src; src < island_count; src++) {
+                       if (list[ref]->hash == list[src]->hash)
+                               continue;
+
+                       if (src != dst)
+                               list[dst] = list[src];
+
+                       dst++;
+               }
+               island_count = dst;
+       }
+
+       island_bitmap_size = (island_count / 32) + 1;
+       core = get_core_island();
+
+       for (i = 0; i < island_count; ++i) {
+               mark_remote_island_1(list[i], core && list[i]->hash == core->hash);
+       }
+
+       free(list);
+}
+
+void load_delta_islands(void)
+{
+       island_marks = kh_init_sha1();
+       remote_islands = kh_init_str();
+
+       git_config(island_config_callback, NULL);
+       for_each_ref(find_island_for_ref, NULL);
+       deduplicate_islands();
+
+       fprintf(stderr, _("Marked %d islands, done.\n"), island_counter);
+}
+
+void propagate_island_marks(struct commit *commit)
+{
+       khiter_t pos = kh_get_sha1(island_marks, commit->object.oid.hash);
+
+       if (pos < kh_end(island_marks)) {
+               struct commit_list *p;
+               struct island_bitmap *root_marks = kh_value(island_marks, pos);
+
+               parse_commit(commit);
+               set_island_marks(&get_commit_tree(commit)->object, root_marks);
+               for (p = commit->parents; p; p = p->next)
+                       set_island_marks(&p->item->object, root_marks);
+       }
+}
+
+int compute_pack_layers(struct packing_data *to_pack)
+{
+       uint32_t i;
+
+       if (!core_island_name || !island_marks)
+               return 1;
+
+       for (i = 0; i < to_pack->nr_objects; ++i) {
+               struct object_entry *entry = &to_pack->objects[i];
+               khiter_t pos = kh_get_sha1(island_marks, entry->idx.oid.hash);
+
+               oe_set_layer(to_pack, entry, 1);
+
+               if (pos < kh_end(island_marks)) {
+                       struct island_bitmap *bitmap = kh_value(island_marks, pos);
+
+                       if (island_bitmap_get(bitmap, island_counter_core))
+                               oe_set_layer(to_pack, entry, 0);
+               }
+       }
+
+       return 2;
+}
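Delta islands are driven entirely by configuration; none of the code above takes effect until pack.island regexes are set. A sketch of what a fork-hosting layout might use (the ref namespace and the fork id "1234" are invented for illustration):

    [pack]
            # Capture groups name the island, so each fork id below
            # becomes its own island.
            island = refs/virtual/([0-9]+)/heads/
            island = refs/virtual/([0-9]+)/tags/
            # Objects reachable from island "1234" form the first pack
            # layer (see compute_pack_layers() above).
            islandCore = 1234

With that in place, in_same_island() and island_delta_cmp() steer pack-objects away from delta bases that an island cannot reach, the intent being that serving a single fork never requires reconstructing deltas against objects that fork would not receive.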
diff --git a/delta-islands.h b/delta-islands.h
new file mode 100644 (file)
index 0000000..b635cd0
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef DELTA_ISLANDS_H
+#define DELTA_ISLANDS_H
+
+struct object_id;
+struct packing_data;
+struct commit;
+
+int island_delta_cmp(const struct object_id *a, const struct object_id *b);
+int in_same_island(const struct object_id *, const struct object_id *);
+void resolve_tree_islands(int progress, struct packing_data *to_pack);
+void load_delta_islands(void);
+void propagate_island_marks(struct commit *commit);
+int compute_pack_layers(struct packing_data *to_pack);
+
+#endif /* DELTA_ISLANDS_H */
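As wired up by this series (the pack-objects side is elsewhere in this merge), the expected calling sequence is roughly: load_delta_islands() while reading configuration and refs, propagate_island_marks() for each commit during object traversal, resolve_tree_islands() once the object list is complete, in_same_island()/island_delta_cmp() from the delta search, and finally compute_pack_layers(), which returns the number of pack layers to write (1 when no core island is configured, otherwise 2).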
index 88a98b1c06e8792136eced81a7da5f9aacb8542f..30bf9a2399fa392b31338489278e0363a55eb726 100644 (file)
@@ -342,7 +342,7 @@ static int show_modified(struct rev_info *revs,
        }
 
        if (revs->combine_merges && !cached &&
-           (oidcmp(oid, &old_entry->oid) || oidcmp(&old_entry->oid, &new_entry->oid))) {
+           (!oideq(oid, &old_entry->oid) || !oideq(&old_entry->oid, &new_entry->oid))) {
                struct combine_diff_path *p;
                int pathlen = ce_namelen(new_entry);
 
@@ -366,7 +366,7 @@ static int show_modified(struct rev_info *revs,
        }
 
        oldmode = old_entry->ce_mode;
-       if (mode == oldmode && !oidcmp(oid, &old_entry->oid) && !dirty_submodule &&
+       if (mode == oldmode && oideq(oid, &old_entry->oid) && !dirty_submodule &&
            !revs->diffopt.flags.find_copies_harder)
                return 0;
 
@@ -518,11 +518,11 @@ static int diff_cache(struct rev_info *revs,
 int run_diff_index(struct rev_info *revs, int cached)
 {
        struct object_array_entry *ent;
-       uint64_t start = getnanotime();
 
        if (revs->pending.nr != 1)
                BUG("run_diff_index must be passed exactly one tree");
 
+       trace_performance_enter();
        ent = revs->pending.objects;
        if (diff_cache(revs, &ent->item->oid, ent->name, cached))
                exit(128);
@@ -531,7 +531,7 @@ int run_diff_index(struct rev_info *revs, int cached)
        diffcore_fix_diff_index(&revs->diffopt);
        diffcore_std(&revs->diffopt);
        diff_flush(&revs->diffopt);
-       trace_performance_since(start, "diff-index");
+       trace_performance_leave("diff-index");
        return 0;
 }
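The getnanotime()/trace_performance_since() pair used here (and again in dir.c below) is replaced by trace_performance_enter()/trace_performance_leave(). Unlike the old pair, the enter/leave calls nest, so with GIT_TRACE_PERFORMANCE set a region such as "diff-index" and any regions entered from within it can be reported separately rather than folded into a single flat timing.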
 
diff --git a/diff.c b/diff.c
index 145cfbae5929c69224f9f9e5bc473f2a221603de..f0c7557b40443da060c3070c602dd03f49b0d689 100644 (file)
--- a/diff.c
+++ b/diff.c
@@ -624,42 +624,54 @@ static void check_blank_at_eof(mmfile_t *mf1, mmfile_t *mf2,
 }
 
 static void emit_line_0(struct diff_options *o,
-                       const char *set, unsigned reverse, const char *reset,
+                       const char *set_sign, const char *set, unsigned reverse, const char *reset,
                        int first, const char *line, int len)
 {
        int has_trailing_newline, has_trailing_carriage_return;
-       int nofirst;
+       int needs_reset = 0; /* at the end of the line */
        FILE *file = o->file;
 
-       if (first)
-               fputs(diff_line_prefix(o), file);
-       else if (!len)
-               return;
+       fputs(diff_line_prefix(o), file);
 
-       if (len == 0) {
-               has_trailing_newline = (first == '\n');
-               has_trailing_carriage_return = (!has_trailing_newline &&
-                                               (first == '\r'));
-               nofirst = has_trailing_newline || has_trailing_carriage_return;
-       } else {
-               has_trailing_newline = (len > 0 && line[len-1] == '\n');
-               if (has_trailing_newline)
-                       len--;
-               has_trailing_carriage_return = (len > 0 && line[len-1] == '\r');
-               if (has_trailing_carriage_return)
-                       len--;
-               nofirst = 0;
+       has_trailing_newline = (len > 0 && line[len-1] == '\n');
+       if (has_trailing_newline)
+               len--;
+
+       has_trailing_carriage_return = (len > 0 && line[len-1] == '\r');
+       if (has_trailing_carriage_return)
+               len--;
+
+       if (!len && !first)
+               goto end_of_line;
+
+       if (reverse && want_color(o->use_color)) {
+               fputs(GIT_COLOR_REVERSE, file);
+               needs_reset = 1;
        }
 
-       if (len || !nofirst) {
-               if (reverse && want_color(o->use_color))
-                       fputs(GIT_COLOR_REVERSE, file);
+       if (set_sign) {
+               fputs(set_sign, file);
+               needs_reset = 1;
+       }
+
+       if (first)
+               fputc(first, file);
+
+       if (!len)
+               goto end_of_line;
+
+       if (set) {
+               if (set_sign && set != set_sign)
+                       fputs(reset, file);
                fputs(set, file);
-               if (first && !nofirst)
-                       fputc(first, file);
-               fwrite(line, len, 1, file);
-               fputs(reset, file);
+               needs_reset = 1;
        }
+       fwrite(line, len, 1, file);
+       needs_reset = 1; /* 'line' may contain color codes. */
+
+end_of_line:
+       if (needs_reset)
+               fputs(reset, file);
        if (has_trailing_carriage_return)
                fputc('\r', file);
        if (has_trailing_newline)
@@ -669,7 +681,7 @@ static void emit_line_0(struct diff_options *o,
 static void emit_line(struct diff_options *o, const char *set, const char *reset,
                      const char *line, int len)
 {
-       emit_line_0(o, set, 0, reset, line[0], line+1, len-1);
+       emit_line_0(o, set, NULL, 0, reset, 0, line, len);
 }
 
 enum diff_symbol {
@@ -968,8 +980,13 @@ static void pmb_advance_or_null_multi_match(struct diff_options *o,
                        /* Carry the white space delta forward */
                        pmb[i]->next_line->wsd = pmb[i]->wsd;
                        pmb[i] = pmb[i]->next_line;
-               } else
+               } else {
+                       if (pmb[i]->wsd) {
+                               free(pmb[i]->wsd->string);
+                               FREE_AND_NULL(pmb[i]->wsd);
+                       }
                        pmb[i] = NULL;
+               }
        }
 }
 
@@ -990,10 +1007,6 @@ static int shrink_potential_moved_blocks(struct moved_entry **pmb,
 
                if (lp < pmb_nr && rp > -1 && lp < rp) {
                        pmb[lp] = pmb[rp];
-                       if (pmb[rp]->wsd) {
-                               free(pmb[rp]->wsd->string);
-                               FREE_AND_NULL(pmb[rp]->wsd);
-                       }
                        pmb[rp] = NULL;
                        rp--;
                        lp++;
@@ -1187,9 +1200,9 @@ static void dim_moved_lines(struct diff_options *o)
 }
 
 static void emit_line_ws_markup(struct diff_options *o,
-                               const char *set, const char *reset,
-                               const char *line, int len,
-                               const char *set_sign, char sign,
+                               const char *set_sign, const char *set,
+                               const char *reset,
+                               char sign, const char *line, int len,
                                unsigned ws_rule, int blank_at_eof)
 {
        const char *ws = NULL;
@@ -1201,18 +1214,15 @@ static void emit_line_ws_markup(struct diff_options *o,
        }
 
        if (!ws && !set_sign)
-               emit_line_0(o, set, 0, reset, sign, line, len);
+               emit_line_0(o, set, NULL, 0, reset, sign, line, len);
        else if (!ws) {
-               /* Emit just the prefix, then the rest. */
-               emit_line_0(o, set_sign ? set_sign : set, !!set_sign, reset,
-                           sign, "", 0);
-               emit_line_0(o, set, 0, reset, 0, line, len);
+               emit_line_0(o, set_sign, set, !!set_sign, reset, sign, line, len);
        } else if (blank_at_eof)
                /* Blank line at EOF - paint '+' as well */
-               emit_line_0(o, ws, 0, reset, sign, line, len);
+               emit_line_0(o, ws, NULL, 0, reset, sign, line, len);
        else {
                /* Emit just the prefix, then the rest. */
-               emit_line_0(o, set_sign ? set_sign : set, !!set_sign, reset,
+               emit_line_0(o, set_sign ? set_sign : set, NULL, !!set_sign, reset,
                            sign, "", 0);
                ws_check_emit(line, len, ws_rule,
                              o->file, set, reset, ws);
@@ -1236,7 +1246,7 @@ static void emit_diff_symbol_from_struct(struct diff_options *o,
                context = diff_get_color_opt(o, DIFF_CONTEXT);
                reset = diff_get_color_opt(o, DIFF_RESET);
                putc('\n', o->file);
-               emit_line_0(o, context, 0, reset, '\\',
+               emit_line_0(o, context, NULL, 0, reset, '\\',
                            nneof, strlen(nneof));
                break;
        case DIFF_SYMBOL_SUBMODULE_HEADER:
@@ -1274,7 +1284,9 @@ static void emit_diff_symbol_from_struct(struct diff_options *o,
                        else if (c == '-')
                                set = diff_get_color_opt(o, DIFF_FILE_OLD);
                }
-               emit_line_ws_markup(o, set, reset, line, len, set_sign, ' ',
+               emit_line_ws_markup(o, set_sign, set, reset,
+                                   o->output_indicators[OUTPUT_INDICATOR_CONTEXT],
+                                   line, len,
                                    flags & (DIFF_SYMBOL_CONTENT_WS_MASK), 0);
                break;
        case DIFF_SYMBOL_PLUS:
@@ -1317,7 +1329,9 @@ static void emit_diff_symbol_from_struct(struct diff_options *o,
                                set = diff_get_color_opt(o, DIFF_CONTEXT_BOLD);
                        flags &= ~DIFF_SYMBOL_CONTENT_WS_MASK;
                }
-               emit_line_ws_markup(o, set, reset, line, len, set_sign, '+',
+               emit_line_ws_markup(o, set_sign, set, reset,
+                                   o->output_indicators[OUTPUT_INDICATOR_NEW],
+                                   line, len,
                                    flags & DIFF_SYMBOL_CONTENT_WS_MASK,
                                    flags & DIFF_SYMBOL_CONTENT_BLANK_LINE_EOF);
                break;
@@ -1360,7 +1374,9 @@ static void emit_diff_symbol_from_struct(struct diff_options *o,
                        else
                                set = diff_get_color_opt(o, DIFF_CONTEXT_DIM);
                }
-               emit_line_ws_markup(o, set, reset, line, len, set_sign, '-',
+               emit_line_ws_markup(o, set_sign, set, reset,
+                                   o->output_indicators[OUTPUT_INDICATOR_OLD],
+                                   line, len,
                                    flags & DIFF_SYMBOL_CONTENT_WS_MASK, 0);
                break;
        case DIFF_SYMBOL_WORDS_PORCELAIN:
@@ -2933,16 +2949,11 @@ static void show_dirstat(struct diff_options *options)
                struct diff_filepair *p = q->queue[i];
                const char *name;
                unsigned long copied, added, damage;
-               int content_changed;
 
                name = p->two->path ? p->two->path : p->one->path;
 
-               if (p->one->oid_valid && p->two->oid_valid)
-                       content_changed = oidcmp(&p->one->oid, &p->two->oid);
-               else
-                       content_changed = 1;
-
-               if (!content_changed) {
+               if (p->one->oid_valid && p->two->oid_valid &&
+                   oideq(&p->one->oid, &p->two->oid)) {
                        /*
                         * The SHA1 has not changed, so pre-/post-content is
                         * identical. We can therefore skip looking at the
@@ -2989,7 +3000,7 @@ static void show_dirstat(struct diff_options *options)
                 * made to the preimage.
                 * If the resulting damage is zero, we know that
                 * diffcore_count_changes() considers the two entries to
-                * be identical, but since content_changed is true, we
+                * be identical, but since the oid changed, we
                 * know that there must have been _some_ kind of change,
                 * so we force all entries to have damage > 0.
                 */
@@ -3404,7 +3415,7 @@ static void builtin_diff(const char *name_a,
                if (!one->data && !two->data &&
                    S_ISREG(one->mode) && S_ISREG(two->mode) &&
                    !o->flags.binary) {
-                       if (!oidcmp(&one->oid, &two->oid)) {
+                       if (oideq(&one->oid, &two->oid)) {
                                if (must_show_header)
                                        emit_diff_symbol(o, DIFF_SYMBOL_HEADER,
                                                         header.buf, header.len,
@@ -3569,7 +3580,7 @@ static void builtin_diffstat(const char *name_a, const char *name_b,
                return;
        }
 
-       same_contents = !oidcmp(&one->oid, &two->oid);
+       same_contents = oideq(&one->oid, &two->oid);
 
        if (diff_filespec_is_binary(one) || diff_filespec_is_binary(two)) {
                data->is_binary = 1;
@@ -3765,7 +3776,7 @@ static int reuse_worktree_file(const char *name, const struct object_id *oid, in
         * This is not the sha1 we are looking for, or
         * unreusable because it is not a regular file.
         */
-       if (oidcmp(oid, &ce->oid) || !S_ISREG(ce->ce_mode))
+       if (!oideq(oid, &ce->oid) || !S_ISREG(ce->ce_mode))
                return 0;
 
        /*
@@ -4170,7 +4181,7 @@ static void fill_metainfo(struct strbuf *msg,
        default:
                *must_show_header = 0;
        }
-       if (one && two && oidcmp(&one->oid, &two->oid)) {
+       if (one && two && !oideq(&one->oid, &two->oid)) {
                const unsigned hexsz = the_hash_algo->hexsz;
                int abbrev = o->flags.full_index ? hexsz : DEFAULT_ABBREV;
 
@@ -4375,6 +4386,9 @@ void diff_setup(struct diff_options *options)
 
        options->file = stdout;
 
+       options->output_indicators[OUTPUT_INDICATOR_NEW] = '+';
+       options->output_indicators[OUTPUT_INDICATOR_OLD] = '-';
+       options->output_indicators[OUTPUT_INDICATOR_CONTEXT] = ' ';
        options->abbrev = DEFAULT_ABBREV;
        options->line_termination = '\n';
        options->break_opt = -1;
@@ -4852,6 +4866,12 @@ int diff_opt_parse(struct diff_options *options,
                 options->output_format |= DIFF_FORMAT_DIFFSTAT;
        } else if (!strcmp(arg, "--no-compact-summary"))
                 options->flags.stat_with_summary = 0;
+       else if (skip_prefix(arg, "--output-indicator-new=", &arg))
+               options->output_indicators[OUTPUT_INDICATOR_NEW] = arg[0];
+       else if (skip_prefix(arg, "--output-indicator-old=", &arg))
+               options->output_indicators[OUTPUT_INDICATOR_OLD] = arg[0];
+       else if (skip_prefix(arg, "--output-indicator-context=", &arg))
+               options->output_indicators[OUTPUT_INDICATOR_CONTEXT] = arg[0];
 
        /* renames options */
        else if (starts_with(arg, "-B") ||
@@ -5323,7 +5343,7 @@ int diff_unmodified_pair(struct diff_filepair *p)
         * dealing with a change.
         */
        if (one->oid_valid && two->oid_valid &&
-           !oidcmp(&one->oid, &two->oid) &&
+           oideq(&one->oid, &two->oid) &&
            !one->dirty_submodule && !two->dirty_submodule)
                return 1; /* no change */
        if (!one->oid_valid && !two->oid_valid)
@@ -5457,7 +5477,7 @@ static void diff_resolve_rename_copy(void)
                        else
                                p->status = DIFF_STATUS_RENAMED;
                }
-               else if (oidcmp(&p->one->oid, &p->two->oid) ||
+               else if (!oideq(&p->one->oid, &p->two->oid) ||
                         p->one->mode != p->two->mode ||
                         p->one->dirty_submodule ||
                         p->two->dirty_submodule ||
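The new --output-indicator-new/--output-indicator-old/--output-indicator-context options added above replace the leading '+', '-' and ' ' of content lines; only the first character of each value is used, and they appear to exist mainly for internal consumers such as range-diff, which wants distinguishable markers. An illustrative invocation:

    git diff --output-indicator-new=A --output-indicator-old=B --output-indicator-context=C HEAD~1

Content lines in the output are then prefixed with 'A', 'B' and 'C' instead of '+', '-' and ' '.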
diff --git a/diff.h b/diff.h
index 89544e64bc5797fe40f069e84657d85589f0252a..a30cc35ec3b4cb525340691a39ca88e89e379e4c 100644 (file)
--- a/diff.h
+++ b/diff.h
@@ -194,6 +194,11 @@ struct diff_options {
        FILE *file;
        int close_file;
 
+#define OUTPUT_INDICATOR_NEW 0
+#define OUTPUT_INDICATOR_OLD 1
+#define OUTPUT_INDICATOR_CONTEXT 2
+       char output_indicators[3];
+
        struct pathspec pathspec;
        pathchange_fn_t pathchange;
        change_fn_t change;
index c64359f489c87910b9d41756918dacda49276004..e11fcfdb391425ac9efaea829d4756891dcb941e 100644 (file)
@@ -58,7 +58,7 @@ static int should_break(struct diff_filespec *src,
        }
 
        if (src->oid_valid && dst->oid_valid &&
-           !oidcmp(&src->oid, &dst->oid))
+           oideq(&src->oid, &dst->oid))
                return 0; /* they are the same */
 
        if (diff_populate_filespec(src, 0) || diff_populate_filespec(dst, 0))
index d775183c2fd1bed06a97facb56c4721e4e5df2f6..daddd9b28af1e20fe82aefa654bad04f4b52c1d3 100644 (file)
@@ -286,7 +286,7 @@ static int find_identical_files(struct hashmap *srcs,
                struct diff_filespec *source = p->filespec;
 
                /* False hash collision? */
-               if (oidcmp(&source->oid, &target->oid))
+               if (!oideq(&source->oid, &target->oid))
                        continue;
                /* Non-regular files? If so, the modes must match! */
                if (!S_ISREG(source->mode) || !S_ISREG(target->mode)) {
diff --git a/dir.c b/dir.c
index aceb0d48692b7d727cfd2645ae88b0d45d660c09..47c2fca8dc33970a1f4f041360ef954da6fa75a8 100644 (file)
--- a/dir.c
+++ b/dir.c
@@ -1282,7 +1282,7 @@ static void prep_exclude(struct dir_struct *dir,
                 * order, though, if you do that.
                 */
                if (untracked &&
-                   oidcmp(&oid_stat.oid, &untracked->exclude_oid)) {
+                   !oideq(&oid_stat.oid, &untracked->exclude_oid)) {
                        invalidate_gitignore(dir->untracked, untracked);
                        oidcpy(&untracked->exclude_oid, &oid_stat.oid);
                }
@@ -2248,12 +2248,12 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d
 
        /* Validate $GIT_DIR/info/exclude and core.excludesfile */
        root = dir->untracked->root;
-       if (oidcmp(&dir->ss_info_exclude.oid,
+       if (!oideq(&dir->ss_info_exclude.oid,
                   &dir->untracked->ss_info_exclude.oid)) {
                invalidate_gitignore(dir->untracked, root);
                dir->untracked->ss_info_exclude = dir->ss_info_exclude;
        }
-       if (oidcmp(&dir->ss_excludes_file.oid,
+       if (!oideq(&dir->ss_excludes_file.oid,
                   &dir->untracked->ss_excludes_file.oid)) {
                invalidate_gitignore(dir->untracked, root);
                dir->untracked->ss_excludes_file = dir->ss_excludes_file;
@@ -2268,10 +2268,13 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
                   const char *path, int len, const struct pathspec *pathspec)
 {
        struct untracked_cache_dir *untracked;
-       uint64_t start = getnanotime();
 
-       if (has_symlink_leading_path(path, len))
+       trace_performance_enter();
+
+       if (has_symlink_leading_path(path, len)) {
+               trace_performance_leave("read directory %.*s", len, path);
                return dir->nr;
+       }
 
        untracked = validate_untracked_cache(dir, len, pathspec);
        if (!untracked)
@@ -2307,7 +2310,7 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
                dir->nr = i;
        }
 
-       trace_performance_since(start, "read directory %.*s", len, path);
+       trace_performance_leave("read directory %.*s", len, path);
        if (dir->untracked) {
                static int force_untracked_cache = -1;
                static struct trace_key trace_untracked_stats = TRACE_KEY_INIT(UNTRACKED_STATS);
diff --git a/entry.c b/entry.c
index 2a2ab6c839490aba1b9d2723b8e05837855f5b39..5d136c5d55e0811b70fdb10f59ea506ceb1b273b 100644 (file)
--- a/entry.c
+++ b/entry.c
@@ -399,6 +399,34 @@ static int check_path(const char *path, int len, struct stat *st, int skiplen)
        return lstat(path, st);
 }
 
+static void mark_colliding_entries(const struct checkout *state,
+                                  struct cache_entry *ce, struct stat *st)
+{
+       int i, trust_ino = check_stat;
+
+#if defined(GIT_WINDOWS_NATIVE)
+       trust_ino = 0;
+#endif
+
+       ce->ce_flags |= CE_MATCHED;
+
+       for (i = 0; i < state->istate->cache_nr; i++) {
+               struct cache_entry *dup = state->istate->cache[i];
+
+               if (dup == ce)
+                       break;
+
+               if (dup->ce_flags & (CE_MATCHED | CE_VALID | CE_SKIP_WORKTREE))
+                       continue;
+
+               if ((trust_ino && dup->ce_stat_data.sd_ino == st->st_ino) ||
+                   (!trust_ino && !fspathcmp(ce->name, dup->name))) {
+                       dup->ce_flags |= CE_MATCHED;
+                       break;
+               }
+       }
+}
+
 /*
  * Write the contents from ce out to the working tree.
  *
@@ -456,6 +484,9 @@ int checkout_entry(struct cache_entry *ce,
                        return -1;
                }
 
+               if (state->clone)
+                       mark_colliding_entries(state, ce, &st);
+
                /*
                 * We unlink the old file, to get the new one with the
                 * right permissions (including umask, which is nasty
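The collision check added above is easiest to see with an example. On a case-insensitive filesystem, if the index being checked out during a clone contains both "File.txt" and "file.txt" (names invented for illustration), writing the second entry lands on the path already created for the first. With state->clone set, mark_colliding_entries() flags both cache entries CE_MATCHED, either because their stat data reports the same inode or, on Windows where inodes are not trusted, because fspathcmp() treats the two names as equal, so the clone code can list the colliding paths in a warning afterwards.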
index 7cdfdd0c020e37abbc30624b2ea1bcf4ef0ddfee..d487966935e2d81aeece7f57f382c941d14325d7 100644 (file)
@@ -19,6 +19,8 @@
 #ifndef __EWOK_RLW_H__
 #define __EWOK_RLW_H__
 
+#include "ewok.h"
+
 #define RLW_RUNNING_BITS (sizeof(eword_t) * 4)
 #define RLW_LITERAL_BITS (sizeof(eword_t) * 8 - 1 - RLW_RUNNING_BITS)
 
index 89bb0c9db3de9b380aad53709d882cb04f7d054a..95600c78e048f9d3d28672737d4bc2a941073abc 100644 (file)
@@ -171,6 +171,7 @@ Format of STDIN stream:
 #include "packfile.h"
 #include "object-store.h"
 #include "mem-pool.h"
+#include "commit-reach.h"
 
 #define PACK_ID_BITS 16
 #define MAX_PACK_ID ((1<<PACK_ID_BITS)-1)
@@ -572,7 +573,7 @@ static struct object_entry *find_object(struct object_id *oid)
        unsigned int h = oid->hash[0] << 8 | oid->hash[1];
        struct object_entry *e;
        for (e = object_table[h]; e; e = e->next)
-               if (!oidcmp(oid, &e->idx.oid))
+               if (oideq(oid, &e->idx.oid))
                        return e;
        return NULL;
 }
@@ -583,7 +584,7 @@ static struct object_entry *insert_object(struct object_id *oid)
        struct object_entry *e = object_table[h];
 
        while (e) {
-               if (!oidcmp(oid, &e->idx.oid))
+               if (oideq(oid, &e->idx.oid))
                        return e;
                e = e->next;
        }
@@ -1068,7 +1069,7 @@ static int store_object(
                duplicate_count_by_type[type]++;
                return 1;
        } else if (find_sha1_pack(oid.hash,
-                                 get_packed_git(the_repository))) {
+                                 get_all_packs(the_repository))) {
                e->type = type;
                e->pack_id = MAX_PACK_ID;
                e->idx.offset = 1; /* just not zero! */
@@ -1266,7 +1267,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
                truncate_pack(&checkpoint);
 
        } else if (find_sha1_pack(oid.hash,
-                                 get_packed_git(the_repository))) {
+                                 get_all_packs(the_repository))) {
                e->type = OBJ_BLOB;
                e->pack_id = MAX_PACK_ID;
                e->idx.offset = 1; /* just not zero! */
@@ -1533,7 +1534,7 @@ static int tree_content_set(
                        if (!*slash1) {
                                if (!S_ISDIR(mode)
                                                && e->versions[1].mode == mode
-                                               && !oidcmp(&e->versions[1].oid, oid))
+                                               && oideq(&e->versions[1].oid, oid))
                                        return 0;
                                e->versions[1].mode = mode;
                                oidcpy(&e->versions[1].oid, oid);
@@ -2649,7 +2650,7 @@ static int parse_from(struct branch *b)
                struct object_entry *oe = find_mark(idnum);
                if (oe->type != OBJ_COMMIT)
                        die("Mark :%" PRIuMAX " not a commit", idnum);
-               if (oidcmp(&b->oid, &oe->idx.oid)) {
+               if (!oideq(&b->oid, &oe->idx.oid)) {
                        oidcpy(&b->oid, &oe->idx.oid);
                        if (oe->pack_id != MAX_PACK_ID) {
                                unsigned long size;
@@ -2667,7 +2668,7 @@ static int parse_from(struct branch *b)
        else
                die("Invalid ref name or SHA1 expression: %s", from);
 
-       if (b->branch_tree.tree && oidcmp(&oid, &b->branch_tree.versions[1].oid)) {
+       if (b->branch_tree.tree && !oideq(&oid, &b->branch_tree.versions[1].oid)) {
                release_tree_content_recursive(b->branch_tree.tree);
                b->branch_tree.tree = NULL;
        }
index 853624f811c59c17af88814ebeecf4154095a19c..42665488008bc0fffafa5d4e0455bc22b510b92a 100644 (file)
@@ -23,21 +23,16 @@ static void fetch_refs(const char *remote_name, struct ref *ref)
        fetch_if_missing = original_fetch_if_missing;
 }
 
-void fetch_object(const char *remote_name, const unsigned char *sha1)
-{
-       struct ref *ref = alloc_ref(sha1_to_hex(sha1));
-       hashcpy(ref->old_oid.hash, sha1);
-       fetch_refs(remote_name, ref);
-}
-
-void fetch_objects(const char *remote_name, const struct oid_array *to_fetch)
+void fetch_objects(const char *remote_name, const struct object_id *oids,
+                  int oid_nr)
 {
        struct ref *ref = NULL;
        int i;
 
-       for (i = 0; i < to_fetch->nr; i++) {
-               struct ref *new_ref = alloc_ref(oid_to_hex(&to_fetch->oid[i]));
-               oidcpy(&new_ref->old_oid, &to_fetch->oid[i]);
+       for (i = 0; i < oid_nr; i++) {
+               struct ref *new_ref = alloc_ref(oid_to_hex(&oids[i]));
+               oidcpy(&new_ref->old_oid, &oids[i]);
+               new_ref->exact_oid = 1;
                new_ref->next = ref;
                ref = new_ref;
        }
index 4b269d07ed635b3e6b1f5f7b2118d290d27e7e4e..d2f996d4e8c79c7bde78780dad5ad543910fcc29 100644 (file)
@@ -1,11 +1,7 @@
 #ifndef FETCH_OBJECT_H
 #define FETCH_OBJECT_H
 
-#include "sha1-array.h"
-
-extern void fetch_object(const char *remote_name, const unsigned char *sha1);
-
-extern void fetch_objects(const char *remote_name,
-                         const struct oid_array *to_fetch);
+void fetch_objects(const char *remote_name, const struct object_id *oids,
+                  int oid_nr);
 
 #endif
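The single-object fetch_object() entry point is dropped: the partial-clone fetch hook now takes a plain array of object ids plus a count, and each synthesized ref is marked exact_oid so the transport requests exactly those objects instead of resolving the hex string as a ref name.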
index 88a078e9befd281cf5f03e9e64615b14ca768a35..75047a4b2a491e805f7c500dc804e78a8538bfa2 100644 (file)
@@ -599,7 +599,7 @@ static void filter_refs(struct fetch_pack_args *args,
                        continue;
                if (parse_oid_hex(ref->name, &oid, &p) ||
                    *p != '\0' ||
-                   oidcmp(&oid, &ref->old_oid))
+                   !oideq(&oid, &ref->old_oid))
                        continue;
 
                if ((allow_unadvertised_object_request &
diff --git a/fsck.c b/fsck.c
index a0cee0be590020e4ff4b42da86c322ba4d0010ae..38624d251126ed5a75cd6b8e8b7adfddbb29b283 100644 (file)
--- a/fsck.c
+++ b/fsck.c
@@ -10,7 +10,6 @@
 #include "fsck.h"
 #include "refs.h"
 #include "utf8.h"
-#include "sha1-array.h"
 #include "decorate.h"
 #include "oidset.h"
 #include "packfile.h"
@@ -67,6 +66,8 @@ static struct oidset gitmodules_done = OIDSET_INIT;
        FUNC(GITMODULES_LARGE, ERROR) \
        FUNC(GITMODULES_NAME, ERROR) \
        FUNC(GITMODULES_SYMLINK, ERROR) \
+       FUNC(GITMODULES_URL, ERROR) \
+       FUNC(GITMODULES_PATH, ERROR) \
        /* warnings */ \
        FUNC(BAD_FILEMODE, WARN) \
        FUNC(EMPTY_NAME, WARN) \
@@ -182,40 +183,37 @@ static int fsck_msg_type(enum fsck_msg_id msg_id,
 
 static void init_skiplist(struct fsck_options *options, const char *path)
 {
-       static struct oid_array skiplist = OID_ARRAY_INIT;
-       int sorted, fd;
-       char buffer[GIT_MAX_HEXSZ + 1];
+       FILE *fp;
+       struct strbuf sb = STRBUF_INIT;
        struct object_id oid;
 
-       if (options->skiplist)
-               sorted = options->skiplist->sorted;
-       else {
-               sorted = 1;
-               options->skiplist = &skiplist;
-       }
-
-       fd = open(path, O_RDONLY);
-       if (fd < 0)
+       fp = fopen(path, "r");
+       if (!fp)
                die("Could not open skip list: %s", path);
-       for (;;) {
+       while (!strbuf_getline(&sb, fp)) {
                const char *p;
-               int result = read_in_full(fd, buffer, sizeof(buffer));
-               if (result < 0)
-                       die_errno("Could not read '%s'", path);
-               if (!result)
-                       break;
-               if (parse_oid_hex(buffer, &oid, &p) || *p != '\n')
-                       die("Invalid SHA-1: %s", buffer);
-               oid_array_append(&skiplist, &oid);
-               if (sorted && skiplist.nr > 1 &&
-                               oidcmp(&skiplist.oid[skiplist.nr - 2],
-                                      &oid) > 0)
-                       sorted = 0;
-       }
-       close(fd);
+               const char *hash;
 
-       if (sorted)
-               skiplist.sorted = 1;
+               /*
+                * Allow trailing comments, leading whitespace
+                * (including before commits), and empty or whitespace
+                * only lines.
+                */
+               hash = strchr(sb.buf, '#');
+               if (hash)
+                       strbuf_setlen(&sb, hash - sb.buf);
+               strbuf_trim(&sb);
+               if (!sb.len)
+                       continue;
+
+               if (parse_oid_hex(sb.buf, &oid, &p) || *p != '\0')
+                       die("Invalid SHA-1: %s", sb.buf);
+               oidset_insert(&options->skiplist, &oid);
+       }
+       if (ferror(fp))
+               die_errno("Could not read '%s'", path);
+       fclose(fp);
+       strbuf_release(&sb);
 }
 
 static int parse_msg_type(const char *str)
@@ -320,9 +318,7 @@ static void append_msg_id(struct strbuf *sb, const char *msg_id)
 
 static int object_on_skiplist(struct fsck_options *opts, struct object *obj)
 {
-       if (opts && opts->skiplist && obj)
-               return oid_array_lookup(opts->skiplist, &obj->oid) >= 0;
-       return 0;
+       return opts && obj && oidset_contains(&opts->skiplist, &obj->oid);
 }
 
 __attribute__((format (printf, 4, 5)))
@@ -992,6 +988,18 @@ static int fsck_gitmodules_fn(const char *var, const char *value, void *vdata)
                                    FSCK_MSG_GITMODULES_NAME,
                                    "disallowed submodule name: %s",
                                    name);
+       if (!strcmp(key, "url") && value &&
+           looks_like_command_line_option(value))
+               data->ret |= report(data->options, data->obj,
+                                   FSCK_MSG_GITMODULES_URL,
+                                   "disallowed submodule url: %s",
+                                   value);
+       if (!strcmp(key, "path") && value &&
+           looks_like_command_line_option(value))
+               data->ret |= report(data->options, data->obj,
+                                   FSCK_MSG_GITMODULES_PATH,
+                                   "disallowed submodule path: %s",
+                                   value);
        free(name);
 
        return 0;
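With the skiplist held in an oidset and parsed line by line, the file no longer needs to be pre-sorted and may now contain comments, blank lines and surrounding whitespace. A file pointed to by fsck.skipList (or receive.fsck.skipList) could therefore look like this; the object names below are placeholders:

    # objects with historical format problems we do not want reported

    0123456789012345678901234567890123456789
      89abcdef89abcdef89abcdef89abcdef89abcdef   # indented, trailing comment stripped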
diff --git a/fsck.h b/fsck.h
index 0c7e8c9428bbc808abf27ff5dfe77d8d7309b470..b95595ae5fee6c065c4f54ad66ddc0edb8824d37 100644 (file)
--- a/fsck.h
+++ b/fsck.h
@@ -1,6 +1,8 @@
 #ifndef GIT_FSCK_H
 #define GIT_FSCK_H
 
+#include "oidset.h"
+
 #define FSCK_ERROR 1
 #define FSCK_WARN 2
 #define FSCK_IGNORE 3
@@ -35,12 +37,12 @@ struct fsck_options {
        fsck_error error_func;
        unsigned strict:1;
        int *msg_type;
-       struct oid_array *skiplist;
+       struct oidset skiplist;
        struct decoration *object_names;
 };
 
-#define FSCK_OPTIONS_DEFAULT { NULL, fsck_error_function, 0, NULL }
-#define FSCK_OPTIONS_STRICT { NULL, fsck_error_function, 1, NULL }
+#define FSCK_OPTIONS_DEFAULT { NULL, fsck_error_function, 0, NULL, OIDSET_INIT }
+#define FSCK_OPTIONS_STRICT { NULL, fsck_error_function, 1, NULL, OIDSET_INIT }
 
 /* descend in all linked child objects
  * the return value is:
index 1cb2c0a31b416f9db979cb86badac5f958792453..1b568e29b9a09af9fdecf19ddb75ffd2bdc6e094 100755 (executable)
@@ -534,31 +534,19 @@ cmd_update()
                "$@" || echo "#unmatched" $?
        } | {
        err=
-       while read -r mode sha1 stage just_cloned sm_path
+       while read -r quickabort sha1 just_cloned sm_path
        do
-               die_if_unmatched "$mode" "$sha1"
+               die_if_unmatched "$quickabort" "$sha1"
 
-               name=$(git submodule--helper name "$sm_path") || exit
-               if ! test -z "$update"
-               then
-                       update_module=$update
-               else
-                       update_module=$(git config submodule."$name".update)
-                       if test -z "$update_module"
-                       then
-                               update_module="checkout"
-                       fi
-               fi
+               git submodule--helper ensure-core-worktree "$sm_path"
+
+               update_module=$(git submodule--helper update-module-mode $just_cloned "$sm_path" $update)
 
                displaypath=$(git submodule--helper relative-path "$prefix$sm_path" "$wt_prefix")
 
                if test $just_cloned -eq 1
                then
                        subsha1=
-                       case "$update_module" in
-                       merge | rebase | none)
-                               update_module=checkout ;;
-                       esac
                else
                        subsha1=$(sanitize_submodule_env; cd "$sm_path" &&
                                git rev-parse --verify HEAD) ||
@@ -630,7 +618,7 @@ cmd_update()
                                must_die_on_failure=yes
                                ;;
                        *)
-                               die "$(eval_gettext "Invalid update mode '$update_module' for submodule '$name'")"
+                               die "$(eval_gettext "Invalid update mode '$update_module' for submodule path '$sm_path'")"
                        esac
 
                        if (sanitize_submodule_env; cd "$sm_path" && $command "$sha1")
diff --git a/git.c b/git.c
index c27c38738b2a9d9d61460b150d5ab4d36bb9cf5b..5920f8019bb3b26db0c7c50f9ea02f8ff96230e8 100644 (file)
--- a/git.c
+++ b/git.c
@@ -508,6 +508,7 @@ static struct cmd_struct commands[] = {
        { "merge-tree", cmd_merge_tree, RUN_SETUP | NO_PARSEOPT },
        { "mktag", cmd_mktag, RUN_SETUP | NO_PARSEOPT },
        { "mktree", cmd_mktree, RUN_SETUP },
+       { "multi-pack-index", cmd_multi_pack_index, RUN_SETUP_GENTLY },
        { "mv", cmd_mv, RUN_SETUP | NEED_WORK_TREE },
        { "name-rev", cmd_name_rev, RUN_SETUP },
        { "notes", cmd_notes, RUN_SETUP },
@@ -674,6 +675,8 @@ static void execv_dashed_external(const char **argv)
 static int run_argv(int *argcp, const char ***argv)
 {
        int done_alias = 0;
+       struct string_list cmd_list = STRING_LIST_INIT_NODUP;
+       struct string_list_item *seen;
 
        while (1) {
                /*
@@ -691,17 +694,37 @@ static int run_argv(int *argcp, const char ***argv)
                /* .. then try the external ones */
                execv_dashed_external(*argv);
 
-               /* It could be an alias -- this works around the insanity
+               seen = unsorted_string_list_lookup(&cmd_list, *argv[0]);
+               if (seen) {
+                       int i;
+                       struct strbuf sb = STRBUF_INIT;
+                       for (i = 0; i < cmd_list.nr; i++) {
+                               struct string_list_item *item = &cmd_list.items[i];
+
+                               strbuf_addf(&sb, "\n  %s", item->string);
+                               if (item == seen)
+                                       strbuf_addstr(&sb, " <==");
+                               else if (i == cmd_list.nr - 1)
+                                       strbuf_addstr(&sb, " ==>");
+                       }
+                       die(_("alias loop detected: expansion of '%s' does"
+                             " not terminate:%s"), cmd_list.items[0].string, sb.buf);
+               }
+
+               string_list_append(&cmd_list, *argv[0]);
+
+               /*
+                * It could be an alias -- this works around the insanity
                 * of overriding "git log" with "git show" by having
                 * alias.log = show
                 */
-               if (done_alias)
-                       break;
                if (!handle_alias(argcp, argv))
                        break;
                done_alias = 1;
        }
 
+       string_list_clear(&cmd_list, 0);
+
        return done_alias;
 }
 
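run_argv() now remembers every command name it has already tried to expand in a string_list and dies when a name repeats, so a cycle such as alias.foo = bar, alias.bar = foo is reported with the offending chain instead of recursing until the process crashes. A minimal standalone sketch of the same seen-list check, in plain C rather than git's string_list API:

#include <stdio.h>
#include <string.h>

/* Toy alias table standing in for git config; purely illustrative. */
static const char *expand_alias(const char *cmd)
{
        if (!strcmp(cmd, "foo")) return "bar";
        if (!strcmp(cmd, "bar")) return "foo";  /* loop: foo -> bar -> foo */
        return NULL;                            /* not an alias: run it */
}

int main(void)
{
        const char *seen[32];
        int nseen = 0;
        const char *cmd = "foo";

        while (cmd) {
                for (int i = 0; i < nseen; i++)
                        if (!strcmp(seen[i], cmd)) {
                                fprintf(stderr,
                                        "alias loop detected: expansion of '%s' does not terminate\n",
                                        seen[0]);
                                return 1;
                        }
                if (nseen < 32)
                        seen[nseen++] = cmd;
                cmd = expand_alias(cmd);
        }
        return 0;
}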
diff --git a/http-backend.c b/http-backend.c
index 458642ef72b879a2f53d6e1f8f192847ed814111..9e894f197f91ee3565b0f3c618fdb4042e2f229f 100644 (file)
--- a/http-backend.c
+++ b/http-backend.c
@@ -595,13 +595,13 @@ static void get_info_packs(struct strbuf *hdr, char *arg)
        size_t cnt = 0;
 
        select_getanyfile(hdr);
-       for (p = get_packed_git(the_repository); p; p = p->next) {
+       for (p = get_all_packs(the_repository); p; p = p->next) {
                if (p->pack_local)
                        cnt++;
        }
 
        strbuf_grow(&buf, cnt * 53 + 2);
-       for (p = get_packed_git(the_repository); p; p = p->next) {
+       for (p = get_all_packs(the_repository); p; p = p->next) {
                if (p->pack_local)
                        strbuf_addf(&buf, "P %s\n", p->pack_name + objdirlen + 6);
        }
diff --git a/http-push.c b/http-push.c
index 5eaf551b51e7aaf87219d4af0ca19873484c90ca..1bbb0cdb6d03353fbc145d6ba748a30c9eabcec0 100644 (file)
--- a/http-push.c
+++ b/http-push.c
@@ -14,7 +14,7 @@
 #include "argv-array.h"
 #include "packfile.h"
 #include "object-store.h"
-
+#include "commit-reach.h"
 
 #ifdef EXPAT_NEEDS_XMLPARSE_H
 #include <xmlparse.h>
@@ -1859,7 +1859,7 @@ int cmd_main(int argc, const char **argv)
                        continue;
                }
 
-               if (!oidcmp(&ref->old_oid, &ref->peer_ref->new_oid)) {
+               if (oideq(&ref->old_oid, &ref->peer_ref->new_oid)) {
                        if (push_verbosely)
                                fprintf(stderr, "'%s': up-to-date\n", ref->name);
                        if (helper_status)
diff --git a/http-walker.c b/http-walker.c
index 7cdfb2f24c76d2f09b39ae2fa97685ecec2d1630..b3334bf657b5ea64e779eb470525eb0b7337baa9 100644 (file)
--- a/http-walker.c
+++ b/http-walker.c
@@ -483,7 +483,7 @@ static int fetch_object(struct walker *walker, unsigned char *sha1)
 
        list_for_each(pos, head) {
                obj_req = list_entry(pos, struct object_request, node);
-               if (!hashcmp(obj_req->oid.hash, sha1))
+               if (hasheq(obj_req->oid.hash, sha1))
                        break;
        }
        if (obj_req == NULL)
@@ -543,7 +543,7 @@ static int fetch_object(struct walker *walker, unsigned char *sha1)
        } else if (req->zret != Z_STREAM_END) {
                walker->corrupt_object_found++;
                ret = error("File %s (%s) corrupt", hex, req->url);
-       } else if (hashcmp(obj_req->oid.hash, req->real_sha1)) {
+       } else if (!hasheq(obj_req->oid.hash, req->real_sha1)) {
                ret = error("File %s has bad hash", hex);
        } else if (req->rename < 0) {
                struct strbuf buf = STRBUF_INIT;
diff --git a/http.c b/http.c
index 4162860ee316365091ac3978a83d2311682b8079..98ff122585c4da84894ccae97db54859e45f3bec 100644 (file)
--- a/http.c
+++ b/http.c
@@ -2394,7 +2394,7 @@ int finish_http_object_request(struct http_object_request *freq)
                unlink_or_warn(freq->tmpfile.buf);
                return -1;
        }
-       if (hashcmp(freq->sha1, freq->real_sha1)) {
+       if (!hasheq(freq->sha1, freq->real_sha1)) {
                unlink_or_warn(freq->tmpfile.buf);
                return -1;
        }
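The hunks above (and several below, e.g. in match-trees.c and log-tree.c) are part of a tree-wide conversion from `!hashcmp()`/`!oidcmp()` to the dedicated equality helpers `hasheq()`/`oideq()`. Conceptually the helpers are just the negated comparators; the sketch below shows that relationship with a fixed 20-byte hash and is not git's actual cache.h code, which sizes the hash from the_hash_algo:

#include <stdio.h>
#include <string.h>

#define HASH_RAWSZ 20   /* SHA-1 size; git derives this from the_hash_algo */

struct object_id { unsigned char hash[HASH_RAWSZ]; };

/* Sketch: the equality helpers are simply the negation of the comparators. */
static int hashcmp(const unsigned char *a, const unsigned char *b)
{
        return memcmp(a, b, HASH_RAWSZ);
}
static int hasheq(const unsigned char *a, const unsigned char *b)
{
        return !hashcmp(a, b);
}
static int oideq(const struct object_id *a, const struct object_id *b)
{
        return hasheq(a->hash, b->hash);
}

int main(void)
{
        struct object_id a = { {0} }, b = { {0} };

        /* "if (!oidcmp(&a, &b))" becomes the clearer "if (oideq(&a, &b))" */
        printf("%d\n", oideq(&a, &b));          /* 1: identical ids */
        b.hash[0] = 1;
        printf("%d\n", hasheq(a.hash, b.hash)); /* 0: different ids */
        return 0;
}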
diff --git a/interdiff.c b/interdiff.c
new file mode 100644 (file)
index 0000000..c81d680
--- /dev/null
+++ b/interdiff.c
@@ -0,0 +1,28 @@
+#include "cache.h"
+#include "commit.h"
+#include "revision.h"
+#include "interdiff.h"
+
+static struct strbuf *idiff_prefix_cb(struct diff_options *opt, void *data)
+{
+       return data;
+}
+
+void show_interdiff(struct rev_info *rev, int indent)
+{
+       struct diff_options opts;
+       struct strbuf prefix = STRBUF_INIT;
+
+       memcpy(&opts, &rev->diffopt, sizeof(opts));
+       opts.output_format = DIFF_FORMAT_PATCH;
+       opts.output_prefix = idiff_prefix_cb;
+       strbuf_addchars(&prefix, ' ', indent);
+       opts.output_prefix_data = &prefix;
+       diff_setup_done(&opts);
+
+       diff_tree_oid(rev->idiff_oid1, rev->idiff_oid2, "", &opts);
+       diffcore_std(&opts);
+       diff_flush(&opts);
+
+       strbuf_release(&prefix);
+}
diff --git a/interdiff.h b/interdiff.h
new file mode 100644 (file)
index 0000000..01c730a
--- /dev/null
+++ b/interdiff.h
@@ -0,0 +1,8 @@
+#ifndef INTERDIFF_H
+#define INTERDIFF_H
+
+struct rev_info;
+
+void show_interdiff(struct rev_info *, int indent);
+
+#endif
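show_interdiff() diffs the two commits recorded in rev->idiff_oid1 and rev->idiff_oid2 using the rev_info's own diff options, indenting every output line; the log-tree.c hunks further down call it with an indent of two below the "---" commentary block. A hedged sketch of a caller, assuming the idiff_* fields exist on struct rev_info as those hunks show and that the rev_info has been initialized in the usual way; the wrapper function itself is hypothetical:

#include "cache.h"
#include "revision.h"
#include "interdiff.h"

/*
 * Sketch only (git-tree code, not standalone): the field names come from
 * the interdiff.c and log-tree.c hunks; this helper is made up.
 */
static void request_interdiff(struct rev_info *rev,
                              struct object_id *prev_tip,
                              struct object_id *curr_tip)
{
        rev->idiff_oid1 = prev_tip;     /* tip of the previous round of the series */
        rev->idiff_oid2 = curr_tip;     /* tip of the current round */
        rev->idiff_title = "Interdiff:";

        /* render it now, indented by two spaces as the cover letter does */
        show_interdiff(rev, 2);
}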
diff --git a/json-writer.h b/json-writer.h
index fc18acc7d913d170797b65b4f80071bce5310ba7..83906b09c173894111383b3fc3a0ec7e398373c7 100644 (file)
--- a/json-writer.h
+++ b/json-writer.h
@@ -42,6 +42,8 @@
  * of the given strings.
  */
 
+#include "strbuf.h"
+
 struct json_writer
 {
        /*
diff --git a/linear-assignment.c b/linear-assignment.c
index 9b3e56e283ccb98c57ca8a7b85dcf48b05b15a4f..ecffc09be6ec5c1a9646ac152a899f3415f6321d 100644 (file)
--- a/linear-assignment.c
+++ b/linear-assignment.c
@@ -19,6 +19,12 @@ void compute_assignment(int column_count, int row_count, int *cost,
        int *free_row, free_count = 0, saved_free_count, *pred, *col;
        int i, j, phase;
 
+       if (column_count < 2) {
+               memset(column2row, 0, sizeof(int) * column_count);
+               memset(row2column, 0, sizeof(int) * row_count);
+               return;
+       }
+
        memset(column2row, -1, sizeof(int) * column_count);
        memset(row2column, -1, sizeof(int) * row_count);
        ALLOC_ARRAY(v, column_count);
diff --git a/ll-merge.c b/ll-merge.c
index 0e2800f7bb46bdf9e1713d210a5dc1c02fbbaa26..1936fee9e1c54e8c0dc049ba48ef1cd209967d31 100644 (file)
--- a/ll-merge.c
+++ b/ll-merge.c
@@ -371,13 +371,12 @@ int ll_merge(mmbuffer_t *result_buf,
        if (!check)
                check = attr_check_initl("merge", "conflict-marker-size", NULL);
 
-       if (!git_check_attr(&the_index, path, check)) {
-               ll_driver_name = check->items[0].value;
-               if (check->items[1].value) {
-                       marker_size = atoi(check->items[1].value);
-                       if (marker_size <= 0)
-                               marker_size = DEFAULT_CONFLICT_MARKER_SIZE;
-               }
+       git_check_attr(&the_index, path, check);
+       ll_driver_name = check->items[0].value;
+       if (check->items[1].value) {
+               marker_size = atoi(check->items[1].value);
+               if (marker_size <= 0)
+                       marker_size = DEFAULT_CONFLICT_MARKER_SIZE;
        }
        driver = find_ll_merge_driver(ll_driver_name);
 
@@ -398,7 +397,8 @@ int ll_merge_marker_size(const char *path)
 
        if (!check)
                check = attr_check_initl("conflict-marker-size", NULL);
-       if (!git_check_attr(&the_index, path, check) && check->items[0].value) {
+       git_check_attr(&the_index, path, check);
+       if (check->items[0].value) {
                marker_size = atoi(check->items[0].value);
                if (marker_size <= 0)
                        marker_size = DEFAULT_CONFLICT_MARKER_SIZE;
diff --git a/lockfile.h b/lockfile.h
index f401c979f0ed907b40bfb509ab8f06ad2632c19e..35403ccc0d586c4732411a5813cb97761dddc435 100644 (file)
--- a/lockfile.h
+++ b/lockfile.h
@@ -263,8 +263,8 @@ static inline int close_lock_file_gently(struct lock_file *lk)
  *   nobody else) to inspect the contents you wrote, while still
  *   holding the lock yourself.
  *
- * * `reopen_lock_file()` to reopen the lockfile. Make further updates
- *   to the contents.
+ * * `reopen_lock_file()` to reopen the lockfile, truncating the existing
+ *   contents. Write out the new contents.
  *
  * * `commit_lock_file()` to make the final version permanent.
  */
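The reworded comment documents that reopen_lock_file() now truncates whatever was written before it, so the caller must rewrite the complete contents rather than append. A hedged in-tree sketch of the sequence the comment describes, with error handling omitted and illustrative payloads:

#include "cache.h"
#include "lockfile.h"

/*
 * Sketch only: follows the sequence documented in the comment above.
 * Real callers must check every return value.
 */
static void rewrite_under_lock(const char *path)
{
        struct lock_file lk = LOCK_INIT;
        int fd = hold_lock_file_for_update(&lk, path, LOCK_DIE_ON_ERROR);

        write_in_full(fd, "draft\n", 6);
        close_lock_file_gently(&lk);    /* "<path>.lock" can be inspected now */

        /* reopen_lock_file() truncates, so write the complete new contents */
        fd = reopen_lock_file(&lk);
        write_in_full(fd, "final\n", 6);

        commit_lock_file(&lk);          /* atomically rename into place */
}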
diff --git a/log-tree.c b/log-tree.c
index 7443e5fcc74b7964c478327c6557d53eb4f6b425..7a83e99250c5245bbbbd43908f3c116567f39b62 100644 (file)
--- a/log-tree.c
+++ b/log-tree.c
@@ -15,6 +15,8 @@
 #include "sequencer.h"
 #include "line-log.h"
 #include "help.h"
+#include "interdiff.h"
+#include "range-diff.h"
 
 static struct decoration name_decoration = { "object names" };
 static int decoration_loaded;
@@ -472,7 +474,7 @@ static int which_parent(const struct object_id *oid, const struct commit *commit
        const struct commit_list *parent;
 
        for (nth = 0, parent = commit->parents; parent; parent = parent->next) {
-               if (!oidcmp(&parent->item->object.oid, oid))
+               if (oideq(&parent->item->object.oid, oid))
                        return nth;
                nth++;
        }
@@ -506,8 +508,8 @@ static int show_one_mergetag(struct commit *commit,
        if (parse_tag_buffer(the_repository, tag, extra->value, extra->len))
                strbuf_addstr(&verify_message, "malformed mergetag\n");
        else if (is_common_merge(commit) &&
-                !oidcmp(&tag->tagged->oid,
-                         &commit->parents->next->item->object.oid))
+                oideq(&tag->tagged->oid,
+                      &commit->parents->next->item->object.oid))
                strbuf_addf(&verify_message,
                            "merged tag '%s'\n", tag->tag);
        else if ((nth = which_parent(&tag->tagged->oid, commit)) < 0)
@@ -542,6 +544,16 @@ static int show_mergetag(struct rev_info *opt, struct commit *commit)
        return for_each_mergetag(show_one_mergetag, commit, opt);
 }
 
+static void next_commentary_block(struct rev_info *opt, struct strbuf *sb)
+{
+       const char *x = opt->shown_dashes ? "\n" : "---\n";
+       if (sb)
+               strbuf_addstr(sb, x);
+       else
+               fputs(x, opt->diffopt.file);
+       opt->shown_dashes = 1;
+}
+
 void show_log(struct rev_info *opt)
 {
        struct strbuf msgbuf = STRBUF_INIT;
@@ -699,10 +711,8 @@ void show_log(struct rev_info *opt)
 
        if ((ctx.fmt != CMIT_FMT_USERFORMAT) &&
            ctx.notes_message && *ctx.notes_message) {
-               if (cmit_fmt_is_mail(ctx.fmt)) {
-                       strbuf_addstr(&msgbuf, "---\n");
-                       opt->shown_dashes = 1;
-               }
+               if (cmit_fmt_is_mail(ctx.fmt))
+                       next_commentary_block(opt, &msgbuf);
                strbuf_addstr(&msgbuf, ctx.notes_message);
        }
 
@@ -729,6 +739,33 @@ void show_log(struct rev_info *opt)
 
        strbuf_release(&msgbuf);
        free(ctx.notes_message);
+
+       if (cmit_fmt_is_mail(ctx.fmt) && opt->idiff_oid1) {
+               struct diff_queue_struct dq;
+
+               memcpy(&dq, &diff_queued_diff, sizeof(diff_queued_diff));
+               DIFF_QUEUE_CLEAR(&diff_queued_diff);
+
+               next_commentary_block(opt, NULL);
+               fprintf_ln(opt->diffopt.file, "%s", opt->idiff_title);
+               show_interdiff(opt, 2);
+
+               memcpy(&diff_queued_diff, &dq, sizeof(diff_queued_diff));
+       }
+
+       if (cmit_fmt_is_mail(ctx.fmt) && opt->rdiff1) {
+               struct diff_queue_struct dq;
+
+               memcpy(&dq, &diff_queued_diff, sizeof(diff_queued_diff));
+               DIFF_QUEUE_CLEAR(&diff_queued_diff);
+
+               next_commentary_block(opt, NULL);
+               fprintf_ln(opt->diffopt.file, "%s", opt->rdiff_title);
+               show_range_diff(opt->rdiff1, opt->rdiff2,
+                               opt->creation_factor, 1, &opt->diffopt);
+
+               memcpy(&diff_queued_diff, &dq, sizeof(diff_queued_diff));
+       }
 }
 
 int log_tree_diff_flush(struct rev_info *opt)
@@ -766,9 +803,10 @@ int log_tree_diff_flush(struct rev_info *opt)
 
                        /*
                         * We may have shown three-dashes line early
-                        * between notes and the log message, in which
-                        * case we only want a blank line after the
-                        * notes without (an extra) three-dashes line.
+                        * between generated commentary (notes, etc.)
+                        * and the log message, in which case we only
+                        * want a blank line after the commentary
+                        * without (an extra) three-dashes line.
                         * Otherwise, we show the three-dashes line if
                         * we are showing the patch with diffstat, but
                         * in that case, there is no extra blank line
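next_commentary_block() centralizes the rule that the first commentary section emitted after the log message (notes, interdiff, or range-diff) is introduced by a "---" line and every later one only by a separating blank line, tracked through opt->shown_dashes. The same pattern reduced to a standalone example:

#include <stdio.h>

static int shown_dashes;

/* First commentary block gets "---", later ones only a blank separator. */
static void next_commentary_block(FILE *out)
{
        fputs(shown_dashes ? "\n" : "---\n", out);
        shown_dashes = 1;
}

int main(void)
{
        puts("Subject: [PATCH] example");
        puts("commit message body");
        next_commentary_block(stdout);
        puts("Notes:");
        puts("    a note attached to the commit");
        next_commentary_block(stdout);
        puts("Interdiff:");
        puts("  ...interdiff output...");
        return 0;
}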
diff --git a/mailinfo.c b/mailinfo.c
index 3281a37d51830a60e864c78dff16dbf4eba9d800..b395adbdf2a405567bc40c254125265f30b12bbd 100644 (file)
--- a/mailinfo.c
+++ b/mailinfo.c
@@ -237,11 +237,22 @@ static int slurp_attr(const char *line, const char *name, struct strbuf *attr)
        return 1;
 }
 
+static int has_attr_value(const char *line, const char *name, const char *value)
+{
+       struct strbuf sb = STRBUF_INIT;
+       int rc = slurp_attr(line, name, &sb) && !strcasecmp(sb.buf, value);
+       strbuf_release(&sb);
+       return rc;
+}
+
 static void handle_content_type(struct mailinfo *mi, struct strbuf *line)
 {
        struct strbuf *boundary = xmalloc(sizeof(struct strbuf));
        strbuf_init(boundary, line->len);
 
+       mi->format_flowed = has_attr_value(line->buf, "format=", "flowed");
+       mi->delsp = has_attr_value(line->buf, "delsp=", "yes");
+
        if (slurp_attr(line->buf, "boundary=", boundary)) {
                strbuf_insert(boundary, 0, "--", 2);
                if (++mi->content_top >= &mi->content[MAX_BOUNDARIES]) {
@@ -964,6 +975,52 @@ static int handle_boundary(struct mailinfo *mi, struct strbuf *line)
        return 1;
 }
 
+static void handle_filter_flowed(struct mailinfo *mi, struct strbuf *line,
+                                struct strbuf *prev)
+{
+       size_t len = line->len;
+       const char *rest;
+
+       if (!mi->format_flowed) {
+               handle_filter(mi, line);
+               return;
+       }
+
+       if (line->buf[len - 1] == '\n') {
+               len--;
+               if (len && line->buf[len - 1] == '\r')
+                       len--;
+       }
+
+       /* Keep signature separator as-is. */
+       if (skip_prefix(line->buf, "-- ", &rest) && rest - line->buf == len) {
+               if (prev->len) {
+                       handle_filter(mi, prev);
+                       strbuf_reset(prev);
+               }
+               handle_filter(mi, line);
+               return;
+       }
+
+       /* Unstuff space-stuffed line. */
+       if (len && line->buf[0] == ' ') {
+               strbuf_remove(line, 0, 1);
+               len--;
+       }
+
+       /* Save flowed line for later, but without the soft line break. */
+       if (len && line->buf[len - 1] == ' ') {
+               strbuf_add(prev, line->buf, len - !!mi->delsp);
+               return;
+       }
+
+       /* Prepend any previous partial lines */
+       strbuf_insert(line, 0, prev->buf, prev->len);
+       strbuf_reset(prev);
+
+       handle_filter(mi, line);
+}
+
 static void handle_body(struct mailinfo *mi, struct strbuf *line)
 {
        struct strbuf prev = STRBUF_INIT;
@@ -1012,7 +1069,7 @@ static void handle_body(struct mailinfo *mi, struct strbuf *line)
                                                strbuf_addbuf(&prev, sb);
                                                break;
                                        }
-                               handle_filter(mi, sb);
+                               handle_filter_flowed(mi, sb, &prev);
                        }
                        /*
                         * The partial chunk is saved in "prev" and will be
@@ -1022,13 +1079,16 @@ static void handle_body(struct mailinfo *mi, struct strbuf *line)
                        break;
                }
                default:
-                       handle_filter(mi, line);
+                       handle_filter_flowed(mi, line, &prev);
                }
 
                if (mi->input_error)
                        break;
        } while (!strbuf_getwholeline(line, mi->input, '\n'));
 
+       if (prev.len)
+               handle_filter(mi, &prev);
+
        flush_inbody_header_accum(mi);
 
 handle_body_out:
diff --git a/mailinfo.h b/mailinfo.h
index 766c03dd1d29d00beb75b7553fc6717e24978384..6830e1e6259bb050cf2af9c2e203c8aa52037da1 100644 (file)
--- a/mailinfo.h
+++ b/mailinfo.h
@@ -22,6 +22,8 @@ struct mailinfo {
        struct strbuf *content[MAX_BOUNDARIES];
        struct strbuf **content_top;
        struct strbuf charset;
+       unsigned int format_flowed:1;
+       unsigned int delsp:1;
        char *message_id;
        enum  {
                TE_DONTCARE, TE_QP, TE_BASE64
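handle_filter_flowed() implements the receiving side of RFC 3676 format=flowed: space-stuffed lines are unstuffed, a trailing space marks a soft line break that joins the line with the next one (with that space dropped first when delsp=yes), and the "-- " signature separator passes through untouched. A small standalone sketch of the same joining rules, outside of mailinfo's strbuf machinery:

#include <stdio.h>
#include <string.h>

/* Standalone sketch of RFC 3676 flowed-text reassembly on plain C strings. */
static void unflow(const char *lines[], int n, int delsp)
{
        char out[1024] = "";

        for (int i = 0; i < n; i++) {
                const char *p = lines[i];
                size_t len;

                if (!strcmp(p, "-- ")) {        /* signature separator: keep as-is */
                        if (*out) { puts(out); out[0] = '\0'; }
                        puts(p);
                        continue;
                }
                if (*p == ' ')                  /* unstuff space-stuffed line */
                        p++;
                len = strlen(p);
                if (len && p[len - 1] == ' ') { /* soft break: join with next line */
                        strncat(out, p, len - (delsp ? 1 : 0));
                        continue;
                }
                strcat(out, p);                 /* hard break: flush the paragraph */
                puts(out);
                out[0] = '\0';
        }
        if (*out)
                puts(out);
}

int main(void)
{
        const char *msg[] = { "This line is ", "flowed onto the next.", " From here", "-- " };
        unflow(msg, 4, 0);
        return 0;
}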
diff --git a/match-trees.c b/match-trees.c
index 37653308d3601e56191d2f83f967cc77ad1517af..2b6d31ef9d35d9422e7ef90792651dcb7a2f9189 100644 (file)
--- a/match-trees.c
+++ b/match-trees.c
@@ -106,7 +106,7 @@ static int score_trees(const struct object_id *hash1, const struct object_id *ha
                        update_tree_entry(&two);
                } else {
                        /* path appears in both */
-                       if (oidcmp(one.entry.oid, two.entry.oid)) {
+                       if (!oideq(one.entry.oid, two.entry.oid)) {
                                /* they are different */
                                score += score_differs(one.entry.mode,
                                                       two.entry.mode,
diff --git a/merge-recursive.c b/merge-recursive.c
index e5243dbc542d786a645a9c038c855a6ca30acaba..0684dab77965b6abd0f8429bc53cd936a826482b 100644 (file)
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -27,6 +27,7 @@
 #include "dir.h"
 #include "submodule.h"
 #include "revision.h"
+#include "commit-reach.h"
 
 struct path_hashmap_entry {
        struct hashmap_entry e;
@@ -156,7 +157,7 @@ static struct tree *shift_tree_object(struct tree *one, struct tree *two,
                shift_tree_by(&one->object.oid, &two->object.oid, &shifted,
                              subtree_shift);
        }
-       if (!oidcmp(&two->object.oid, &shifted))
+       if (oideq(&two->object.oid, &shifted))
                return two;
        return lookup_tree(the_repository, &shifted);
 }
@@ -179,7 +180,7 @@ static int oid_eq(const struct object_id *a, const struct object_id *b)
 {
        if (!a && !b)
                return 2;
-       return a && b && oidcmp(a, b) == 0;
+       return a && b && oideq(a, b);
 }
 
 enum rename_type {
@@ -1273,14 +1274,14 @@ static int merge_submodule(struct merge_options *o,
        return 0;
 }
 
-static int merge_file_1(struct merge_options *o,
-                       const struct diff_filespec *one,
-                       const struct diff_filespec *a,
-                       const struct diff_filespec *b,
-                       const char *filename,
-                       const char *branch1,
-                       const char *branch2,
-                       struct merge_file_info *result)
+static int merge_mode_and_contents(struct merge_options *o,
+                                  const struct diff_filespec *one,
+                                  const struct diff_filespec *a,
+                                  const struct diff_filespec *b,
+                                  const char *filename,
+                                  const char *branch1,
+                                  const char *branch2,
+                                  struct merge_file_info *result)
 {
        result->merge = 0;
        result->clean = 1;
@@ -1365,56 +1366,6 @@ static int merge_file_1(struct merge_options *o,
        return 0;
 }
 
-static int merge_file_special_markers(struct merge_options *o,
-                                     const struct diff_filespec *one,
-                                     const struct diff_filespec *a,
-                                     const struct diff_filespec *b,
-                                     const char *target_filename,
-                                     const char *branch1,
-                                     const char *filename1,
-                                     const char *branch2,
-                                     const char *filename2,
-                                     struct merge_file_info *mfi)
-{
-       char *side1 = NULL;
-       char *side2 = NULL;
-       int ret;
-
-       if (filename1)
-               side1 = xstrfmt("%s:%s", branch1, filename1);
-       if (filename2)
-               side2 = xstrfmt("%s:%s", branch2, filename2);
-
-       ret = merge_file_1(o, one, a, b, target_filename,
-                          side1 ? side1 : branch1,
-                          side2 ? side2 : branch2, mfi);
-
-       free(side1);
-       free(side2);
-       return ret;
-}
-
-static int merge_file_one(struct merge_options *o,
-                         const char *path,
-                         const struct object_id *o_oid, int o_mode,
-                         const struct object_id *a_oid, int a_mode,
-                         const struct object_id *b_oid, int b_mode,
-                         const char *branch1,
-                         const char *branch2,
-                         struct merge_file_info *mfi)
-{
-       struct diff_filespec one, a, b;
-
-       one.path = a.path = b.path = (char *)path;
-       oidcpy(&one.oid, o_oid);
-       one.mode = o_mode;
-       oidcpy(&a.oid, a_oid);
-       a.mode = a_mode;
-       oidcpy(&b.oid, b_oid);
-       b.mode = b_mode;
-       return merge_file_1(o, &one, &a, &b, path, branch1, branch2, mfi);
-}
-
 static int handle_rename_via_dir(struct merge_options *o,
                                 struct diff_filepair *pair,
                                 const char *rename_branch,
@@ -1658,11 +1609,8 @@ static int handle_rename_rename_1to2(struct merge_options *o,
                struct merge_file_info mfi;
                struct diff_filespec other;
                struct diff_filespec *add;
-               if (merge_file_one(o, one->path,
-                                &one->oid, one->mode,
-                                &a->oid, a->mode,
-                                &b->oid, b->mode,
-                                ci->branch1, ci->branch2, &mfi))
+               if (merge_mode_and_contents(o, one, a, b, one->path,
+                                           ci->branch1, ci->branch2, &mfi))
                        return -1;
 
                /*
@@ -1728,14 +1676,10 @@ static int handle_rename_rename_2to1(struct merge_options *o,
 
        path_side_1_desc = xstrfmt("%s (was %s)", path, a->path);
        path_side_2_desc = xstrfmt("%s (was %s)", path, b->path);
-       if (merge_file_special_markers(o, a, c1, &ci->ren1_other,
-                                      path_side_1_desc,
-                                      o->branch1, c1->path,
-                                      o->branch2, ci->ren1_other.path, &mfi_c1) ||
-           merge_file_special_markers(o, b, &ci->ren2_other, c2,
-                                      path_side_2_desc,
-                                      o->branch1, ci->ren2_other.path,
-                                      o->branch2, c2->path, &mfi_c2))
+       if (merge_mode_and_contents(o, a, c1, &ci->ren1_other, path_side_1_desc,
+                                   o->branch1, o->branch2, &mfi_c1) ||
+           merge_mode_and_contents(o, b, &ci->ren2_other, c2, path_side_2_desc,
+                                   o->branch1, o->branch2, &mfi_c2))
                return -1;
        free(path_side_1_desc);
        free(path_side_2_desc);
@@ -2239,7 +2183,7 @@ static struct dir_rename_entry *check_dir_renamed(const char *path,
 {
        char *temp = xstrdup(path);
        char *end;
-       struct dir_rename_entry *entry = NULL;;
+       struct dir_rename_entry *entry = NULL;
 
        while ((end = strrchr(temp, '/'))) {
                *end = '\0';
@@ -2765,12 +2709,23 @@ static int process_renames(struct merge_options *o,
                                       ren1_dst, branch2);
                                if (o->call_depth) {
                                        struct merge_file_info mfi;
-                                       if (merge_file_one(o, ren1_dst, &null_oid, 0,
-                                                          &ren1->pair->two->oid,
-                                                          ren1->pair->two->mode,
-                                                          &dst_other.oid,
-                                                          dst_other.mode,
-                                                          branch1, branch2, &mfi)) {
+                                       struct diff_filespec one, a, b;
+
+                                       oidcpy(&one.oid, &null_oid);
+                                       one.mode = 0;
+                                       one.path = ren1->pair->two->path;
+
+                                       oidcpy(&a.oid, &ren1->pair->two->oid);
+                                       a.mode = ren1->pair->two->mode;
+                                       a.path = one.path;
+
+                                       oidcpy(&b.oid, &dst_other.oid);
+                                       b.mode = dst_other.mode;
+                                       b.path = one.path;
+
+                                       if (merge_mode_and_contents(o, &one, &a, &b, ren1_dst,
+                                                                   branch1, branch2,
+                                                                   &mfi)) {
                                                clean_merge = -1;
                                                goto cleanup_and_return;
                                        }
@@ -3020,13 +2975,13 @@ static int handle_modify_delete(struct merge_options *o,
                                    _("modify"), _("modified"));
 }
 
-static int merge_content(struct merge_options *o,
-                        const char *path,
-                        int is_dirty,
-                        struct object_id *o_oid, int o_mode,
-                        struct object_id *a_oid, int a_mode,
-                        struct object_id *b_oid, int b_mode,
-                        struct rename_conflict_info *rename_conflict_info)
+static int handle_content_merge(struct merge_options *o,
+                               const char *path,
+                               int is_dirty,
+                               struct object_id *o_oid, int o_mode,
+                               struct object_id *a_oid, int a_mode,
+                               struct object_id *b_oid, int b_mode,
+                               struct rename_conflict_info *rename_conflict_info)
 {
        const char *reason = _("content");
        const char *path1 = NULL, *path2 = NULL;
@@ -3058,14 +3013,16 @@ static int merge_content(struct merge_options *o,
                path2 = (rename_conflict_info->pair2 ||
                         o->branch2 == rename_conflict_info->branch1) ?
                        pair1->two->path : pair1->one->path;
+               one.path = pair1->one->path;
+               a.path = (char *)path1;
+               b.path = (char *)path2;
 
                if (dir_in_way(path, !o->call_depth,
                               S_ISGITLINK(pair1->two->mode)))
                        df_conflict_remains = 1;
        }
-       if (merge_file_special_markers(o, &one, &a, &b, path,
-                                      o->branch1, path1,
-                                      o->branch2, path2, &mfi))
+       if (merge_mode_and_contents(o, &one, &a, &b, path,
+                                   o->branch1, o->branch2, &mfi))
                return -1;
 
        /*
@@ -3156,9 +3113,9 @@ static int handle_rename_normal(struct merge_options *o,
                                struct rename_conflict_info *ci)
 {
        /* Merge the content and write it out */
-       return merge_content(o, path, was_dirty(o, path),
-                            o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
-                            ci);
+       return handle_content_merge(o, path, was_dirty(o, path),
+                                   o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
+                                   ci);
 }
 
 /* Per entry merge function */
@@ -3282,9 +3239,11 @@ static int process_entry(struct merge_options *o,
                /* Case C: Added in both (check for same permissions) and */
                /* case D: Modified in both, but differently. */
                int is_dirty = 0; /* unpack_trees would have bailed if dirty */
-               clean_merge = merge_content(o, path, is_dirty,
-                                           o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
-                                           NULL);
+               clean_merge = handle_content_merge(o, path, is_dirty,
+                                                  o_oid, o_mode,
+                                                  a_oid, a_mode,
+                                                  b_oid, b_mode,
+                                                  NULL);
        } else if (!o_oid && !a_oid && !b_oid) {
                /*
                 * this entry was deleted altogether. a_mode == 0 means
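The new midx.c below implements the multi-pack-index: one file under objects/pack that maps object IDs to the pack and offset containing them, so a lookup consults a single fanout/OID table instead of one .idx per pack. As a rough orientation only, here is a hedged in-tree sketch of a reader consulting it, using the functions defined below (load_multi_pack_index() and fill_midx_entry(), the latter doing the fanout-narrowed binary search via bsearch_midx()); the calling function itself is made up:

#include "cache.h"
#include "object-store.h"
#include "midx.h"

/* Sketch only: locate one object through a multi-pack-index. */
static int locate_via_midx(const char *object_dir, const struct object_id *oid)
{
        struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
        struct pack_entry e;

        if (!m)
                return 0;       /* no multi-pack-index in this object directory */

        /*
         * fill_midx_entry() binary-searches the OIDL chunk (narrowed by the
         * OIDF fanout) and resolves the pack-int-id and offset from the
         * OOFF/LOFF chunks.
         */
        if (!fill_midx_entry(oid, &e, m))
                return 0;       /* object not covered by this MIDX */

        fprintf(stderr, "%s found in %s at offset %"PRIuMAX"\n",
                oid_to_hex(oid), e.p->pack_name, (uintmax_t)e.offset);
        return 1;
}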
diff --git a/midx.c b/midx.c
new file mode 100644 (file)
index 0000000..713d6f9
--- /dev/null
+++ b/midx.c
@@ -0,0 +1,1007 @@
+#include "cache.h"
+#include "config.h"
+#include "csum-file.h"
+#include "dir.h"
+#include "lockfile.h"
+#include "packfile.h"
+#include "object-store.h"
+#include "sha1-lookup.h"
+#include "midx.h"
+#include "progress.h"
+
+#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
+#define MIDX_VERSION 1
+#define MIDX_BYTE_FILE_VERSION 4
+#define MIDX_BYTE_HASH_VERSION 5
+#define MIDX_BYTE_NUM_CHUNKS 6
+#define MIDX_BYTE_NUM_PACKS 8
+#define MIDX_HASH_VERSION 1
+#define MIDX_HEADER_SIZE 12
+#define MIDX_HASH_LEN 20
+#define MIDX_MIN_SIZE (MIDX_HEADER_SIZE + MIDX_HASH_LEN)
+
+#define MIDX_MAX_CHUNKS 5
+#define MIDX_CHUNK_ALIGNMENT 4
+#define MIDX_CHUNKID_PACKNAMES 0x504e414d /* "PNAM" */
+#define MIDX_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
+#define MIDX_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
+#define MIDX_CHUNKID_OBJECTOFFSETS 0x4f4f4646 /* "OOFF" */
+#define MIDX_CHUNKID_LARGEOFFSETS 0x4c4f4646 /* "LOFF" */
+#define MIDX_CHUNKLOOKUP_WIDTH (sizeof(uint32_t) + sizeof(uint64_t))
+#define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256)
+#define MIDX_CHUNK_OFFSET_WIDTH (2 * sizeof(uint32_t))
+#define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))
+#define MIDX_LARGE_OFFSET_NEEDED 0x80000000
+
+static char *get_midx_filename(const char *object_dir)
+{
+       return xstrfmt("%s/pack/multi-pack-index", object_dir);
+}
+
+struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local)
+{
+       struct multi_pack_index *m = NULL;
+       int fd;
+       struct stat st;
+       size_t midx_size;
+       void *midx_map = NULL;
+       uint32_t hash_version;
+       char *midx_name = get_midx_filename(object_dir);
+       uint32_t i;
+       const char *cur_pack_name;
+
+       fd = git_open(midx_name);
+
+       if (fd < 0)
+               goto cleanup_fail;
+       if (fstat(fd, &st)) {
+               error_errno(_("failed to read %s"), midx_name);
+               goto cleanup_fail;
+       }
+
+       midx_size = xsize_t(st.st_size);
+
+       if (midx_size < MIDX_MIN_SIZE) {
+               error(_("multi-pack-index file %s is too small"), midx_name);
+               goto cleanup_fail;
+       }
+
+       FREE_AND_NULL(midx_name);
+
+       midx_map = xmmap(NULL, midx_size, PROT_READ, MAP_PRIVATE, fd, 0);
+
+       FLEX_ALLOC_MEM(m, object_dir, object_dir, strlen(object_dir));
+       m->fd = fd;
+       m->data = midx_map;
+       m->data_len = midx_size;
+       m->local = local;
+
+       m->signature = get_be32(m->data);
+       if (m->signature != MIDX_SIGNATURE)
+               die(_("multi-pack-index signature 0x%08x does not match signature 0x%08x"),
+                     m->signature, MIDX_SIGNATURE);
+
+       m->version = m->data[MIDX_BYTE_FILE_VERSION];
+       if (m->version != MIDX_VERSION)
+               die(_("multi-pack-index version %d not recognized"),
+                     m->version);
+
+       hash_version = m->data[MIDX_BYTE_HASH_VERSION];
+       if (hash_version != MIDX_HASH_VERSION)
+               die(_("hash version %u does not match"), hash_version);
+       m->hash_len = MIDX_HASH_LEN;
+
+       m->num_chunks = m->data[MIDX_BYTE_NUM_CHUNKS];
+
+       m->num_packs = get_be32(m->data + MIDX_BYTE_NUM_PACKS);
+
+       for (i = 0; i < m->num_chunks; i++) {
+               uint32_t chunk_id = get_be32(m->data + MIDX_HEADER_SIZE +
+                                            MIDX_CHUNKLOOKUP_WIDTH * i);
+               uint64_t chunk_offset = get_be64(m->data + MIDX_HEADER_SIZE + 4 +
+                                                MIDX_CHUNKLOOKUP_WIDTH * i);
+
+               if (chunk_offset >= m->data_len)
+                       die(_("invalid chunk offset (too large)"));
+
+               switch (chunk_id) {
+                       case MIDX_CHUNKID_PACKNAMES:
+                               m->chunk_pack_names = m->data + chunk_offset;
+                               break;
+
+                       case MIDX_CHUNKID_OIDFANOUT:
+                               m->chunk_oid_fanout = (uint32_t *)(m->data + chunk_offset);
+                               break;
+
+                       case MIDX_CHUNKID_OIDLOOKUP:
+                               m->chunk_oid_lookup = m->data + chunk_offset;
+                               break;
+
+                       case MIDX_CHUNKID_OBJECTOFFSETS:
+                               m->chunk_object_offsets = m->data + chunk_offset;
+                               break;
+
+                       case MIDX_CHUNKID_LARGEOFFSETS:
+                               m->chunk_large_offsets = m->data + chunk_offset;
+                               break;
+
+                       case 0:
+                               die(_("terminating multi-pack-index chunk id appears earlier than expected"));
+                               break;
+
+                       default:
+                               /*
+                                * Do nothing on unrecognized chunks, allowing future
+                                * extensions to add optional chunks.
+                                */
+                               break;
+               }
+       }
+
+       if (!m->chunk_pack_names)
+               die(_("multi-pack-index missing required pack-name chunk"));
+       if (!m->chunk_oid_fanout)
+               die(_("multi-pack-index missing required OID fanout chunk"));
+       if (!m->chunk_oid_lookup)
+               die(_("multi-pack-index missing required OID lookup chunk"));
+       if (!m->chunk_object_offsets)
+               die(_("multi-pack-index missing required object offsets chunk"));
+
+       m->num_objects = ntohl(m->chunk_oid_fanout[255]);
+
+       m->pack_names = xcalloc(m->num_packs, sizeof(*m->pack_names));
+       m->packs = xcalloc(m->num_packs, sizeof(*m->packs));
+
+       cur_pack_name = (const char *)m->chunk_pack_names;
+       for (i = 0; i < m->num_packs; i++) {
+               m->pack_names[i] = cur_pack_name;
+
+               cur_pack_name += strlen(cur_pack_name) + 1;
+
+               if (i && strcmp(m->pack_names[i], m->pack_names[i - 1]) <= 0)
+                       die(_("multi-pack-index pack names out of order: '%s' before '%s'"),
+                             m->pack_names[i - 1],
+                             m->pack_names[i]);
+       }
+
+       return m;
+
+cleanup_fail:
+       free(m);
+       free(midx_name);
+       if (midx_map)
+               munmap(midx_map, midx_size);
+       if (0 <= fd)
+               close(fd);
+       return NULL;
+}
+
+static void close_midx(struct multi_pack_index *m)
+{
+       uint32_t i;
+       munmap((unsigned char *)m->data, m->data_len);
+       close(m->fd);
+       m->fd = -1;
+
+       for (i = 0; i < m->num_packs; i++) {
+               if (m->packs[i]) {
+                       close_pack(m->packs[i]);
+                       free(m->packs[i]);
+               }
+       }
+       FREE_AND_NULL(m->packs);
+       FREE_AND_NULL(m->pack_names);
+}
+
+int prepare_midx_pack(struct multi_pack_index *m, uint32_t pack_int_id)
+{
+       struct strbuf pack_name = STRBUF_INIT;
+
+       if (pack_int_id >= m->num_packs)
+               die(_("bad pack-int-id: %u (%u total packs)"),
+                   pack_int_id, m->num_packs);
+
+       if (m->packs[pack_int_id])
+               return 0;
+
+       strbuf_addf(&pack_name, "%s/pack/%s", m->object_dir,
+                   m->pack_names[pack_int_id]);
+
+       m->packs[pack_int_id] = add_packed_git(pack_name.buf, pack_name.len, m->local);
+       strbuf_release(&pack_name);
+       return !m->packs[pack_int_id];
+}
+
+int bsearch_midx(const struct object_id *oid, struct multi_pack_index *m, uint32_t *result)
+{
+       return bsearch_hash(oid->hash, m->chunk_oid_fanout, m->chunk_oid_lookup,
+                           MIDX_HASH_LEN, result);
+}
+
+struct object_id *nth_midxed_object_oid(struct object_id *oid,
+                                       struct multi_pack_index *m,
+                                       uint32_t n)
+{
+       if (n >= m->num_objects)
+               return NULL;
+
+       hashcpy(oid->hash, m->chunk_oid_lookup + m->hash_len * n);
+       return oid;
+}
+
+static off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos)
+{
+       const unsigned char *offset_data;
+       uint32_t offset32;
+
+       offset_data = m->chunk_object_offsets + pos * MIDX_CHUNK_OFFSET_WIDTH;
+       offset32 = get_be32(offset_data + sizeof(uint32_t));
+
+       if (m->chunk_large_offsets && offset32 & MIDX_LARGE_OFFSET_NEEDED) {
+               if (sizeof(off_t) < sizeof(uint64_t))
+                       die(_("multi-pack-index stores a 64-bit offset, but off_t is too small"));
+
+               offset32 ^= MIDX_LARGE_OFFSET_NEEDED;
+               return get_be64(m->chunk_large_offsets + sizeof(uint64_t) * offset32);
+       }
+
+       return offset32;
+}
+
+static uint32_t nth_midxed_pack_int_id(struct multi_pack_index *m, uint32_t pos)
+{
+       return get_be32(m->chunk_object_offsets + pos * MIDX_CHUNK_OFFSET_WIDTH);
+}
+
+static int nth_midxed_pack_entry(struct multi_pack_index *m, struct pack_entry *e, uint32_t pos)
+{
+       uint32_t pack_int_id;
+       struct packed_git *p;
+
+       if (pos >= m->num_objects)
+               return 0;
+
+       pack_int_id = nth_midxed_pack_int_id(m, pos);
+
+       if (prepare_midx_pack(m, pack_int_id))
+               die(_("error preparing packfile from multi-pack-index"));
+       p = m->packs[pack_int_id];
+
+       /*
+        * We are about to tell the caller where they can locate the
+        * requested object.  We better make sure the packfile is
+        * still here and can be accessed before supplying that
+        * answer, as it may have been deleted since the MIDX was
+        * loaded!
+        */
+       if (!is_pack_valid(p))
+               return 0;
+
+       if (p->num_bad_objects) {
+               uint32_t i;
+               struct object_id oid;
+               nth_midxed_object_oid(&oid, m, pos);
+               for (i = 0; i < p->num_bad_objects; i++)
+                       if (!hashcmp(oid.hash,
+                                    p->bad_object_sha1 + the_hash_algo->rawsz * i))
+                               return 0;
+       }
+
+       e->offset = nth_midxed_offset(m, pos);
+       e->p = p;
+
+       return 1;
+}
+
+int fill_midx_entry(const struct object_id *oid, struct pack_entry *e, struct multi_pack_index *m)
+{
+       uint32_t pos;
+
+       if (!bsearch_midx(oid, m, &pos))
+               return 0;
+
+       return nth_midxed_pack_entry(m, e, pos);
+}
+
+int midx_contains_pack(struct multi_pack_index *m, const char *idx_name)
+{
+       uint32_t first = 0, last = m->num_packs;
+
+       while (first < last) {
+               uint32_t mid = first + (last - first) / 2;
+               const char *current;
+               int cmp;
+
+               current = m->pack_names[mid];
+               cmp = strcmp(idx_name, current);
+               if (!cmp)
+                       return 1;
+               if (cmp > 0) {
+                       first = mid + 1;
+                       continue;
+               }
+               last = mid;
+       }
+
+       return 0;
+}
+
+int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, int local)
+{
+       struct multi_pack_index *m;
+       struct multi_pack_index *m_search;
+       int config_value;
+
+       if (repo_config_get_bool(r, "core.multipackindex", &config_value) ||
+           !config_value)
+               return 0;
+
+       for (m_search = r->objects->multi_pack_index; m_search; m_search = m_search->next)
+               if (!strcmp(object_dir, m_search->object_dir))
+                       return 1;
+
+       m = load_multi_pack_index(object_dir, local);
+
+       if (m) {
+               m->next = r->objects->multi_pack_index;
+               r->objects->multi_pack_index = m;
+               return 1;
+       }
+
+       return 0;
+}
+
+static size_t write_midx_header(struct hashfile *f,
+                               unsigned char num_chunks,
+                               uint32_t num_packs)
+{
+       unsigned char byte_values[4];
+
+       hashwrite_be32(f, MIDX_SIGNATURE);
+       byte_values[0] = MIDX_VERSION;
+       byte_values[1] = MIDX_HASH_VERSION;
+       byte_values[2] = num_chunks;
+       byte_values[3] = 0; /* unused */
+       hashwrite(f, byte_values, sizeof(byte_values));
+       hashwrite_be32(f, num_packs);
+
+       return MIDX_HEADER_SIZE;
+}
+
+struct pack_list {
+       struct packed_git **list;
+       char **names;
+       uint32_t nr;
+       uint32_t alloc_list;
+       uint32_t alloc_names;
+       size_t pack_name_concat_len;
+       struct multi_pack_index *m;
+};
+
+static void add_pack_to_midx(const char *full_path, size_t full_path_len,
+                            const char *file_name, void *data)
+{
+       struct pack_list *packs = (struct pack_list *)data;
+
+       if (ends_with(file_name, ".idx")) {
+               if (packs->m && midx_contains_pack(packs->m, file_name))
+                       return;
+
+               ALLOC_GROW(packs->list, packs->nr + 1, packs->alloc_list);
+               ALLOC_GROW(packs->names, packs->nr + 1, packs->alloc_names);
+
+               packs->list[packs->nr] = add_packed_git(full_path,
+                                                       full_path_len,
+                                                       0);
+
+               if (!packs->list[packs->nr]) {
+                       warning(_("failed to add packfile '%s'"),
+                               full_path);
+                       return;
+               }
+
+               if (open_pack_index(packs->list[packs->nr])) {
+                       warning(_("failed to open pack-index '%s'"),
+                               full_path);
+                       close_pack(packs->list[packs->nr]);
+                       FREE_AND_NULL(packs->list[packs->nr]);
+                       return;
+               }
+
+               packs->names[packs->nr] = xstrdup(file_name);
+               packs->pack_name_concat_len += strlen(file_name) + 1;
+               packs->nr++;
+       }
+}
+
+struct pack_pair {
+       uint32_t pack_int_id;
+       char *pack_name;
+};
+
+static int pack_pair_compare(const void *_a, const void *_b)
+{
+       struct pack_pair *a = (struct pack_pair *)_a;
+       struct pack_pair *b = (struct pack_pair *)_b;
+       return strcmp(a->pack_name, b->pack_name);
+}
+
+static void sort_packs_by_name(char **pack_names, uint32_t nr_packs, uint32_t *perm)
+{
+       uint32_t i;
+       struct pack_pair *pairs;
+
+       ALLOC_ARRAY(pairs, nr_packs);
+
+       for (i = 0; i < nr_packs; i++) {
+               pairs[i].pack_int_id = i;
+               pairs[i].pack_name = pack_names[i];
+       }
+
+       QSORT(pairs, nr_packs, pack_pair_compare);
+
+       for (i = 0; i < nr_packs; i++) {
+               pack_names[i] = pairs[i].pack_name;
+               perm[pairs[i].pack_int_id] = i;
+       }
+
+       free(pairs);
+}
+
+struct pack_midx_entry {
+       struct object_id oid;
+       uint32_t pack_int_id;
+       time_t pack_mtime;
+       uint64_t offset;
+};
+
+static int midx_oid_compare(const void *_a, const void *_b)
+{
+       const struct pack_midx_entry *a = (const struct pack_midx_entry *)_a;
+       const struct pack_midx_entry *b = (const struct pack_midx_entry *)_b;
+       int cmp = oidcmp(&a->oid, &b->oid);
+
+       if (cmp)
+               return cmp;
+
+       if (a->pack_mtime > b->pack_mtime)
+               return -1;
+       else if (a->pack_mtime < b->pack_mtime)
+               return 1;
+
+       return a->pack_int_id - b->pack_int_id;
+}
+
+static int nth_midxed_pack_midx_entry(struct multi_pack_index *m,
+                                     uint32_t *pack_perm,
+                                     struct pack_midx_entry *e,
+                                     uint32_t pos)
+{
+       if (pos >= m->num_objects)
+               return 1;
+
+       nth_midxed_object_oid(&e->oid, m, pos);
+       e->pack_int_id = pack_perm[nth_midxed_pack_int_id(m, pos)];
+       e->offset = nth_midxed_offset(m, pos);
+
+       /* consider objects in midx to be from "old" packs */
+       e->pack_mtime = 0;
+       return 0;
+}
+
+static void fill_pack_entry(uint32_t pack_int_id,
+                           struct packed_git *p,
+                           uint32_t cur_object,
+                           struct pack_midx_entry *entry)
+{
+       if (!nth_packed_object_oid(&entry->oid, p, cur_object))
+               die(_("failed to locate object %d in packfile"), cur_object);
+
+       entry->pack_int_id = pack_int_id;
+       entry->pack_mtime = p->mtime;
+
+       entry->offset = nth_packed_object_offset(p, cur_object);
+}
+
+/*
+ * It is possible to artificially get into a state where there are many
+ * duplicate copies of objects. That can create high memory pressure if
+ * we are to create a list of all objects before de-duplication. To reduce
+ * this memory pressure without a significant performance drop, automatically
+ * group objects by the first byte of their object id. Use the IDX fanout
+ * tables to group the data, copy to a local array, then sort.
+ *
+ * Copy only the de-duplicated entries (selected by most-recent modified time
+ * of a packfile containing the object).
+ */
+static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
+                                                 struct packed_git **p,
+                                                 uint32_t *perm,
+                                                 uint32_t nr_packs,
+                                                 uint32_t *nr_objects)
+{
+       uint32_t cur_fanout, cur_pack, cur_object;
+       uint32_t alloc_fanout, alloc_objects, total_objects = 0;
+       struct pack_midx_entry *entries_by_fanout = NULL;
+       struct pack_midx_entry *deduplicated_entries = NULL;
+       uint32_t start_pack = m ? m->num_packs : 0;
+
+       for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++)
+               total_objects += p[cur_pack]->num_objects;
+
+       /*
+        * As we de-duplicate by fanout value, we expect the fanout
+        * slices to be evenly distributed, with some noise. Hence,
+        * allocate slightly more than one 256th.
+        */
+       alloc_objects = alloc_fanout = total_objects > 3200 ? total_objects / 200 : 16;
+
+       ALLOC_ARRAY(entries_by_fanout, alloc_fanout);
+       ALLOC_ARRAY(deduplicated_entries, alloc_objects);
+       *nr_objects = 0;
+
+       for (cur_fanout = 0; cur_fanout < 256; cur_fanout++) {
+               uint32_t nr_fanout = 0;
+
+               if (m) {
+                       uint32_t start = 0, end;
+
+                       if (cur_fanout)
+                               start = ntohl(m->chunk_oid_fanout[cur_fanout - 1]);
+                       end = ntohl(m->chunk_oid_fanout[cur_fanout]);
+
+                       for (cur_object = start; cur_object < end; cur_object++) {
+                               ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout);
+                               nth_midxed_pack_midx_entry(m, perm,
+                                                          &entries_by_fanout[nr_fanout],
+                                                          cur_object);
+                               nr_fanout++;
+                       }
+               }
+
+               for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++) {
+                       uint32_t start = 0, end;
+
+                       if (cur_fanout)
+                               start = get_pack_fanout(p[cur_pack], cur_fanout - 1);
+                       end = get_pack_fanout(p[cur_pack], cur_fanout);
+
+                       for (cur_object = start; cur_object < end; cur_object++) {
+                               ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout);
+                               fill_pack_entry(perm[cur_pack], p[cur_pack], cur_object, &entries_by_fanout[nr_fanout]);
+                               nr_fanout++;
+                       }
+               }
+
+               QSORT(entries_by_fanout, nr_fanout, midx_oid_compare);
+
+               /*
+                * The batch is now sorted by OID and then mtime (descending).
+                * Take only the first duplicate.
+                */
+               for (cur_object = 0; cur_object < nr_fanout; cur_object++) {
+                       if (cur_object && !oidcmp(&entries_by_fanout[cur_object - 1].oid,
+                                                 &entries_by_fanout[cur_object].oid))
+                               continue;
+
+                       ALLOC_GROW(deduplicated_entries, *nr_objects + 1, alloc_objects);
+                       memcpy(&deduplicated_entries[*nr_objects],
+                              &entries_by_fanout[cur_object],
+                              sizeof(struct pack_midx_entry));
+                       (*nr_objects)++;
+               }
+       }
+
+       free(entries_by_fanout);
+       return deduplicated_entries;
+}
+
+static size_t write_midx_pack_names(struct hashfile *f,
+                                   char **pack_names,
+                                   uint32_t num_packs)
+{
+       uint32_t i;
+       unsigned char padding[MIDX_CHUNK_ALIGNMENT];
+       size_t written = 0;
+
+       for (i = 0; i < num_packs; i++) {
+               size_t writelen = strlen(pack_names[i]) + 1;
+
+               if (i && strcmp(pack_names[i], pack_names[i - 1]) <= 0)
+                       BUG("incorrect pack-file order: %s before %s",
+                           pack_names[i - 1],
+                           pack_names[i]);
+
+               hashwrite(f, pack_names[i], writelen);
+               written += writelen;
+       }
+
+       /* add padding to be aligned */
+       i = MIDX_CHUNK_ALIGNMENT - (written % MIDX_CHUNK_ALIGNMENT);
+       if (i < MIDX_CHUNK_ALIGNMENT) {
+               memset(padding, 0, sizeof(padding));
+               hashwrite(f, padding, i);
+               written += i;
+       }
+
+       return written;
+}
+
+static size_t write_midx_oid_fanout(struct hashfile *f,
+                                   struct pack_midx_entry *objects,
+                                   uint32_t nr_objects)
+{
+       struct pack_midx_entry *list = objects;
+       struct pack_midx_entry *last = objects + nr_objects;
+       uint32_t count = 0;
+       uint32_t i;
+
+       /*
+        * Write the first-level table (the list is sorted,
+        * but we use a 256-entry lookup to be able to avoid
+        * having to do eight extra binary search iterations).
+        */
+       for (i = 0; i < 256; i++) {
+               struct pack_midx_entry *next = list;
+
+               while (next < last && next->oid.hash[0] == i) {
+                       count++;
+                       next++;
+               }
+
+               hashwrite_be32(f, count);
+               list = next;
+       }
+
+       return MIDX_CHUNK_FANOUT_SIZE;
+}
+
+static size_t write_midx_oid_lookup(struct hashfile *f, unsigned char hash_len,
+                                   struct pack_midx_entry *objects,
+                                   uint32_t nr_objects)
+{
+       struct pack_midx_entry *list = objects;
+       uint32_t i;
+       size_t written = 0;
+
+       for (i = 0; i < nr_objects; i++) {
+               struct pack_midx_entry *obj = list++;
+
+               if (i < nr_objects - 1) {
+                       struct pack_midx_entry *next = list;
+                       if (oidcmp(&obj->oid, &next->oid) >= 0)
+                               BUG("OIDs not in order: %s >= %s",
+                                   oid_to_hex(&obj->oid),
+                                   oid_to_hex(&next->oid));
+               }
+
+               hashwrite(f, obj->oid.hash, (int)hash_len);
+               written += hash_len;
+       }
+
+       return written;
+}
+
+static size_t write_midx_object_offsets(struct hashfile *f, int large_offset_needed,
+                                       struct pack_midx_entry *objects, uint32_t nr_objects)
+{
+       struct pack_midx_entry *list = objects;
+       uint32_t i, nr_large_offset = 0;
+       size_t written = 0;
+
+       for (i = 0; i < nr_objects; i++) {
+               struct pack_midx_entry *obj = list++;
+
+               hashwrite_be32(f, obj->pack_int_id);
+
+               if (large_offset_needed && obj->offset >> 31)
+                       hashwrite_be32(f, MIDX_LARGE_OFFSET_NEEDED | nr_large_offset++);
+               else if (!large_offset_needed && obj->offset >> 32)
+                       BUG("object %s requires a large offset (%"PRIx64") but the MIDX is not writing large offsets!",
+                           oid_to_hex(&obj->oid),
+                           obj->offset);
+               else
+                       hashwrite_be32(f, (uint32_t)obj->offset);
+
+               written += MIDX_CHUNK_OFFSET_WIDTH;
+       }
+
+       return written;
+}
+
+static size_t write_midx_large_offsets(struct hashfile *f, uint32_t nr_large_offset,
+                                      struct pack_midx_entry *objects, uint32_t nr_objects)
+{
+       struct pack_midx_entry *list = objects;
+       size_t written = 0;
+
+       while (nr_large_offset) {
+               struct pack_midx_entry *obj = list++;
+               uint64_t offset = obj->offset;
+
+               if (!(offset >> 31))
+                       continue;
+
+               hashwrite_be32(f, offset >> 32);
+               hashwrite_be32(f, offset & 0xffffffffUL);
+               written += 2 * sizeof(uint32_t);
+
+               nr_large_offset--;
+       }
+
+       return written;
+}
+
+int write_midx_file(const char *object_dir)
+{
+       unsigned char cur_chunk, num_chunks = 0;
+       char *midx_name;
+       uint32_t i;
+       struct hashfile *f = NULL;
+       struct lock_file lk;
+       struct pack_list packs;
+       uint32_t *pack_perm = NULL;
+       uint64_t written = 0;
+       uint32_t chunk_ids[MIDX_MAX_CHUNKS + 1];
+       uint64_t chunk_offsets[MIDX_MAX_CHUNKS + 1];
+       uint32_t nr_entries, num_large_offsets = 0;
+       struct pack_midx_entry *entries = NULL;
+       int large_offsets_needed = 0;
+
+       midx_name = get_midx_filename(object_dir);
+       if (safe_create_leading_directories(midx_name)) {
+               UNLEAK(midx_name);
+               die_errno(_("unable to create leading directories of %s"),
+                         midx_name);
+       }
+
+       packs.m = load_multi_pack_index(object_dir, 1);
+
+       packs.nr = 0;
+       packs.alloc_list = packs.m ? packs.m->num_packs : 16;
+       packs.alloc_names = packs.alloc_list;
+       packs.list = NULL;
+       packs.names = NULL;
+       packs.pack_name_concat_len = 0;
+       ALLOC_ARRAY(packs.list, packs.alloc_list);
+       ALLOC_ARRAY(packs.names, packs.alloc_names);
+
+       if (packs.m) {
+               for (i = 0; i < packs.m->num_packs; i++) {
+                       ALLOC_GROW(packs.list, packs.nr + 1, packs.alloc_list);
+                       ALLOC_GROW(packs.names, packs.nr + 1, packs.alloc_names);
+
+                       packs.list[packs.nr] = NULL;
+                       packs.names[packs.nr] = xstrdup(packs.m->pack_names[i]);
+                       packs.pack_name_concat_len += strlen(packs.names[packs.nr]) + 1;
+                       packs.nr++;
+               }
+       }
+
+       for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &packs);
+
+       if (packs.m && packs.nr == packs.m->num_packs)
+               goto cleanup;
+
+       if (packs.pack_name_concat_len % MIDX_CHUNK_ALIGNMENT)
+               packs.pack_name_concat_len += MIDX_CHUNK_ALIGNMENT -
+                                             (packs.pack_name_concat_len % MIDX_CHUNK_ALIGNMENT);
+
+       ALLOC_ARRAY(pack_perm, packs.nr);
+       sort_packs_by_name(packs.names, packs.nr, pack_perm);
+
+       entries = get_sorted_entries(packs.m, packs.list, pack_perm, packs.nr, &nr_entries);
+
+       for (i = 0; i < nr_entries; i++) {
+               if (entries[i].offset > 0x7fffffff)
+                       num_large_offsets++;
+               if (entries[i].offset > 0xffffffff)
+                       large_offsets_needed = 1;
+       }
+
+       hold_lock_file_for_update(&lk, midx_name, LOCK_DIE_ON_ERROR);
+       f = hashfd(lk.tempfile->fd, lk.tempfile->filename.buf);
+       FREE_AND_NULL(midx_name);
+
+       if (packs.m)
+               close_midx(packs.m);
+
+       cur_chunk = 0;
+       num_chunks = large_offsets_needed ? 5 : 4;
+
+       written = write_midx_header(f, num_chunks, packs.nr);
+
+       chunk_ids[cur_chunk] = MIDX_CHUNKID_PACKNAMES;
+       chunk_offsets[cur_chunk] = written + (num_chunks + 1) * MIDX_CHUNKLOOKUP_WIDTH;
+
+       cur_chunk++;
+       chunk_ids[cur_chunk] = MIDX_CHUNKID_OIDFANOUT;
+       chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + packs.pack_name_concat_len;
+
+       cur_chunk++;
+       chunk_ids[cur_chunk] = MIDX_CHUNKID_OIDLOOKUP;
+       chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + MIDX_CHUNK_FANOUT_SIZE;
+
+       cur_chunk++;
+       chunk_ids[cur_chunk] = MIDX_CHUNKID_OBJECTOFFSETS;
+       chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + nr_entries * MIDX_HASH_LEN;
+
+       cur_chunk++;
+       chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + nr_entries * MIDX_CHUNK_OFFSET_WIDTH;
+       if (large_offsets_needed) {
+               chunk_ids[cur_chunk] = MIDX_CHUNKID_LARGEOFFSETS;
+
+               cur_chunk++;
+               chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] +
+                                          num_large_offsets * MIDX_CHUNK_LARGE_OFFSET_WIDTH;
+       }
+
+       chunk_ids[cur_chunk] = 0;
+
+       for (i = 0; i <= num_chunks; i++) {
+               if (i && chunk_offsets[i] < chunk_offsets[i - 1])
+                       BUG("incorrect chunk offsets: %"PRIu64" before %"PRIu64,
+                           chunk_offsets[i - 1],
+                           chunk_offsets[i]);
+
+               if (chunk_offsets[i] % MIDX_CHUNK_ALIGNMENT)
+                       BUG("chunk offset %"PRIu64" is not properly aligned",
+                           chunk_offsets[i]);
+
+               hashwrite_be32(f, chunk_ids[i]);
+               hashwrite_be32(f, chunk_offsets[i] >> 32);
+               hashwrite_be32(f, chunk_offsets[i]);
+
+               written += MIDX_CHUNKLOOKUP_WIDTH;
+       }
+
+       for (i = 0; i < num_chunks; i++) {
+               if (written != chunk_offsets[i])
+                       BUG("incorrect chunk offset (%"PRIu64" != %"PRIu64") for chunk id %"PRIx32,
+                           chunk_offsets[i],
+                           written,
+                           chunk_ids[i]);
+
+               switch (chunk_ids[i]) {
+                       case MIDX_CHUNKID_PACKNAMES:
+                               written += write_midx_pack_names(f, packs.names, packs.nr);
+                               break;
+
+                       case MIDX_CHUNKID_OIDFANOUT:
+                               written += write_midx_oid_fanout(f, entries, nr_entries);
+                               break;
+
+                       case MIDX_CHUNKID_OIDLOOKUP:
+                               written += write_midx_oid_lookup(f, MIDX_HASH_LEN, entries, nr_entries);
+                               break;
+
+                       case MIDX_CHUNKID_OBJECTOFFSETS:
+                               written += write_midx_object_offsets(f, large_offsets_needed, entries, nr_entries);
+                               break;
+
+                       case MIDX_CHUNKID_LARGEOFFSETS:
+                               written += write_midx_large_offsets(f, num_large_offsets, entries, nr_entries);
+                               break;
+
+                       default:
+                               BUG("trying to write unknown chunk id %"PRIx32,
+                                   chunk_ids[i]);
+               }
+       }
+
+       if (written != chunk_offsets[num_chunks])
+               BUG("incorrect final offset %"PRIu64" != %"PRIu64,
+                   written,
+                   chunk_offsets[num_chunks]);
+
+       finalize_hashfile(f, NULL, CSUM_FSYNC | CSUM_HASH_IN_STREAM);
+       commit_lock_file(&lk);
+
+cleanup:
+       for (i = 0; i < packs.nr; i++) {
+               if (packs.list[i]) {
+                       close_pack(packs.list[i]);
+                       free(packs.list[i]);
+               }
+               free(packs.names[i]);
+       }
+
+       free(packs.list);
+       free(packs.names);
+       free(entries);
+       free(pack_perm);
+       free(midx_name);
+       return 0;
+}
+
+void clear_midx_file(const char *object_dir)
+{
+       char *midx = get_midx_filename(object_dir);
+
+       if (remove_path(midx)) {
+               UNLEAK(midx);
+               die(_("failed to clear multi-pack-index at %s"), midx);
+       }
+
+       free(midx);
+}
+
+static int verify_midx_error;
+
+static void midx_report(const char *fmt, ...)
+{
+       va_list ap;
+       verify_midx_error = 1;
+       va_start(ap, fmt);
+       vfprintf(stderr, fmt, ap);
+       fprintf(stderr, "\n");
+       va_end(ap);
+}
+
+int verify_midx_file(const char *object_dir)
+{
+       uint32_t i;
+       struct progress *progress = NULL;
+       struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
+       verify_midx_error = 0;
+
+       if (!m)
+               return 0;
+
+       for (i = 0; i < m->num_packs; i++) {
+               if (prepare_midx_pack(m, i))
+                       midx_report("failed to load pack in position %d", i);
+       }
+
+       for (i = 0; i < 255; i++) {
+               uint32_t oid_fanout1 = ntohl(m->chunk_oid_fanout[i]);
+               uint32_t oid_fanout2 = ntohl(m->chunk_oid_fanout[i + 1]);
+
+               if (oid_fanout1 > oid_fanout2)
+                       midx_report(_("oid fanout out of order: fanout[%d] = %"PRIx32" > %"PRIx32" = fanout[%d]"),
+                                   i, oid_fanout1, oid_fanout2, i + 1);
+       }
+
+       for (i = 0; i < m->num_objects - 1; i++) {
+               struct object_id oid1, oid2;
+
+               nth_midxed_object_oid(&oid1, m, i);
+               nth_midxed_object_oid(&oid2, m, i + 1);
+
+               if (oidcmp(&oid1, &oid2) >= 0)
+                       midx_report(_("oid lookup out of order: oid[%d] = %s >= %s = oid[%d]"),
+                                   i, oid_to_hex(&oid1), oid_to_hex(&oid2), i + 1);
+       }
+
+       progress = start_progress(_("Verifying object offsets"), m->num_objects);
+       for (i = 0; i < m->num_objects; i++) {
+               struct object_id oid;
+               struct pack_entry e;
+               off_t m_offset, p_offset;
+
+               nth_midxed_object_oid(&oid, m, i);
+               if (!fill_midx_entry(&oid, &e, m)) {
+                       midx_report(_("failed to load pack entry for oid[%d] = %s"),
+                                   i, oid_to_hex(&oid));
+                       continue;
+               }
+
+               if (open_pack_index(e.p)) {
+                       midx_report(_("failed to load pack-index for packfile %s"),
+                                   e.p->pack_name);
+                       break;
+               }
+
+               m_offset = e.offset;
+               p_offset = find_pack_entry_one(oid.hash, e.p);
+
+               if (m_offset != p_offset)
+                       midx_report(_("incorrect object offset for oid[%d] = %s: %"PRIx64" != %"PRIx64),
+                                   i, oid_to_hex(&oid), m_offset, p_offset);
+
+               display_progress(progress, i + 1);
+       }
+       stop_progress(&progress);
+
+       return verify_midx_error;
+}
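
The object-offsets chunk written above stores one 32-bit slot per object: either the pack offset itself, or, when the offset does not fit in 31 bits, MIDX_LARGE_OFFSET_NEEDED (the top bit, as implied by the "offset >> 31" test) combined with an index into the large-offsets chunk. A minimal reader of one such slot could look like the following sketch; this is illustrative only and not part of the patch, and it assumes the large-offsets chunk has already been widened into host-order 64-bit values.

#include <stdint.h>

static uint64_t decode_offset_slot(uint32_t slot, const uint64_t *large_offsets)
{
	/*
	 * Top bit set: the low 31 bits index the large-offsets chunk,
	 * which holds the full 64-bit pack offsets.
	 */
	if (slot & 0x80000000u)
		return large_offsets[slot & 0x7fffffffu];

	/* Otherwise the slot holds the pack offset directly. */
	return slot;
}
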
diff --git a/midx.h b/midx.h
new file mode 100644 (file)
index 0000000..2d7c9c6
--- /dev/null
+++ b/midx.h
@@ -0,0 +1,51 @@
+#ifndef __MIDX_H__
+#define __MIDX_H__
+
+#include "repository.h"
+
+struct object_id;
+struct pack_entry;
+
+struct multi_pack_index {
+       struct multi_pack_index *next;
+
+       int fd;
+
+       const unsigned char *data;
+       size_t data_len;
+
+       uint32_t signature;
+       unsigned char version;
+       unsigned char hash_len;
+       unsigned char num_chunks;
+       uint32_t num_packs;
+       uint32_t num_objects;
+
+       int local;
+
+       const unsigned char *chunk_pack_names;
+       const uint32_t *chunk_oid_fanout;
+       const unsigned char *chunk_oid_lookup;
+       const unsigned char *chunk_object_offsets;
+       const unsigned char *chunk_large_offsets;
+
+       const char **pack_names;
+       struct packed_git **packs;
+       char object_dir[FLEX_ARRAY];
+};
+
+struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local);
+int prepare_midx_pack(struct multi_pack_index *m, uint32_t pack_int_id);
+int bsearch_midx(const struct object_id *oid, struct multi_pack_index *m, uint32_t *result);
+struct object_id *nth_midxed_object_oid(struct object_id *oid,
+                                       struct multi_pack_index *m,
+                                       uint32_t n);
+int fill_midx_entry(const struct object_id *oid, struct pack_entry *e, struct multi_pack_index *m);
+int midx_contains_pack(struct multi_pack_index *m, const char *idx_name);
+int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, int local);
+
+int write_midx_file(const char *object_dir);
+void clear_midx_file(const char *object_dir);
+int verify_midx_file(const char *object_dir);
+
+#endif
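
The declarations above are the whole public surface of the new machinery. A caller that wants to (re)build the file for one object directory and then check it only needs two of the entry points; the following is an illustrative sketch, not code from the patch.

#include "cache.h"
#include "midx.h"

static int rebuild_and_check_midx(const char *object_dir)
{
	/* Write (or rewrite) <object_dir>/pack/multi-pack-index. */
	if (write_midx_file(object_dir))
		return -1;

	/* Re-read the file and report inconsistencies; 0 means clean. */
	return verify_midx_file(object_dir);
}

The new multi-pack-index builtin drives write_midx_file() in much this way, and clear_midx_file() is the counterpart for dropping the file when it would become stale (for example around a repack).
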
index 163849831c9f11316ce97c649b77c32cf2eed276..1fcda73cb396e305a75aa8dc5869aaf85011761e 100644 (file)
@@ -578,10 +578,10 @@ static void threaded_lazy_init_name_hash(
 
 static void lazy_init_name_hash(struct index_state *istate)
 {
-       uint64_t start = getnanotime();
 
        if (istate->name_hash_initialized)
                return;
+       trace_performance_enter();
        hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr);
        hashmap_init(&istate->dir_hash, dir_entry_cmp, NULL, istate->cache_nr);
 
@@ -602,7 +602,7 @@ static void lazy_init_name_hash(struct index_state *istate)
        }
 
        istate->name_hash_initialized = 1;
-       trace_performance_since(start, "initialize name hash");
+       trace_performance_leave("initialize name hash");
 }
 
 /*
index 76ab19e702423aeb511bfc1e939a7998c61fab61..5764e2b0ef06a3251d93e5535e85861ad3503fe9 100644 (file)
@@ -12,6 +12,7 @@
 #include "notes-merge.h"
 #include "strbuf.h"
 #include "notes-utils.h"
+#include "commit-reach.h"
 
 struct notes_merge_pair {
        struct object_id obj, base, local, remote;
@@ -151,7 +152,7 @@ static struct notes_merge_pair *diff_tree_remote(struct notes_merge_options *o,
                mp = find_notes_merge_pair_pos(changes, len, &obj, 1, &occupied);
                if (occupied) {
                        /* We've found an addition/deletion pair */
-                       assert(!oidcmp(&mp->obj, &obj));
+                       assert(oideq(&mp->obj, &obj));
                        if (is_null_oid(&p->one->oid)) { /* addition */
                                assert(is_null_oid(&mp->remote));
                                oidcpy(&mp->remote, &p->two->oid);
@@ -218,7 +219,7 @@ static void diff_tree_local(struct notes_merge_options *o,
                        continue;
                }
 
-               assert(!oidcmp(&mp->obj, &obj));
+               assert(oideq(&mp->obj, &obj));
                if (is_null_oid(&p->two->oid)) { /* deletion */
                        /*
                         * Either this is a true deletion (1), or it is part
@@ -229,7 +230,7 @@ static void diff_tree_local(struct notes_merge_options *o,
                         * (3) mp->local is uninitialized; set it to null_sha1
                         *     (will be overwritten by following addition)
                         */
-                       if (!oidcmp(&mp->local, &uninitialized))
+                       if (oideq(&mp->local, &uninitialized))
                                oidclr(&mp->local);
                } else if (is_null_oid(&p->one->oid)) { /* addition */
                        /*
@@ -241,7 +242,7 @@ static void diff_tree_local(struct notes_merge_options *o,
                         * (3) mp->local is null_sha1;     set to p->two->sha1
                         */
                        assert(is_null_oid(&mp->local) ||
-                              !oidcmp(&mp->local, &uninitialized));
+                              oideq(&mp->local, &uninitialized));
                        oidcpy(&mp->local, &p->two->oid);
                } else { /* modification */
                        /*
@@ -249,8 +250,8 @@ static void diff_tree_local(struct notes_merge_options *o,
                         * match mp->base, and mp->local shall be uninitialized.
                         * Set mp->local to p->two->sha1.
                         */
-                       assert(!oidcmp(&p->one->oid, &mp->base));
-                       assert(!oidcmp(&mp->local, &uninitialized));
+                       assert(oideq(&p->one->oid, &mp->base));
+                       assert(oideq(&mp->local, &uninitialized));
                        oidcpy(&mp->local, &p->two->oid);
                }
                trace_printf("\t\tStored local change for %s: %.7s -> %.7s\n",
@@ -480,14 +481,14 @@ static int merge_changes(struct notes_merge_options *o,
                       oid_to_hex(&p->local),
                       oid_to_hex(&p->remote));
 
-               if (!oidcmp(&p->base, &p->remote)) {
+               if (oideq(&p->base, &p->remote)) {
                        /* no remote change; nothing to do */
                        trace_printf("\t\t\tskipping (no remote change)\n");
-               } else if (!oidcmp(&p->local, &p->remote)) {
+               } else if (oideq(&p->local, &p->remote)) {
                        /* same change in local and remote; nothing to do */
                        trace_printf("\t\t\tskipping (local == remote)\n");
-               } else if (!oidcmp(&p->local, &uninitialized) ||
-                          !oidcmp(&p->local, &p->base)) {
+               } else if (oideq(&p->local, &uninitialized) ||
+                          oideq(&p->local, &p->base)) {
                        /* no local change; adopt remote change */
                        trace_printf("\t\t\tno local change, adopted remote\n");
                        if (add_note(t, &p->obj, &p->remote,
@@ -621,14 +622,14 @@ int notes_merge(struct notes_merge_options *o,
                        oid_to_hex(&local->object.oid),
                        oid_to_hex(base_oid));
 
-       if (!oidcmp(&remote->object.oid, base_oid)) {
+       if (oideq(&remote->object.oid, base_oid)) {
                /* Already merged; result == local commit */
                if (o->verbosity >= 2)
                        printf("Already up to date!\n");
                oidcpy(result_oid, &local->object.oid);
                goto found_result;
        }
-       if (!oidcmp(&local->object.oid, base_oid)) {
+       if (oideq(&local->object.oid, base_oid)) {
                /* Fast-forward; result == remote commit */
                if (o->verbosity >= 2)
                        printf("Fast-forward\n");
diff --git a/notes.c b/notes.c
index 32d3dbcc1e74ce344da89b9aa2b9ca09f6e62d2b..25cdce28b71a3ff15da424c8d1b1a89e28356dd5 100644 (file)
--- a/notes.c
+++ b/notes.c
@@ -147,7 +147,7 @@ static struct leaf_node *note_tree_find(struct notes_tree *t,
        void **p = note_tree_search(t, &tree, &n, key_sha1);
        if (GET_PTR_TYPE(*p) == PTR_TYPE_NOTE) {
                struct leaf_node *l = (struct leaf_node *) CLR_PTR_TYPE(*p);
-               if (!hashcmp(key_sha1, l->key_oid.hash))
+               if (hasheq(key_sha1, l->key_oid.hash))
                        return l;
        }
        return NULL;
@@ -206,7 +206,7 @@ static void note_tree_remove(struct notes_tree *t,
        if (GET_PTR_TYPE(*p) != PTR_TYPE_NOTE)
                return; /* type mismatch, nothing to remove */
        l = (struct leaf_node *) CLR_PTR_TYPE(*p);
-       if (oidcmp(&l->key_oid, &entry->key_oid))
+       if (!oideq(&l->key_oid, &entry->key_oid))
                return; /* key mismatch, nothing to remove */
 
        /* we have found a matching entry */
@@ -266,9 +266,9 @@ static int note_tree_insert(struct notes_tree *t, struct int_node *tree,
        case PTR_TYPE_NOTE:
                switch (type) {
                case PTR_TYPE_NOTE:
-                       if (!oidcmp(&l->key_oid, &entry->key_oid)) {
+                       if (oideq(&l->key_oid, &entry->key_oid)) {
                                /* skip concatenation if l == entry */
-                               if (!oidcmp(&l->val_oid, &entry->val_oid))
+                               if (oideq(&l->val_oid, &entry->val_oid))
                                        return 0;
 
                                ret = combine_notes(&l->val_oid,
index 67e66227d9c41e2f3036d0aeb89afaf5a17ec98a..63b7605a3e0b3000641d8f39f22e1d5c8139f19b 100644 (file)
@@ -88,6 +88,8 @@ struct packed_git {
        char pack_name[FLEX_ARRAY]; /* more */
 };
 
+struct multi_pack_index;
+
 struct raw_object_store {
        /*
         * Path to the repository's object store.
@@ -110,6 +112,13 @@ struct raw_object_store {
        struct commit_graph *commit_graph;
        unsigned commit_graph_attempted : 1; /* if loading has been attempted */
 
+       /*
+        * private data
+        *
+        * should only be accessed directly by packfile.c and midx.c
+        */
+       struct multi_pack_index *multi_pack_index;
+
        /*
         * private data
         *
@@ -120,6 +129,12 @@ struct raw_object_store {
        /* A most-recently-used ordered version of the packed_git list. */
        struct list_head packed_git_mru;
 
+       /*
+        * A linked list containing all packfiles, starting with those
+        * contained in the multi_pack_index.
+        */
+       struct packed_git *all_packs;
+
        /*
         * A fast, rough count of the number of objects in the repository.
         * These two fields are not meant for direct access. Use
index 51c45945156c421ada403139faefdf145918f4f7..e54160550c5a8e864f2e2bbcc58a29ff73045f8f 100644 (file)
--- a/object.c
+++ b/object.c
@@ -95,7 +95,7 @@ struct object *lookup_object(struct repository *r, const unsigned char *sha1)
 
        first = i = hash_obj(sha1, r->parsed_objects->obj_hash_size);
        while ((obj = r->parsed_objects->obj_hash[i]) != NULL) {
-               if (!hashcmp(sha1, obj->oid.hash))
+               if (hasheq(sha1, obj->oid.hash))
                        break;
                i++;
                if (i == r->parsed_objects->obj_hash_size)
index 6e28fdd0b426a3e276001e674675cf386f7d86c1..0feb90ae613be04d56ac091307c9b2be88724e42 100644 (file)
--- a/object.h
+++ b/object.h
@@ -63,12 +63,12 @@ struct object_array {
  * fetch-pack.c:             01
  * negotiator/default.c:       2--5
  * walker.c:                 0-2
- * upload-pack.c:                4       11----------------19
+ * upload-pack.c:                4       11-----14  16-----19
  * builtin/blame.c:                        12-13
  * bisect.c:                                        16
  * bundle.c:                                        16
  * http-push.c:                                     16-----19
- * commit.c:                                        16-----19
+ * commit-reach.c:                                15-------19
  * sha1-name.c:                                              20
  * list-objects-filter.c:                                      21
  * builtin/fsck.c:           0--3
index d9fb19ba6a354e376780fa0a3624642532a2f6cf..b0841a0f5870bafdcaf3482ed725e43ddaf5d461 100644 (file)
--- a/oidmap.c
+++ b/oidmap.c
@@ -1,14 +1,14 @@
 #include "cache.h"
 #include "oidmap.h"
 
-static int cmpfn(const void *hashmap_cmp_fn_data,
-                const void *entry, const void *entry_or_key,
-                const void *keydata)
+static int oidmap_neq(const void *hashmap_cmp_fn_data,
+                     const void *entry, const void *entry_or_key,
+                     const void *keydata)
 {
        const struct oidmap_entry *entry_ = entry;
        if (keydata)
-               return oidcmp(&entry_->oid, (const struct object_id *) keydata);
-       return oidcmp(&entry_->oid,
+               return !oideq(&entry_->oid, (const struct object_id *) keydata);
+       return !oideq(&entry_->oid,
                      &((const struct oidmap_entry *) entry_or_key)->oid);
 }
 
@@ -21,7 +21,7 @@ static int hash(const struct object_id *oid)
 
 void oidmap_init(struct oidmap *map, size_t initial_size)
 {
-       hashmap_init(&map->map, cmpfn, NULL, initial_size);
+       hashmap_init(&map->map, oidmap_neq, NULL, initial_size);
 }
 
 void oidmap_free(struct oidmap *map, int free_entries)
index d977e9bacb19ed766dd0a501b6fbcae800bb4ae0..fc82f37a02772244ee93aee41bef9238075c07ba 100644 (file)
@@ -11,6 +11,7 @@
 #include "pack-bitmap.h"
 #include "sha1-lookup.h"
 #include "pack-objects.h"
+#include "commit-reach.h"
 
 struct bitmapped_commit {
        struct commit *commit;
index f0a1937a1cc5fbb13fc705df8d193a43a0648198..5848cc93aa254b8549f4c569882b5d781984ca47 100644 (file)
@@ -86,10 +86,11 @@ struct bitmap_index {
        /* Bitmap result of the last performed walk */
        struct bitmap *result;
 
+       /* "have" bitmap from the last performed walk */
+       struct bitmap *haves;
+
        /* Version of the bitmap index */
        unsigned int version;
-
-       unsigned loaded : 1;
 };
 
 static struct ewah_bitmap *lookup_stored_bitmap(struct stored_bitmap *st)
@@ -303,7 +304,7 @@ static int open_pack_bitmap_1(struct bitmap_index *bitmap_git, struct packed_git
 
 static int load_pack_bitmap(struct bitmap_index *bitmap_git)
 {
-       assert(bitmap_git->map && !bitmap_git->loaded);
+       assert(bitmap_git->map);
 
        bitmap_git->bitmaps = kh_init_sha1();
        bitmap_git->ext_index.positions = kh_init_sha1_pos();
@@ -318,7 +319,6 @@ static int load_pack_bitmap(struct bitmap_index *bitmap_git)
        if (load_bitmap_entries_v1(bitmap_git) < 0)
                goto failed;
 
-       bitmap_git->loaded = 1;
        return 0;
 
 failed:
@@ -333,9 +333,9 @@ static int open_pack_bitmap(struct bitmap_index *bitmap_git)
        struct packed_git *p;
        int ret = -1;
 
-       assert(!bitmap_git->map && !bitmap_git->loaded);
+       assert(!bitmap_git->map);
 
-       for (p = get_packed_git(the_repository); p; p = p->next) {
+       for (p = get_all_packs(the_repository); p; p = p->next) {
                if (open_pack_bitmap_1(bitmap_git, p) == 0)
                        ret = 0;
        }
@@ -735,7 +735,7 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs)
         * from disk. this is the point of no return; after this the rev_list
         * becomes invalidated and we must perform the revwalk through bitmaps
         */
-       if (!bitmap_git->loaded && load_pack_bitmap(bitmap_git) < 0)
+       if (load_pack_bitmap(bitmap_git) < 0)
                goto cleanup;
 
        object_array_clear(&revs->pending);
@@ -759,8 +759,8 @@ struct bitmap_index *prepare_bitmap_walk(struct rev_info *revs)
                bitmap_and_not(wants_bitmap, haves_bitmap);
 
        bitmap_git->result = wants_bitmap;
+       bitmap_git->haves = haves_bitmap;
 
-       bitmap_free(haves_bitmap);
        return bitmap_git;
 
 cleanup:
@@ -845,9 +845,6 @@ void traverse_bitmap_commit_list(struct bitmap_index *bitmap_git,
                OBJ_TAG, show_reachable);
 
        show_extended_objects(bitmap_git, show_reachable);
-
-       bitmap_free(bitmap_git->result);
-       bitmap_git->result = NULL;
 }
 
 static uint32_t count_object_type(struct bitmap_index *bitmap_git,
@@ -1114,5 +1111,23 @@ void free_bitmap_index(struct bitmap_index *b)
        free(b->ext_index.objects);
        free(b->ext_index.hashes);
        bitmap_free(b->result);
+       bitmap_free(b->haves);
        free(b);
 }
+
+int bitmap_has_sha1_in_uninteresting(struct bitmap_index *bitmap_git,
+                                    const unsigned char *sha1)
+{
+       int pos;
+
+       if (!bitmap_git)
+               return 0; /* no bitmap loaded */
+       if (!bitmap_git->haves)
+               return 0; /* walk had no "haves" */
+
+       pos = bitmap_position_packfile(bitmap_git, sha1);
+       if (pos < 0)
+               return 0;
+
+       return bitmap_get(bitmap_git->haves, pos);
+}
index 8a04741e1253b0ba801445a76ceb8e4937121f73..189dd68ad30b14281b42ea43bb21739006ea6061 100644 (file)
@@ -53,6 +53,13 @@ int rebuild_existing_bitmaps(struct bitmap_index *, struct packing_data *mapping
                             khash_sha1 *reused_bitmaps, int show_progress);
 void free_bitmap_index(struct bitmap_index *);
 
+/*
+ * After a traversal has been performed by prepare_bitmap_walk(), this can be
+ * queried to see if a particular object was reachable from any of the
+ * objects flagged as UNINTERESTING.
+ */
+int bitmap_has_sha1_in_uninteresting(struct bitmap_index *, const unsigned char *sha1);
+
 void bitmap_writer_show_progress(int show);
 void bitmap_writer_set_checksum(unsigned char *sha1);
 void bitmap_writer_build_type_index(struct packing_data *to_pack,
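
The comment above pins down when the query is meaningful: only after prepare_bitmap_walk() has produced a result. A sketch of the calling pattern follows; it is illustrative only, the helper name is made up, and in this series the real user is pack-objects' delta reuse for thin packs.

#include "cache.h"
#include "revision.h"
#include "pack-bitmap.h"

static int other_side_has(struct rev_info *revs,
			  const unsigned char *base_sha1)
{
	/*
	 * prepare_bitmap_walk() may fail and return NULL; the query
	 * below treats a NULL index as "not known to the other side".
	 */
	struct bitmap_index *bitmap_git = prepare_bitmap_walk(revs);
	int ret = bitmap_has_sha1_in_uninteresting(bitmap_git, base_sha1);

	if (bitmap_git)
		free_bitmap_index(bitmap_git);
	return ret;
}
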
index d3a57df34f2d2bf0ef9935a51f171f24d8ed6a72..fa5f0ff8fa57461486ab2f82e58ad3eeffab77f1 100644 (file)
@@ -79,10 +79,10 @@ static int verify_packfile(struct packed_git *p,
        } while (offset < pack_sig_ofs);
        the_hash_algo->final_fn(hash, &ctx);
        pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL);
-       if (hashcmp(hash, pack_sig))
+       if (!hasheq(hash, pack_sig))
                err = error("%s pack checksum mismatch",
                            p->pack_name);
-       if (hashcmp(index_base + index_size - the_hash_algo->hexsz, pack_sig))
+       if (!hasheq(index_base + index_size - the_hash_algo->hexsz, pack_sig))
                err = error("%s pack checksum does not match its index",
                            p->pack_name);
        unuse_pack(w_curs);
@@ -180,7 +180,7 @@ int verify_pack_index(struct packed_git *p)
        the_hash_algo->init_fn(&ctx);
        the_hash_algo->update_fn(&ctx, index_base, (unsigned int)(index_size - the_hash_algo->rawsz));
        the_hash_algo->final_fn(hash, &ctx);
-       if (hashcmp(hash, index_base + index_size - the_hash_algo->rawsz))
+       if (!hasheq(hash, index_base + index_size - the_hash_algo->rawsz))
                err = error("Packfile index for %s hash mismatch",
                            p->pack_name);
        return err;
index 6ef87e5683aacdf738c86679712078988c0899fd..7e624c30ebd7f344d711b608b29b2d48d21a1ba0 100644 (file)
@@ -16,7 +16,7 @@ static uint32_t locate_object_entry_hash(struct packing_data *pdata,
        while (pdata->index[i] > 0) {
                uint32_t pos = pdata->index[i] - 1;
 
-               if (!hashcmp(sha1, pdata->objects[pos].idx.oid.hash)) {
+               if (hasheq(sha1, pdata->objects[pos].idx.oid.hash)) {
                        *found = 1;
                        return i;
                }
@@ -99,7 +99,7 @@ static void prepare_in_pack_by_idx(struct packing_data *pdata)
         * (i.e. in_pack_idx also zero) should return NULL.
         */
        mapping[cnt++] = NULL;
-       for (p = get_packed_git(the_repository); p; p = p->next, cnt++) {
+       for (p = get_all_packs(the_repository); p; p = p->next, cnt++) {
                if (cnt == nr) {
                        free(mapping);
                        return;
@@ -164,6 +164,12 @@ struct object_entry *packlist_alloc(struct packing_data *pdata,
                        REALLOC_ARRAY(pdata->in_pack, pdata->nr_alloc);
                if (pdata->delta_size)
                        REALLOC_ARRAY(pdata->delta_size, pdata->nr_alloc);
+
+               if (pdata->tree_depth)
+                       REALLOC_ARRAY(pdata->tree_depth, pdata->nr_alloc);
+
+               if (pdata->layer)
+                       REALLOC_ARRAY(pdata->layer, pdata->nr_alloc);
        }
 
        new_entry = pdata->objects + pdata->nr_objects++;
@@ -179,5 +185,30 @@ struct object_entry *packlist_alloc(struct packing_data *pdata,
        if (pdata->in_pack)
                pdata->in_pack[pdata->nr_objects - 1] = NULL;
 
+       if (pdata->tree_depth)
+               pdata->tree_depth[pdata->nr_objects - 1] = 0;
+
+       if (pdata->layer)
+               pdata->layer[pdata->nr_objects - 1] = 0;
+
        return new_entry;
 }
+
+void oe_set_delta_ext(struct packing_data *pdata,
+                     struct object_entry *delta,
+                     const unsigned char *sha1)
+{
+       struct object_entry *base;
+
+       ALLOC_GROW(pdata->ext_bases, pdata->nr_ext + 1, pdata->alloc_ext);
+       base = &pdata->ext_bases[pdata->nr_ext++];
+       memset(base, 0, sizeof(*base));
+       hashcpy(base->idx.oid.hash, sha1);
+
+       /* These flags mark that we are not part of the actual pack output. */
+       base->preferred_base = 1;
+       base->filled = 1;
+
+       delta->ext_base = 1;
+       delta->delta_idx = base - pdata->ext_bases + 1;
+}
index 62806ccc39ea31b425089f4f38121d81a02fe5dd..2ca39cfcfe26aea164fc4173c49f0ffd05d4ef04 100644 (file)
@@ -103,6 +103,7 @@ struct object_entry {
        unsigned no_try_delta:1;
        unsigned type_:TYPE_BITS;
        unsigned in_pack_type:TYPE_BITS; /* could be delta */
+
        unsigned preferred_base:1; /*
                                    * we do not pack this, but is available
                                    * to be used as the base object to delta
@@ -112,6 +113,7 @@ struct object_entry {
        unsigned filled:1; /* assigned write-order */
        unsigned dfs_state:OE_DFS_STATE_BITS;
        unsigned depth:OE_DEPTH_BITS;
+       unsigned ext_base:1; /* delta_idx points outside packlist */
 
        /*
         * pahole results on 64-bit linux (gcc and clang)
@@ -147,8 +149,20 @@ struct packing_data {
        pthread_mutex_t lock;
 #endif
 
+       /*
+        * This list contains entries for bases which we know the other side
+        * has (e.g., via reachability bitmaps), but which aren't in our
+        * "objects" list.
+        */
+       struct object_entry *ext_bases;
+       uint32_t nr_ext, alloc_ext;
+
        uintmax_t oe_size_limit;
        uintmax_t oe_delta_size_limit;
+
+       /* delta islands */
+       unsigned int *tree_depth;
+       unsigned char *layer;
 };
 
 void prepare_packing_data(struct packing_data *pdata);
@@ -249,9 +263,12 @@ static inline struct object_entry *oe_delta(
                const struct packing_data *pack,
                const struct object_entry *e)
 {
-       if (e->delta_idx)
+       if (!e->delta_idx)
+               return NULL;
+       if (e->ext_base)
+               return &pack->ext_bases[e->delta_idx - 1];
+       else
                return &pack->objects[e->delta_idx - 1];
-       return NULL;
 }
 
 static inline void oe_set_delta(struct packing_data *pack,
@@ -264,6 +281,10 @@ static inline void oe_set_delta(struct packing_data *pack,
                e->delta_idx = 0;
 }
 
+void oe_set_delta_ext(struct packing_data *pack,
+                     struct object_entry *e,
+                     const unsigned char *sha1);
+
 static inline struct object_entry *oe_delta_child(
                const struct packing_data *pack,
                const struct object_entry *e)
@@ -384,4 +405,38 @@ static inline void oe_set_delta_size(struct packing_data *pack,
        }
 }
 
+static inline unsigned int oe_tree_depth(struct packing_data *pack,
+                                        struct object_entry *e)
+{
+       if (!pack->tree_depth)
+               return 0;
+       return pack->tree_depth[e - pack->objects];
+}
+
+static inline void oe_set_tree_depth(struct packing_data *pack,
+                                    struct object_entry *e,
+                                    unsigned int tree_depth)
+{
+       if (!pack->tree_depth)
+               ALLOC_ARRAY(pack->tree_depth, pack->nr_objects);
+       pack->tree_depth[e - pack->objects] = tree_depth;
+}
+
+static inline unsigned char oe_layer(struct packing_data *pack,
+                                    struct object_entry *e)
+{
+       if (!pack->layer)
+               return 0;
+       return pack->layer[e - pack->objects];
+}
+
+static inline void oe_set_layer(struct packing_data *pack,
+                               struct object_entry *e,
+                               unsigned char layer)
+{
+       if (!pack->layer)
+               ALLOC_ARRAY(pack->layer, pack->nr_objects);
+       pack->layer[e - pack->objects] = layer;
+}
+
 #endif
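
Two things above are worth a concrete picture. First, oe_set_delta_ext() appends a synthetic entry to ext_bases for a base the other side is known to have, and oe_delta() transparently resolves delta_idx into either objects[] or ext_bases[] depending on the ext_base bit. Second, the tree_depth and layer accessors follow the usual lazily-allocated side-array pattern: readers see 0 until the first oe_set_*() call allocates the array. A sketch of the first point (illustrative only; the helper name is made up):

#include "cache.h"
#include "pack-objects.h"

static struct object_entry *record_external_base(struct packing_data *pdata,
						 struct object_entry *entry,
						 const unsigned char *base_sha1)
{
	oe_set_delta_ext(pdata, entry, base_sha1);

	/*
	 * The base now lives in pdata->ext_bases rather than
	 * pdata->objects; oe_delta() follows the ext_base flag for us.
	 * Its preferred_base/filled bits keep it out of the pack output.
	 */
	return oe_delta(pdata, entry);
}
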
index a9d46bc03f63b27ff85cceecb763d4e39f47898f..29d17a9bec279eed01a27f7089de1992dfc1dc4f 100644 (file)
@@ -124,7 +124,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                }
                hashwrite(f, obj->oid.hash, the_hash_algo->rawsz);
                if ((opts->flags & WRITE_IDX_STRICT) &&
-                   (i && !oidcmp(&list[-2]->oid, &obj->oid)))
+                   (i && oideq(&list[-2]->oid, &obj->oid)))
                        die("The same object %s appears twice in the pack",
                            oid_to_hex(&obj->oid));
        }
@@ -260,7 +260,7 @@ void fixup_pack_header_footer(int pack_fd,
                if (partial_pack_offset == 0) {
                        unsigned char hash[GIT_MAX_RAWSZ];
                        the_hash_algo->final_fn(hash, &old_hash_ctx);
-                       if (hashcmp(hash, partial_pack_hash) != 0)
+                       if (!hasheq(hash, partial_pack_hash))
                                die("Unexpected checksum for %s "
                                    "(disk corruption?)", pack_name);
                        /*
index ebcb5742ec748d730f8d730ad8b0744e9094d121..841b36182fcd938da5ee0a4b065e0717ef4a18ed 100644 (file)
@@ -15,6 +15,7 @@
 #include "tree-walk.h"
 #include "tree.h"
 #include "object-store.h"
+#include "midx.h"
 
 char *odb_pack_name(struct strbuf *buf,
                    const unsigned char *sha1,
@@ -196,6 +197,23 @@ int open_pack_index(struct packed_git *p)
        return ret;
 }
 
+uint32_t get_pack_fanout(struct packed_git *p, uint32_t value)
+{
+       const uint32_t *level1_ofs = p->index_data;
+
+       if (!level1_ofs) {
+               if (open_pack_index(p))
+                       return 0;
+               level1_ofs = p->index_data;
+       }
+
+       if (p->index_version > 1) {
+               level1_ofs += 2;
+       }
+
+       return ntohl(level1_ofs[value]);
+}
+
 static struct packed_git *alloc_packed_git(int extra)
 {
        struct packed_git *p = xmalloc(st_add(sizeof(*p), extra));
@@ -451,8 +469,19 @@ static int open_packed_git_1(struct packed_git *p)
        ssize_t read_result;
        const unsigned hashsz = the_hash_algo->rawsz;
 
-       if (!p->index_data && open_pack_index(p))
-               return error("packfile %s index unavailable", p->pack_name);
+       if (!p->index_data) {
+               struct multi_pack_index *m;
+               const char *pack_name = strrchr(p->pack_name, '/');
+
+               for (m = the_repository->objects->multi_pack_index;
+                    m; m = m->next) {
+                       if (midx_contains_pack(m, pack_name))
+                               break;
+               }
+
+               if (!m && open_pack_index(p))
+                       return error("packfile %s index unavailable", p->pack_name);
+       }
 
        if (!pack_max_fds) {
                unsigned int max_fds = get_max_fd_limit();
@@ -503,6 +532,10 @@ static int open_packed_git_1(struct packed_git *p)
                        " supported (try upgrading GIT to a newer version)",
                        p->pack_name, ntohl(hdr.hdr_version));
 
+       /* Skip index checking if in multi-pack-index */
+       if (!p->index_data)
+               return 0;
+
        /* Verify the pack matches its index. */
        if (p->num_objects != ntohl(hdr.hdr_entries))
                return error("packfile %s claims to have %"PRIu32" objects"
@@ -517,7 +550,7 @@ static int open_packed_git_1(struct packed_git *p)
        if (read_result != hashsz)
                return error("packfile %s signature is unavailable", p->pack_name);
        idx_hash = ((unsigned char *)p->index_data) + p->index_size - hashsz * 2;
-       if (hashcmp(hash, idx_hash))
+       if (!hasheq(hash, idx_hash))
                return error("packfile %s does not match index", p->pack_name);
        return 0;
 }
@@ -738,13 +771,14 @@ static void report_pack_garbage(struct string_list *list)
        report_helper(list, seen_bits, first, list->nr);
 }
 
-static void prepare_packed_git_one(struct repository *r, char *objdir, int local)
+void for_each_file_in_pack_dir(const char *objdir,
+                              each_file_in_pack_dir_fn fn,
+                              void *data)
 {
        struct strbuf path = STRBUF_INIT;
        size_t dirnamelen;
        DIR *dir;
        struct dirent *de;
-       struct string_list garbage = STRING_LIST_INIT_DUP;
 
        strbuf_addstr(&path, objdir);
        strbuf_addstr(&path, "/pack");
@@ -759,53 +793,87 @@ static void prepare_packed_git_one(struct repository *r, char *objdir, int local
        strbuf_addch(&path, '/');
        dirnamelen = path.len;
        while ((de = readdir(dir)) != NULL) {
-               struct packed_git *p;
-               size_t base_len;
-
                if (is_dot_or_dotdot(de->d_name))
                        continue;
 
                strbuf_setlen(&path, dirnamelen);
                strbuf_addstr(&path, de->d_name);
 
-               base_len = path.len;
-               if (strip_suffix_mem(path.buf, &base_len, ".idx")) {
-                       /* Don't reopen a pack we already have. */
-                       for (p = r->objects->packed_git; p;
-                            p = p->next) {
-                               size_t len;
-                               if (strip_suffix(p->pack_name, ".pack", &len) &&
-                                   len == base_len &&
-                                   !memcmp(p->pack_name, path.buf, len))
-                                       break;
-                       }
-                       if (p == NULL &&
-                           /*
-                            * See if it really is a valid .idx file with
-                            * corresponding .pack file that we can map.
-                            */
-                           (p = add_packed_git(path.buf, path.len, local)) != NULL)
-                               install_packed_git(r, p);
-               }
-
-               if (!report_garbage)
-                       continue;
-
-               if (ends_with(de->d_name, ".idx") ||
-                   ends_with(de->d_name, ".pack") ||
-                   ends_with(de->d_name, ".bitmap") ||
-                   ends_with(de->d_name, ".keep") ||
-                   ends_with(de->d_name, ".promisor"))
-                       string_list_append(&garbage, path.buf);
-               else
-                       report_garbage(PACKDIR_FILE_GARBAGE, path.buf);
+               fn(path.buf, path.len, de->d_name, data);
        }
+
        closedir(dir);
-       report_pack_garbage(&garbage);
-       string_list_clear(&garbage, 0);
        strbuf_release(&path);
 }
 
+struct prepare_pack_data {
+       struct repository *r;
+       struct string_list *garbage;
+       int local;
+       struct multi_pack_index *m;
+};
+
+static void prepare_pack(const char *full_name, size_t full_name_len,
+                        const char *file_name, void *_data)
+{
+       struct prepare_pack_data *data = (struct prepare_pack_data *)_data;
+       struct packed_git *p;
+       size_t base_len = full_name_len;
+
+       if (strip_suffix_mem(full_name, &base_len, ".idx") &&
+           !(data->m && midx_contains_pack(data->m, file_name))) {
+               /* Don't reopen a pack we already have. */
+               for (p = data->r->objects->packed_git; p; p = p->next) {
+                       size_t len;
+                       if (strip_suffix(p->pack_name, ".pack", &len) &&
+                           len == base_len &&
+                           !memcmp(p->pack_name, full_name, len))
+                               break;
+               }
+
+               if (!p) {
+                       p = add_packed_git(full_name, full_name_len, data->local);
+                       if (p)
+                               install_packed_git(data->r, p);
+               }
+       }
+
+       if (!report_garbage)
+               return;
+
+       if (!strcmp(file_name, "multi-pack-index"))
+               return;
+       if (ends_with(file_name, ".idx") ||
+           ends_with(file_name, ".pack") ||
+           ends_with(file_name, ".bitmap") ||
+           ends_with(file_name, ".keep") ||
+           ends_with(file_name, ".promisor"))
+               string_list_append(data->garbage, full_name);
+       else
+               report_garbage(PACKDIR_FILE_GARBAGE, full_name);
+}
+
+static void prepare_packed_git_one(struct repository *r, char *objdir, int local)
+{
+       struct prepare_pack_data data;
+       struct string_list garbage = STRING_LIST_INIT_DUP;
+
+       data.m = r->objects->multi_pack_index;
+
+       /* look for the multi-pack-index for this object directory */
+       while (data.m && strcmp(data.m->object_dir, objdir))
+               data.m = data.m->next;
+
+       data.r = r;
+       data.garbage = &garbage;
+       data.local = local;
+
+       for_each_file_in_pack_dir(objdir, prepare_pack, &data);
+
+       report_pack_garbage(data.garbage);
+       string_list_clear(data.garbage, 0);
+}
+
 static void prepare_packed_git(struct repository *r);
 /*
  * Give a fast, rough count of the number of objects in the repository. This
@@ -818,10 +886,13 @@ unsigned long approximate_object_count(void)
 {
        if (!the_repository->objects->approximate_object_count_valid) {
                unsigned long count;
+               struct multi_pack_index *m;
                struct packed_git *p;
 
                prepare_packed_git(the_repository);
                count = 0;
+               for (m = get_multi_pack_index(the_repository); m; m = m->next)
+                       count += m->num_objects;
                for (p = the_repository->objects->packed_git; p; p = p->next) {
                        if (open_pack_index(p))
                                continue;
@@ -893,11 +964,17 @@ static void prepare_packed_git(struct repository *r)
 
        if (r->objects->packed_git_initialized)
                return;
+       prepare_multi_pack_index_one(r, r->objects->objectdir, 1);
        prepare_packed_git_one(r, r->objects->objectdir, 1);
        prepare_alt_odb(r);
-       for (alt = r->objects->alt_odb_list; alt; alt = alt->next)
+       for (alt = r->objects->alt_odb_list; alt; alt = alt->next) {
+               prepare_multi_pack_index_one(r, alt->path, 0);
                prepare_packed_git_one(r, alt->path, 0);
+       }
        rearrange_packed_git(r);
+
+       r->objects->all_packs = NULL;
+
        prepare_packed_git_mru(r);
        r->objects->packed_git_initialized = 1;
 }
@@ -915,6 +992,36 @@ struct packed_git *get_packed_git(struct repository *r)
        return r->objects->packed_git;
 }
 
+struct multi_pack_index *get_multi_pack_index(struct repository *r)
+{
+       prepare_packed_git(r);
+       return r->objects->multi_pack_index;
+}
+
+struct packed_git *get_all_packs(struct repository *r)
+{
+       prepare_packed_git(r);
+
+       if (!r->objects->all_packs) {
+               struct packed_git *p = r->objects->packed_git;
+               struct multi_pack_index *m;
+
+               for (m = r->objects->multi_pack_index; m; m = m->next) {
+                       uint32_t i;
+                       for (i = 0; i < m->num_packs; i++) {
+                               if (!prepare_midx_pack(m, i)) {
+                                       m->packs[i]->next = p;
+                                       p = m->packs[i];
+                               }
+                       }
+               }
+
+               r->objects->all_packs = p;
+       }
+
+       return r->objects->all_packs;
+}
+
 struct list_head *get_packed_git_mru(struct repository *r)
 {
        prepare_packed_git(r);
@@ -1015,7 +1122,7 @@ void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1)
 {
        unsigned i;
        for (i = 0; i < p->num_bad_objects; i++)
-               if (!hashcmp(sha1, p->bad_object_sha1 + GIT_SHA1_RAWSZ * i))
+               if (hasheq(sha1, p->bad_object_sha1 + GIT_SHA1_RAWSZ * i))
                        return;
        p->bad_object_sha1 = xrealloc(p->bad_object_sha1,
                                      st_mult(GIT_MAX_RAWSZ,
@@ -1031,8 +1138,8 @@ const struct packed_git *has_packed_and_bad(const unsigned char *sha1)
 
        for (p = the_repository->objects->packed_git; p; p = p->next)
                for (i = 0; i < p->num_bad_objects; i++)
-                       if (!hashcmp(sha1,
-                                    p->bad_object_sha1 + the_hash_algo->rawsz * i))
+                       if (hasheq(sha1,
+                                  p->bad_object_sha1 + the_hash_algo->rawsz * i))
                                return p;
        return NULL;
 }
@@ -1830,8 +1937,8 @@ static int fill_pack_entry(const struct object_id *oid,
        if (p->num_bad_objects) {
                unsigned i;
                for (i = 0; i < p->num_bad_objects; i++)
-                       if (!hashcmp(oid->hash,
-                                    p->bad_object_sha1 + the_hash_algo->rawsz * i))
+                       if (hasheq(oid->hash,
+                                  p->bad_object_sha1 + the_hash_algo->rawsz * i))
                                return 0;
        }
 
@@ -1856,11 +1963,17 @@ static int fill_pack_entry(const struct object_id *oid,
 int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e)
 {
        struct list_head *pos;
+       struct multi_pack_index *m;
 
        prepare_packed_git(r);
-       if (!r->objects->packed_git)
+       if (!r->objects->packed_git && !r->objects->multi_pack_index)
                return 0;
 
+       for (m = r->objects->multi_pack_index; m; m = m->next) {
+               if (fill_midx_entry(oid, e, m))
+                       return 1;
+       }
+
        list_for_each(pos, &r->objects->packed_git_mru) {
                struct packed_git *p = list_entry(pos, struct packed_git, mru);
                if (fill_pack_entry(oid, e, p)) {
@@ -1923,7 +2036,7 @@ int for_each_packed_object(each_packed_object_fn cb, void *data,
        int pack_errors = 0;
 
        prepare_packed_git(the_repository);
-       for (p = the_repository->objects->packed_git; p; p = p->next) {
+       for (p = get_all_packs(the_repository); p; p = p->next) {
                if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
                        continue;
                if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) &&
index 630f35cf31ef74975c04d17820314a85bba675af..442625723dea4b0c8f22b57d52e760d4b810540e 100644 (file)
@@ -33,6 +33,12 @@ extern char *sha1_pack_index_name(const unsigned char *sha1);
 
 extern struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_path);
 
+typedef void each_file_in_pack_dir_fn(const char *full_path, size_t full_path_len,
+                                      const char *file_path, void *data);
+void for_each_file_in_pack_dir(const char *objdir,
+                              each_file_in_pack_dir_fn fn,
+                              void *data);
+
 /* A hook to report invalid files in pack directory */
 #define PACKDIR_FILE_PACK 1
 #define PACKDIR_FILE_IDX 2
@@ -44,6 +50,8 @@ extern void install_packed_git(struct repository *r, struct packed_git *pack);
 
 struct packed_git *get_packed_git(struct repository *r);
 struct list_head *get_packed_git_mru(struct repository *r);
+struct multi_pack_index *get_multi_pack_index(struct repository *r);
+struct packed_git *get_all_packs(struct repository *r);
 
 /*
  * Give a rough count of objects in the repository. This sacrifices accuracy
@@ -68,6 +76,8 @@ extern int open_pack_index(struct packed_git *);
  */
 extern void close_pack_index(struct packed_git *);
 
+extern uint32_t get_pack_fanout(struct packed_git *p, uint32_t value);
+
 extern unsigned char *use_pack(struct packed_git *, struct pack_window **, off_t, unsigned long *);
 extern void close_pack_windows(struct packed_git *);
 extern void close_pack(struct packed_git *);
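
for_each_file_in_pack_dir() and its callback type factor the readdir() loop out of prepare_packed_git_one() so midx.c can reuse it; a callback only sees each entry's full path, that path's length, and the bare file name. For instance, a callback that merely counts .idx files might look like this (illustrative sketch, the helpers are made up):

#include "cache.h"
#include "object-store.h"
#include "packfile.h"

static void count_idx(const char *full_path, size_t full_path_len,
		      const char *file_name, void *data)
{
	if (ends_with(file_name, ".idx"))
		(*(unsigned int *)data)++;
}

static unsigned int count_pack_indexes(struct repository *r)
{
	unsigned int nr_idx = 0;

	for_each_file_in_pack_dir(r->objects->objectdir, count_idx, &nr_idx);
	return nr_idx;
}
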
index 56e0a5ede22c9396fc897bf1d3444dce92d8916f..b5c8594db60dd19fbe18b95ea79439c7bad3fa40 100644 (file)
@@ -40,24 +40,31 @@ void *patch_delta(const void *src_buf, unsigned long src_size,
                cmd = *data++;
                if (cmd & 0x80) {
                        unsigned long cp_off = 0, cp_size = 0;
-                       if (cmd & 0x01) cp_off = *data++;
-                       if (cmd & 0x02) cp_off |= (*data++ << 8);
-                       if (cmd & 0x04) cp_off |= (*data++ << 16);
-                       if (cmd & 0x08) cp_off |= ((unsigned) *data++ << 24);
-                       if (cmd & 0x10) cp_size = *data++;
-                       if (cmd & 0x20) cp_size |= (*data++ << 8);
-                       if (cmd & 0x40) cp_size |= (*data++ << 16);
+#define PARSE_CP_PARAM(bit, var, shift) do { \
+                       if (cmd & (bit)) { \
+                               if (data >= top) \
+                                       goto bad_length; \
+                               var |= ((unsigned) *data++ << (shift)); \
+                       } } while (0)
+                       PARSE_CP_PARAM(0x01, cp_off, 0);
+                       PARSE_CP_PARAM(0x02, cp_off, 8);
+                       PARSE_CP_PARAM(0x04, cp_off, 16);
+                       PARSE_CP_PARAM(0x08, cp_off, 24);
+                       PARSE_CP_PARAM(0x10, cp_size, 0);
+                       PARSE_CP_PARAM(0x20, cp_size, 8);
+                       PARSE_CP_PARAM(0x40, cp_size, 16);
+#undef PARSE_CP_PARAM
                        if (cp_size == 0) cp_size = 0x10000;
                        if (unsigned_add_overflows(cp_off, cp_size) ||
                            cp_off + cp_size > src_size ||
                            cp_size > size)
-                               break;
+                               goto bad_length;
                        memcpy(out, (char *) src_buf + cp_off, cp_size);
                        out += cp_size;
                        size -= cp_size;
                } else if (cmd) {
-                       if (cmd > size)
-                               break;
+                       if (cmd > size || cmd > top - data)
+                               goto bad_length;
                        memcpy(out, data, cmd);
                        out += cmd;
                        data += cmd;
@@ -75,6 +82,7 @@ void *patch_delta(const void *src_buf, unsigned long src_size,
 
        /* sanity check */
        if (data != top || size != 0) {
+               bad_length:
                error("delta replay has gone wild");
                bad:
                free(dst_buf);
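
For reference, the copy command parsed above packs its operands into optional trailing bytes: bits 0x01-0x08 of the command select which bytes of the 4-byte copy offset follow (least significant first), bits 0x10-0x40 select the bytes of the 3-byte copy size, and a resulting size of zero means 0x10000. A standalone worked example of that decoding, without the bounds checks this patch adds (illustrative only):

#include <stdio.h>

int main(void)
{
	/* 0x95 = copy command with offset bytes 0 and 2, size byte 0 */
	const unsigned char delta[] = { 0x95, 0x10, 0x02, 0x40 };
	const unsigned char *data = delta;
	unsigned char cmd = *data++;
	unsigned long cp_off = 0, cp_size = 0;

	if (cmd & 0x01) cp_off = *data++;
	if (cmd & 0x02) cp_off |= (*data++ << 8);
	if (cmd & 0x04) cp_off |= (*data++ << 16);
	if (cmd & 0x08) cp_off |= ((unsigned) *data++ << 24);
	if (cmd & 0x10) cp_size = *data++;
	if (cmd & 0x20) cp_size |= (*data++ << 8);
	if (cmd & 0x40) cp_size |= (*data++ << 16);
	if (cp_size == 0) cp_size = 0x10000;

	/* prints: copy 0x40 bytes from offset 0x20010 */
	printf("copy 0x%lx bytes from offset 0x%lx\n", cp_size, cp_off);
	return 0;
}
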
index 8f7c25d5dbf5ccdcbf27db51fbe1970addebf9a7..960ea2405412cc5100e1bda4cb179248d04284df 100644 (file)
@@ -28,14 +28,14 @@ int commit_patch_id(struct commit *commit, struct diff_options *options,
 /*
  * When we cannot load the full patch-id for both commits for whatever
  * reason, the function returns -1 (i.e. return error(...)). Despite
- * the "cmp" in the name of this function, the caller only cares about
+ * the "neq" in the name of this function, the caller only cares about
  * the return value being zero (a and b are equivalent) or non-zero (a
  * and b are different), and returning non-zero would keep both in the
  * result, even if they actually were equivalent, in order to err on
  * the side of safety.  The actual value being negative does not have
  * any significance; only that it is non-zero matters.
  */
-static int patch_id_cmp(const void *cmpfn_data,
+static int patch_id_neq(const void *cmpfn_data,
                        const void *entry,
                        const void *entry_or_key,
                        const void *unused_keydata)
@@ -53,7 +53,7 @@ static int patch_id_cmp(const void *cmpfn_data,
            commit_patch_id(b->commit, opt, &b->patch_id, 0))
                return error("Could not get patch ID for %s",
                        oid_to_hex(&b->commit->object.oid));
-       return oidcmp(&a->patch_id, &b->patch_id);
+       return !oideq(&a->patch_id, &b->patch_id);
 }
 
 int init_patch_ids(struct patch_ids *ids)
@@ -63,7 +63,7 @@ int init_patch_ids(struct patch_ids *ids)
        ids->diffopts.detect_rename = 0;
        ids->diffopts.flags.recursive = 1;
        diff_setup_done(&ids->diffopts);
-       hashmap_init(&ids->patches, patch_id_cmp, &ids->diffopts, 256);
+       hashmap_init(&ids->patches, patch_id_neq, &ids->diffopts, 256);
        return 0;
 }
 
index 71cd2437a3b33b343696bf96067603e8dc9e4464..f7365761f4725d6f7ddbb6d8c9c8fe92488de6ce 100644 (file)
@@ -78,7 +78,6 @@ static void preload_index(struct index_state *index,
 {
        int threads, i, work, offset;
        struct thread_data data[MAX_PARALLEL];
-       uint64_t start = getnanotime();
 
        if (!core_preload_index)
                return;
@@ -88,6 +87,7 @@ static void preload_index(struct index_state *index,
                threads = 2;
        if (threads < 2)
                return;
+       trace_performance_enter();
        if (threads > MAX_PARALLEL)
                threads = MAX_PARALLEL;
        offset = 0;
@@ -109,7 +109,7 @@ static void preload_index(struct index_state *index,
                if (pthread_join(p->pthread, NULL))
                        die("unable to join threaded lstat");
        }
-       trace_performance_since(start, "preload index");
+       trace_performance_leave("preload index");
 }
 #endif
 
index 98cf5228f9e30fe5622bdfb4c54d996cb3153808..8ca29e92815608437ccb1fb9909e78d5b4989529 100644 (file)
--- a/pretty.c
+++ b/pretty.c
@@ -1304,6 +1304,9 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
 
        if (skip_prefix(placeholder, "(trailers", &arg)) {
                struct process_trailer_options opts = PROCESS_TRAILER_OPTIONS_INIT;
+
+               opts.no_divider = 1;
+
                if (*arg == ':') {
                        arg++;
                        for (;;) {
index b6b9abac266f3a6a2abdfcdd815f80b7da930854..60edb2f518d6d696b8091ef6246791cd730f326f 100644 (file)
@@ -38,6 +38,14 @@ static int read_patches(const char *range, struct string_list *list)
 
        argv_array_pushl(&cp.args, "log", "--no-color", "-p", "--no-merges",
                        "--reverse", "--date-order", "--decorate=no",
+                       /*
+                        * Choose indicators that are not used anywhere
+                        * else in diffs, but still look reasonable
+                        * (e.g. will not be confusing when debugging)
+                        */
+                       "--output-indicator-new=>",
+                       "--output-indicator-old=<",
+                       "--output-indicator-context=#",
                        "--no-abbrev-commit", range,
                        NULL);
        cp.out = -1;
@@ -82,6 +90,7 @@ static int read_patches(const char *range, struct string_list *list)
                        strbuf_addch(&buf, '\n');
                        if (!util->diff_offset)
                                util->diff_offset = buf.len;
+                       strbuf_addch(&buf, ' ');
                        strbuf_addbuf(&buf, &line);
                } else if (in_header) {
                        if (starts_with(line.buf, "Author: ")) {
@@ -108,8 +117,19 @@ static int read_patches(const char *range, struct string_list *list)
                         * we are not interested.
                         */
                        continue;
-               else
+               else if (line.buf[0] == '>') {
+                       strbuf_addch(&buf, '+');
+                       strbuf_add(&buf, line.buf + 1, line.len - 1);
+               } else if (line.buf[0] == '<') {
+                       strbuf_addch(&buf, '-');
+                       strbuf_add(&buf, line.buf + 1, line.len - 1);
+               } else if (line.buf[0] == '#') {
+                       strbuf_addch(&buf, ' ');
+                       strbuf_add(&buf, line.buf + 1, line.len - 1);
+               } else {
+                       strbuf_addch(&buf, ' ');
                        strbuf_addbuf(&buf, &line);
+               }
 
                strbuf_addch(&buf, '\n');
                util->diffsize++;
@@ -323,7 +343,7 @@ static void output_pair_header(struct diff_options *diffopt,
        }
        strbuf_addf(buf, "%s\n", color_reset);
 
-       fwrite(buf->buf, buf->len, 1, stdout);
+       fwrite(buf->buf, buf->len, 1, diffopt->file);
 }
 
 static struct userdiff_driver no_func_name = {
@@ -409,8 +429,14 @@ static void output(struct string_list *a, struct string_list *b,
        strbuf_release(&dashes);
 }
 
+static struct strbuf *output_prefix_cb(struct diff_options *opt, void *data)
+{
+       return data;
+}
+
 int show_range_diff(const char *range1, const char *range2,
-                   int creation_factor, struct diff_options *diffopt)
+                   int creation_factor, int dual_color,
+                   struct diff_options *diffopt)
 {
        int res = 0;
 
@@ -423,9 +449,23 @@ int show_range_diff(const char *range1, const char *range2,
                res = error(_("could not parse log for '%s'"), range2);
 
        if (!res) {
+               struct diff_options opts;
+               struct strbuf indent = STRBUF_INIT;
+
+               memcpy(&opts, diffopt, sizeof(opts));
+               opts.output_format = DIFF_FORMAT_PATCH;
+               opts.flags.suppress_diff_headers = 1;
+               opts.flags.dual_color_diffed_diffs = dual_color;
+               opts.output_prefix = output_prefix_cb;
+               strbuf_addstr(&indent, "    ");
+               opts.output_prefix_data = &indent;
+               diff_setup_done(&opts);
+
                find_exact_matches(&branch1, &branch2);
                get_correspondences(&branch1, &branch2, creation_factor);
-               output(&branch1, &branch2, diffopt);
+               output(&branch1, &branch2, &opts);
+
+               strbuf_release(&indent);
        }
 
        string_list_clear(&branch1, 1);
index 2407d46a301e5554bf3a4f855d20cc408b1f89bd..190593f0c78151c99cc6e87161137b99c2615c80 100644 (file)
@@ -3,7 +3,10 @@
 
 #include "diff.h"
 
+#define RANGE_DIFF_CREATION_FACTOR_DEFAULT 60
+
 int show_range_diff(const char *range1, const char *range2,
-                   int creation_factor, struct diff_options *diffopt);
+                   int creation_factor, int dual_color,
+                   struct diff_options *diffopt);
 
 #endif
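
Putting the header change together with the new default: a caller now supplies the dual-color flag next to the creation factor. A rough sketch of an invocation, with placeholder ranges and a diff_options assumed to be set up by the caller:

#include "range-diff.h"

static int compare_iterations(struct diff_options *diffopt)
{
	/* the ranges here are placeholders; real callers build them from argv */
	return show_range_diff("master..topic-v1", "master..topic-v2",
			       RANGE_DIFF_CREATION_FACTOR_DEFAULT,
			       1 /* dual_color */, diffopt);
}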
index 7b1354d7590a70ecbd6e508bdd95eafd4793efcc..8d04d78a5877aab74dd35e24a31bcfe3e0a3417d 100644 (file)
@@ -213,7 +213,7 @@ static int ce_compare_data(const struct cache_entry *ce, struct stat *st)
        if (fd >= 0) {
                struct object_id oid;
                if (!index_fd(&oid, fd, st, OBJ_BLOB, ce->name, 0))
-                       match = oidcmp(&oid, &ce->oid);
+                       match = !oideq(&oid, &ce->oid);
                /* index_fd() closed the file descriptor already */
        }
        return match;
@@ -254,7 +254,7 @@ static int ce_compare_gitlink(const struct cache_entry *ce)
         */
        if (resolve_gitlink_ref(ce->name, "HEAD", &oid) < 0)
                return 0;
-       return oidcmp(&oid, &ce->oid);
+       return !oideq(&oid, &ce->oid);
 }
 
 static int ce_modified_check_fs(const struct cache_entry *ce, struct stat *st)
@@ -767,7 +767,7 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st,
        /* It was suspected to be racily clean, but it turns out to be Ok */
        was_same = (alias &&
                    !ce_stage(alias) &&
-                   !oidcmp(&alias->oid, &ce->oid) &&
+                   oideq(&alias->oid, &ce->oid) &&
                    ce->ce_mode == alias->ce_mode);
 
        if (pretend)
@@ -1476,8 +1476,8 @@ int refresh_index(struct index_state *istate, unsigned int flags,
        const char *typechange_fmt;
        const char *added_fmt;
        const char *unmerged_fmt;
-       uint64_t start = getnanotime();
 
+       trace_performance_enter();
        modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
        deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
        typechange_fmt = (in_porcelain ? "T\t%s\n" : "%s needs update\n");
@@ -1547,7 +1547,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
 
                replace_index_entry(istate, i, new_entry);
        }
-       trace_performance_since(start, "refresh index");
+       trace_performance_leave("refresh index");
        return has_errors;
 }
 
@@ -1668,7 +1668,7 @@ static int verify_hdr(struct cache_header *hdr, unsigned long size)
        the_hash_algo->init_fn(&c);
        the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
        the_hash_algo->final_fn(hash, &c);
-       if (hashcmp(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
+       if (!hasheq(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
                return error("bad index file sha1 signature");
        return 0;
 }
@@ -2002,7 +2002,6 @@ static void freshen_shared_index(const char *shared_index, int warn)
 int read_index_from(struct index_state *istate, const char *path,
                    const char *gitdir)
 {
-       uint64_t start = getnanotime();
        struct split_index *split_index;
        int ret;
        char *base_oid_hex;
@@ -2012,8 +2011,9 @@ int read_index_from(struct index_state *istate, const char *path,
        if (istate->initialized)
                return istate->cache_nr;
 
+       trace_performance_enter();
        ret = do_read_index(istate, path, 0);
-       trace_performance_since(start, "read cache %s", path);
+       trace_performance_leave("read cache %s", path);
 
        split_index = istate->split_index;
        if (!split_index || is_null_oid(&split_index->base_oid)) {
@@ -2021,6 +2021,7 @@ int read_index_from(struct index_state *istate, const char *path,
                return ret;
        }
 
+       trace_performance_enter();
        if (split_index->base)
                discard_index(split_index->base);
        else
@@ -2029,7 +2030,7 @@ int read_index_from(struct index_state *istate, const char *path,
        base_oid_hex = oid_to_hex(&split_index->base_oid);
        base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
        ret = do_read_index(split_index->base, base_path, 1);
-       if (oidcmp(&split_index->base_oid, &split_index->base->oid))
+       if (!oideq(&split_index->base_oid, &split_index->base->oid))
                die("broken index, expect %s in %s, got %s",
                    base_oid_hex, base_path,
                    oid_to_hex(&split_index->base->oid));
@@ -2037,8 +2038,8 @@ int read_index_from(struct index_state *istate, const char *path,
        freshen_shared_index(base_path, 0);
        merge_base_index(istate);
        post_read_index_from(istate);
-       trace_performance_since(start, "read cache %s", base_path);
        free(base_path);
+       trace_performance_leave("read cache %s", base_path);
        return ret;
 }
 
@@ -2395,7 +2396,7 @@ static int verify_index_from(const struct index_state *istate, const char *path)
        if (n != the_hash_algo->rawsz)
                goto out;
 
-       if (hashcmp(istate->oid.hash, hash))
+       if (!hasheq(istate->oid.hash, hash))
                goto out;
 
        close(fd);
@@ -2743,6 +2744,9 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock,
        int new_shared_index, ret;
        struct split_index *si = istate->split_index;
 
+       if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
+               cache_tree_verify(istate);
+
        if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
                if (flags & COMMIT_LOCK)
                        rollback_lock_file(lock);
@@ -2939,6 +2943,8 @@ void move_index_extensions(struct index_state *dst, struct index_state *src)
 {
        dst->untracked = src->untracked;
        src->untracked = NULL;
+       dst->cache_tree = src->cache_tree;
+       src->cache_tree = NULL;
 }
 
 struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
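
Much of the churn in this file, and in refs.c, remote.c and the ref backends below, is the mechanical oidcmp()/hashcmp() to oideq()/hasheq() conversion: the *eq() helpers return non-zero on equality, so "!oidcmp(a, b)" becomes "oideq(a, b)" and a bare "oidcmp(a, b)" used as a condition becomes "!oideq(a, b)". A one-function sketch of the equivalence:

#include "cache.h"

/*
 * The two spellings are equivalent; the *eq() form states the intent
 * directly instead of pretending an ordering is needed.
 */
static int same_object(const struct object_id *a, const struct object_id *b)
{
	return oideq(a, b);		/* new spelling used in this series  */
	/* return !oidcmp(a, b); */	/* old spelling being converted away */
}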
index 0bccfceff2ae31200019838d9f2b67e13e32ef6f..e1bcb4ca8a1977e97b26da47962dfcd3781a021e 100644 (file)
@@ -19,6 +19,7 @@
 #include "wt-status.h"
 #include "commit-slab.h"
 #include "commit-graph.h"
+#include "commit-reach.h"
 
 static struct ref_msg {
        const char *gone;
@@ -263,6 +264,8 @@ static int trailers_atom_parser(const struct ref_format *format, struct used_ato
        struct string_list params = STRING_LIST_INIT_DUP;
        int i;
 
+       atom->u.contents.trailer_opts.no_divider = 1;
+
        if (arg) {
                string_list_split(&params, arg, ',', -1);
                for (i = 0; i < params.nr; i++) {
@@ -1673,144 +1676,6 @@ static int get_ref_atom_value(struct ref_array_item *ref, int atom,
        return 0;
 }
 
-/*
- * Unknown has to be "0" here, because that's the default value for
- * contains_cache slab entries that have not yet been assigned.
- */
-enum contains_result {
-       CONTAINS_UNKNOWN = 0,
-       CONTAINS_NO,
-       CONTAINS_YES
-};
-
-define_commit_slab(contains_cache, enum contains_result);
-
-struct ref_filter_cbdata {
-       struct ref_array *array;
-       struct ref_filter *filter;
-       struct contains_cache contains_cache;
-       struct contains_cache no_contains_cache;
-};
-
-/*
- * Mimicking the real stack, this stack lives on the heap, avoiding stack
- * overflows.
- *
- * At each recursion step, the stack items points to the commits whose
- * ancestors are to be inspected.
- */
-struct contains_stack {
-       int nr, alloc;
-       struct contains_stack_entry {
-               struct commit *commit;
-               struct commit_list *parents;
-       } *contains_stack;
-};
-
-static int in_commit_list(const struct commit_list *want, struct commit *c)
-{
-       for (; want; want = want->next)
-               if (!oidcmp(&want->item->object.oid, &c->object.oid))
-                       return 1;
-       return 0;
-}
-
-/*
- * Test whether the candidate is contained in the list.
- * Do not recurse to find out, though, but return -1 if inconclusive.
- */
-static enum contains_result contains_test(struct commit *candidate,
-                                         const struct commit_list *want,
-                                         struct contains_cache *cache,
-                                         uint32_t cutoff)
-{
-       enum contains_result *cached = contains_cache_at(cache, candidate);
-
-       /* If we already have the answer cached, return that. */
-       if (*cached)
-               return *cached;
-
-       /* or are we it? */
-       if (in_commit_list(want, candidate)) {
-               *cached = CONTAINS_YES;
-               return CONTAINS_YES;
-       }
-
-       /* Otherwise, we don't know; prepare to recurse */
-       parse_commit_or_die(candidate);
-
-       if (candidate->generation < cutoff)
-               return CONTAINS_NO;
-
-       return CONTAINS_UNKNOWN;
-}
-
-static void push_to_contains_stack(struct commit *candidate, struct contains_stack *contains_stack)
-{
-       ALLOC_GROW(contains_stack->contains_stack, contains_stack->nr + 1, contains_stack->alloc);
-       contains_stack->contains_stack[contains_stack->nr].commit = candidate;
-       contains_stack->contains_stack[contains_stack->nr++].parents = candidate->parents;
-}
-
-static enum contains_result contains_tag_algo(struct commit *candidate,
-                                             const struct commit_list *want,
-                                             struct contains_cache *cache)
-{
-       struct contains_stack contains_stack = { 0, 0, NULL };
-       enum contains_result result;
-       uint32_t cutoff = GENERATION_NUMBER_INFINITY;
-       const struct commit_list *p;
-
-       for (p = want; p; p = p->next) {
-               struct commit *c = p->item;
-               load_commit_graph_info(the_repository, c);
-               if (c->generation < cutoff)
-                       cutoff = c->generation;
-       }
-
-       result = contains_test(candidate, want, cache, cutoff);
-       if (result != CONTAINS_UNKNOWN)
-               return result;
-
-       push_to_contains_stack(candidate, &contains_stack);
-       while (contains_stack.nr) {
-               struct contains_stack_entry *entry = &contains_stack.contains_stack[contains_stack.nr - 1];
-               struct commit *commit = entry->commit;
-               struct commit_list *parents = entry->parents;
-
-               if (!parents) {
-                       *contains_cache_at(cache, commit) = CONTAINS_NO;
-                       contains_stack.nr--;
-               }
-               /*
-                * If we just popped the stack, parents->item has been marked,
-                * therefore contains_test will return a meaningful yes/no.
-                */
-               else switch (contains_test(parents->item, want, cache, cutoff)) {
-               case CONTAINS_YES:
-                       *contains_cache_at(cache, commit) = CONTAINS_YES;
-                       contains_stack.nr--;
-                       break;
-               case CONTAINS_NO:
-                       entry->parents = parents->next;
-                       break;
-               case CONTAINS_UNKNOWN:
-                       push_to_contains_stack(parents->item, &contains_stack);
-                       break;
-               }
-       }
-       free(contains_stack.contains_stack);
-       return contains_test(candidate, want, cache, cutoff);
-}
-
-static int commit_contains(struct ref_filter *filter, struct commit *commit,
-                          struct commit_list *list, struct contains_cache *cache)
-{
-       if (filter->with_commit_tag_algo)
-               return contains_tag_algo(commit, list, cache) == CONTAINS_YES;
-       return is_descendant_of(commit, list);
-}
-
 /*
  * Return 1 if the refname matches one of the patterns, otherwise 0.
  * A pattern can be a literal prefix (e.g. a refname "refs/heads/master"
@@ -2046,6 +1911,13 @@ static int filter_ref_kind(struct ref_filter *filter, const char *refname)
        return ref_kind_from_refname(refname);
 }
 
+struct ref_filter_cbdata {
+       struct ref_array *array;
+       struct ref_filter *filter;
+       struct contains_cache contains_cache;
+       struct contains_cache no_contains_cache;
+};
+
 /*
  * A call-back given to for_each_ref().  Filter refs and keep them for
  * later object processing.
diff --git a/refs.c b/refs.c
index de81c7be7ca8d3ca033b34a61f33b0bff069932f..bbcac921b6d78fd598380608e7195675aae1d4f6 100644 (file)
--- a/refs.c
+++ b/refs.c
@@ -702,7 +702,7 @@ static int write_pseudoref(const char *pseudoref, const struct object_id *oid,
                                    pseudoref);
                        rollback_lock_file(&lock);
                        goto done;
-               } else if (oidcmp(&actual_old_oid, old_oid)) {
+               } else if (!oideq(&actual_old_oid, old_oid)) {
                        strbuf_addf(err, _("unexpected object ID when writing '%s'"),
                                    pseudoref);
                        rollback_lock_file(&lock);
@@ -744,7 +744,7 @@ static int delete_pseudoref(const char *pseudoref, const struct object_id *old_o
                }
                if (read_ref(pseudoref, &actual_old_oid))
                        die(_("could not read ref '%s'"), pseudoref);
-               if (oidcmp(&actual_old_oid, old_oid)) {
+               if (!oideq(&actual_old_oid, old_oid)) {
                        error(_("unexpected object ID when deleting '%s'"),
                              pseudoref);
                        rollback_lock_file(&lock);
@@ -875,13 +875,13 @@ static int read_ref_at_ent(struct object_id *ooid, struct object_id *noid,
                 */
                if (!is_null_oid(&cb->ooid)) {
                        oidcpy(cb->oid, noid);
-                       if (oidcmp(&cb->ooid, noid))
+                       if (!oideq(&cb->ooid, noid))
                                warning(_("log for ref %s has gap after %s"),
                                        cb->refname, show_date(cb->date, cb->tz, DATE_MODE(RFC2822)));
                }
                else if (cb->date == cb->at_time)
                        oidcpy(cb->oid, noid);
-               else if (oidcmp(noid, cb->oid))
+               else if (!oideq(noid, cb->oid))
                        warning(_("log for ref %s unexpectedly ended on %s"),
                                cb->refname, show_date(cb->date, cb->tz,
                                                       DATE_MODE(RFC2822)));
@@ -1394,17 +1394,50 @@ struct ref_iterator *refs_ref_iterator_begin(
  * non-zero value, stop the iteration and return that value;
  * otherwise, return 0.
  */
+static int do_for_each_repo_ref(struct repository *r, const char *prefix,
+                               each_repo_ref_fn fn, int trim, int flags,
+                               void *cb_data)
+{
+       struct ref_iterator *iter;
+       struct ref_store *refs = get_main_ref_store(r);
+
+       if (!refs)
+               return 0;
+
+       iter = refs_ref_iterator_begin(refs, prefix, trim, flags);
+
+       return do_for_each_repo_ref_iterator(r, iter, fn, cb_data);
+}
+
+struct do_for_each_ref_help {
+       each_ref_fn *fn;
+       void *cb_data;
+};
+
+static int do_for_each_ref_helper(struct repository *r,
+                                 const char *refname,
+                                 const struct object_id *oid,
+                                 int flags,
+                                 void *cb_data)
+{
+       struct do_for_each_ref_help *hp = cb_data;
+
+       return hp->fn(refname, oid, flags, hp->cb_data);
+}
+
 static int do_for_each_ref(struct ref_store *refs, const char *prefix,
                           each_ref_fn fn, int trim, int flags, void *cb_data)
 {
        struct ref_iterator *iter;
+       struct do_for_each_ref_help hp = { fn, cb_data };
 
        if (!refs)
                return 0;
 
        iter = refs_ref_iterator_begin(refs, prefix, trim, flags);
 
-       return do_for_each_ref_iterator(iter, fn, cb_data);
+       return do_for_each_repo_ref_iterator(the_repository, iter,
+                                       do_for_each_ref_helper, &hp);
 }
 
 int refs_for_each_ref(struct ref_store *refs, each_ref_fn fn, void *cb_data)
@@ -1449,12 +1482,11 @@ int refs_for_each_fullref_in(struct ref_store *refs, const char *prefix,
        return do_for_each_ref(refs, prefix, fn, 0, flag, cb_data);
 }
 
-int for_each_replace_ref(struct repository *r, each_ref_fn fn, void *cb_data)
+int for_each_replace_ref(struct repository *r, each_repo_ref_fn fn, void *cb_data)
 {
-       return do_for_each_ref(get_main_ref_store(r),
-                              git_replace_ref_base, fn,
-                              strlen(git_replace_ref_base),
-                              DO_FOR_EACH_INCLUDE_BROKEN, cb_data);
+       return do_for_each_repo_ref(r, git_replace_ref_base, fn,
+                                   strlen(git_replace_ref_base),
+                                   DO_FOR_EACH_INCLUDE_BROKEN, cb_data);
 }
 
 int for_each_namespaced_ref(each_ref_fn fn, void *cb_data)
@@ -2033,10 +2065,12 @@ int refs_verify_refname_available(struct ref_store *refs,
 int refs_for_each_reflog(struct ref_store *refs, each_ref_fn fn, void *cb_data)
 {
        struct ref_iterator *iter;
+       struct do_for_each_ref_help hp = { fn, cb_data };
 
        iter = refs->be->reflog_iterator_begin(refs);
 
-       return do_for_each_ref_iterator(iter, fn, cb_data);
+       return do_for_each_repo_ref_iterator(the_repository, iter,
+                                            do_for_each_ref_helper, &hp);
 }
 
 int for_each_reflog(each_ref_fn fn, void *cb_data)
diff --git a/refs.h b/refs.h
index bd52c1bbae3a68fe8ca8f9e6cae7cc54bdbf9852..6cc0397679fd55bfdfc72b7a7f4cbf3ee5d028a0 100644 (file)
--- a/refs.h
+++ b/refs.h
@@ -276,6 +276,16 @@ struct ref_transaction;
 typedef int each_ref_fn(const char *refname,
                        const struct object_id *oid, int flags, void *cb_data);
 
+/*
+ * The same as each_ref_fn, but also with a repository argument that
+ * contains the repository associated with the callback.
+ */
+typedef int each_repo_ref_fn(struct repository *r,
+                            const char *refname,
+                            const struct object_id *oid,
+                            int flags,
+                            void *cb_data);
+
 /*
  * The following functions invoke the specified callback function for
  * each reference indicated.  If the function ever returns a nonzero
@@ -309,7 +319,7 @@ int for_each_fullref_in(const char *prefix, each_ref_fn fn, void *cb_data,
 int for_each_tag_ref(each_ref_fn fn, void *cb_data);
 int for_each_branch_ref(each_ref_fn fn, void *cb_data);
 int for_each_remote_ref(each_ref_fn fn, void *cb_data);
-int for_each_replace_ref(struct repository *r, each_ref_fn fn, void *cb_data);
+int for_each_replace_ref(struct repository *r, each_repo_ref_fn fn, void *cb_data);
 int for_each_glob_ref(each_ref_fn fn, const char *pattern, void *cb_data);
 int for_each_glob_ref_in(each_ref_fn fn, const char *pattern,
                         const char *prefix, void *cb_data);
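
With the new typedef, replace-ref iteration hands the callback the repository being walked instead of leaving it to reach for the_repository. A hedged sketch of a caller; the callback name and the printing are hypothetical:

#include "cache.h"
#include "refs.h"
#include "repository.h"

static int print_replace_ref(struct repository *r,
			     const char *refname,
			     const struct object_id *oid,
			     int flags, void *cb_data)
{
	printf("%s %s\n", refname, oid_to_hex(oid));
	return 0;	/* a non-zero return stops the iteration */
}

/* ... for_each_replace_ref(the_repository, print_replace_ref, NULL); ... */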
index 1f1a98e4cb0610039e66f24dab59205377d90144..16ef9325e0d9985c36fd525016340246caaa1af8 100644 (file)
@@ -841,7 +841,7 @@ static int verify_lock(struct ref_store *ref_store, struct ref_lock *lock,
                        return 0;
                }
        }
-       if (old_oid && oidcmp(&lock->old_oid, old_oid)) {
+       if (old_oid && !oideq(&lock->old_oid, old_oid)) {
                strbuf_addf(err, "ref '%s' is at %s but expected %s",
                            lock->ref_name,
                            oid_to_hex(&lock->old_oid),
@@ -2307,7 +2307,7 @@ static int check_old_oid(struct ref_update *update, struct object_id *oid,
                         struct strbuf *err)
 {
        if (!(update->flags & REF_HAVE_OLD) ||
-                  !oidcmp(oid, &update->old_oid))
+                  oideq(oid, &update->old_oid))
                return 0;
 
        if (is_null_oid(&update->old_oid))
@@ -2443,7 +2443,7 @@ static int lock_ref_for_update(struct files_ref_store *refs,
            !(update->flags & REF_DELETING) &&
            !(update->flags & REF_LOG_ONLY)) {
                if (!(update->type & REF_ISSYMREF) &&
-                   !oidcmp(&lock->old_oid, &update->new_oid)) {
+                   oideq(&lock->old_oid, &update->new_oid)) {
                        /*
                         * The reference already has the desired
                         * value, so we don't need to write it.
index 2ac91ac3401c87108b9cdb4983acb7c165df82d9..629e00a122a7a867ae3350ce35469c2ceb1259a1 100644 (file)
@@ -407,15 +407,15 @@ struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
 
 struct ref_iterator *current_ref_iter = NULL;
 
-int do_for_each_ref_iterator(struct ref_iterator *iter,
-                            each_ref_fn fn, void *cb_data)
+int do_for_each_repo_ref_iterator(struct repository *r, struct ref_iterator *iter,
+                                 each_repo_ref_fn fn, void *cb_data)
 {
        int retval = 0, ok;
        struct ref_iterator *old_ref_iter = current_ref_iter;
 
        current_ref_iter = iter;
        while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
-               retval = fn(iter->refname, iter->oid, iter->flags, cb_data);
+               retval = fn(r, iter->refname, iter->oid, iter->flags, cb_data);
                if (retval) {
                        /*
                         * If ref_iterator_abort() returns ITER_ERROR,
index d447a731da0932e60c918030c7cd07a1843dfd35..74e2996e93ad0033bd4650f8dd7d894a46d563b3 100644 (file)
@@ -1160,7 +1160,7 @@ static int write_with_updates(struct packed_ref_store *refs,
                                                    "reference already exists",
                                                    update->refname);
                                        goto error;
-                               } else if (oidcmp(&update->old_oid, iter->oid)) {
+                               } else if (!oideq(&update->old_oid, iter->oid)) {
                                        strbuf_addf(err, "cannot update ref '%s': "
                                                    "is at %s but expected %s",
                                                    update->refname,
index 640245d3b9f23a8ae7866dd477a5ed9bbfc71cde..a01a0aff9c77280d7b3e605230af822fdab24ca9 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef REFS_PACKED_BACKEND_H
 #define REFS_PACKED_BACKEND_H
 
+struct ref_transaction;
+
 /*
  * Support for storing references in a `packed-refs` file.
  *
index 9b110c8494ff802fea730570f8c8f3b522292dbf..b7052f72e2f4e61ea9f2bba5efee262e6db8fd4a 100644 (file)
@@ -272,7 +272,7 @@ static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2
                /* This is impossible by construction */
                die("Reference directory conflict: %s", ref1->name);
 
-       if (oidcmp(&ref1->u.value.oid, &ref2->u.value.oid))
+       if (!oideq(&ref1->u.value.oid, &ref2->u.value.oid))
                die("Duplicated ref, and SHA1s don't match: %s", ref1->name);
 
        warning("Duplicated ref: %s", ref1->name);
index eda65e73edd2d978e7c07065b888219f42348d87..3bfb89d2b343e958ed094a7fd922f2fd6e0c4178 100644 (file)
@@ -1,7 +1,10 @@
 #ifndef REFS_REF_CACHE_H
 #define REFS_REF_CACHE_H
 
+#include "cache.h"
+
 struct ref_dir;
+struct ref_store;
 
 /*
  * If this ref_cache is filled lazily, this function is used to load
index 04425d6d1e45e2401454e7b89b0ed4b3959926de..f2d8c0123a7724bca18eb76de71748d3760391ee 100644 (file)
@@ -1,8 +1,12 @@
 #ifndef REFS_REFS_INTERNAL_H
 #define REFS_REFS_INTERNAL_H
 
+#include "cache.h"
+#include "refs.h"
 #include "iterator.h"
 
+struct ref_transaction;
+
 /*
  * Data structures and functions for the internal use of the refs
  * module. Code outside of the refs module should use only the public
@@ -282,7 +286,7 @@ int refs_rename_ref_available(struct ref_store *refs,
  *
  *             // Access information about the current reference:
  *             if (!(iter->flags & REF_ISSYMREF))
- *                     printf("%s is %s\n", iter->refname, oid_to_hex(&iter->oid));
+ *                     printf("%s is %s\n", iter->refname, oid_to_hex(iter->oid));
  *
  *             // If you need to peel the reference:
  *             ref_iterator_peel(iter, &oid);
@@ -474,8 +478,9 @@ extern struct ref_iterator *current_ref_iter;
  * adapter between the callback style of reference iteration and the
  * iterator style.
  */
-int do_for_each_ref_iterator(struct ref_iterator *iter,
-                            each_ref_fn fn, void *cb_data);
+int do_for_each_repo_ref_iterator(struct repository *r,
+                                 struct ref_iterator *iter,
+                                 each_repo_ref_fn fn, void *cb_data);
 
 /*
  * Only include per-worktree refs in a do_for_each_ref*() iteration.
index fb28309e850518018667dc746c630acc1f497981..762a55a75f6d9d3c510bfbf9ff0d41bc0a1afb6e 100644 (file)
@@ -178,7 +178,7 @@ static int set_option(const char *name, const char *value)
                options.no_dependents = 1;
                return 0;
        } else if (!strcmp(name, "filter")) {
-               options.filter = xstrdup(value);;
+               options.filter = xstrdup(value);
                return 0;
        } else {
                return 1 /* unsupported */;
index 7f6277a1451d147fc5af4ae2910e7c40dd330aec..682f2a01f949ce942597a9190cecfc991db0108e 100644 (file)
--- a/remote.c
+++ b/remote.c
@@ -12,6 +12,7 @@
 #include "string-list.h"
 #include "mergesort.h"
 #include "argv-array.h"
+#include "commit-reach.h"
 
 enum map_direction { FROM_SRC, FROM_DST };
 
@@ -1388,7 +1389,7 @@ void set_ref_status_for_push(struct ref *remote_refs, int send_mirror,
 
                ref->deletion = is_null_oid(&ref->new_oid);
                if (!ref->deletion &&
-                       !oidcmp(&ref->old_oid, &ref->new_oid)) {
+                       oideq(&ref->old_oid, &ref->new_oid)) {
                        ref->status = REF_STATUS_UPTODATE;
                        continue;
                }
@@ -1403,7 +1404,7 @@ void set_ref_status_for_push(struct ref *remote_refs, int send_mirror,
                 * branch.
                 */
                if (ref->expect_old_sha1) {
-                       if (oidcmp(&ref->old_oid, &ref->old_oid_expect))
+                       if (!oideq(&ref->old_oid, &ref->old_oid_expect))
                                reject_reason = REF_STATUS_REJECT_STALE;
                        else
                                /* If the ref isn't stale then force the update. */
@@ -1791,55 +1792,6 @@ int resolve_remote_symref(struct ref *ref, struct ref *list)
        return 1;
 }
 
-static void unmark_and_free(struct commit_list *list, unsigned int mark)
-{
-       while (list) {
-               struct commit *commit = pop_commit(&list);
-               commit->object.flags &= ~mark;
-       }
-}
-
-int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid)
-{
-       struct object *o;
-       struct commit *old_commit, *new_commit;
-       struct commit_list *list, *used;
-       int found = 0;
-
-       /*
-        * Both new_commit and old_commit must be commit-ish and new_commit is descendant of
-        * old_commit.  Otherwise we require --force.
-        */
-       o = deref_tag(the_repository, parse_object(the_repository, old_oid),
-                     NULL, 0);
-       if (!o || o->type != OBJ_COMMIT)
-               return 0;
-       old_commit = (struct commit *) o;
-
-       o = deref_tag(the_repository, parse_object(the_repository, new_oid),
-                     NULL, 0);
-       if (!o || o->type != OBJ_COMMIT)
-               return 0;
-       new_commit = (struct commit *) o;
-
-       if (parse_commit(new_commit) < 0)
-               return 0;
-
-       used = list = NULL;
-       commit_list_insert(new_commit, &list);
-       while (list) {
-               new_commit = pop_most_recent_commit(&list, TMP_MARK);
-               commit_list_insert(new_commit, &used);
-               if (new_commit == old_commit) {
-                       found = 1;
-                       break;
-               }
-       }
-       unmark_and_free(list, TMP_MARK);
-       unmark_and_free(used, TMP_MARK);
-       return found;
-}
-
 /*
  * Lookup the upstream branch for the given branch and if present, optionally
  * compute the commit ahead/behind values for the pair.
@@ -2049,7 +2001,7 @@ struct ref *guess_remote_head(const struct ref *head,
        /* If refs/heads/master could be right, it is. */
        if (!all) {
                r = find_ref_by_name(refs, "refs/heads/master");
-               if (r && !oidcmp(&r->old_oid, &head->old_oid))
+               if (r && oideq(&r->old_oid, &head->old_oid))
                        return copy_ref(r);
        }
 
@@ -2057,7 +2009,7 @@ struct ref *guess_remote_head(const struct ref *head,
        for (r = refs; r; r = r->next) {
                if (r != head &&
                    starts_with(r->name, "refs/heads/") &&
-                   !oidcmp(&r->old_oid, &head->old_oid)) {
+                   oideq(&r->old_oid, &head->old_oid)) {
                        *tail = copy_ref(r);
                        tail = &((*tail)->next);
                        if (!all)
index 88f8480c71a2e5ac494ef65532619a4322c1d32a..da53ad570b91dbd4a820ffc79541fb0a4915ff6b 100644 (file)
--- a/remote.h
+++ b/remote.h
@@ -151,7 +151,6 @@ extern struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
                                    const struct string_list *server_options);
 
 int resolve_remote_symref(struct ref *ref, struct ref *list);
-int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid);
 
 /*
  * Remove and free all but the first of any entries in the input list
index 4ec77ce41848311a912256046bd2bf8dc9ee63c0..e295e87943102c2a1ad903aea1a634d34bf603a0 100644 (file)
@@ -6,7 +6,8 @@
 #include "repository.h"
 #include "commit.h"
 
-static int register_replace_ref(const char *refname,
+static int register_replace_ref(struct repository *r,
+                               const char *refname,
                                const struct object_id *oid,
                                int flag, void *cb_data)
 {
@@ -25,13 +26,13 @@ static int register_replace_ref(const char *refname,
        oidcpy(&repl_obj->replacement, oid);
 
        /* Register new object */
-       if (oidmap_put(the_repository->objects->replace_map, repl_obj))
+       if (oidmap_put(r->objects->replace_map, repl_obj))
                die(_("duplicate replace ref: %s"), refname);
 
        return 0;
 }
 
-static void prepare_replace_object(struct repository *r)
+void prepare_replace_object(struct repository *r)
 {
        if (r->objects->replace_map)
                return;
index 9345e105ddcaa38cf9e856df70d960245a3b696c..16528df942f79e3d7ce65d99e0103395d0c69f51 100644 (file)
@@ -10,6 +10,8 @@ struct replace_object {
        struct object_id replacement;
 };
 
+void prepare_replace_object(struct repository *r);
+
 /*
  * This internal function is only declared here for the benefit of
  * lookup_replace_object().  Please do not call it directly.
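
Exposing prepare_replace_object() lets callers force the lazy load of a repository's replacement map before consulting it. A rough sketch under that assumption; the lookup is the plain oidmap primitive, not a new API:

#include "cache.h"
#include "oidmap.h"
#include "replace-object.h"
#include "repository.h"

static int has_replacement(struct repository *r, const struct object_id *oid)
{
	prepare_replace_object(r);	/* lazily fills r->objects->replace_map */
	return oidmap_get(r->objects->replace_map, oid) != NULL;
}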
index c7787aa07f80f00589e3c4f4e4e3cc825c43044e..7aa149e849775f110231a54cda7b71f60e75cb4f 100644 (file)
--- a/rerere.c
+++ b/rerere.c
@@ -213,7 +213,7 @@ static void read_rr(struct string_list *rr)
 
                /* There has to be the hash, tab, path and then NUL */
                if (buf.len < 42 || get_sha1_hex(buf.buf, sha1))
-                       die("corrupt MERGE_RR");
+                       die(_("corrupt MERGE_RR"));
 
                if (buf.buf[40] != '.') {
                        variant = 0;
@@ -222,10 +222,10 @@ static void read_rr(struct string_list *rr)
                        errno = 0;
                        variant = strtol(buf.buf + 41, &path, 10);
                        if (errno)
-                               die("corrupt MERGE_RR");
+                               die(_("corrupt MERGE_RR"));
                }
                if (*(path++) != '\t')
-                       die("corrupt MERGE_RR");
+                       die(_("corrupt MERGE_RR"));
                buf.buf[40] = '\0';
                id = new_rerere_id_hex(buf.buf);
                id->variant = variant;
@@ -260,12 +260,12 @@ static int write_rr(struct string_list *rr, int out_fd)
                                    rr->items[i].string, 0);
 
                if (write_in_full(out_fd, buf.buf, buf.len) < 0)
-                       die("unable to write rerere record");
+                       die(_("unable to write rerere record"));
 
                strbuf_release(&buf);
        }
        if (commit_lock_file(&write_lock) != 0)
-               die("unable to write rerere record");
+               die(_("unable to write rerere record"));
        return 0;
 }
 
@@ -303,38 +303,6 @@ static void rerere_io_putstr(const char *str, struct rerere_io *io)
                ferr_puts(str, io->output, &io->wrerror);
 }
 
-/*
- * Write a conflict marker to io->output (if defined).
- */
-static void rerere_io_putconflict(int ch, int size, struct rerere_io *io)
-{
-       char buf[64];
-
-       while (size) {
-               if (size <= sizeof(buf) - 2) {
-                       memset(buf, ch, size);
-                       buf[size] = '\n';
-                       buf[size + 1] = '\0';
-                       size = 0;
-               } else {
-                       int sz = sizeof(buf) - 1;
-
-                       /*
-                        * Make sure we will not write everything out
-                        * in this round by leaving at least 1 byte
-                        * for the next round, giving the next round
-                        * a chance to add the terminating LF.  Yuck.
-                        */
-                       if (size <= sz)
-                               sz -= (sz - size) + 1;
-                       memset(buf, ch, sz);
-                       buf[sz] = '\0';
-                       size -= sz;
-               }
-               rerere_io_putstr(buf, io);
-       }
-}
-
 static void rerere_io_putmem(const char *mem, size_t sz, struct rerere_io *io)
 {
        if (io->output)
@@ -385,89 +353,109 @@ static int is_cmarker(char *buf, int marker_char, int marker_size)
        return isspace(*buf);
 }
 
-/*
- * Read contents a file with conflicts, normalize the conflicts
- * by (1) discarding the common ancestor version in diff3-style,
- * (2) reordering our side and their side so that whichever sorts
- * alphabetically earlier comes before the other one, while
- * computing the "conflict ID", which is just an SHA-1 hash of
- * one side of the conflict, NUL, the other side of the conflict,
- * and NUL concatenated together.
- *
- * Return the number of conflict hunks found.
- *
- * NEEDSWORK: the logic and theory of operation behind this conflict
- * normalization may deserve to be documented somewhere, perhaps in
- * Documentation/technical/rerere.txt.
- */
-static int handle_path(unsigned char *sha1, struct rerere_io *io, int marker_size)
+static void rerere_strbuf_putconflict(struct strbuf *buf, int ch, size_t size)
+{
+       strbuf_addchars(buf, ch, size);
+       strbuf_addch(buf, '\n');
+}
+
+static int handle_conflict(struct strbuf *out, struct rerere_io *io,
+                          int marker_size, git_SHA_CTX *ctx)
 {
-       git_SHA_CTX ctx;
-       int hunk_no = 0;
        enum {
-               RR_CONTEXT = 0, RR_SIDE_1, RR_SIDE_2, RR_ORIGINAL
-       } hunk = RR_CONTEXT;
+               RR_SIDE_1 = 0, RR_SIDE_2, RR_ORIGINAL
+       } hunk = RR_SIDE_1;
        struct strbuf one = STRBUF_INIT, two = STRBUF_INIT;
-       struct strbuf buf = STRBUF_INIT;
-
-       if (sha1)
-               git_SHA1_Init(&ctx);
+       struct strbuf buf = STRBUF_INIT, conflict = STRBUF_INIT;
+       int has_conflicts = -1;
 
        while (!io->getline(&buf, io)) {
                if (is_cmarker(buf.buf, '<', marker_size)) {
-                       if (hunk != RR_CONTEXT)
-                               goto bad;
-                       hunk = RR_SIDE_1;
+                       if (handle_conflict(&conflict, io, marker_size, NULL) < 0)
+                               break;
+                       if (hunk == RR_SIDE_1)
+                               strbuf_addbuf(&one, &conflict);
+                       else
+                               strbuf_addbuf(&two, &conflict);
+                       strbuf_release(&conflict);
                } else if (is_cmarker(buf.buf, '|', marker_size)) {
                        if (hunk != RR_SIDE_1)
-                               goto bad;
+                               break;
                        hunk = RR_ORIGINAL;
                } else if (is_cmarker(buf.buf, '=', marker_size)) {
                        if (hunk != RR_SIDE_1 && hunk != RR_ORIGINAL)
-                               goto bad;
+                               break;
                        hunk = RR_SIDE_2;
                } else if (is_cmarker(buf.buf, '>', marker_size)) {
                        if (hunk != RR_SIDE_2)
-                               goto bad;
+                               break;
                        if (strbuf_cmp(&one, &two) > 0)
                                strbuf_swap(&one, &two);
-                       hunk_no++;
-                       hunk = RR_CONTEXT;
-                       rerere_io_putconflict('<', marker_size, io);
-                       rerere_io_putmem(one.buf, one.len, io);
-                       rerere_io_putconflict('=', marker_size, io);
-                       rerere_io_putmem(two.buf, two.len, io);
-                       rerere_io_putconflict('>', marker_size, io);
-                       if (sha1) {
-                               git_SHA1_Update(&ctx, one.buf ? one.buf : "",
+                       has_conflicts = 1;
+                       rerere_strbuf_putconflict(out, '<', marker_size);
+                       strbuf_addbuf(out, &one);
+                       rerere_strbuf_putconflict(out, '=', marker_size);
+                       strbuf_addbuf(out, &two);
+                       rerere_strbuf_putconflict(out, '>', marker_size);
+                       if (ctx) {
+                               git_SHA1_Update(ctx, one.buf ? one.buf : "",
                                            one.len + 1);
-                               git_SHA1_Update(&ctx, two.buf ? two.buf : "",
+                               git_SHA1_Update(ctx, two.buf ? two.buf : "",
                                            two.len + 1);
                        }
-                       strbuf_reset(&one);
-                       strbuf_reset(&two);
+                       break;
                } else if (hunk == RR_SIDE_1)
                        strbuf_addbuf(&one, &buf);
                else if (hunk == RR_ORIGINAL)
                        ; /* discard */
                else if (hunk == RR_SIDE_2)
                        strbuf_addbuf(&two, &buf);
-               else
-                       rerere_io_putstr(buf.buf, io);
-               continue;
-       bad:
-               hunk = 99; /* force error exit */
-               break;
        }
        strbuf_release(&one);
        strbuf_release(&two);
        strbuf_release(&buf);
 
+       return has_conflicts;
+}
+
+/*
+ * Read the contents of a file with conflicts, normalize the conflicts
+ * by (1) discarding the common ancestor version in diff3-style,
+ * (2) reordering our side and their side so that whichever sorts
+ * alphabetically earlier comes before the other one, while
+ * computing the "conflict ID", which is just an SHA-1 hash of
+ * one side of the conflict, NUL, the other side of the conflict,
+ * and NUL concatenated together.
+ *
+ * Return 1 if conflict hunks are found, 0 if there are no conflict
+ * hunks, and -1 if an error occurred.
+ */
+static int handle_path(unsigned char *sha1, struct rerere_io *io, int marker_size)
+{
+       git_SHA_CTX ctx;
+       struct strbuf buf = STRBUF_INIT, out = STRBUF_INIT;
+       int has_conflicts = 0;
+       if (sha1)
+               git_SHA1_Init(&ctx);
+
+       while (!io->getline(&buf, io)) {
+               if (is_cmarker(buf.buf, '<', marker_size)) {
+                       has_conflicts = handle_conflict(&out, io, marker_size,
+                                                       sha1 ? &ctx : NULL);
+                       if (has_conflicts < 0)
+                               break;
+                       rerere_io_putmem(out.buf, out.len, io);
+                       strbuf_reset(&out);
+               } else
+                       rerere_io_putstr(buf.buf, io);
+       }
+       strbuf_release(&buf);
+       strbuf_release(&out);
+
        if (sha1)
                git_SHA1_Final(sha1, &ctx);
-       if (hunk != RR_CONTEXT)
-               return -1;
-       return hunk_no;
+
+       return has_conflicts;
 }
 
 /*
@@ -476,7 +464,7 @@ static int handle_path(unsigned char *sha1, struct rerere_io *io, int marker_siz
  */
 static int handle_file(const char *path, unsigned char *sha1, const char *output)
 {
-       int hunk_no = 0;
+       int has_conflicts = 0;
        struct rerere_io_file io;
        int marker_size = ll_merge_marker_size(path);
 
@@ -485,34 +473,34 @@ static int handle_file(const char *path, unsigned char *sha1, const char *output
        io.input = fopen(path, "r");
        io.io.wrerror = 0;
        if (!io.input)
-               return error_errno("Could not open %s", path);
+               return error_errno(_("could not open '%s'"), path);
 
        if (output) {
                io.io.output = fopen(output, "w");
                if (!io.io.output) {
-                       error_errno("Could not write %s", output);
+                       error_errno(_("could not write '%s'"), output);
                        fclose(io.input);
                        return -1;
                }
        }
 
-       hunk_no = handle_path(sha1, (struct rerere_io *)&io, marker_size);
+       has_conflicts = handle_path(sha1, (struct rerere_io *)&io, marker_size);
 
        fclose(io.input);
        if (io.io.wrerror)
-               error("There were errors while writing %s (%s)",
+               error(_("there were errors while writing '%s' (%s)"),
                      path, strerror(io.io.wrerror));
        if (io.io.output && fclose(io.io.output))
-               io.io.wrerror = error_errno("Failed to flush %s", path);
+               io.io.wrerror = error_errno(_("failed to flush '%s'"), path);
 
-       if (hunk_no < 0) {
+       if (has_conflicts < 0) {
                if (output)
                        unlink_or_warn(output);
-               return error("Could not parse conflict hunks in %s", path);
+               return error(_("could not parse conflict hunks in '%s'"), path);
        }
        if (io.io.wrerror)
                return -1;
-       return hunk_no;
+       return has_conflicts;
 }
 
 /*
@@ -533,7 +521,7 @@ static int check_one_conflict(int i, int *type)
        }
 
        *type = PUNTED;
-       while (ce_stage(active_cache[i]) == 1)
+       while (i < active_nr && ce_stage(active_cache[i]) == 1)
                i++;
 
        /* Only handle regular files with both stages #2 and #3 */
@@ -569,7 +557,7 @@ static int find_conflict(struct string_list *conflict)
 {
        int i;
        if (read_cache() < 0)
-               return error("Could not read index");
+               return error(_("index file corrupt"));
 
        for (i = 0; i < active_nr;) {
                int conflict_type;
@@ -602,7 +590,7 @@ int rerere_remaining(struct string_list *merge_rr)
        if (setup_rerere(merge_rr, RERERE_READONLY))
                return 0;
        if (read_cache() < 0)
-               return error("Could not read index");
+               return error(_("index file corrupt"));
 
        for (i = 0; i < active_nr;) {
                int conflict_type;
@@ -685,17 +673,17 @@ static int merge(const struct rerere_id *id, const char *path)
         * Mark that "postimage" was used to help gc.
         */
        if (utime(rerere_path(id, "postimage"), NULL) < 0)
-               warning_errno("failed utime() on %s",
+               warning_errno(_("failed utime() on '%s'"),
                              rerere_path(id, "postimage"));
 
        /* Update "path" with the resolution */
        f = fopen(path, "w");
        if (!f)
-               return error_errno("Could not open %s", path);
+               return error_errno(_("could not open '%s'"), path);
        if (fwrite(result.ptr, result.size, 1, f) != 1)
-               error_errno("Could not write %s", path);
+               error_errno(_("could not write '%s'"), path);
        if (fclose(f))
-               return error_errno("Writing %s failed", path);
+               return error_errno(_("writing '%s' failed"), path);
 
 out:
        free(cur.ptr);
@@ -715,13 +703,13 @@ static void update_paths(struct string_list *update)
                struct string_list_item *item = &update->items[i];
                if (add_file_to_cache(item->string, 0))
                        exit(128);
-               fprintf(stderr, "Staged '%s' using previous resolution.\n",
+               fprintf_ln(stderr, _("Staged '%s' using previous resolution."),
                        item->string);
        }
 
        if (write_locked_index(&the_index, &index_lock,
                               COMMIT_LOCK | SKIP_IF_UNCHANGED))
-               die("Unable to write new index file");
+               die(_("unable to write new index file"));
 }
 
 static void remove_variant(struct rerere_id *id)
@@ -753,7 +741,7 @@ static void do_rerere_one_path(struct string_list_item *rr_item,
                if (!handle_file(path, NULL, NULL)) {
                        copy_file(rerere_path(id, "postimage"), path, 0666);
                        id->collection->status[variant] |= RR_HAS_POSTIMAGE;
-                       fprintf(stderr, "Recorded resolution for '%s'.\n", path);
+                       fprintf_ln(stderr, _("Recorded resolution for '%s'."), path);
                        free_rerere_id(rr_item);
                        rr_item->util = NULL;
                        return;
@@ -787,9 +775,9 @@ static void do_rerere_one_path(struct string_list_item *rr_item,
                if (rerere_autoupdate)
                        string_list_insert(update, path);
                else
-                       fprintf(stderr,
-                               "Resolved '%s' using previous resolution.\n",
-                               path);
+                       fprintf_ln(stderr,
+                                  _("Resolved '%s' using previous resolution."),
+                                  path);
                free_rerere_id(rr_item);
                rr_item->util = NULL;
                return;
@@ -803,11 +791,11 @@ static void do_rerere_one_path(struct string_list_item *rr_item,
        if (id->collection->status[variant] & RR_HAS_POSTIMAGE) {
                const char *path = rerere_path(id, "postimage");
                if (unlink(path))
-                       die_errno("cannot unlink stray '%s'", path);
+                       die_errno(_("cannot unlink stray '%s'"), path);
                id->collection->status[variant] &= ~RR_HAS_POSTIMAGE;
        }
        id->collection->status[variant] |= RR_HAS_PREIMAGE;
-       fprintf(stderr, "Recorded preimage for '%s'\n", path);
+       fprintf_ln(stderr, _("Recorded preimage for '%s'"), path);
 }
 
 static int do_plain_rerere(struct string_list *rr, int fd)
@@ -830,15 +818,16 @@ static int do_plain_rerere(struct string_list *rr, int fd)
                const char *path = conflict.items[i].string;
                int ret;
 
-               if (string_list_has_string(rr, path))
-                       continue;
-
                /*
                 * Ask handle_file() to scan and assign a
                 * conflict ID.  No need to write anything out
                 * yet.
                 */
                ret = handle_file(path, sha1, NULL);
+               if (ret != 0 && string_list_has_string(rr, path)) {
+                       remove_variant(string_list_lookup(rr, path)->util);
+                       string_list_remove(rr, path, 1);
+               }
                if (ret < 1)
                        continue;
 
@@ -879,7 +868,7 @@ static int is_rerere_enabled(void)
                return rr_cache_exists;
 
        if (!rr_cache_exists && mkdir_in_gitdir(git_path_rr_cache()))
-               die("Could not create directory %s", git_path_rr_cache());
+               die(_("could not create directory '%s'"), git_path_rr_cache());
        return 1;
 }
 
@@ -958,7 +947,7 @@ static int handle_cache(const char *path, unsigned char *sha1, const char *outpu
        mmfile_t mmfile[3] = {{NULL}};
        mmbuffer_t result = {NULL, 0};
        const struct cache_entry *ce;
-       int pos, len, i, hunk_no;
+       int pos, len, i, has_conflicts;
        struct rerere_io_mem io;
        int marker_size = ll_merge_marker_size(path);
 
@@ -1012,11 +1001,11 @@ static int handle_cache(const char *path, unsigned char *sha1, const char *outpu
         * Grab the conflict ID and optionally write the original
         * contents with conflict markers out.
         */
-       hunk_no = handle_path(sha1, (struct rerere_io *)&io, marker_size);
+       has_conflicts = handle_path(sha1, (struct rerere_io *)&io, marker_size);
        strbuf_release(&io.input);
        if (io.io.output)
                fclose(io.io.output);
-       return hunk_no;
+       return has_conflicts;
 }
 
 static int rerere_forget_one_path(const char *path, struct string_list *rr)
@@ -1033,7 +1022,7 @@ static int rerere_forget_one_path(const char *path, struct string_list *rr)
         */
        ret = handle_cache(path, sha1, NULL);
        if (ret < 1)
-               return error("Could not parse conflict hunks in '%s'", path);
+               return error(_("could not parse conflict hunks in '%s'"), path);
 
        /* Nuke the recorded resolution for the conflict */
        id = new_rerere_id(sha1);
@@ -1051,7 +1040,7 @@ static int rerere_forget_one_path(const char *path, struct string_list *rr)
                handle_cache(path, sha1, rerere_path(id, "thisimage"));
                if (read_mmfile(&cur, rerere_path(id, "thisimage"))) {
                        free(cur.ptr);
-                       error("Failed to update conflicted state in '%s'", path);
+                       error(_("failed to update conflicted state in '%s'"), path);
                        goto fail_exit;
                }
                cleanly_resolved = !try_merge(id, path, &cur, &result);
@@ -1062,16 +1051,16 @@ static int rerere_forget_one_path(const char *path, struct string_list *rr)
        }
 
        if (id->collection->status_nr <= id->variant) {
-               error("no remembered resolution for '%s'", path);
+               error(_("no remembered resolution for '%s'"), path);
                goto fail_exit;
        }
 
        filename = rerere_path(id, "postimage");
        if (unlink(filename)) {
                if (errno == ENOENT)
-                       error("no remembered resolution for %s", path);
+                       error(_("no remembered resolution for '%s'"), path);
                else
-                       error_errno("cannot unlink %s", filename);
+                       error_errno(_("cannot unlink '%s'"), filename);
                goto fail_exit;
        }
 
@@ -1081,7 +1070,7 @@ static int rerere_forget_one_path(const char *path, struct string_list *rr)
         * the postimage.
         */
        handle_cache(path, sha1, rerere_path(id, "preimage"));
-       fprintf(stderr, "Updated preimage for '%s'\n", path);
+       fprintf_ln(stderr, _("Updated preimage for '%s'"), path);
 
        /*
         * And remember that we can record resolution for this
@@ -1090,7 +1079,7 @@ static int rerere_forget_one_path(const char *path, struct string_list *rr)
        item = string_list_insert(rr, path);
        free_rerere_id(item);
        item->util = id;
-       fprintf(stderr, "Forgot resolution for %s\n", path);
+       fprintf(stderr, _("Forgot resolution for '%s'\n"), path);
        return 0;
 
 fail_exit:
@@ -1105,7 +1094,7 @@ int rerere_forget(struct pathspec *pathspec)
        struct string_list merge_rr = STRING_LIST_INIT_DUP;
 
        if (read_cache() < 0)
-               return error("Could not read index");
+               return error(_("index file corrupt"));
 
        fd = setup_rerere(&merge_rr, RERERE_NOAUTOUPDATE);
        if (fd < 0)
@@ -1193,7 +1182,7 @@ void rerere_gc(struct string_list *rr)
        git_config(git_default_config, NULL);
        dir = opendir(git_path("rr-cache"));
        if (!dir)
-               die_errno("unable to open rr-cache directory");
+               die_errno(_("unable to open rr-cache directory"));
        /* Collect stale conflict IDs ... */
        while ((e = readdir(dir))) {
                struct rerere_dir *rr_dir;
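
With handle_path()/handle_file() now reporting a tri-state instead of a hunk count, callers only look at the sign and zero of the result. A short illustrative sketch (handle_file() is the static helper above, so this is not a public API):

static int classify_path(const char *path)
{
	unsigned char sha1[GIT_MAX_RAWSZ];
	int ret = handle_file(path, sha1, NULL);

	if (ret < 0)
		return error(_("could not parse conflict hunks in '%s'"), path);
	if (!ret)
		return 0;	/* no conflict hunks; nothing for rerere to record */
	/* ret == 1: sha1 now holds the conflict ID for this path */
	return 1;
}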
index de4dce600d00282655907e9014228791a352f813..e18bd530e4c50d0f5a1887714b074a6c2c63875b 100644 (file)
@@ -24,6 +24,7 @@
 #include "packfile.h"
 #include "worktree.h"
 #include "argv-array.h"
+#include "commit-reach.h"
 
 volatile show_early_output_fn_t show_early_output;
 
@@ -2318,7 +2319,7 @@ static void NORETURN diagnose_missing_default(const char *def)
  */
 int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct setup_revision_opt *opt)
 {
-       int i, flags, left, seen_dashdash, read_from_stdin, got_rev_arg = 0, revarg_opt;
+       int i, flags, left, seen_dashdash, got_rev_arg = 0, revarg_opt;
        struct argv_array prune_data = ARGV_ARRAY_INIT;
        const char *submodule = NULL;
 
@@ -2348,7 +2349,6 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s
        revarg_opt = opt ? opt->revarg_opt : 0;
        if (seen_dashdash)
                revarg_opt |= REVARG_CANNOT_BE_FILENAME;
-       read_from_stdin = 0;
        for (left = i = 1; i < argc; i++) {
                const char *arg = argv[i];
                if (*arg == '-') {
@@ -2367,7 +2367,7 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s
                                        argv[left++] = arg;
                                        continue;
                                }
-                               if (read_from_stdin++)
+                               if (revs->read_from_stdin++)
                                        die("--stdin given twice?");
                                read_revisions_from_stdin(revs, &prune_data);
                                continue;
@@ -3238,7 +3238,7 @@ static void track_linear(struct rev_info *revs, struct commit *commit)
                struct commit_list *p;
                for (p = revs->previous_parents; p; p = p->next)
                        if (p->item == NULL || /* first commit */
-                           !oidcmp(&p->item->object.oid, &commit->object.oid))
+                           oideq(&p->item->object.oid, &commit->object.oid))
                                break;
                revs->linear = p != NULL;
        }
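
The change above is part of a conversion that recurs throughout this patch (in sequencer.c, sha1-file.c, sha1-array.c, submodule-config.c and submodule.c as well): negated !oidcmp(a, b) calls become oideq(a, b), and bare oidcmp(a, b) in a boolean context becomes !oideq(a, b), so that equality checks read as equality checks. The sketch below is only a standalone model of that pattern; struct object_id_demo, HASH_LEN, oidcmp_demo and oideq_demo are simplified stand-ins rather than git's real cache.h definitions, and the 20-byte size assumes SHA-1.

    #include <stdio.h>
    #include <string.h>

    #define HASH_LEN 20     /* SHA-1 size; stand-in for the real hash width */

    struct object_id_demo { unsigned char hash[HASH_LEN]; };

    /* ordering comparison, like oidcmp(): zero means "equal" */
    static int oidcmp_demo(const struct object_id_demo *a,
                           const struct object_id_demo *b)
    {
            return memcmp(a->hash, b->hash, HASH_LEN);
    }

    /* equality test, like oideq(): non-zero means "equal" */
    static int oideq_demo(const struct object_id_demo *a,
                          const struct object_id_demo *b)
    {
            return !memcmp(a->hash, b->hash, HASH_LEN);
    }

    int main(void)
    {
            struct object_id_demo a = { { 0 } }, b = { { 0 } };

            if (!oidcmp_demo(&a, &b))       /* old spelling: "not different" */
                    puts("equal (oidcmp style)");
            if (oideq_demo(&a, &b))         /* new spelling: "equal" */
                    puts("equal (oideq style)");
            return 0;
    }

Both branches print, which is the point: the two spellings test the same condition, but only the second states the intent directly.
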
index 007278cc119fded9f97832d87ffe5d6b95a5ce41..2b30ac270d9295e00641b08483fe07cd44dc51f6 100644 (file)
@@ -82,6 +82,11 @@ struct rev_info {
         */
        int rev_input_given;
 
+       /*
+        * Whether we read from stdin due to the --stdin option.
+        */
+       int read_from_stdin;
+
        /* topo-sort */
        enum rev_sort_order sort_order;
 
@@ -214,6 +219,17 @@ struct rev_info {
        /* notes-specific options: which refs to show */
        struct display_notes_opt notes_opt;
 
+       /* interdiff */
+       const struct object_id *idiff_oid1;
+       const struct object_id *idiff_oid2;
+       const char *idiff_title;
+
+       /* range-diff */
+       const char *rdiff1;
+       const char *rdiff2;
+       int creation_factor;
+       const char *rdiff_title;
+
        /* commit counts */
        int count_left;
        int count_right;
index dc2c58d464c14be033f4bcba2ab4332886ead327..ddb41a62d955552e355abff2d6d1115c0d2b0361 100644 (file)
@@ -30,6 +30,7 @@
 #include "oidset.h"
 #include "commit-slab.h"
 #include "alias.h"
+#include "commit-reach.h"
 
 #define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
 
@@ -225,13 +226,16 @@ static const char *get_todo_path(const struct replay_opts *opts)
  * Returns 3 when sob exists within conforming footer as last entry
  */
 static int has_conforming_footer(struct strbuf *sb, struct strbuf *sob,
-       int ignore_footer)
+       size_t ignore_footer)
 {
+       struct process_trailer_options opts = PROCESS_TRAILER_OPTIONS_INIT;
        struct trailer_info info;
-       int i;
+       size_t i;
        int found_sob = 0, found_sob_last = 0;
 
-       trailer_info_get(&info, sb->buf);
+       opts.no_divider = 1;
+
+       trailer_info_get(&info, sb->buf, &opts);
 
        if (info.trailer_start == info.trailer_end)
                return 0;
@@ -610,7 +614,7 @@ static int is_index_unchanged(void)
        if (!(cache_tree_oid = get_cache_tree_oid()))
                return -1;
 
-       return !oidcmp(cache_tree_oid, get_commit_tree_oid(head_commit));
+       return oideq(cache_tree_oid, get_commit_tree_oid(head_commit));
 }
 
 static int write_author_script(const char *message)
@@ -899,7 +903,7 @@ static int run_git_commit(const char *defmsg, struct replay_opts *opts,
        if ((flags & ALLOW_EMPTY))
                argv_array_push(&cmd.args, "--allow-empty");
 
-       if (opts->allow_empty_message)
+       if (!(flags & EDIT_MSG))
                argv_array_push(&cmd.args, "--allow-empty-message");
 
        if (cmd.err == -1) {
@@ -1217,7 +1221,7 @@ static int parse_head(struct commit **head)
                current_head = lookup_commit_reference(the_repository, &oid);
                if (!current_head)
                        return error(_("could not parse HEAD"));
-               if (oidcmp(&oid, &current_head->object.oid)) {
+               if (!oideq(&oid, &current_head->object.oid)) {
                        warning(_("HEAD %s is not a commit!"),
                                oid_to_hex(&oid));
                }
@@ -1287,9 +1291,9 @@ static int try_to_commit(struct strbuf *msg, const char *author,
                goto out;
        }
 
-       if (!(flags & ALLOW_EMPTY) && !oidcmp(current_head ?
-                                             get_commit_tree_oid(current_head) :
-                                             the_hash_algo->empty_tree, &tree)) {
+       if (!(flags & ALLOW_EMPTY) && oideq(current_head ?
+                                           get_commit_tree_oid(current_head) :
+                                           the_hash_algo->empty_tree, &tree)) {
                res = 1; /* run 'git commit' to display error message */
                goto out;
        }
@@ -1313,7 +1317,7 @@ static int try_to_commit(struct strbuf *msg, const char *author,
 
        if (cleanup != COMMIT_MSG_CLEANUP_NONE)
                strbuf_stripspace(msg, cleanup == COMMIT_MSG_CLEANUP_ALL);
-       if (!opts->allow_empty_message && message_is_empty(msg, cleanup)) {
+       if ((flags & EDIT_MSG) && message_is_empty(msg, cleanup)) {
                res = 1; /* run 'git commit' to display error message */
                goto out;
        }
@@ -1394,7 +1398,7 @@ static int is_original_commit_empty(struct commit *commit)
                ptree_oid = the_hash_algo->empty_tree; /* commit is root */
        }
 
-       return !oidcmp(ptree_oid, get_commit_tree_oid(commit));
+       return oideq(ptree_oid, get_commit_tree_oid(commit));
 }
 
 /*
@@ -1674,7 +1678,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
                unborn = get_oid("HEAD", &head);
                /* Do we want to generate a root commit? */
                if (is_pick_or_similar(command) && opts->have_squash_onto &&
-                   !oidcmp(&head, &opts->squash_onto)) {
+                   oideq(&head, &opts->squash_onto)) {
                        if (is_fixup(command))
                                return error(_("cannot fixup root commit"));
                        flags |= CREATE_ROOT_COMMIT;
@@ -1717,7 +1721,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
                        oid_to_hex(&commit->object.oid));
 
        if (opts->allow_ff && !is_fixup(command) &&
-           ((parent && !oidcmp(&parent->object.oid, &head)) ||
+           ((parent && oideq(&parent->object.oid, &head)) ||
             (!parent && unborn))) {
                if (is_rebase_i(opts))
                        write_author_script(msg.message);
@@ -2422,7 +2426,7 @@ static int rollback_is_safe(void)
        if (get_oid("HEAD", &actual_head))
                oidclr(&actual_head);
 
-       return !oidcmp(&actual_head, &expected_head);
+       return oideq(&actual_head, &expected_head);
 }
 
 static int reset_for_rollback(const struct object_id *oid)
@@ -2983,7 +2987,7 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len,
        }
 
        if (opts->have_squash_onto &&
-           !oidcmp(&head_commit->object.oid, &opts->squash_onto)) {
+           oideq(&head_commit->object.oid, &opts->squash_onto)) {
                /*
                 * When the user tells us to "merge" something into a
                 * "[new root]", let's simply fast-forward to the merge head.
@@ -3052,8 +3056,8 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len,
         * commit, we cannot fast-forward.
         */
        can_fast_forward = opts->allow_ff && commit && commit->parents &&
-               !oidcmp(&commit->parents->item->object.oid,
-                       &head_commit->object.oid);
+               oideq(&commit->parents->item->object.oid,
+                     &head_commit->object.oid);
 
        /*
         * If any merge head is different from the original one, we cannot
@@ -3063,7 +3067,7 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len,
                struct commit_list *p = commit->parents->next;
 
                for (j = to_merge; j && p; j = j->next, p = p->next)
-                       if (oidcmp(&j->item->object.oid,
+                       if (!oideq(&j->item->object.oid,
                                   &p->item->object.oid)) {
                                can_fast_forward = 0;
                                break;
@@ -3131,8 +3135,8 @@ static int do_merge(struct commit *commit, const char *arg, int arg_len,
        write_message("no-ff", 5, git_path_merge_mode(the_repository), 0);
 
        bases = get_merge_bases(head_commit, merge_commit);
-       if (bases && !oidcmp(&merge_commit->object.oid,
-                            &bases->item->object.oid)) {
+       if (bases && oideq(&merge_commit->object.oid,
+                          &bases->item->object.oid)) {
                ret = 0;
                /* skip merging an ancestor of HEAD */
                goto leave_merge;
@@ -3378,9 +3382,9 @@ static int pick_commits(struct todo_list *todo_list, struct replay_opts *opts)
                                 */
                                if (item->command == TODO_REWORD &&
                                    !get_oid("HEAD", &oid) &&
-                                   (!oidcmp(&item->commit->object.oid, &oid) ||
+                                   (oideq(&item->commit->object.oid, &oid) ||
                                     (opts->have_squash_onto &&
-                                     !oidcmp(&opts->squash_onto, &oid))))
+                                     oideq(&opts->squash_onto, &oid))))
                                        to_amend = 1;
 
                                return res | error_with_patch(item->commit,
@@ -3595,7 +3599,7 @@ static int commit_staged_changes(struct replay_opts *opts,
                if (get_oid_hex(rev.buf, &to_amend))
                        return error(_("invalid contents: '%s'"),
                                rebase_path_amend());
-               if (!is_clean && oidcmp(&head, &to_amend))
+               if (!is_clean && !oideq(&head, &to_amend))
                        return error(_("\nYou have uncommitted changes in your "
                                       "working tree. Please, commit them\n"
                                       "first and then run 'git rebase "
@@ -3607,9 +3611,20 @@ static int commit_staged_changes(struct replay_opts *opts,
                 * the commit message and if there was a squash, let the user
                 * edit it.
                 */
-               if (is_clean && !oidcmp(&head, &to_amend) &&
-                   opts->current_fixup_count > 0 &&
-                   file_exists(rebase_path_stopped_sha())) {
+               if (!is_clean || !opts->current_fixup_count)
+                       ; /* this is not the final fixup */
+               else if (!oideq(&head, &to_amend) ||
+                        !file_exists(rebase_path_stopped_sha())) {
+                       /* was a final fixup or squash done manually? */
+                       if (!is_fixup(peek_command(todo_list, 0))) {
+                               unlink(rebase_path_fixup_msg());
+                               unlink(rebase_path_squash_msg());
+                               unlink(rebase_path_current_fixups());
+                               strbuf_reset(&opts->current_fixups);
+                               opts->current_fixup_count = 0;
+                       }
+               } else {
+                       /* we are in a fixup/squash chain */
                        const char *p = opts->current_fixups.buf;
                        int len = opts->current_fixups.len;
 
@@ -3828,7 +3843,7 @@ int sequencer_pick_revisions(struct replay_opts *opts)
        return res;
 }
 
-void append_signoff(struct strbuf *msgbuf, int ignore_footer, unsigned flag)
+void append_signoff(struct strbuf *msgbuf, size_t ignore_footer, unsigned flag)
 {
        unsigned no_dup_sob = flag & APPEND_SIGNOFF_DEDUP;
        struct strbuf sob = STRBUF_INIT;
@@ -4574,7 +4589,7 @@ int skip_unnecessary_picks(void)
                if (item->commit->parents->next)
                        break; /* merge commit */
                parent_oid = &item->commit->parents->item->object.oid;
-               if (hashcmp(parent_oid->hash, oid->hash))
+               if (!oideq(parent_oid, oid))
                        break;
                oid = &item->commit->object.oid;
        }
index c751c9d6e4f78e7d9e2700dcc3fb3157961fb049..c986bc825161f1f4702a0cd435c6d9705e3be2df 100644 (file)
@@ -90,7 +90,14 @@ int rearrange_squash(void);
 
 extern const char sign_off_header[];
 
-void append_signoff(struct strbuf *msgbuf, int ignore_footer, unsigned flag);
+/*
+ * Append a signoff to the commit message in "msgbuf". The ignore_footer
+ * parameter specifies the number of bytes at the end of msgbuf that should
+ * not be considered at all. I.e., they are not checked for existing trailers,
+ * and the new signoff will be spliced into the buffer before those bytes.
+ */
+void append_signoff(struct strbuf *msgbuf, size_t ignore_footer, unsigned flag);
+
 void append_conflicts_hint(struct strbuf *msgbuf);
 int message_is_empty(const struct strbuf *sb,
                     enum commit_msg_cleanup_mode cleanup_mode);
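
The new comment above pins down what the size_t ignore_footer argument means: a count of bytes at the tail of msgbuf that are skipped when looking for existing trailers, with the sign-off spliced in just before them. Below is a rough standalone model of that splicing, under the assumption that only the insertion position matters here; the invented splice_signoff does none of the trailer detection or deduplication that git's real append_signoff() performs, and the message text is made up.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Splice "sob" into "msg" just before its last ignore_footer bytes. */
    static char *splice_signoff(const char *msg, size_t ignore_footer,
                                const char *sob)
    {
            size_t len = strlen(msg);
            size_t cut = len - ignore_footer;       /* insertion point */
            char *out = malloc(len + strlen(sob) + 1);

            memcpy(out, msg, cut);                  /* message body, untouched */
            strcpy(out + cut, sob);                 /* new sign-off */
            strcat(out, msg + cut);                 /* ignored tail stays last */
            return out;
    }

    int main(void)
    {
            const char *tail = "# comment lines that must be skipped\n";
            const char *msg = "subject\n\nbody of the commit message\n"
                              "# comment lines that must be skipped\n";
            char *result = splice_signoff(msg, strlen(tail),
                            "Signed-off-by: A U Thor <author@example.com>\n");

            fputs(result, stdout);
            free(result);
            return 0;
    }

Running it prints the subject and body, then the Signed-off-by line, then the ignored tail, matching the ordering the comment describes.
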
index 41050c2449b1adaaeddda30529f9eb1d62981396..e2b2d6a27a40b1a3683dd99459c0fab0d45dc0c9 100644 (file)
@@ -199,7 +199,7 @@ static void init_pack_info(const char *infofile, int force)
        objdir = get_object_directory();
        objdirlen = strlen(objdir);
 
-       for (p = get_packed_git(the_repository); p; p = p->next) {
+       for (p = get_all_packs(the_repository); p; p = p->next) {
                /* we ignore things on alternate path since they are
                 * not available to the pullers in general.
                 */
@@ -209,7 +209,7 @@ static void init_pack_info(const char *infofile, int force)
        }
        num_pack = i;
        info = xcalloc(num_pack, sizeof(struct pack_info *));
-       for (i = 0, p = get_packed_git(the_repository); p; p = p->next) {
+       for (i = 0, p = get_all_packs(the_repository); p; p = p->next) {
                if (!p->pack_local)
                        continue;
                info[i] = xcalloc(1, sizeof(struct pack_info));
index 265941fbf40d4a6c64bb1e77b2aef7a5950493a7..b94e0ec0f5e45fa56e1748bbb611a904ab81de67 100644 (file)
@@ -69,7 +69,7 @@ int oid_array_for_each_unique(struct oid_array *array,
 
        for (i = 0; i < array->nr; i++) {
                int ret;
-               if (i > 0 && !oidcmp(array->oid + i, array->oid + i - 1))
+               if (i > 0 && oideq(array->oid + i, array->oid + i - 1))
                        continue;
                ret = fn(array->oid + i, data);
                if (ret)
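
oid_array_for_each_unique() can skip duplicates this cheaply only because the array is sorted, so a repeated entry is always adjacent to its first occurrence; the hunk merely rewrites the negated oidcmp() as oideq(). The following is a minimal standalone model of the same iteration pattern, with plain ints instead of object IDs and invented names (for_each_unique, print_one).

    #include <stdio.h>

    typedef int (*each_fn)(int value, void *data);

    /* Walk a sorted array, calling fn once per distinct value. */
    static int for_each_unique(const int *sorted, int nr, each_fn fn, void *data)
    {
            int i;

            for (i = 0; i < nr; i++) {
                    int ret;
                    if (i > 0 && sorted[i] == sorted[i - 1])
                            continue;       /* same as the previous entry */
                    ret = fn(sorted[i], data);
                    if (ret)
                            return ret;     /* let the callback stop the walk */
            }
            return 0;
    }

    static int print_one(int value, void *data)
    {
            (void)data;
            printf("%d\n", value);
            return 0;
    }

    int main(void)
    {
            int sorted[] = { 1, 1, 2, 3, 3, 3, 7 };

            return for_each_unique(sorted, 7, print_one, NULL);
    }

It prints 1, 2, 3 and 7, one per line; a non-zero return from the callback stops the walk early, as in the original.
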
index 97b74238483e00c3f07bd5ab0879eb84bf5c8dfa..a4367b8f044c6e9254d2a253149187bdb2be6dd2 100644 (file)
@@ -149,10 +149,10 @@ static struct cached_object *find_cached_object(const struct object_id *oid)
        struct cached_object *co = cached_objects;
 
        for (i = 0; i < cached_object_nr; i++, co++) {
-               if (!oidcmp(&co->oid, oid))
+               if (oideq(&co->oid, oid))
                        return co;
        }
-       if (!oidcmp(oid, the_hash_algo->empty_tree))
+       if (oideq(oid, the_hash_algo->empty_tree))
                return &empty_tree;
        return NULL;
 }
@@ -825,7 +825,7 @@ int check_object_signature(const struct object_id *oid, void *map,
 
        if (map) {
                hash_object_file(map, size, type, &real_oid);
-               return oidcmp(oid, &real_oid) ? -1 : 0;
+               return !oideq(oid, &real_oid) ? -1 : 0;
        }
 
        st = open_istream(oid, &obj_type, &size, NULL);
@@ -852,7 +852,7 @@ int check_object_signature(const struct object_id *oid, void *map,
        }
        the_hash_algo->final_fn(real_oid.hash, &c);
        close_istream(st);
-       return oidcmp(oid, &real_oid) ? -1 : 0;
+       return !oideq(oid, &real_oid) ? -1 : 0;
 }
 
 int git_open_cloexec(const char *name, int flags)
@@ -1317,7 +1317,7 @@ int oid_object_info_extended(struct repository *r, const struct object_id *oid,
                         * TODO Pass a repository struct through fetch_object,
                         * such that arbitrary repositories work.
                         */
-                       fetch_object(repository_format_partial_clone, real->hash);
+                       fetch_objects(repository_format_partial_clone, real, 1);
                        already_retried = 1;
                        continue;
                }
@@ -1671,7 +1671,7 @@ static int write_loose_object(const struct object_id *oid, char *hdr,
                die(_("deflateEnd on object %s failed (%d)"), oid_to_hex(oid),
                    ret);
        the_hash_algo->final_fn(parano_oid.hash, &c);
-       if (oidcmp(oid, &parano_oid) != 0)
+       if (!oideq(oid, &parano_oid))
                die(_("confused by unstable object source data for %s"),
                    oid_to_hex(oid));
 
@@ -2213,7 +2213,7 @@ static int check_stream_sha1(git_zstream *stream,
        }
 
        the_hash_algo->final_fn(real_sha1, &c);
-       if (hashcmp(expected_sha1, real_sha1)) {
+       if (!hasheq(expected_sha1, real_sha1)) {
                error(_("sha1 mismatch for %s (expected %s)"), path,
                      sha1_to_hex(expected_sha1));
                return -1;
index c9cc1318b7394e86704bda95651c9a4db3015b9a..faa60f69e311f5f09cd5b91a8fbbaf3e3e744ab0 100644 (file)
@@ -12,6 +12,8 @@
 #include "packfile.h"
 #include "object-store.h"
 #include "repository.h"
+#include "midx.h"
+#include "commit-reach.h"
 
 static int get_oid_oneline(const char *, struct object_id *, struct commit_list *);
 
@@ -44,7 +46,7 @@ static void update_candidates(struct disambiguate_state *ds, const struct object
                oidcpy(&ds->candidate, current);
                ds->candidate_exists = 1;
                return;
-       } else if (!oidcmp(&ds->candidate, current)) {
+       } else if (oideq(&ds->candidate, current)) {
                /* the same as what we already have seen */
                return;
        }
@@ -149,6 +151,32 @@ static int match_sha(unsigned len, const unsigned char *a, const unsigned char *
        return 1;
 }
 
+static void unique_in_midx(struct multi_pack_index *m,
+                          struct disambiguate_state *ds)
+{
+       uint32_t num, i, first = 0;
+       const struct object_id *current = NULL;
+       num = m->num_objects;
+
+       if (!num)
+               return;
+
+       bsearch_midx(&ds->bin_pfx, m, &first);
+
+       /*
+        * At this point, "first" is the location of the lowest object
+        * with an object name that could match "bin_pfx".  See if we have
+        * 0, 1 or more objects that actually match(es).
+        */
+       for (i = first; i < num && !ds->ambiguous; i++) {
+               struct object_id oid;
+               current = nth_midxed_object_oid(&oid, m, i);
+               if (!match_sha(ds->len, ds->bin_pfx.hash, current->hash))
+                       break;
+               update_candidates(ds, current);
+       }
+}
+
 static void unique_in_pack(struct packed_git *p,
                           struct disambiguate_state *ds)
 {
@@ -177,8 +205,12 @@ static void unique_in_pack(struct packed_git *p,
 
 static void find_short_packed_object(struct disambiguate_state *ds)
 {
+       struct multi_pack_index *m;
        struct packed_git *p;
 
+       for (m = get_multi_pack_index(the_repository); m && !ds->ambiguous;
+            m = m->next)
+               unique_in_midx(m, ds);
        for (p = get_packed_git(the_repository); p && !ds->ambiguous;
             p = p->next)
                unique_in_pack(p, ds);
@@ -529,6 +561,42 @@ static int extend_abbrev_len(const struct object_id *oid, void *cb_data)
        return 0;
 }
 
+static void find_abbrev_len_for_midx(struct multi_pack_index *m,
+                                    struct min_abbrev_data *mad)
+{
+       int match = 0;
+       uint32_t num, first = 0;
+       struct object_id oid;
+       const struct object_id *mad_oid;
+
+       if (!m->num_objects)
+               return;
+
+       num = m->num_objects;
+       mad_oid = mad->oid;
+       match = bsearch_midx(mad_oid, m, &first);
+
+       /*
+        * first is now the position in the packfile where we would insert
+        * mad->hash if it does not exist (or the position of mad->hash if
+        * it does exist). Hence, we consider a maximum of two objects
+        * nearby for the abbreviation length.
+        */
+       mad->init_len = 0;
+       if (!match) {
+               if (nth_midxed_object_oid(&oid, m, first))
+                       extend_abbrev_len(&oid, mad);
+       } else if (first < num - 1) {
+               if (nth_midxed_object_oid(&oid, m, first + 1))
+                       extend_abbrev_len(&oid, mad);
+       }
+       if (first > 0) {
+               if (nth_midxed_object_oid(&oid, m, first - 1))
+                       extend_abbrev_len(&oid, mad);
+       }
+       mad->init_len = mad->cur_len;
+}
+
 static void find_abbrev_len_for_pack(struct packed_git *p,
                                     struct min_abbrev_data *mad)
 {
@@ -567,8 +635,11 @@ static void find_abbrev_len_for_pack(struct packed_git *p,
 
 static void find_abbrev_len_packed(struct min_abbrev_data *mad)
 {
+       struct multi_pack_index *m;
        struct packed_git *p;
 
+       for (m = get_multi_pack_index(the_repository); m; m = m->next)
+               find_abbrev_len_for_midx(m, mad);
        for (p = get_packed_git(the_repository); p; p = p->next)
                find_abbrev_len_for_pack(p, mad);
 }
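
find_abbrev_len_for_midx() applies the same reasoning as the pack version it mirrors: once bsearch_midx() has located where the object would sit in the sorted index, only the entries immediately before and after that position can share its longest prefix, so examining at most two neighbours is enough to extend the abbreviation length. The sketch below models that neighbour check on ordinary hex strings; common_prefix, min_abbrev and the sample array are invented for illustration, and git of course works on binary object IDs across all packs, multi-pack-indexes and loose objects.

    #include <stdio.h>
    #include <string.h>

    static size_t common_prefix(const char *a, const char *b)
    {
            size_t n = 0;
            while (a[n] && a[n] == b[n])
                    n++;
            return n;
    }

    /* Shortest prefix of sorted[idx] that no neighbour shares. */
    static size_t min_abbrev(const char **sorted, size_t nr, size_t idx)
    {
            size_t len = 1;                 /* always show at least one char */

            if (idx > 0)
                    len = common_prefix(sorted[idx], sorted[idx - 1]) + 1;
            if (idx + 1 < nr) {
                    size_t next = common_prefix(sorted[idx], sorted[idx + 1]) + 1;
                    if (next > len)
                            len = next;
            }
            return len;
    }

    int main(void)
    {
            const char *oids[] = { "1234abcd", "1234ffff", "badc0ffe" };

            printf("%zu\n", min_abbrev(oids, 3, 0));        /* 5 -> "1234a" */
            printf("%zu\n", min_abbrev(oids, 3, 2));        /* 1 -> "b" */
            return 0;
    }

For the first entry the only competitor is its successor, so five characters ("1234a") are needed; the last shares no prefix with anything and a single character suffices.
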
index dbe8a2a2906abf9b393eeca1cdad8c9425c3e4bb..99fd2d1ba0f70aaf238f55aae006a12e38090342 100644 (file)
--- a/shallow.c
+++ b/shallow.c
@@ -16,6 +16,7 @@
 #include "list-objects.h"
 #include "commit-slab.h"
 #include "repository.h"
+#include "commit-reach.h"
 
 void set_alternate_shallow_file(struct repository *r, const char *path, int override)
 {
index 771c4550980e24eb8bf9db14e27c49f983c1ed4b..1f6063f2a27812ee27b5d510dc066249198b48e3 100644 (file)
@@ -195,16 +195,6 @@ void string_list_clear_func(struct string_list *list, string_list_clear_func_t c
        list->nr = list->alloc = 0;
 }
 
-
-void print_string_list(const struct string_list *p, const char *text)
-{
-       int i;
-       if ( text )
-               printf("%s\n", text);
-       for (i = 0; i < p->nr; i++)
-               printf("%s:%p\n", p->items[i].string, p->items[i].util);
-}
-
 struct string_list_item *string_list_append_nodup(struct string_list *list,
                                                  char *string)
 {
index ff8f6094a3300d7bbdbb1a1ed58c8037daff5c7a..18c718c12ce84609ddf7d2baf471042bf9fafc7a 100644 (file)
@@ -113,14 +113,6 @@ typedef int (*string_list_each_func_t)(struct string_list_item *, void *);
 void filter_string_list(struct string_list *list, int free_util,
                        string_list_each_func_t want, void *cb_data);
 
-/**
- * Dump a string_list to stdout, useful mainly for debugging
- * purposes. It can take an optional header argument and it writes out
- * the string-pointer pairs of the string_list, each one in its own
- * line.
- */
-void print_string_list(const struct string_list *p, const char *text);
-
 /**
  * Free a string_list. The `string` pointer of the items will be freed
  * in case the `strdup_strings` member of the string_list is set. The
index fc2c41b947cb471deef42323c83f8b28f42780d6..b132f7a80ba6fc692069f765caf0b92027b920d8 100644 (file)
@@ -45,7 +45,7 @@ static int config_path_cmp(const void *unused_cmp_data,
        const struct submodule_entry *b = entry_or_key;
 
        return strcmp(a->config->path, b->config->path) ||
-              oidcmp(&a->config->gitmodules_oid, &b->config->gitmodules_oid);
+              !oideq(&a->config->gitmodules_oid, &b->config->gitmodules_oid);
 }
 
 static int config_name_cmp(const void *unused_cmp_data,
@@ -57,7 +57,7 @@ static int config_name_cmp(const void *unused_cmp_data,
        const struct submodule_entry *b = entry_or_key;
 
        return strcmp(a->config->name, b->config->name) ||
-              oidcmp(&a->config->gitmodules_oid, &b->config->gitmodules_oid);
+              !oideq(&a->config->gitmodules_oid, &b->config->gitmodules_oid);
 }
 
 static struct submodule_cache *submodule_cache_alloc(void)
@@ -384,6 +384,12 @@ static void warn_multiple_config(const struct object_id *treeish_name,
                        commit_string, name, option);
 }
 
+static void warn_command_line_option(const char *var, const char *value)
+{
+       warning(_("ignoring '%s' which may be interpreted as"
+                 " a command-line option: %s"), var, value);
+}
+
 struct parse_config_parameter {
        struct submodule_cache *cache;
        const struct object_id *treeish_name;
@@ -409,6 +415,8 @@ static int parse_config(const char *var, const char *value, void *data)
        if (!strcmp(item.buf, "path")) {
                if (!value)
                        ret = config_error_nonbool(var);
+               else if (looks_like_command_line_option(value))
+                       warn_command_line_option(var, value);
                else if (!me->overwrite && submodule->path)
                        warn_multiple_config(me->treeish_name, submodule->name,
                                        "path");
@@ -449,6 +457,8 @@ static int parse_config(const char *var, const char *value, void *data)
        } else if (!strcmp(item.buf, "url")) {
                if (!value) {
                        ret = config_error_nonbool(var);
+               } else if (looks_like_command_line_option(value)) {
+                       warn_command_line_option(var, value);
                } else if (!me->overwrite && submodule->url) {
                        warn_multiple_config(me->treeish_name, submodule->name,
                                        "url");
index a2b266fbfae2cd89b00a11008fbcd28bf09777e2..b53cb6e9c4714c3582aab35fea5bfb3739658348 100644 (file)
@@ -22,6 +22,7 @@
 #include "worktree.h"
 #include "parse-options.h"
 #include "object-store.h"
+#include "commit-reach.h"
 
 static int config_update_recurse_submodules = RECURSE_SUBMODULES_OFF;
 static struct string_list changed_submodule_names = STRING_LIST_INIT_DUP;
@@ -65,8 +66,7 @@ int is_staging_gitmodules_ok(struct index_state *istate)
        if ((pos >= 0) && (pos < istate->cache_nr)) {
                struct stat st;
                if (lstat(GITMODULES_FILE, &st) == 0 &&
-                   ie_match_stat(istate, istate->cache[pos], &st,
-                                 CE_MATCH_IGNORE_FSMONITOR) & DATA_CHANGED)
+                   ie_match_stat(istate, istate->cache[pos], &st, 0) & DATA_CHANGED)
                        return 0;
        }
 
@@ -536,7 +536,7 @@ static void show_submodule_header(struct diff_options *o, const char *path,
                        fast_backward = 1;
        }
 
-       if (!oidcmp(one, two)) {
+       if (oideq(one, two)) {
                strbuf_release(&sb);
                return;
        }
index 9028b47d923ca027a146da82060dae395d3f7999..4d8dbc7c5f5800bb838a09a8ca87fab8a3de72ab 100644 (file)
--- a/t/README
+++ b/t/README
 GIT_TEST_OE_DELTA_SIZE=<n> exercises the uncommon pack-objects code
 path where deltas larger than this limit require extra memory
 allocation for bookkeeping.
 
+GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES=<boolean> checks that cache-tree
+records are valid when the index is written out or after a merge. This
+is mostly to catch missing invalidation. Default is true.
+
+GIT_TEST_COMMIT_GRAPH=<boolean>, when true, forces the commit-graph to
+be written after every 'git commit' command, and overrides the
+'core.commitGraph' setting to true.
+
 Naming Tests
 ------------
 
@@ -393,13 +401,13 @@ This test harness library does the following things:
    consistently when command line arguments --verbose (or -v),
    --debug (or -d), and --immediate (or -i) are given.
 
-Do's, don'ts & things to keep in mind
--------------------------------------
+Do's & don'ts
+-------------
 
 Here are a few examples of things you probably should and shouldn't do
 when writing tests.
 
-Do:
+Here are the "do's:"
 
  - Put all code inside test_expect_success and other assertions.
 
@@ -444,16 +452,21 @@ Do:
    Windows, where the shell (MSYS bash) mangles absolute path names.
    For details, see the commit message of 4114156ae9.
 
-Don't:
+ - Remember that inside the <script> part, the standard output and
+   standard error streams are discarded, and the test harness only
+   reports "ok" or "not ok" to the end user running the tests. Under
+   --verbose, they are shown to help debug the tests.
 
- - exit() within a <script> part.
+And here are the "don'ts:"
+
+ - Don't exit() within a <script> part.
 
    The harness will catch this as a programming error of the test.
    Use test_done instead if you need to stop the tests early (see
    "Skipping tests" below).
 
- - use '! git cmd' when you want to make sure the git command exits
-   with failure in a controlled way by calling "die()".  Instead,
+ - Don't use '! git cmd' when you want to make sure the git command
+   exits with failure in a controlled way by calling "die()".  Instead,
    use 'test_must_fail git cmd'.  This will signal a failure if git
    dies in an unexpected way (e.g. segfault).
 
@@ -461,8 +474,35 @@ Don't:
    platform commands; just use '! cmd'.  We are not in the business
    of verifying that the world given to us sanely works.
 
- - use perl without spelling it as "$PERL_PATH". This is to help our
-   friends on Windows where the platform Perl often adds CR before
+ - Don't feed the output of a git command to a pipe, as in:
+
+     git -C repo ls-files |
+     xargs -n 1 basename |
+     grep foo
+
+   which will discard git's exit code and may mask a crash. In the
+   above example, all exit codes are ignored except grep's.
+
+   Instead, write the output of that command to a temporary
+   file with ">" or assign it to a variable with "x=$(git ...)" rather
+   than pipe it.
+
+ - Don't use command substitution in a way that discards git's exit
+   code. When assigning to a variable, the exit code is not discarded,
+   e.g.:
+
+     x=$(git cat-file -p $sha) &&
+     ...
+
+   is OK because a crash in "git cat-file" will cause the "&&" chain
+   to fail, but:
+
+     test "refs/heads/foo" = "$(git symbolic-ref HEAD)"
+
+   is not OK and a crash in git could go undetected.
+
+ - Don't use perl without spelling it as "$PERL_PATH". This is to help
+   our friends on Windows where the platform Perl often adds CR before
    the end of line, and they bundle Git with a version of Perl that
    does not do so, whose path is specified with $PERL_PATH. Note that we
    provide a "perl" function which uses $PERL_PATH under the hood, so
@@ -470,17 +510,17 @@ Don't:
    (but you do, for example, on a shebang line or in a sub script
    created via "write_script").
 
- - use sh without spelling it as "$SHELL_PATH", when the script can
-   be misinterpreted by broken platform shell (e.g. Solaris).
+ - Don't use sh without spelling it as "$SHELL_PATH", when the script
+   can be misinterpreted by broken platform shell (e.g. Solaris).
 
- - chdir around in tests.  It is not sufficient to chdir to
+ - Don't chdir around in tests.  It is not sufficient to chdir to
    somewhere and then chdir back to the original location later in
    the test, as any intermediate step can fail and abort the test,
    causing the next test to start in an unexpected directory.  Do so
    inside a subshell if necessary.
 
- - save and verify the standard error of compound commands, i.e. group
-   commands, subshells, and shell functions (except test helper
+ - Don't save and verify the standard error of compound commands, i.e.
+   group commands, subshells, and shell functions (except test helper
    functions like 'test_must_fail') like this:
 
      ( cd dir && git cmd ) 2>error &&
@@ -495,7 +535,7 @@ Don't:
      ( cd dir && git cmd 2>../error ) &&
      test_cmp expect error
 
- - Break the TAP output
+ - Don't break the TAP output
 
    The raw output from your test may be interpreted by a TAP harness. TAP
    harnesses will ignore everything they don't know about, but don't step
@@ -515,13 +555,6 @@ Don't:
    but the best indication is to just run the tests with prove(1),
    it'll complain if anything is amiss.
 
-Keep in mind:
-
- - Inside the <script> part, the standard output and standard error
-   streams are discarded, and the test harness only reports "ok" or
-   "not ok" to the end user running the tests. Under --verbose, they
-   are shown to help debugging the tests.
-
 
 Skipping tests
 --------------
@@ -806,6 +839,28 @@ library for your script to use.
    the symbolic link in the file system and a part that does; then only
    the latter part need be protected by a SYMLINKS prerequisite (see below).
 
+ - test_oid_init
+
+   This function loads facts and useful object IDs related to the hash
+   algorithm(s) in use from the files in t/oid-info.
+
+ - test_oid_cache
+
+   This function reads per-hash algorithm information from standard
+   input (usually a heredoc) in the format described in
+   t/oid-info/README.  This is useful for test-specific values, such as
+   object IDs, which must vary based on the hash algorithm.
+
+   Certain fixed values, such as hash sizes and common placeholder
+   object IDs, can be loaded with test_oid_init (described above).
+
+ - test_oid <key>
+
+   This function looks up a value for the hash algorithm in use, based
+   on the key given.  The value must have been loaded using
+   test_oid_init or test_oid_cache.  Providing an unknown key is an
+   error.
+
 Prerequisites
 -------------
 
index 34c7259248760ff8bdea495ac5effd5de2b04ae3..e749a49c88e66e4b3ce388b2c0762d36d4090f99 100644 (file)
@@ -34,8 +34,8 @@ int cmd__delta(int argc, const char **argv)
                return 1;
        }
        from_size = st.st_size;
-       from_buf = mmap(NULL, from_size, PROT_READ, MAP_PRIVATE, fd, 0);
-       if (from_buf == MAP_FAILED) {
+       from_buf = xmalloc(from_size);
+       if (read_in_full(fd, from_buf, from_size) < 0) {
                perror(argv[2]);
                close(fd);
                return 1;
@@ -48,8 +48,8 @@ int cmd__delta(int argc, const char **argv)
                return 1;
        }
        data_size = st.st_size;
-       data_buf = mmap(NULL, data_size, PROT_READ, MAP_PRIVATE, fd, 0);
-       if (data_buf == MAP_FAILED) {
+       data_buf = xmalloc(data_size);
+       if (read_in_full(fd, data_buf, data_size) < 0) {
                perror(argv[3]);
                close(fd);
                return 1;
index 98a4891f1dc936a486075703de319affdacb1c78..6a3f88f5f5d4a8af09dca7c13da6132d66093ca2 100644 (file)
@@ -33,7 +33,7 @@ static int dump_cache_tree(struct cache_tree *it,
        }
        else {
                dump_one(it, pfx, "");
-               if (oidcmp(&it->oid, &ref->oid) ||
+               if (!oideq(&it->oid, &ref->oid) ||
                    ref->entry_count != it->entry_count ||
                    ref->subtree_nr != it->subtree_nr) {
                        /* claims to be valid but is lying */
index ad452707e88b609d4d39e6f12bff9f73df2271c7..08e3684aff223670fb0786d80426d18bb8800a44 100644 (file)
@@ -1,6 +1,7 @@
+#include "test-tool.h"
 #include "cache.h"
 
-int cmd_main(int ac, const char **av)
+int cmd__dump_fsmonitor(int ac, const char **av)
 {
        struct index_state *istate = &the_index;
        int i;
index bd92fb305a239cb4b96d38b4a07fe885e71bbeb3..52870ebbb3ea7b2d69aecb1bb429dbbaf6388b51 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "dir.h"
 
@@ -38,7 +39,7 @@ static void dump(struct untracked_cache_dir *ucd, struct strbuf *base)
        strbuf_setlen(base, len);
 }
 
-int cmd_main(int ac, const char **av)
+int cmd__dump_untracked_cache(int ac, const char **av)
 {
        struct untracked_cache *uc;
        struct strbuf base = STRBUF_INIT;
index 630c76d1275485d2d460896407e645116c997916..9cb8a0ea0f8ae7b3da8525b2a354bfa542db93c3 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "parse-options.h"
 #include "string-list.h"
@@ -94,11 +95,11 @@ static void show(struct string_list *expect, int *status, const char *fmt, ...)
        strbuf_release(&buf);
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__parse_options(int argc, const char **argv)
 {
        const char *prefix = "prefix/";
        const char *usage[] = {
-               "test-parse-options <options>",
+               "test-tool parse-options <options>",
                "",
                "A helper function for the parse-options API.",
                NULL
index 30775f986f8067323808f6ccd064067f6c27489b..282d53638446bb2f0c91b81b313852cbadfb3c1f 100644 (file)
@@ -1,4 +1,5 @@
 #include "cache.h"
+#include "test-tool.h"
 #include "pkt-line.h"
 
 static void pack_line(const char *line)
@@ -79,7 +80,7 @@ static void unpack_sideband(void)
        }
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__pkt_line(int argc, const char **argv)
 {
        if (argc < 2)
                die("too few arguments");
diff --git a/t/helper/test-reach.c b/t/helper/test-reach.c
new file mode 100644 (file)
index 0000000..08d2ea6
--- /dev/null
@@ -0,0 +1,142 @@
+#include "test-tool.h"
+#include "cache.h"
+#include "commit.h"
+#include "commit-reach.h"
+#include "config.h"
+#include "parse-options.h"
+#include "ref-filter.h"
+#include "string-list.h"
+#include "tag.h"
+
+static void print_sorted_commit_ids(struct commit_list *list)
+{
+       int i;
+       struct string_list s = STRING_LIST_INIT_DUP;
+
+       while (list) {
+               string_list_append(&s, oid_to_hex(&list->item->object.oid));
+               list = list->next;
+       }
+
+       string_list_sort(&s);
+
+       for (i = 0; i < s.nr; i++)
+               printf("%s\n", s.items[i].string);
+
+       string_list_clear(&s, 0);
+}
+
+int cmd__reach(int ac, const char **av)
+{
+       struct object_id oid_A, oid_B;
+       struct commit *A, *B;
+       struct commit_list *X, *Y;
+       struct object_array X_obj = OBJECT_ARRAY_INIT;
+       struct commit **X_array;
+       int X_nr, X_alloc;
+       struct strbuf buf = STRBUF_INIT;
+       struct repository *r = the_repository;
+
+       setup_git_directory();
+
+       if (ac < 2)
+               exit(1);
+
+       A = B = NULL;
+       X = Y = NULL;
+       X_nr = 0;
+       X_alloc = 16;
+       ALLOC_ARRAY(X_array, X_alloc);
+
+       while (strbuf_getline(&buf, stdin) != EOF) {
+               struct object_id oid;
+               struct object *orig;
+               struct object *peeled;
+               struct commit *c;
+               if (buf.len < 3)
+                       continue;
+
+               if (get_oid_committish(buf.buf + 2, &oid))
+                       die("failed to resolve %s", buf.buf + 2);
+
+               orig = parse_object(r, &oid);
+               peeled = deref_tag_noverify(orig);
+
+               if (!peeled)
+                       die("failed to load commit for input %s resulting in oid %s\n",
+                           buf.buf, oid_to_hex(&oid));
+
+               c = object_as_type(r, peeled, OBJ_COMMIT, 0);
+
+               if (!c)
+                       die("failed to load commit for input %s resulting in oid %s\n",
+                           buf.buf, oid_to_hex(&oid));
+
+               switch (buf.buf[0]) {
+                       case 'A':
+                               oidcpy(&oid_A, &oid);
+                               A = c;
+                               break;
+
+                       case 'B':
+                               oidcpy(&oid_B, &oid);
+                               B = c;
+                               break;
+
+                       case 'X':
+                               commit_list_insert(c, &X);
+                               ALLOC_GROW(X_array, X_nr + 1, X_alloc);
+                               X_array[X_nr++] = c;
+                               add_object_array(orig, NULL, &X_obj);
+                               break;
+
+                       case 'Y':
+                               commit_list_insert(c, &Y);
+                               break;
+
+                       default:
+                               die("unexpected start of line: %c", buf.buf[0]);
+               }
+       }
+       strbuf_release(&buf);
+
+       if (!strcmp(av[1], "ref_newer"))
+               printf("%s(A,B):%d\n", av[1], ref_newer(&oid_A, &oid_B));
+       else if (!strcmp(av[1], "in_merge_bases"))
+               printf("%s(A,B):%d\n", av[1], in_merge_bases(A, B));
+       else if (!strcmp(av[1], "is_descendant_of"))
+               printf("%s(A,X):%d\n", av[1], is_descendant_of(A, X));
+       else if (!strcmp(av[1], "get_merge_bases_many")) {
+               struct commit_list *list = get_merge_bases_many(A, X_nr, X_array);
+               printf("%s(A,X):\n", av[1]);
+               print_sorted_commit_ids(list);
+       } else if (!strcmp(av[1], "reduce_heads")) {
+               struct commit_list *list = reduce_heads(X);
+               printf("%s(X):\n", av[1]);
+               print_sorted_commit_ids(list);
+       } else if (!strcmp(av[1], "can_all_from_reach")) {
+               printf("%s(X,Y):%d\n", av[1], can_all_from_reach(X, Y, 1));
+       } else if (!strcmp(av[1], "can_all_from_reach_with_flag")) {
+               struct commit_list *iter = Y;
+
+               while (iter) {
+                       iter->item->object.flags |= 2;
+                       iter = iter->next;
+               }
+
+               printf("%s(X,_,_,0,0):%d\n", av[1], can_all_from_reach_with_flag(&X_obj, 2, 4, 0, 0));
+       } else if (!strcmp(av[1], "commit_contains")) {
+               struct ref_filter filter;
+               struct contains_cache cache;
+               init_contains_cache(&cache);
+
+               if (ac > 2 && !strcmp(av[2], "--tag"))
+                       filter.with_commit_tag_algo = 1;
+               else
+                       filter.with_commit_tag_algo = 0;
+
+               printf("%s(_,A,X,_):%d\n", av[1], commit_contains(&filter, A, X, &cache));
+       }
+
+       exit(0);
+}
diff --git a/t/helper/test-read-midx.c b/t/helper/test-read-midx.c
new file mode 100644 (file)
index 0000000..831b586
--- /dev/null
@@ -0,0 +1,51 @@
+#include "test-tool.h"
+#include "cache.h"
+#include "midx.h"
+#include "repository.h"
+#include "object-store.h"
+
+static int read_midx_file(const char *object_dir)
+{
+       uint32_t i;
+       struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
+
+       if (!m)
+               return 1;
+
+       printf("header: %08x %d %d %d\n",
+              m->signature,
+              m->version,
+              m->num_chunks,
+              m->num_packs);
+
+       printf("chunks:");
+
+       if (m->chunk_pack_names)
+               printf(" pack-names");
+       if (m->chunk_oid_fanout)
+               printf(" oid-fanout");
+       if (m->chunk_oid_lookup)
+               printf(" oid-lookup");
+       if (m->chunk_object_offsets)
+               printf(" object-offsets");
+       if (m->chunk_large_offsets)
+               printf(" large-offsets");
+
+       printf("\nnum_objects: %d\n", m->num_objects);
+
+       printf("packs:\n");
+       for (i = 0; i < m->num_packs; i++)
+               printf("%s\n", m->pack_names[i]);
+
+       printf("object-dir: %s\n", m->object_dir);
+
+       return 0;
+}
+
+int cmd__read_midx(int argc, const char **argv)
+{
+       if (argc != 2)
+               usage("read-midx <object-dir>");
+
+       return read_midx_file(argv[1]);
+}
index 2762ca656262baa9494d96a2647248f4042a2e01..6a84a53efbf6919c83d3f1fd73786acd92ee7abf 100644 (file)
@@ -15,7 +15,10 @@ static void test_parse_commit_in_graph(const char *gitdir, const char *worktree,
        struct commit *c;
        struct commit_list *parent;
 
-       repo_init(&r, gitdir, worktree);
+       setup_git_env(gitdir);
+
+       if (repo_init(&r, gitdir, worktree))
+               die("Couldn't init repo");
 
        c = lookup_commit(&r, commit_oid);
 
@@ -38,7 +41,10 @@ static void test_get_commit_tree_in_graph(const char *gitdir,
        struct commit *c;
        struct tree *tree;
 
-       repo_init(&r, gitdir, worktree);
+       setup_git_env(gitdir);
+
+       if (repo_init(&r, gitdir, worktree))
+               die("Couldn't init repo");
 
        c = lookup_commit(&r, commit_oid);
 
index 0edafcfd65db7586bc1521d2e1afa99fbde50292..6b5836dc1b93b85333f2acb9cf1717a4462e209c 100644 (file)
@@ -14,7 +14,9 @@ static struct test_cmd cmds[] = {
        { "delta", cmd__delta },
        { "drop-caches", cmd__drop_caches },
        { "dump-cache-tree", cmd__dump_cache_tree },
+       { "dump-fsmonitor", cmd__dump_fsmonitor },
        { "dump-split-index", cmd__dump_split_index },
+       { "dump-untracked-cache", cmd__dump_untracked_cache },
        { "example-decorate", cmd__example_decorate },
        { "genrandom", cmd__genrandom },
        { "hashmap", cmd__hashmap },
@@ -25,17 +27,21 @@ static struct test_cmd cmds[] = {
        { "mergesort", cmd__mergesort },
        { "mktemp", cmd__mktemp },
        { "online-cpus", cmd__online_cpus },
+       { "parse-options", cmd__parse_options },
        { "path-utils", cmd__path_utils },
+       { "pkt-line", cmd__pkt_line },
        { "prio-queue", cmd__prio_queue },
+       { "reach", cmd__reach },
        { "read-cache", cmd__read_cache },
+       { "read-midx", cmd__read_midx },
        { "ref-store", cmd__ref_store },
        { "regex", cmd__regex },
        { "repository", cmd__repository },
        { "revision-walking", cmd__revision_walking },
        { "run-command", cmd__run_command },
        { "scrap-cache-tree", cmd__scrap_cache_tree },
-       { "sha1-array", cmd__sha1_array },
        { "sha1", cmd__sha1 },
+       { "sha1-array", cmd__sha1_array },
        { "sigchain", cmd__sigchain },
        { "strcmp-offset", cmd__strcmp_offset },
        { "string-list", cmd__string_list },
@@ -43,6 +49,9 @@ static struct test_cmd cmds[] = {
        { "subprocess", cmd__subprocess },
        { "urlmatch-normalization", cmd__urlmatch_normalization },
        { "wildmatch", cmd__wildmatch },
+#ifdef GIT_WINDOWS_NATIVE
+       { "windows-named-pipe", cmd__windows_named_pipe },
+#endif
        { "write-cache", cmd__write_cache },
 };
 
index e954e8c5222f77e882f577198091adde95cf2532..e4890566da552eb10ae718c4c921ad103c3f1a71 100644 (file)
@@ -10,7 +10,9 @@ int cmd__date(int argc, const char **argv);
 int cmd__delta(int argc, const char **argv);
 int cmd__drop_caches(int argc, const char **argv);
 int cmd__dump_cache_tree(int argc, const char **argv);
+int cmd__dump_fsmonitor(int argc, const char **argv);
 int cmd__dump_split_index(int argc, const char **argv);
+int cmd__dump_untracked_cache(int argc, const char **argv);
 int cmd__example_decorate(int argc, const char **argv);
 int cmd__genrandom(int argc, const char **argv);
 int cmd__hashmap(int argc, const char **argv);
@@ -21,17 +23,21 @@ int cmd__match_trees(int argc, const char **argv);
 int cmd__mergesort(int argc, const char **argv);
 int cmd__mktemp(int argc, const char **argv);
 int cmd__online_cpus(int argc, const char **argv);
+int cmd__parse_options(int argc, const char **argv);
 int cmd__path_utils(int argc, const char **argv);
+int cmd__pkt_line(int argc, const char **argv);
 int cmd__prio_queue(int argc, const char **argv);
+int cmd__reach(int argc, const char **argv);
 int cmd__read_cache(int argc, const char **argv);
+int cmd__read_midx(int argc, const char **argv);
 int cmd__ref_store(int argc, const char **argv);
 int cmd__regex(int argc, const char **argv);
 int cmd__repository(int argc, const char **argv);
 int cmd__revision_walking(int argc, const char **argv);
 int cmd__run_command(int argc, const char **argv);
 int cmd__scrap_cache_tree(int argc, const char **argv);
-int cmd__sha1_array(int argc, const char **argv);
 int cmd__sha1(int argc, const char **argv);
+int cmd__sha1_array(int argc, const char **argv);
 int cmd__sigchain(int argc, const char **argv);
 int cmd__strcmp_offset(int argc, const char **argv);
 int cmd__string_list(int argc, const char **argv);
@@ -39,6 +45,9 @@ int cmd__submodule_config(int argc, const char **argv);
 int cmd__subprocess(int argc, const char **argv);
 int cmd__urlmatch_normalization(int argc, const char **argv);
 int cmd__wildmatch(int argc, const char **argv);
+#ifdef GIT_WINDOWS_NATIVE
+int cmd__windows_named_pipe(int argc, const char **argv);
+#endif
 int cmd__write_cache(int argc, const char **argv);
 
 #endif
diff --git a/t/helper/test-windows-named-pipe.c b/t/helper/test-windows-named-pipe.c
new file mode 100644 (file)
index 0000000..b4b752b
--- /dev/null
@@ -0,0 +1,72 @@
+#include "test-tool.h"
+#include "git-compat-util.h"
+#include "strbuf.h"
+
+#ifdef GIT_WINDOWS_NATIVE
+static const char *usage_string = "<pipe-filename>";
+
+#define TEST_BUFSIZE (4096)
+
+int cmd__windows_named_pipe(int argc, const char **argv)
+{
+       const char *filename;
+       struct strbuf pathname = STRBUF_INIT;
+       int err;
+       HANDLE h;
+       BOOL connected;
+       char buf[TEST_BUFSIZE + 1];
+
+       if (argc < 2)
+               goto print_usage;
+       filename = argv[1];
+       if (strchr(filename, '/') || strchr(filename, '\\'))
+               goto print_usage;
+       strbuf_addf(&pathname, "//./pipe/%s", filename);
+
+       /*
+        * Create a single instance of the server side of the named pipe.
+        * This will allow exactly one client instance to connect to it.
+        */
+       h = CreateNamedPipeA(
+               pathname.buf,
+               PIPE_ACCESS_INBOUND | FILE_FLAG_FIRST_PIPE_INSTANCE,
+               PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
+               PIPE_UNLIMITED_INSTANCES,
+               TEST_BUFSIZE, TEST_BUFSIZE, 0, NULL);
+       if (h == INVALID_HANDLE_VALUE) {
+               err = err_win_to_posix(GetLastError());
+               fprintf(stderr, "CreateNamedPipe failed: %s\n",
+                       strerror(err));
+               return err;
+       }
+
+       connected = ConnectNamedPipe(h, NULL)
+               ? TRUE
+               : (GetLastError() == ERROR_PIPE_CONNECTED);
+       if (!connected) {
+               err = err_win_to_posix(GetLastError());
+               fprintf(stderr, "ConnectNamedPipe failed: %s\n",
+                       strerror(err));
+               CloseHandle(h);
+               return err;
+       }
+
+       while (1) {
+               DWORD nbr;
+               BOOL success = ReadFile(h, buf, TEST_BUFSIZE, &nbr, NULL);
+               if (!success || nbr == 0)
+                       break;
+               buf[nbr] = 0;
+
+               write(1, buf, nbr);
+       }
+
+       DisconnectNamedPipe(h);
+       CloseHandle(h);
+       return 0;
+
+print_usage:
+       fprintf(stderr, "usage: %s %s\n", argv[0], usage_string);
+       return 1;
+}
+#endif
index 3fe02876c1fc7e4f7488c1534a4745f5760f1ca0..f1277bef4fa65e72607ea1f2dac326048559e6aa 100755 (executable)
@@ -57,9 +57,12 @@ then
                echo | gpgsm --homedir "${GNUPGHOME}" 2>/dev/null \
                        --passphrase-fd 0 --pinentry-mode loopback \
                        --import "$TEST_DIRECTORY"/lib-gpg/gpgsm_cert.p12 &&
-               gpgsm --homedir "${GNUPGHOME}" 2>/dev/null -K \
-                       | grep fingerprint: | cut -d" " -f4 | tr -d '\n' > \
-                       ${GNUPGHOME}/trustlist.txt &&
+
+               gpgsm --homedir "${GNUPGHOME}" 2>/dev/null -K |
+               grep fingerprint: |
+               cut -d" " -f4 |
+               tr -d '\n' >"${GNUPGHOME}/trustlist.txt" &&
+
                echo " S relax" >> ${GNUPGHOME}/trustlist.txt &&
                (gpgconf --kill gpg-agent >/dev/null 2>&1 || : ) &&
                echo hello | gpgsm --homedir "${GNUPGHOME}" >/dev/null \
diff --git a/t/oid-info/README b/t/oid-info/README
new file mode 100644 (file)
index 0000000..27f843f
--- /dev/null
@@ -0,0 +1,19 @@
+This directory contains various per-hash values that are used in the testsuite.
+
+Each file contains lines containing a key-value pair; blank lines and lines
+starting with `#` are ignored.  The key and value are separated by whitespace
+(specifically, those whitespace characters in the default `$IFS`).  The key consists only
+of shell identifier characters, and the value consists of a hash algorithm,
+colon, and value.  The hash algorithm also consists only of shell identifier
+characters; it should match the value in sha1-file.c.
+
+For example, the following lines map the key "rawsz" to "20" if SHA-1 is in use
+and to "32" if SHA-256 is in use:
+
+----
+rawsz sha1:20
+rawsz sha256:32
+----
+
+The keys and values used here are loaded by `test_oid_init` (see the README file
+in the "t" directory) and are used by calling `test_oid`.
diff --git a/t/oid-info/hash-info b/t/oid-info/hash-info
new file mode 100644 (file)
index 0000000..ccdbfdf
--- /dev/null
@@ -0,0 +1,8 @@
+rawsz sha1:20
+rawsz sha256:32
+
+hexsz sha1:40
+hexsz sha256:64
+
+zero sha1:0000000000000000000000000000000000000000
+zero sha256:0000000000000000000000000000000000000000000000000000000000000000
diff --git a/t/oid-info/oid b/t/oid-info/oid
new file mode 100644 (file)
index 0000000..a754970
--- /dev/null
@@ -0,0 +1,29 @@
+# These are some common invalid and partial object IDs used in tests.
+001    sha1:0000000000000000000000000000000000000001
+001    sha256:0000000000000000000000000000000000000000000000000000000000000001
+002    sha1:0000000000000000000000000000000000000002
+002    sha256:0000000000000000000000000000000000000000000000000000000000000002
+003    sha1:0000000000000000000000000000000000000003
+003    sha256:0000000000000000000000000000000000000000000000000000000000000003
+004    sha1:0000000000000000000000000000000000000004
+004    sha256:0000000000000000000000000000000000000000000000000000000000000004
+005    sha1:0000000000000000000000000000000000000005
+005    sha256:0000000000000000000000000000000000000000000000000000000000000005
+006    sha1:0000000000000000000000000000000000000006
+006    sha256:0000000000000000000000000000000000000000000000000000000000000006
+007    sha1:0000000000000000000000000000000000000007
+007    sha256:0000000000000000000000000000000000000000000000000000000000000007
+# All zeros or Fs missing one or two hex segments.
+zero_1         sha1:000000000000000000000000000000000000000
+zero_1         sha256:000000000000000000000000000000000000000000000000000000000000000
+zero_2         sha1:00000000000000000000000000000000000000
+zero_2         sha256:00000000000000000000000000000000000000000000000000000000000000
+ff_1           sha1:fffffffffffffffffffffffffffffffffffffff
+ff_1           sha256:fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ff_2           sha1:ffffffffffffffffffffffffffffffffffffff
+ff_2           sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+# More various invalid OIDs.
+numeric                sha1:0123456789012345678901234567890123456789
+numeric                sha256:0123456789012345678901234567890123456789012345678901234567890123
+deadbeef       sha1:deadbeefdeadbeefdeadbeefdeadbeefdeadbeef
+deadbeef       sha256:deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef
index 21321a0f361203aacc78516ae25f21f35107da02..be12090c3853b8d64bb903aa92b74001d0fdd19f 100644 (file)
@@ -168,3 +168,28 @@ that
   While we have tried to make sure that it can cope with embedded
   whitespace and other special characters, it will not work with
   multi-line data.
+
+Rather than tracking the performance by run-time as `test_perf` does, you
+may also track output size by using `test_size`. The stdout of the
+function should be a single numeric value, which will be captured and
+shown in the aggregated output. For example:
+
+       test_perf 'time foo' '
+               ./foo >foo.out
+       '
+
+       test_size 'output size' '
+               wc -c <foo.out
+       '
+
+might produce output like:
+
+       Test                origin           HEAD
+       -------------------------------------------------------------
+       1234.1 time foo     0.37(0.79+0.02)  0.26(0.51+0.02) -29.7%
+       1234.2 output size             4.3M             3.6M -14.7%
+
+The item being measured (and its units) is up to the test; the context
+and the test title should make it clear to the user whether bigger or
+smaller numbers are better. Unlike test_perf, the test code will only be
+run once, since output sizes tend to be more deterministic than timings.
index bc865160e7e3370f9462beda9d8b3866e3c2111b..494907a892bba90b677448a152fd55c010b2ebda 100755 (executable)
@@ -13,27 +13,42 @@ sub get_times {
        my $line = <$fh>;
        return undef if not defined $line;
        close $fh or die "cannot close $name: $!";
-       $line =~ /^(?:(\d+):)?(\d+):(\d+(?:\.\d+)?) (\d+(?:\.\d+)?) (\d+(?:\.\d+)?)$/
-               or die "bad input line: $line";
-       my $rt = ((defined $1 ? $1 : 0.0)*60+$2)*60+$3;
-       return ($rt, $4, $5);
+       # times
+       if ($line =~ /^(?:(\d+):)?(\d+):(\d+(?:\.\d+)?) (\d+(?:\.\d+)?) (\d+(?:\.\d+)?)$/) {
+               my $rt = ((defined $1 ? $1 : 0.0)*60+$2)*60+$3;
+               return ($rt, $4, $5);
+       # size
+       } elsif ($line =~ /^\d+$/) {
+               return $&;
+       } else {
+               die "bad input line: $line";
+       }
+}
+
+sub relative_change {
+       my ($r, $firstr) = @_;
+       if ($firstr > 0) {
+               return sprintf "%+.1f%%", 100.0*($r-$firstr)/$firstr;
+       } elsif ($r == 0) {
+               return "=";
+       } else {
+               return "+inf";
+       }
 }
 
 sub format_times {
        my ($r, $u, $s, $firstr) = @_;
+       # no value means we did not finish the test
        if (!defined $r) {
                return "<missing>";
        }
-       my $out = sprintf "%.2f(%.2f+%.2f)", $r, $u, $s;
-       if (defined $firstr) {
-               if ($firstr > 0) {
-                       $out .= sprintf " %+.1f%%", 100.0*($r-$firstr)/$firstr;
-               } elsif ($r == 0) {
-                       $out .= " =";
-               } else {
-                       $out .= " +inf";
-               }
+       # a single value means we have a size, not times
+       if (!defined $u) {
+               return format_size($r, $firstr);
        }
+       # otherwise, we have real/user/system times
+       my $out = sprintf "%.2f(%.2f+%.2f)", $r, $u, $s;
+       $out .= ' ' . relative_change($r, $firstr) if defined $firstr;
        return $out;
 }
 
@@ -51,6 +66,25 @@ sub usage {
        exit(1);
 }
 
+sub human_size {
+       my $n = shift;
+       my @units = ('', qw(K M G));
+       while ($n > 900 && @units > 1) {
+               $n /= 1000;
+               shift @units;
+       }
+       return $n unless length $units[0];
+       return sprintf '%.1f%s', $n, $units[0];
+}
+
+sub format_size {
+       my ($size, $first) = @_;
+       # match the width of a time: 0.00(0.00+0.00)
+       my $out = sprintf '%15s', human_size($size);
+       $out .= ' ' . relative_change($size, $first) if defined $first;
+       return $out;
+}
+
 my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests,
     $codespeed, $sortby, $subsection, $reponame);
 
@@ -181,7 +215,14 @@ sub print_default_results {
                my $firstr;
                for my $i (0..$#dirs) {
                        my $d = $dirs[$i];
-                       $times{$prefixes{$d}.$t} = [get_times("$resultsdir/$prefixes{$d}$t.times")];
+                       my $base = "$resultsdir/$prefixes{$d}$t";
+                       $times{$prefixes{$d}.$t} = [];
+                       foreach my $type (qw(times size)) {
+                               if (-e "$base.$type") {
+                                       $times{$prefixes{$d}.$t} = [get_times("$base.$type")];
+                                       last;
+                               }
+                       }
                        my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}};
                        my $w = length format_times($r,$u,$s,$firstr);
                        $colwidth[$i] = $w if $w > $colwidth[$i];
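
For reference, a purely illustrative sketch (not part of the patch; the file names and numbers are made up, chosen to echo the README example above) of the two result-file formats the updated get_times() accepts: elapsed/user/system times in a ".times" file, or a single integer in a ".size" file.

	echo "0:00.26 0.51 0.02" >p0000-demo.1.times	# matched by the time regex
	echo "3600000" >p0000-demo.2.size		# matched as a size; reported as 3.6M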
diff --git a/t/perf/p1450-fsck.sh b/t/perf/p1450-fsck.sh
new file mode 100755 (executable)
index 0000000..ae1b841
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+test_description='Test fsck performance'
+
+. ./perf-lib.sh
+
+test_perf_large_repo
+
+test_perf 'fsck' '
+       git fsck
+'
+
+test_done
diff --git a/t/perf/p1451-fsck-skip-list.sh b/t/perf/p1451-fsck-skip-list.sh
new file mode 100755 (executable)
index 0000000..c2b97d2
--- /dev/null
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+test_description='Test fsck skipList performance'
+
+. ./perf-lib.sh
+
+test_perf_fresh_repo
+
+n=1000000
+
+test_expect_success "setup $n bad commits" '
+       for i in $(test_seq 1 $n)
+       do
+               echo "commit refs/heads/master" &&
+               echo "committer C <c@example.com> 1234567890 +0000" &&
+               echo "data <<EOF" &&
+               echo "$i.Q." &&
+               echo "EOF"
+       done | q_to_nul | git fast-import
+'
+
+skip=0
+while test $skip -le $n
+do
+       test_expect_success "create skipList for $skip bad commits" '
+               git log --format=%H --max-count=$skip |
+               sort >skiplist
+       '
+
+       test_perf "fsck with $skip skipped bad commits" '
+               git -c fsck.skipList=skiplist fsck
+       '
+
+       case $skip in
+       0) skip=1 ;;
+       *) skip=${skip}0 ;;
+       esac
+done
+
+test_done
diff --git a/t/perf/p5311-pack-bitmaps-fetch.sh b/t/perf/p5311-pack-bitmaps-fetch.sh
new file mode 100755 (executable)
index 0000000..b045759
--- /dev/null
@@ -0,0 +1,45 @@
+#!/bin/sh
+
+test_description='performance of fetches from bitmapped packs'
+. ./perf-lib.sh
+
+test_perf_default_repo
+
+test_expect_success 'create bitmapped server repo' '
+       git config pack.writebitmaps true &&
+       git config pack.writebitmaphashcache true &&
+       git repack -ad
+'
+
+# simulate a fetch from a repository that last fetched N days ago, for
+# various values of N. We do so by following the first-parent chain
+# and assuming that the first entry in the chain that is N days older
+# than the current HEAD is where HEAD would have been then.
+for days in 1 2 4 8 16 32 64 128; do
+       title=$(printf '%10s' "($days days)")
+       test_expect_success "setup revs from $days days ago" '
+               now=$(git log -1 --format=%ct HEAD) &&
+               then=$(($now - ($days * 86400))) &&
+               tip=$(git rev-list -1 --first-parent --until=$then HEAD) &&
+               {
+                       echo HEAD &&
+                       echo ^$tip
+               } >revs
+       '
+
+       test_perf "server $title" '
+               git pack-objects --stdout --revs \
+                                --thin --delta-base-offset \
+                                <revs >tmp.pack
+       '
+
+       test_size "size   $title" '
+               wc -c <tmp.pack
+       '
+
+       test_perf "client $title" '
+               git index-pack --stdin --fix-thin <tmp.pack
+       '
+done
+
+test_done
index e4c343a6b795b6f990fa278788ad65a0a5cdc4e6..11d1922cf58edb78b8e311e3fa041ae54b625e74 100644 (file)
@@ -179,8 +179,8 @@ exit $ret' >&3 2>&4
        return "$eval_ret"
 }
 
-
-test_perf () {
+test_wrapper_ () {
+       test_wrapper_func_=$1; shift
        test_start_
        test "$#" = 3 && { test_prereq=$1; shift; } || test_prereq=
        test "$#" = 2 ||
@@ -191,35 +191,57 @@ test_perf () {
                base=$(basename "$0" .sh)
                echo "$test_count" >>"$perf_results_dir"/$base.subtests
                echo "$1" >"$perf_results_dir"/$base.$test_count.descr
-               if test -z "$verbose"; then
-                       printf "%s" "perf $test_count - $1:"
-               else
-                       echo "perf $test_count - $1:"
-               fi
-               for i in $(test_seq 1 $GIT_PERF_REPEAT_COUNT); do
-                       say >&3 "running: $2"
-                       if test_run_perf_ "$2"
-                       then
-                               if test -z "$verbose"; then
-                                       printf " %s" "$i"
-                               else
-                                       echo "* timing run $i/$GIT_PERF_REPEAT_COUNT:"
-                               fi
+               base="$perf_results_dir"/"$perf_results_prefix$(basename "$0" .sh)"."$test_count"
+               "$test_wrapper_func_" "$@"
+       fi
+
+       test_finish_
+}
+
+test_perf_ () {
+       if test -z "$verbose"; then
+               printf "%s" "perf $test_count - $1:"
+       else
+               echo "perf $test_count - $1:"
+       fi
+       for i in $(test_seq 1 $GIT_PERF_REPEAT_COUNT); do
+               say >&3 "running: $2"
+               if test_run_perf_ "$2"
+               then
+                       if test -z "$verbose"; then
+                               printf " %s" "$i"
                        else
-                               test -z "$verbose" && echo
-                               test_failure_ "$@"
-                               break
+                               echo "* timing run $i/$GIT_PERF_REPEAT_COUNT:"
                        fi
-               done
-               if test -z "$verbose"; then
-                       echo " ok"
                else
-                       test_ok_ "$1"
+                       test -z "$verbose" && echo
+                       test_failure_ "$@"
+                       break
                fi
-               base="$perf_results_dir"/"$perf_results_prefix$(basename "$0" .sh)"."$test_count"
-               "$TEST_DIRECTORY"/perf/min_time.perl test_time.* >"$base".times
+       done
+       if test -z "$verbose"; then
+               echo " ok"
+       else
+               test_ok_ "$1"
        fi
-       test_finish_
+       "$TEST_DIRECTORY"/perf/min_time.perl test_time.* >"$base".times
+}
+
+test_perf () {
+       test_wrapper_ test_perf_ "$@"
+}
+
+test_size_ () {
+       say >&3 "running: $2"
+       if test_eval_ "$2" 3>"$base".size; then
+               test_ok_ "$1"
+       else
+               test_failure_ "$@"
+       fi
+}
+
+test_size () {
+       test_wrapper_ test_size_ "$@"
 }
 
 # We extend test_done to print timings at the end (./run disables this
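
The capture trick test_size_ relies on can be sketched standalone (illustrative only, assuming test_eval_ behaves as in test-lib.sh and sends the evaluated body's stdout to descriptor 3): redirecting fd 3 to the ".size" file is what turns the body's single line of output into the recorded result.

	demo_eval () {
		eval "$1" >&3 2>&4	# stand-in for test_eval_
	}

	demo_size () {
		demo_eval "$1" 3>demo.size 4>/dev/null
	}

	printf "some test output" >demo.dat
	demo_size 'wc -c <demo.dat'
	cat demo.size	# the body printed 16, which landed in demo.size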
index 850f651e4e434084bbc8995fcfb3bb001d83875c..26601e698bd3abebd2e1f10ba6012f80c65ffa85 100755 (executable)
@@ -821,9 +821,87 @@ test_expect_success 'tests clean up even on failures' "
        EOF
 "
 
+test_expect_success 'test_oid setup' '
+       test_oid_init
+'
+
+test_expect_success 'test_oid provides sane info by default' '
+       test_oid zero >actual &&
+       grep "^00*\$" actual &&
+       rawsz="$(test_oid rawsz)" &&
+       hexsz="$(test_oid hexsz)" &&
+       test "$hexsz" -eq $(wc -c <actual) &&
+       test $(( $rawsz * 2)) -eq "$hexsz"
+'
+
+test_expect_success 'test_oid can look up data for SHA-1' '
+       test_when_finished "test_detect_hash" &&
+       test_set_hash sha1 &&
+       test_oid zero >actual &&
+       grep "^00*\$" actual &&
+       rawsz="$(test_oid rawsz)" &&
+       hexsz="$(test_oid hexsz)" &&
+       test $(wc -c <actual) -eq 40 &&
+       test "$rawsz" -eq 20 &&
+       test "$hexsz" -eq 40
+'
+
+test_expect_success 'test_oid can look up data for SHA-256' '
+       test_when_finished "test_detect_hash" &&
+       test_set_hash sha256 &&
+       test_oid zero >actual &&
+       grep "^00*\$" actual &&
+       rawsz="$(test_oid rawsz)" &&
+       hexsz="$(test_oid hexsz)" &&
+       test $(wc -c <actual) -eq 64 &&
+       test "$rawsz" -eq 32 &&
+       test "$hexsz" -eq 64
+'
+
 ################################################################
 # Basics of the basics
 
+test_oid_cache <<\EOF
+path0f sha1:f87290f8eb2cbbea7857214459a0739927eab154
+path0f sha256:638106af7c38be056f3212cbd7ac65bc1bac74f420ca5a436ff006a9d025d17d
+
+path0s sha1:15a98433ae33114b085f3eb3bb03b832b3180a01
+path0s sha256:3a24cc53cf68edddac490bbf94a418a52932130541361f685df685e41dd6c363
+
+path2f sha1:3feff949ed00a62d9f7af97c15cd8a30595e7ac7
+path2f sha256:2a7f36571c6fdbaf0e3f62751a0b25a3f4c54d2d1137b3f4af9cb794bb498e5f
+
+path2s sha1:d8ce161addc5173867a3c3c730924388daedbc38
+path2s sha256:18fd611b787c2e938ddcc248fabe4d66a150f9364763e9ec133dd01d5bb7c65a
+
+path2d sha1:58a09c23e2ca152193f2786e06986b7b6712bdbe
+path2d sha256:00e4b32b96e7e3d65d79112dcbea53238a22715f896933a62b811377e2650c17
+
+path3f sha1:0aa34cae68d0878578ad119c86ca2b5ed5b28376
+path3f sha256:09f58616b951bd571b8cb9dc76d372fbb09ab99db2393f5ab3189d26c45099ad
+
+path3s sha1:8599103969b43aff7e430efea79ca4636466794f
+path3s sha256:fce1aed087c053306f3f74c32c1a838c662bbc4551a7ac2420f5d6eb061374d0
+
+path3d sha1:21ae8269cacbe57ae09138dcc3a2887f904d02b3
+path3d sha256:9b60497be959cb830bf3f0dc82bcc9ad9e925a24e480837ade46b2295e47efe1
+
+subp3f sha1:00fb5908cb97c2564a9783c0c64087333b3b464f
+subp3f sha256:a1a9e16998c988453f18313d10375ee1d0ddefe757e710dcae0d66aa1e0c58b3
+
+subp3s sha1:6649a1ebe9e9f1c553b66f5a6e74136a07ccc57c
+subp3s sha256:81759d9f5e93c6546ecfcadb560c1ff057314b09f93fe8ec06e2d8610d34ef10
+
+subp3d sha1:3c5e5399f3a333eddecce7a9b9465b63f65f51e2
+subp3d sha256:76b4ef482d4fa1c754390344cf3851c7f883b27cf9bc999c6547928c46aeafb7
+
+root sha1:087704a96baf1c2d1c869a8b084481e121c88b5b
+root sha256:9481b52abab1b2ffeedbf9de63ce422b929f179c1b98ff7bee5f8f1bc0710751
+
+simpletree sha1:7bb943559a305bdd6bdee2cef6e5df2413c3d30a
+simpletree sha256:1710c07a6c86f9a3c7376364df04c47ee39e5a5e221fcdd84b743bc9bb7e2bc5
+EOF
+
 # updating a new file without --add should fail.
 test_expect_success 'git update-index without --add should fail adding' '
        test_must_fail git update-index should-be-empty
@@ -839,8 +917,8 @@ test_expect_success 'writing tree out with git write-tree' '
 '
 
 # we know the shape and contents of the tree and know the object ID for it.
-test_expect_success SHA1 'validate object ID of a known tree' '
-       test "$tree" = 7bb943559a305bdd6bdee2cef6e5df2413c3d30a
+test_expect_success 'validate object ID of a known tree' '
+       test "$tree" = "$(test_oid simpletree)"
     '
 
 # Removing paths.
@@ -882,16 +960,16 @@ test_expect_success 'showing stage with git ls-files --stage' '
        git ls-files --stage >current
 '
 
-test_expect_success SHA1 'validate git ls-files output for a known tree' '
-       cat >expected <<-\EOF &&
-       100644 f87290f8eb2cbbea7857214459a0739927eab154 0       path0
-       120000 15a98433ae33114b085f3eb3bb03b832b3180a01 0       path0sym
-       100644 3feff949ed00a62d9f7af97c15cd8a30595e7ac7 0       path2/file2
-       120000 d8ce161addc5173867a3c3c730924388daedbc38 0       path2/file2sym
-       100644 0aa34cae68d0878578ad119c86ca2b5ed5b28376 0       path3/file3
-       120000 8599103969b43aff7e430efea79ca4636466794f 0       path3/file3sym
-       100644 00fb5908cb97c2564a9783c0c64087333b3b464f 0       path3/subp3/file3
-       120000 6649a1ebe9e9f1c553b66f5a6e74136a07ccc57c 0       path3/subp3/file3sym
+test_expect_success 'validate git ls-files output for a known tree' '
+       cat >expected <<-EOF &&
+       100644 $(test_oid path0f) 0     path0
+       120000 $(test_oid path0s) 0     path0sym
+       100644 $(test_oid path2f) 0     path2/file2
+       120000 $(test_oid path2s) 0     path2/file2sym
+       100644 $(test_oid path3f) 0     path3/file3
+       120000 $(test_oid path3s) 0     path3/file3sym
+       100644 $(test_oid subp3f) 0     path3/subp3/file3
+       120000 $(test_oid subp3s) 0     path3/subp3/file3sym
        EOF
        test_cmp expected current
 '
@@ -900,20 +978,20 @@ test_expect_success 'writing tree out with git write-tree' '
        tree=$(git write-tree)
 '
 
-test_expect_success SHA1 'validate object ID for a known tree' '
-       test "$tree" = 087704a96baf1c2d1c869a8b084481e121c88b5b
+test_expect_success 'validate object ID for a known tree' '
+       test "$tree" = "$(test_oid root)"
 '
 
 test_expect_success 'showing tree with git ls-tree' '
     git ls-tree $tree >current
 '
 
-test_expect_success SHA1 'git ls-tree output for a known tree' '
-       cat >expected <<-\EOF &&
-       100644 blob f87290f8eb2cbbea7857214459a0739927eab154    path0
-       120000 blob 15a98433ae33114b085f3eb3bb03b832b3180a01    path0sym
-       040000 tree 58a09c23e2ca152193f2786e06986b7b6712bdbe    path2
-       040000 tree 21ae8269cacbe57ae09138dcc3a2887f904d02b3    path3
+test_expect_success 'git ls-tree output for a known tree' '
+       cat >expected <<-EOF &&
+       100644 blob $(test_oid path0f)  path0
+       120000 blob $(test_oid path0s)  path0sym
+       040000 tree $(test_oid path2d)  path2
+       040000 tree $(test_oid path3d)  path3
        EOF
        test_cmp expected current
 '
@@ -924,16 +1002,16 @@ test_expect_success 'showing tree with git ls-tree -r' '
        git ls-tree -r $tree >current
 '
 
-test_expect_success SHA1 'git ls-tree -r output for a known tree' '
-       cat >expected <<-\EOF &&
-       100644 blob f87290f8eb2cbbea7857214459a0739927eab154    path0
-       120000 blob 15a98433ae33114b085f3eb3bb03b832b3180a01    path0sym
-       100644 blob 3feff949ed00a62d9f7af97c15cd8a30595e7ac7    path2/file2
-       120000 blob d8ce161addc5173867a3c3c730924388daedbc38    path2/file2sym
-       100644 blob 0aa34cae68d0878578ad119c86ca2b5ed5b28376    path3/file3
-       120000 blob 8599103969b43aff7e430efea79ca4636466794f    path3/file3sym
-       100644 blob 00fb5908cb97c2564a9783c0c64087333b3b464f    path3/subp3/file3
-       120000 blob 6649a1ebe9e9f1c553b66f5a6e74136a07ccc57c    path3/subp3/file3sym
+test_expect_success 'git ls-tree -r output for a known tree' '
+       cat >expected <<-EOF &&
+       100644 blob $(test_oid path0f)  path0
+       120000 blob $(test_oid path0s)  path0sym
+       100644 blob $(test_oid path2f)  path2/file2
+       120000 blob $(test_oid path2s)  path2/file2sym
+       100644 blob $(test_oid path3f)  path3/file3
+       120000 blob $(test_oid path3s)  path3/file3sym
+       100644 blob $(test_oid subp3f)  path3/subp3/file3
+       120000 blob $(test_oid subp3s)  path3/subp3/file3sym
        EOF
        test_cmp expected current
 '
@@ -943,19 +1021,19 @@ test_expect_success 'showing tree with git ls-tree -r -t' '
        git ls-tree -r -t $tree >current
 '
 
-test_expect_success SHA1 'git ls-tree -r output for a known tree' '
-       cat >expected <<-\EOF &&
-       100644 blob f87290f8eb2cbbea7857214459a0739927eab154    path0
-       120000 blob 15a98433ae33114b085f3eb3bb03b832b3180a01    path0sym
-       040000 tree 58a09c23e2ca152193f2786e06986b7b6712bdbe    path2
-       100644 blob 3feff949ed00a62d9f7af97c15cd8a30595e7ac7    path2/file2
-       120000 blob d8ce161addc5173867a3c3c730924388daedbc38    path2/file2sym
-       040000 tree 21ae8269cacbe57ae09138dcc3a2887f904d02b3    path3
-       100644 blob 0aa34cae68d0878578ad119c86ca2b5ed5b28376    path3/file3
-       120000 blob 8599103969b43aff7e430efea79ca4636466794f    path3/file3sym
-       040000 tree 3c5e5399f3a333eddecce7a9b9465b63f65f51e2    path3/subp3
-       100644 blob 00fb5908cb97c2564a9783c0c64087333b3b464f    path3/subp3/file3
-       120000 blob 6649a1ebe9e9f1c553b66f5a6e74136a07ccc57c    path3/subp3/file3sym
+test_expect_success 'git ls-tree -r output for a known tree' '
+       cat >expected <<-EOF &&
+       100644 blob $(test_oid path0f)  path0
+       120000 blob $(test_oid path0s)  path0sym
+       040000 tree $(test_oid path2d)  path2
+       100644 blob $(test_oid path2f)  path2/file2
+       120000 blob $(test_oid path2s)  path2/file2sym
+       040000 tree $(test_oid path3d)  path3
+       100644 blob $(test_oid path3f)  path3/file3
+       120000 blob $(test_oid path3s)  path3/file3sym
+       040000 tree $(test_oid subp3d)  path3/subp3
+       100644 blob $(test_oid subp3f)  path3/subp3/file3
+       120000 blob $(test_oid subp3s)  path3/subp3/file3sym
        EOF
        test_cmp expected current
 '
@@ -964,26 +1042,27 @@ test_expect_success 'writing partial tree out with git write-tree --prefix' '
        ptree=$(git write-tree --prefix=path3)
 '
 
-test_expect_success SHA1 'validate object ID for a known tree' '
-       test "$ptree" = 21ae8269cacbe57ae09138dcc3a2887f904d02b3
+test_expect_success 'validate object ID for a known tree' '
+       test "$ptree" = $(test_oid path3d)
 '
 
 test_expect_success 'writing partial tree out with git write-tree --prefix' '
        ptree=$(git write-tree --prefix=path3/subp3)
 '
 
-test_expect_success SHA1 'validate object ID for a known tree' '
-       test "$ptree" = 3c5e5399f3a333eddecce7a9b9465b63f65f51e2
+test_expect_success 'validate object ID for a known tree' '
+       test "$ptree" = $(test_oid subp3d)
 '
 
 test_expect_success 'put invalid objects into the index' '
        rm -f .git/index &&
-       cat >badobjects <<-\EOF &&
-       100644 blob 1000000000000000000000000000000000000000    dir/file1
-       100644 blob 2000000000000000000000000000000000000000    dir/file2
-       100644 blob 3000000000000000000000000000000000000000    dir/file3
-       100644 blob 4000000000000000000000000000000000000000    dir/file4
-       100644 blob 5000000000000000000000000000000000000000    dir/file5
+       suffix=$(echo $ZERO_OID | sed -e "s/^.//") &&
+       cat >badobjects <<-EOF &&
+       100644 blob $(test_oid 001)     dir/file1
+       100644 blob $(test_oid 002)     dir/file2
+       100644 blob $(test_oid 003)     dir/file3
+       100644 blob $(test_oid 004)     dir/file4
+       100644 blob $(test_oid 005)     dir/file5
        EOF
        git update-index --index-info <badobjects
 '
@@ -1006,19 +1085,19 @@ test_expect_success 'git read-tree followed by write-tree should be idempotent'
        test "$newtree" = "$tree"
 '
 
-test_expect_success SHA1 'validate git diff-files output for a know cache/work tree state' '
-       cat >expected <<\EOF &&
-:100644 100644 f87290f8eb2cbbea7857214459a0739927eab154 0000000000000000000000000000000000000000 M     path0
-:120000 120000 15a98433ae33114b085f3eb3bb03b832b3180a01 0000000000000000000000000000000000000000 M     path0sym
-:100644 100644 3feff949ed00a62d9f7af97c15cd8a30595e7ac7 0000000000000000000000000000000000000000 M     path2/file2
-:120000 120000 d8ce161addc5173867a3c3c730924388daedbc38 0000000000000000000000000000000000000000 M     path2/file2sym
-:100644 100644 0aa34cae68d0878578ad119c86ca2b5ed5b28376 0000000000000000000000000000000000000000 M     path3/file3
-:120000 120000 8599103969b43aff7e430efea79ca4636466794f 0000000000000000000000000000000000000000 M     path3/file3sym
-:100644 100644 00fb5908cb97c2564a9783c0c64087333b3b464f 0000000000000000000000000000000000000000 M     path3/subp3/file3
-:120000 120000 6649a1ebe9e9f1c553b66f5a6e74136a07ccc57c 0000000000000000000000000000000000000000 M     path3/subp3/file3sym
+test_expect_success 'validate git diff-files output for a known cache/work tree state' '
+       cat >expected <<EOF &&
+:100644 100644 $(test_oid path0f) $ZERO_OID M  path0
+:120000 120000 $(test_oid path0s) $ZERO_OID M  path0sym
+:100644 100644 $(test_oid path2f) $ZERO_OID M  path2/file2
+:120000 120000 $(test_oid path2s) $ZERO_OID M  path2/file2sym
+:100644 100644 $(test_oid path3f) $ZERO_OID M  path3/file3
+:120000 120000 $(test_oid path3s) $ZERO_OID M  path3/file3sym
+:100644 100644 $(test_oid subp3f) $ZERO_OID M  path3/subp3/file3
+:120000 120000 $(test_oid subp3s) $ZERO_OID M  path3/subp3/file3sym
 EOF
        git diff-files >current &&
-       test_cmp current expected
+       test_cmp expected current
 '
 
 test_expect_success 'git update-index --refresh should succeed' '
@@ -1031,23 +1110,23 @@ test_expect_success 'no diff after checkout and git update-index --refresh' '
 '
 
 ################################################################
-P=087704a96baf1c2d1c869a8b084481e121c88b5b
+P=$(test_oid root)
 
-test_expect_success SHA1 'git commit-tree records the correct tree in a commit' '
+test_expect_success 'git commit-tree records the correct tree in a commit' '
        commit0=$(echo NO | git commit-tree $P) &&
        tree=$(git show --pretty=raw $commit0 |
                 sed -n -e "s/^tree //p" -e "/^author /q") &&
        test "z$tree" = "z$P"
 '
 
-test_expect_success SHA1 'git commit-tree records the correct parent in a commit' '
+test_expect_success 'git commit-tree records the correct parent in a commit' '
        commit1=$(echo NO | git commit-tree $P -p $commit0) &&
        parent=$(git show --pretty=raw $commit1 |
                sed -n -e "s/^parent //p" -e "/^author /q") &&
        test "z$commit0" = "z$parent"
 '
 
-test_expect_success SHA1 'git commit-tree omits duplicated parent in a commit' '
+test_expect_success 'git commit-tree omits duplicated parent in a commit' '
        commit2=$(echo NO | git commit-tree $P -p $commit0 -p $commit0) &&
             parent=$(git show --pretty=raw $commit2 |
                sed -n -e "s/^parent //p" -e "/^author /q" |
index 3691023d510a0d97bf1390b781afe1ac9fa270f4..0aa9908ea12d7592841e10ac09afe8b36f37ce7f 100755 (executable)
@@ -92,11 +92,12 @@ test_expect_success 'enter_repo non-strict mode' '
                mv .git .realgit &&
                echo "gitdir: .realgit" >.git
        ) &&
+       head=$(git -C enter_repo rev-parse HEAD) &&
        git ls-remote enter_repo >actual &&
-       cat >expected <<-\EOF &&
-       946e985ab20de757ca5b872b16d64e92ff3803a9        HEAD
-       946e985ab20de757ca5b872b16d64e92ff3803a9        refs/heads/master
-       946e985ab20de757ca5b872b16d64e92ff3803a9        refs/tags/foo
+       cat >expected <<-EOF &&
+       $head   HEAD
+       $head   refs/heads/master
+       $head   refs/tags/foo
        EOF
        test_cmp expected actual
 '
@@ -106,21 +107,23 @@ test_expect_success 'enter_repo linked checkout' '
                cd enter_repo &&
                git worktree add  ../foo refs/tags/foo
        ) &&
+       head=$(git -C enter_repo rev-parse HEAD) &&
        git ls-remote foo >actual &&
-       cat >expected <<-\EOF &&
-       946e985ab20de757ca5b872b16d64e92ff3803a9        HEAD
-       946e985ab20de757ca5b872b16d64e92ff3803a9        refs/heads/master
-       946e985ab20de757ca5b872b16d64e92ff3803a9        refs/tags/foo
+       cat >expected <<-EOF &&
+       $head   HEAD
+       $head   refs/heads/master
+       $head   refs/tags/foo
        EOF
        test_cmp expected actual
 '
 
 test_expect_success 'enter_repo strict mode' '
+       head=$(git -C enter_repo rev-parse HEAD) &&
        git ls-remote --upload-pack="git upload-pack --strict" foo/.git >actual &&
-       cat >expected <<-\EOF &&
-       946e985ab20de757ca5b872b16d64e92ff3803a9        HEAD
-       946e985ab20de757ca5b872b16d64e92ff3803a9        refs/heads/master
-       946e985ab20de757ca5b872b16d64e92ff3803a9        refs/tags/foo
+       cat >expected <<-EOF &&
+       $head   HEAD
+       $head   refs/heads/master
+       $head   refs/tags/foo
        EOF
        test_cmp expected actual
 '
diff --git a/t/t0014-alias.sh b/t/t0014-alias.sh
new file mode 100755 (executable)
index 0000000..a070e64
--- /dev/null
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+test_description='git command aliasing'
+
+. ./test-lib.sh
+
+test_expect_success 'nested aliases - internal execution' '
+       git config alias.nested-internal-1 nested-internal-2 &&
+       git config alias.nested-internal-2 status &&
+       git nested-internal-1 >output &&
+       test_i18ngrep "^On branch " output
+'
+
+test_expect_success 'nested aliases - mixed execution' '
+       git config alias.nested-external-1 nested-external-2 &&
+       git config alias.nested-external-2 "!git nested-external-3" &&
+       git config alias.nested-external-3 status &&
+       git nested-external-1 >output &&
+       test_i18ngrep "^On branch " output
+'
+
+test_expect_success 'looping aliases - internal execution' '
+       git config alias.loop-internal-1 loop-internal-2 &&
+       git config alias.loop-internal-2 loop-internal-3 &&
+       git config alias.loop-internal-3 loop-internal-2 &&
+       test_must_fail git loop-internal-1 2>output &&
+       test_i18ngrep "^fatal: alias loop detected: expansion of" output
+'
+
+# This test is disabled until external loops are fixed, because it would
+# block the test suite for a full minute.
+#
+#test_expect_failure 'looping aliases - mixed execution' '
+#      git config alias.loop-mixed-1 loop-mixed-2 &&
+#      git config alias.loop-mixed-2 "!git loop-mixed-1" &&
+#      test_must_fail git loop-mixed-1 2>output &&
+#      test_i18ngrep "^fatal: alias loop detected: expansion of" output
+#'
+
+test_done
index 308cd28f3bd7304a63628e8a64014e8d20faa82f..fd5f1ac649dc411bf3c70dcb54f7965e4006aca8 100755 (executable)
@@ -166,10 +166,10 @@ test_expect_success expanded_in_repo '
        rm -f expanded-keywords expanded-keywords-crlf &&
 
        git checkout -- expanded-keywords &&
-       test_cmp expanded-keywords expected-output &&
+       test_cmp expected-output expanded-keywords &&
 
        git checkout -- expanded-keywords-crlf &&
-       test_cmp expanded-keywords-crlf expected-output-crlf
+       test_cmp expected-output-crlf expanded-keywords-crlf
 '
 
 # The use of %f in a filter definition is expanded to the path to
index 5b0560fa20e3459a3fa62753ebfafaa2afa276e1..17d0c18feb84e2c47167b9b54f71f192d78b0e9b 100755 (executable)
@@ -8,7 +8,7 @@ test_description='our own option parser'
 . ./test-lib.sh
 
 cat >expect <<\EOF
-usage: test-parse-options <options>
+usage: test-tool parse-options <options>
 
     A helper function for the parse-options API.
 
@@ -52,7 +52,7 @@ Standard options
 EOF
 
 test_expect_success 'test help' '
-       test_must_fail test-parse-options -h >output 2>output.err &&
+       test_must_fail test-tool parse-options -h >output 2>output.err &&
        test_must_be_empty output.err &&
        test_i18ncmp expect output
 '
@@ -64,7 +64,7 @@ check () {
        shift &&
        expect="$1" &&
        shift &&
-       test-parse-options --expect="$what $expect" "$@"
+       test-tool parse-options --expect="$what $expect" "$@"
 }
 
 check_unknown_i18n() {
@@ -75,7 +75,7 @@ check_unknown_i18n() {
                echo error: unknown switch \`${1#-}\' >expect ;;
        esac &&
        cat expect.err >>expect &&
-       test_must_fail test-parse-options $* >output 2>output.err &&
+       test_must_fail test-tool parse-options $* >output 2>output.err &&
        test_must_be_empty output &&
        test_i18ncmp expect output.err
 }
@@ -133,7 +133,7 @@ file: prefix/my.file
 EOF
 
 test_expect_success 'short options' '
-       test-parse-options -s123 -b -i 1729 -m 16k -b -vv -n -F my.file \
+       test-tool parse-options -s123 -b -i 1729 -m 16k -b -vv -n -F my.file \
        >output 2>output.err &&
        test_cmp expect output &&
        test_must_be_empty output.err
@@ -153,7 +153,7 @@ file: prefix/fi.le
 EOF
 
 test_expect_success 'long options' '
-       test-parse-options --boolean --integer 1729 --magnitude 16k \
+       test-tool parse-options --boolean --integer 1729 --magnitude 16k \
                --boolean --string2=321 --verbose --verbose --no-dry-run \
                --abbrev=10 --file fi.le --obsolete \
                >output 2>output.err &&
@@ -162,9 +162,9 @@ test_expect_success 'long options' '
 '
 
 test_expect_success 'missing required value' '
-       test_expect_code 129 test-parse-options -s &&
-       test_expect_code 129 test-parse-options --string &&
-       test_expect_code 129 test-parse-options --file
+       test_expect_code 129 test-tool parse-options -s &&
+       test_expect_code 129 test-tool parse-options --string &&
+       test_expect_code 129 test-tool parse-options --file
 '
 
 cat >expect <<\EOF
@@ -184,7 +184,7 @@ arg 02: --boolean
 EOF
 
 test_expect_success 'intermingled arguments' '
-       test-parse-options a1 --string 123 b1 --boolean -j 13 -- --boolean \
+       test-tool parse-options a1 --string 123 b1 --boolean -j 13 -- --boolean \
                >output 2>output.err &&
        test_must_be_empty output.err &&
        test_cmp expect output
@@ -204,21 +204,21 @@ file: (not set)
 EOF
 
 test_expect_success 'unambiguously abbreviated option' '
-       test-parse-options --int 2 --boolean --no-bo >output 2>output.err &&
+       test-tool parse-options --int 2 --boolean --no-bo >output 2>output.err &&
        test_must_be_empty output.err &&
        test_cmp expect output
 '
 
 test_expect_success 'unambiguously abbreviated option with "="' '
-       test-parse-options --expect="integer: 2" --int=2
+       test-tool parse-options --expect="integer: 2" --int=2
 '
 
 test_expect_success 'ambiguously abbreviated option' '
-       test_expect_code 129 test-parse-options --strin 123
+       test_expect_code 129 test-tool parse-options --strin 123
 '
 
 test_expect_success 'non ambiguous option (after two options it abbreviates)' '
-       test-parse-options --expect="string: 123" --st 123
+       test-tool parse-options --expect="string: 123" --st 123
 '
 
 cat >typo.err <<\EOF
@@ -226,7 +226,7 @@ error: did you mean `--boolean` (with two dashes ?)
 EOF
 
 test_expect_success 'detect possible typos' '
-       test_must_fail test-parse-options -boolean >output 2>output.err &&
+       test_must_fail test-tool parse-options -boolean >output 2>output.err &&
        test_must_be_empty output &&
        test_cmp typo.err output.err
 '
@@ -236,13 +236,13 @@ error: did you mean `--ambiguous` (with two dashes ?)
 EOF
 
 test_expect_success 'detect possible typos' '
-       test_must_fail test-parse-options -ambiguous >output 2>output.err &&
+       test_must_fail test-tool parse-options -ambiguous >output 2>output.err &&
        test_must_be_empty output &&
        test_cmp typo.err output.err
 '
 
 test_expect_success 'keep some options as arguments' '
-       test-parse-options --expect="arg 00: --quux" --quux
+       test-tool parse-options --expect="arg 00: --quux" --quux
 '
 
 cat >expect <<\EOF
@@ -260,7 +260,7 @@ arg 00: foo
 EOF
 
 test_expect_success 'OPT_DATE() works' '
-       test-parse-options -t "1970-01-01 00:00:01 +0000" \
+       test-tool parse-options -t "1970-01-01 00:00:01 +0000" \
                foo -q >output 2>output.err &&
        test_must_be_empty output.err &&
        test_cmp expect output
@@ -281,13 +281,13 @@ file: (not set)
 EOF
 
 test_expect_success 'OPT_CALLBACK() and OPT_BIT() work' '
-       test-parse-options --length=four -b -4 >output 2>output.err &&
+       test-tool parse-options --length=four -b -4 >output 2>output.err &&
        test_must_be_empty output.err &&
        test_cmp expect output
 '
 
 test_expect_success 'OPT_CALLBACK() and callback errors work' '
-       test_must_fail test-parse-options --no-length >output 2>output.err &&
+       test_must_fail test-tool parse-options --no-length >output 2>output.err &&
        test_must_be_empty output &&
        test_must_be_empty output.err
 '
@@ -306,31 +306,31 @@ file: (not set)
 EOF
 
 test_expect_success 'OPT_BIT() and OPT_SET_INT() work' '
-       test-parse-options --set23 -bbbbb --no-or4 >output 2>output.err &&
+       test-tool parse-options --set23 -bbbbb --no-or4 >output 2>output.err &&
        test_must_be_empty output.err &&
        test_cmp expect output
 '
 
 test_expect_success 'OPT_NEGBIT() and OPT_SET_INT() work' '
-       test-parse-options --set23 -bbbbb --neg-or4 >output 2>output.err &&
+       test-tool parse-options --set23 -bbbbb --neg-or4 >output 2>output.err &&
        test_must_be_empty output.err &&
        test_cmp expect output
 '
 
 test_expect_success 'OPT_BIT() works' '
-       test-parse-options --expect="boolean: 6" -bb --or4
+       test-tool parse-options --expect="boolean: 6" -bb --or4
 '
 
 test_expect_success 'OPT_NEGBIT() works' '
-       test-parse-options --expect="boolean: 6" -bb --no-neg-or4
+       test-tool parse-options --expect="boolean: 6" -bb --no-neg-or4
 '
 
 test_expect_success 'OPT_COUNTUP() with PARSE_OPT_NODASH works' '
-       test-parse-options --expect="boolean: 6" + + + + + +
+       test-tool parse-options --expect="boolean: 6" + + + + + +
 '
 
 test_expect_success 'OPT_NUMBER_CALLBACK() works' '
-       test-parse-options --expect="integer: 12345" -12345
+       test-tool parse-options --expect="integer: 12345" -12345
 '
 
 cat >expect <<\EOF
@@ -347,7 +347,7 @@ file: (not set)
 EOF
 
 test_expect_success 'negation of OPT_NONEG flags is not ambiguous' '
-       test-parse-options --no-ambig >output 2>output.err &&
+       test-tool parse-options --no-ambig >output 2>output.err &&
        test_must_be_empty output.err &&
        test_cmp expect output
 '
@@ -358,38 +358,38 @@ list: bar
 list: baz
 EOF
 test_expect_success '--list keeps list of strings' '
-       test-parse-options --list foo --list=bar --list=baz >output &&
+       test-tool parse-options --list foo --list=bar --list=baz >output &&
        test_cmp expect output
 '
 
 test_expect_success '--no-list resets list' '
-       test-parse-options --list=other --list=irrelevant --list=options \
+       test-tool parse-options --list=other --list=irrelevant --list=options \
                --no-list --list=foo --list=bar --list=baz >output &&
        test_cmp expect output
 '
 
 test_expect_success 'multiple quiet levels' '
-       test-parse-options --expect="quiet: 3" -q -q -q
+       test-tool parse-options --expect="quiet: 3" -q -q -q
 '
 
 test_expect_success 'multiple verbose levels' '
-       test-parse-options --expect="verbose: 3" -v -v -v
+       test-tool parse-options --expect="verbose: 3" -v -v -v
 '
 
 test_expect_success '--no-quiet sets --quiet to 0' '
-       test-parse-options --expect="quiet: 0" --no-quiet
+       test-tool parse-options --expect="quiet: 0" --no-quiet
 '
 
 test_expect_success '--no-quiet resets multiple -q to 0' '
-       test-parse-options --expect="quiet: 0" -q -q -q --no-quiet
+       test-tool parse-options --expect="quiet: 0" -q -q -q --no-quiet
 '
 
 test_expect_success '--no-verbose sets verbose to 0' '
-       test-parse-options --expect="verbose: 0" --no-verbose
+       test-tool parse-options --expect="verbose: 0" --no-verbose
 '
 
 test_expect_success '--no-verbose resets multiple verbose to 0' '
-       test-parse-options --expect="verbose: 0" -v -v -v --no-verbose
+       test-tool parse-options --expect="verbose: 0" -v -v -v --no-verbose
 '
 
 test_done
diff --git a/t/t0051-windows-named-pipe.sh b/t/t0051-windows-named-pipe.sh
new file mode 100755 (executable)
index 0000000..10ac92d
--- /dev/null
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+test_description='Windows named pipes'
+
+. ./test-lib.sh
+
+test_expect_success MINGW 'o_append write to named pipe' '
+       GIT_TRACE="$(pwd)/expect" git status >/dev/null 2>&1 &&
+       { test-tool windows-named-pipe t0051 >actual 2>&1 & } &&
+       pid=$! &&
+       sleep 1 &&
+       GIT_TRACE=//./pipe/t0051 git status >/dev/null 2>warning &&
+       wait $pid &&
+       test_cmp expect actual
+'
+
+test_done
index 67484502a007e3fed09fe381898bee52d21d07bb..5dda570b9a1ef3139165a4b901e16f64a3263209 100755 (executable)
@@ -3,30 +3,30 @@
 test_description='basic tests for the SHA1 array implementation'
 . ./test-lib.sh
 
-echo20 () {
+echoid () {
        prefix="${1:+$1 }"
        shift
        while test $# -gt 0
        do
-               echo "$prefix$1$1$1$1$1$1$1$1$1$1$1$1$1$1$1$1$1$1$1$1"
+               echo "$prefix$ZERO_OID" | sed -e "s/00/$1/g"
                shift
        done
 }
 
 test_expect_success 'ordered enumeration' '
-       echo20 "" 44 55 88 aa >expect &&
+       echoid "" 44 55 88 aa >expect &&
        {
-               echo20 append 88 44 aa 55 &&
+               echoid append 88 44 aa 55 &&
                echo for_each_unique
        } | test-tool sha1-array >actual &&
        test_cmp expect actual
 '
 
 test_expect_success 'ordered enumeration with duplicate suppression' '
-       echo20 "" 44 55 88 aa >expect &&
+       echoid "" 44 55 88 aa >expect &&
        {
-               echo20 append 88 44 aa 55 &&
-               echo20 append 88 44 aa 55 &&
+               echoid append 88 44 aa 55 &&
+               echoid append 88 44 aa 55 &&
                echo for_each_unique
        } | test-tool sha1-array >actual &&
        test_cmp expect actual
@@ -34,8 +34,8 @@ test_expect_success 'ordered enumeration with duplicate suppression' '
 
 test_expect_success 'lookup' '
        {
-               echo20 append 88 44 aa 55 &&
-               echo20 lookup 55
+               echoid append 88 44 aa 55 &&
+               echoid lookup 55
        } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -eq 1
@@ -43,8 +43,8 @@ test_expect_success 'lookup' '
 
 test_expect_success 'lookup non-existing entry' '
        {
-               echo20 append 88 44 aa 55 &&
-               echo20 lookup 33
+               echoid append 88 44 aa 55 &&
+               echoid lookup 33
        } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -lt 0
@@ -52,9 +52,9 @@ test_expect_success 'lookup non-existing entry' '
 
 test_expect_success 'lookup with duplicates' '
        {
-               echo20 append 88 44 aa 55 &&
-               echo20 append 88 44 aa 55 &&
-               echo20 lookup 55
+               echoid append 88 44 aa 55 &&
+               echoid append 88 44 aa 55 &&
+               echoid lookup 55
        } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -ge 2 &&
@@ -63,19 +63,24 @@ test_expect_success 'lookup with duplicates' '
 
 test_expect_success 'lookup non-existing entry with duplicates' '
        {
-               echo20 append 88 44 aa 55 &&
-               echo20 append 88 44 aa 55 &&
-               echo20 lookup 66
+               echoid append 88 44 aa 55 &&
+               echoid append 88 44 aa 55 &&
+               echoid lookup 66
        } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -lt 0
 '
 
 test_expect_success 'lookup with almost duplicate values' '
+       # n-1 5s
+       root=$(echoid "" 55) &&
+       root=${root%5} &&
        {
-               echo "append 5555555555555555555555555555555555555555" &&
-               echo "append 555555555555555555555555555555555555555f" &&
-               echo20 lookup 55
+               id1="${root}5" &&
+               id2="${root}f" &&
+               echo "append $id1" &&
+               echo "append $id2" &&
+               echoid lookup 55
        } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -eq 0
@@ -83,8 +88,8 @@ test_expect_success 'lookup with almost duplicate values' '
 
 test_expect_success 'lookup with single duplicate value' '
        {
-               echo20 append 55 55 &&
-               echo20 lookup 55
+               echoid append 55 55 &&
+               echoid lookup 55
        } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -ge 0 &&
index 7de40141ca84dc53657d81c0254870b387339f79..504334e552251ed0c1f74536f6fd09ffccda615d 100755 (executable)
@@ -161,6 +161,24 @@ test_expect_success PERL 'commit --interactive gives cache-tree on partial commi
        test_cache_tree
 '
 
+test_expect_success PERL 'commit -p with shrinking cache-tree' '
+       mkdir -p deep/subdir &&
+       echo content >deep/subdir/file &&
+       git add deep &&
+       git commit -m add &&
+       git rm -r deep &&
+
+       before=$(wc -c <.git/index) &&
+       git commit -m delete -p &&
+       after=$(wc -c <.git/index) &&
+
+       # double check that the index shrank
+       test $before -gt $after &&
+
+       # and that our index was not corrupted
+       git fsck
+'
+
 test_expect_success 'commit in child dir has cache-tree' '
        mkdir dir &&
        >dir/child.t &&
@@ -243,13 +261,16 @@ test_expect_success 'no phantom error when switching trees' '
 '
 
 test_expect_success 'switching trees does not invalidate shared index' '
-       git update-index --split-index &&
-       >split &&
-       git add split &&
-       test-tool dump-split-index .git/index | grep -v ^own >before &&
-       git commit -m "as-is" &&
-       test-tool dump-split-index .git/index | grep -v ^own >after &&
-       test_cmp before after
+       (
+               sane_unset GIT_TEST_SPLIT_INDEX &&
+               git update-index --split-index &&
+               >split &&
+               git add split &&
+               test-tool dump-split-index .git/index | grep -v ^own >before &&
+               git commit -m "as-is" &&
+               test-tool dump-split-index .git/index | grep -v ^own >after &&
+               test_cmp before after
+       )
 '
 
 test_done
index 128130066499feb5bdad705b6fb0ef03bf446fe9..cfd0655ea19b0caa8d55fd41b5b76abde4de1c8b 100755 (executable)
@@ -170,6 +170,18 @@ test_expect_success 'fetching of missing objects' '
        git verify-pack --verbose "$IDX" | grep "$HASH"
 '
 
+test_expect_success 'fetching of missing objects works with ref-in-want enabled' '
+       # ref-in-want requires protocol version 2
+       git -C server config protocol.version 2 &&
+       git -C server config uploadpack.allowrefinwant 1 &&
+       git -C repo config protocol.version 2 &&
+
+       rm -rf repo/.git/objects/* &&
+       rm -f trace &&
+       GIT_TRACE_PACKET="$(pwd)/trace" git -C repo cat-file -p "$HASH" &&
+       grep "git< fetch=.*ref-in-want" trace
+'
+
 test_expect_success 'rev-list stops traversal at missing and promised commit' '
        rm -rf repo &&
        test_create_repo repo &&
@@ -181,7 +193,7 @@ test_expect_success 'rev-list stops traversal at missing and promised commit' '
 
        git -C repo config core.repositoryformatversion 1 &&
        git -C repo config extensions.partialclone "arbitrary string" &&
-       git -C repo rev-list --exclude-promisor-objects --objects bar >out &&
+       GIT_TEST_COMMIT_GRAPH=0 git -C repo rev-list --exclude-promisor-objects --objects bar >out &&
        grep $(git -C repo rev-parse bar) out &&
        ! grep $FOO out
 '
index 7f19d591f250bd61f7d25a5e5e0fae2ca1d8f118..43c4be1e5ef559d10ca276ef777a8fe7ba8a3971 100755 (executable)
@@ -140,15 +140,17 @@ test_expect_success '--batch-check without %(rest) considers whole line' '
        test_cmp expect actual
 '
 
+test_oid_init
+
 tree_sha1=$(git write-tree)
-tree_size=33
+tree_size=$(($(test_oid rawsz) + 13))
 tree_pretty_content="100644 blob $hello_sha1   hello"
 
 run_tests 'tree' $tree_sha1 $tree_size "" "$tree_pretty_content"
 
 commit_message="Initial commit"
 commit_sha1=$(echo_without_newline "$commit_message" | git commit-tree $tree_sha1)
-commit_size=177
+commit_size=$(($(test_oid hexsz) + 137))
 commit_content="tree $tree_sha1
 author $GIT_AUTHOR_NAME <$GIT_AUTHOR_EMAIL> 0000000000 +0000
 committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 0000000000 +0000
@@ -218,8 +220,8 @@ test_expect_success "--batch-check for a non-existent hash" '
     test "0000000000000000000000000000000000000042 missing
 0000000000000000000000000000000000000084 missing" = \
     "$( ( echo 0000000000000000000000000000000000000042;
-         echo_without_newline 0000000000000000000000000000000000000084; ) \
-       git cat-file --batch-check)"
+        echo_without_newline 0000000000000000000000000000000000000084; ) |
+       git cat-file --batch-check)"
 '
 
 test_expect_success "--batch for an existent and a non-existent hash" '
@@ -227,8 +229,8 @@ test_expect_success "--batch for an existent and a non-existent hash" '
 $tag_content
 0000000000000000000000000000000000000000 missing" = \
     "$( ( echo $tag_sha1;
-         echo_without_newline 0000000000000000000000000000000000000000; ) \
-       git cat-file --batch)"
+        echo_without_newline 0000000000000000000000000000000000000000; ) |
+       git cat-file --batch)"
 '
 
 test_expect_success "--batch-check for an empty line" '
index 1f61eb3e88e30cb2b62040f262a8d24d2758dcfc..25d7c700f6f480076f3ad00e6eeb709f8de517fa 100755 (executable)
@@ -31,6 +31,20 @@ test_expect_success 'perform sparse checkout of master' '
        test_path_is_file c
 '
 
+test_expect_success 'checkout -b checkout.optimizeNewBranch interaction' '
+       cp .git/info/sparse-checkout .git/info/sparse-checkout.bak &&
+       test_when_finished "
+               mv -f .git/info/sparse-checkout.bak .git/info/sparse-checkout
+               git checkout master
+       " &&
+       echo "/b" >>.git/info/sparse-checkout &&
+       test "$(git ls-files -t b)" = "S b" &&
+       git -c checkout.optimizeNewBranch=true checkout -b fast &&
+       test "$(git ls-files -t b)" = "S b" &&
+       git checkout -b slow &&
+       test "$(git ls-files -t b)" = "H b"
+'
+
 test_expect_success 'merge feature branch into sparse checkout of master' '
        git merge feature &&
        test_path_is_file a &&
index cdf1fed5d1c33e659b5d2c9bfb64423b13f05e94..e2cd50ecfcbcd849661cb46f23c2ddb54e9ad19e 100755 (executable)
@@ -1001,7 +1001,7 @@ EOF
 
 test_expect_success 'value continued on next line' '
        git config --list > result &&
-       test_cmp result expect
+       test_cmp expect result
 '
 
 cat > .git/config <<\EOF
@@ -1770,8 +1770,9 @@ test_expect_success '--show-origin stdin with file include' '
        cat >expect <<-EOF &&
                file:$INCLUDE_DIR/stdin.include include
        EOF
-       echo "[include]path=\"$INCLUDE_DIR\"/stdin.include" \
-               | git config --show-origin --includes --file - user.stdin >output &&
+       echo "[include]path=\"$INCLUDE_DIR\"/stdin.include" |
+       git config --show-origin --includes --file - user.stdin >output &&
+
        test_cmp expect output
 '
 
@@ -1881,7 +1882,7 @@ test_expect_success '--replace-all does not invent newlines' '
        Qkey = b
        EOF
        git config --replace-all abc.key b &&
-       test_cmp .git/config expect
+       test_cmp expect .git/config
 '
 
 test_done
index 3b92083e19d09bad951519201f9605a17b168c1a..0000e664e7b6d8c62cc12032f71224b832829e9d 100755 (executable)
@@ -14,7 +14,7 @@ setup() {
 check() {
        echo "$2" >expected
        git config --get "$1" >actual 2>&1
-       test_cmp actual expected
+       test_cmp expected actual
 }
 
 # 'check section.key regex value' verifies that the entry for
@@ -22,7 +22,7 @@ check() {
 check_regex() {
        echo "$3" >expected
        git config --get "$1" "$2" >actual 2>&1
-       test_cmp actual expected
+       test_cmp expected actual
 }
 
 test_expect_success 'modify same key' '
index 7c8df2095574f66f6d434f7717228f6f76a2a908..663f17c5fe4757a9eea8c749cf1a158a05f4a8df 100755 (executable)
@@ -346,7 +346,7 @@ test_expect_success "verifying $m's log (logged by config)" '
 
 git update-ref $m $D
 cat >.git/logs/$m <<EOF
-0000000000000000000000000000000000000000 $C $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1117150320 -0500
+$Z $C $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1117150320 -0500
 $C $A $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1117150350 -0500
 $A $B $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1117150380 -0500
 $F $Z $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1117150680 -0500
@@ -807,6 +807,37 @@ test_expect_success 'stdin delete symref works option no-deref' '
        test_cmp expect actual
 '
 
+test_expect_success 'stdin update symref works flag --no-deref' '
+       git symbolic-ref TESTSYMREFONE $b &&
+       git symbolic-ref TESTSYMREFTWO $b &&
+       cat >stdin <<-EOF &&
+       update TESTSYMREFONE $a $b
+       update TESTSYMREFTWO $a $b
+       EOF
+       git update-ref --no-deref --stdin <stdin &&
+       git rev-parse TESTSYMREFONE TESTSYMREFTWO >expect &&
+       git rev-parse $a $a >actual &&
+       test_cmp expect actual &&
+       git rev-parse $m~1 >expect &&
+       git rev-parse $b >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'stdin delete symref works flag --no-deref' '
+       git symbolic-ref TESTSYMREFONE $b &&
+       git symbolic-ref TESTSYMREFTWO $b &&
+       cat >stdin <<-EOF &&
+       delete TESTSYMREFONE $b
+       delete TESTSYMREFTWO $b
+       EOF
+       git update-ref --no-deref --stdin <stdin &&
+       test_must_fail git rev-parse --verify -q TESTSYMREFONE &&
+       test_must_fail git rev-parse --verify -q TESTSYMREFTWO &&
+       git rev-parse $m~1 >expect &&
+       git rev-parse $b >actual &&
+       test_cmp expect actual
+'
+
 test_expect_success 'stdin delete ref works with right old value' '
        echo "delete $b $m~1" >stdin &&
        git update-ref --stdin <stdin &&
index 2a42a589a42cb9c086561be2df5540fa72af2ddf..51a4f4c0ac694ff421e3a17b174697122ecf4b1a 100755 (executable)
@@ -559,9 +559,9 @@ test_expect_success 'no bogus intermediate values during delete' '
        {
                # Note: the following command is intentionally run in the
                # background. We increase the timeout so that `update-ref`
-               # attempts to acquire the `packed-refs` lock for longer than
-               # it takes for us to do the check then delete it:
-               git -c core.packedrefstimeout=3000 update-ref -d $prefix/foo &
+               # attempts to acquire the `packed-refs` lock for much longer
+               # than it takes for us to do the check then delete it:
+               git -c core.packedrefstimeout=30000 update-ref -d $prefix/foo &
        } &&
        pid2=$! &&
        # Give update-ref plenty of time to get to the point where it tries
index a74c38b5fb22a6d274c26ed008c6bec355801e34..331899ddc4bd33805c83ebad8983233da7d67ec8 100755 (executable)
@@ -54,7 +54,7 @@ test_expect_success 'for_each_ref(refs/heads/)' '
 '
 
 test_expect_success 'for_each_ref() is sorted' '
-       $RUN for-each-ref refs/heads/ | cut -c 42- >actual &&
+       $RUN for-each-ref refs/heads/ | cut -d" " -f 2- >actual &&
        sort actual > expected &&
        test_cmp expected actual
 '
@@ -71,7 +71,7 @@ test_expect_success 'verify_ref(new-master)' '
 '
 
 test_expect_success 'for_each_reflog()' '
-       $RUN for-each-reflog | sort -k2 | cut -c 42- >actual &&
+       $RUN for-each-reflog | sort -k2 | cut -d" " -f 2- >actual &&
        cat >expected <<-\EOF &&
        HEAD 0x1
        refs/heads/master 0x0
index e093782cc37c495a122eb8676797b1988b828c29..d199d872fb1996922a0a77058955c105aef4377c 100755 (executable)
@@ -39,7 +39,7 @@ test_expect_success 'rename_refs() not allowed' '
 '
 
 test_expect_success 'for_each_ref(refs/heads/)' '
-       $RUN for-each-ref refs/heads/ | cut -c 42- >actual &&
+       $RUN for-each-ref refs/heads/ | cut -d" " -f 2- >actual &&
        cat >expected <<-\EOF &&
        master 0x0
        new-master 0x0
@@ -48,7 +48,7 @@ test_expect_success 'for_each_ref(refs/heads/)' '
 '
 
 test_expect_success 'for_each_ref() is sorted' '
-       $RUN for-each-ref refs/heads/ | cut -c 42- >actual &&
+       $RUN for-each-ref refs/heads/ | cut -d" " -f 2- >actual &&
        sort actual > expected &&
        test_cmp expected actual
 '
@@ -65,7 +65,7 @@ test_expect_success 'verify_ref(new-master)' '
 '
 
 test_expect_success 'for_each_reflog()' '
-       $RUN for-each-reflog | sort | cut -c 42- >actual &&
+       $RUN for-each-reflog | sort | cut -d" " -f 2- >actual &&
        cat >expected <<-\EOF &&
        HEAD 0x1
        refs/heads/master 0x0
index 4623ae15c4fd45df64b4acb94308e5ac4ca8f1f5..9a848581180ff73b41b345e448e427457b487ff7 100755 (executable)
@@ -58,7 +58,7 @@ test_expect_success 'for_each_reflog()' '
        mkdir -p     .git/worktrees/wt/logs/refs/bisect &&
        echo $ZERO_OID > .git/worktrees/wt/logs/refs/bisect/wt-random &&
 
-       $RWT for-each-reflog | cut -c 42- | sort >actual &&
+       $RWT for-each-reflog | cut -d" " -f 2- | sort >actual &&
        cat >expected <<-\EOF &&
        HEAD 0x1
        PSEUDO-WT 0x0
@@ -68,7 +68,7 @@ test_expect_success 'for_each_reflog()' '
        EOF
        test_cmp expected actual &&
 
-       $RMAIN for-each-reflog | cut -c 42- | sort >actual &&
+       $RMAIN for-each-reflog | cut -d" " -f 2- | sort >actual &&
        cat >expected <<-\EOF &&
        HEAD 0x1
        PSEUDO-MAIN 0x0
index b3b4d83eafc4a031618a3a89fd8ca50353f1a844..be22398a8545419f3d12d4d48cb636c5273b5206 100755 (executable)
@@ -57,7 +57,7 @@ test_expect_success 'disable split index' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       BASE=$(test-tool dump-split-index .git/index | grep "^own" | sed "s/own/base/") &&
+       BASE=$(test-tool dump-split-index .git/index | sed -n "s/^own/base/p") &&
        test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        not a split index
index 07d292317cdfcbca784a19c6c8f130d09c98fe98..286bba35d8ae06f97cbf59263cd472975036deaa 100755 (executable)
@@ -552,4 +552,22 @@ test_expect_success '"add" in bare repo invokes post-checkout hook' '
        test_cmp hook.expect goozy/hook.actual
 '
 
+test_expect_success '"add" an existing but missing worktree' '
+       git worktree add --detach pneu &&
+       test_must_fail git worktree add --detach pneu &&
+       rm -fr pneu &&
+       test_must_fail git worktree add --detach pneu &&
+       git worktree add --force --detach pneu
+'
+
+test_expect_success '"add" an existing locked but missing worktree' '
+       git worktree add --detach gnoo &&
+       git worktree lock gnoo &&
+       test_when_finished "git worktree unlock gnoo || :" &&
+       rm -fr gnoo &&
+       test_must_fail git worktree add --detach gnoo &&
+       test_must_fail git worktree add --force --detach gnoo &&
+       git worktree add --force --force --detach gnoo
+'
+
 test_done
index 5f7d45b7b7fa91a497d6f8d96a7235fc0024919e..33c033773367a135d4cb7eb23f9e9d3131197174 100755 (executable)
@@ -98,6 +98,20 @@ test_expect_success 'move worktree to another dir' '
        test_cmp expected2 actual2
 '
 
+test_expect_success 'move locked worktree (force)' '
+       test_when_finished "
+               git worktree unlock flump || :
+               git worktree remove flump || :
+               git worktree unlock ploof || :
+               git worktree remove ploof || :
+               " &&
+       git worktree add --detach flump &&
+       git worktree lock flump &&
+       test_must_fail git worktree move flump ploof &&
+       test_must_fail git worktree move --force flump ploof &&
+       git worktree move --force --force flump ploof
+'
+
 test_expect_success 'remove main worktree' '
        test_must_fail git worktree remove .
 '
@@ -141,4 +155,34 @@ test_expect_success 'NOT remove missing-but-locked worktree' '
        test_path_is_dir .git/worktrees/gone-but-locked
 '
 
+test_expect_success 'proper error when worktree not found' '
+       for i in noodle noodle/bork
+       do
+               test_must_fail git worktree lock $i 2>err &&
+               test_i18ngrep "not a working tree" err || return 1
+       done
+'
+
+test_expect_success 'remove locked worktree (force)' '
+       git worktree add --detach gumby &&
+       test_when_finished "git worktree remove gumby || :" &&
+       git worktree lock gumby &&
+       test_when_finished "git worktree unlock gumby || :" &&
+       test_must_fail git worktree remove gumby &&
+       test_must_fail git worktree remove --force gumby &&
+       git worktree remove --force --force gumby
+'
+
+test_expect_success 'remove cleans up .git/worktrees when empty' '
+       git init moog &&
+       (
+               cd moog &&
+               test_commit bim &&
+               git worktree add --detach goom &&
+               test_path_exists .git/worktrees &&
+               git worktree remove goom &&
+               test_path_is_missing .git/worktrees
+       )
+'
+
 test_done
index 685ec45639a5e9a2bf929e10aa30976e9a4c456b..6c32d42c8c688845682d578141dbef760a663215 100755 (executable)
@@ -73,7 +73,7 @@ test_expect_success 'update-index --update from subdir' '
        100644 $(git hash-object dir1/file3) 0  dir1/file3
        100644 $file2 0 file2
        EOF
-       test_cmp current expected
+       test_cmp expected current
 '
 
 test_expect_success 'update-index --update with pathspec' '
index 93f21ab078080d00100cd63f6b5ee5cdfd636d29..478b82cf9b513a8f45f250de59d519847227f794 100755 (executable)
@@ -1221,7 +1221,7 @@ test_expect_success 'use --edit-description' '
        EOF
        EDITOR=./editor git branch --edit-description &&
        echo "New contents" >expect &&
-       test_cmp EDITOR_OUTPUT expect
+       test_cmp expect EDITOR_OUTPUT
 '
 
 test_expect_success 'detect typo in branch name when using --edit-description' '
index 2237c7f4af9464cbee087696ec3a86aeaa86e033..045aca1c18bec3d5278eb50a461df0588dd84618 100755 (executable)
@@ -122,6 +122,11 @@ test_expect_success 'changed commit' '
        test_cmp expected actual
 '
 
+test_expect_success 'no commits on one side' '
+       git commit --amend -m "new message" &&
+       git range-diff master HEAD@{1} HEAD
+'
+
 test_expect_success 'changed message' '
        git range-diff --no-color topic...changed-message >actual &&
        sed s/Z/\ /g >expected <<-EOF &&
@@ -133,13 +138,69 @@ test_expect_success 'changed message' '
            Z
            +    Also a silly comment here!
            +
-           Zdiff --git a/file b/file
-           Z--- a/file
-           Z+++ b/file
+           Z diff --git a/file b/file
+           Z --- a/file
+           Z +++ b/file
        3:  147e64e = 3:  b9cb956 s/11/B/
        4:  a63e992 = 4:  8add5f1 s/12/B/
        EOF
        test_cmp expected actual
 '
 
+test_expect_success 'dual-coloring' '
+       sed -e "s|^:||" >expect <<-\EOF &&
+       :<YELLOW>1:  a4b3333 = 1:  f686024 s/5/A/<RESET>
+       :<RED>2:  f51d370 <RESET><YELLOW>!<RESET><GREEN> 2:  4ab067d<RESET><YELLOW> s/4/A/<RESET>
+       :    <REVERSE><CYAN>@@ -2,6 +2,8 @@<RESET>
+       :     <RESET>
+       :         s/4/A/<RESET>
+       :     <RESET>
+       :    <REVERSE><GREEN>+<RESET><BOLD>    Also a silly comment here!<RESET>
+       :    <REVERSE><GREEN>+<RESET>
+       :      diff --git a/file b/file<RESET>
+       :      --- a/file<RESET>
+       :      +++ b/file<RESET>
+       :<RED>3:  0559556 <RESET><YELLOW>!<RESET><GREEN> 3:  b9cb956<RESET><YELLOW> s/11/B/<RESET>
+       :    <REVERSE><CYAN>@@ -10,7 +10,7 @@<RESET>
+       :      9<RESET>
+       :      10<RESET>
+       :    <RED> -11<RESET>
+       :    <REVERSE><RED>-<RESET><FAINT;GREEN>+BB<RESET>
+       :    <REVERSE><GREEN>+<RESET><BOLD;GREEN>+B<RESET>
+       :      12<RESET>
+       :      13<RESET>
+       :      14<RESET>
+       :<RED>4:  d966c5c <RESET><YELLOW>!<RESET><GREEN> 4:  8add5f1<RESET><YELLOW> s/12/B/<RESET>
+       :    <REVERSE><CYAN>@@ -8,7 +8,7 @@<RESET>
+       :    <CYAN> @@<RESET>
+       :      9<RESET>
+       :      10<RESET>
+       :    <REVERSE><RED>-<RESET><FAINT> BB<RESET>
+       :    <REVERSE><GREEN>+<RESET><BOLD> B<RESET>
+       :    <RED> -12<RESET>
+       :    <GREEN> +B<RESET>
+       :      13<RESET>
+       EOF
+       git range-diff changed...changed-message --color --dual-color >actual.raw &&
+       test_decode_color >actual <actual.raw &&
+       test_cmp expect actual
+'
+
+for prev in topic master..topic
+do
+       test_expect_success "format-patch --range-diff=$prev" '
+               git format-patch --stdout --cover-letter --range-diff=$prev \
+                       master..unmodified >actual &&
+               grep "= 1: .* s/5/A" actual &&
+               grep "= 2: .* s/4/A" actual &&
+               grep "= 3: .* s/11/B" actual &&
+               grep "= 4: .* s/12/B" actual
+       '
+done
+
+test_expect_success 'format-patch --range-diff as commentary' '
+       git format-patch --stdout --range-diff=HEAD~1 HEAD~1 >actual &&
+       test_i18ngrep "^Range-diff:$" actual
+'
+
 test_done
index 10bfc8b94710caf8d33a3a1fa66ad3a4ba394c0f..823fdbda1f32355e6db952976c1210b35fd452ec 100755 (executable)
@@ -44,7 +44,7 @@ test_expect_success 'merge z into y fails and sets NOTES_MERGE_REF' '
        git config core.notesRef refs/notes/y &&
        test_must_fail git notes merge z &&
        echo "ref: refs/notes/y" >expect &&
-       test_cmp .git/NOTES_MERGE_REF expect
+       test_cmp expect .git/NOTES_MERGE_REF
 '
 
 test_expect_success 'merge z into y while mid-merge in another workdir fails' '
@@ -66,7 +66,7 @@ test_expect_success 'merge z into x while mid-merge on y succeeds' '
                grep -v "A notes merge into refs/notes/x is already in-progress in" out
        ) &&
        echo "ref: refs/notes/x" >expect &&
-       test_cmp .git/worktrees/worktree2/NOTES_MERGE_REF expect
+       test_cmp expect .git/worktrees/worktree2/NOTES_MERGE_REF
 '
 
 test_done
index 3996ee013563ce1f91b2a693318b1e07aba96568..3e73f7584ce7d3a23e7e75d0f71bd595f5f822a3 100755 (executable)
@@ -183,13 +183,13 @@ test_expect_success 'cherry-picked commits and fork-point work together' '
        test_commit final_B B "Final B" &&
        git rebase &&
        echo Amended >expect &&
-       test_cmp A expect &&
+       test_cmp expect A &&
        echo "Final B" >expect &&
-       test_cmp B expect &&
+       test_cmp expect B &&
        echo C >expect &&
-       test_cmp C expect &&
+       test_cmp expect C &&
        echo D >expect &&
-       test_cmp D expect
+       test_cmp expect D
 '
 
 test_expect_success 'rebase -q is quiet' '
index 86bba5ed7c23f78065521a9042244ea978a3a80f..ff89b6341a6fc19801959058e35039778bc6577b 100755 (executable)
@@ -569,16 +569,15 @@ test_expect_success '--continue tries to commit, even for "edit"' '
 '
 
 test_expect_success 'aborted --continue does not squash commits after "edit"' '
-       test_when_finished "git rebase --abort" &&
        old=$(git rev-parse HEAD) &&
        test_tick &&
        set_fake_editor &&
        FAKE_LINES="edit 1" git rebase -i HEAD^ &&
        echo "edited again" > file7 &&
        git add file7 &&
-       echo all the things >>conflict &&
-       test_must_fail git rebase --continue &&
-       test $old = $(git rev-parse HEAD)
+       test_must_fail env FAKE_COMMIT_MESSAGE=" " git rebase --continue &&
+       test $old = $(git rev-parse HEAD) &&
+       git rebase --abort
 '
 
 test_expect_success 'auto-amend only edited commits after "edit"' '
index da94dddc86d78830664d3a4b910ab157c2db2f0f..860e63e444d296c64773bae0d743fbb80fe674f1 100755 (executable)
@@ -83,7 +83,7 @@ test_expect_success 'rebase -m commit with empty message' '
 test_expect_success 'rebase -i commit with empty message' '
        git checkout diff-in-message &&
        set_fake_editor &&
-       env FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \
+       test_must_fail env FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \
                git rebase -i HEAD^
 '
 
index e364c12622f8566c3c99370de25ec5659a1e1546..13f5688135d853106702b7679eb302f6ae3d1450 100755 (executable)
@@ -330,4 +330,23 @@ test_expect_success 'wrapped original subject' '
        test $base = $parent
 '
 
+test_expect_success 'abort last squash' '
+       test_when_finished "test_might_fail git rebase --abort" &&
+       test_when_finished "git checkout master" &&
+
+       git checkout -b some-squashes &&
+       git commit --allow-empty -m first &&
+       git commit --allow-empty --squash HEAD &&
+       git commit --allow-empty -m second &&
+       git commit --allow-empty --squash HEAD &&
+
+       test_must_fail git -c core.editor="grep -q ^pick" \
+               rebase -ki --autosquash HEAD~4 &&
+       : do not finish the squash, but resolve it manually &&
+       git commit --allow-empty --amend -m edited-first &&
+       git rebase --skip &&
+       git show >actual &&
+       ! grep first actual
+'
+
 test_done
index 1fb3e499b4a044d935c090bfa567a7ace271c054..e85cdc7037bcb6184d1ce6f9aa7ff2cfc4af2096 100755 (executable)
@@ -55,7 +55,7 @@ test_expect_success 'blank line at end of file; extend at end of file' '
        git add file && git commit -m second &&
        git rebase --whitespace=fix HEAD^^ &&
        git diff --exit-code HEAD^:file expect-first &&
-       test_cmp file expect-second
+       test_cmp expect-second file
 '
 
 # prepare third revision of "file"
@@ -82,7 +82,7 @@ test_expect_success 'two blanks line at end of file; extend at end of file' '
        cp third file && git add file && git commit -m third &&
        git rebase --whitespace=fix HEAD^^ &&
        git diff --exit-code HEAD^:file expect-second &&
-       test_cmp file expect-third
+       test_cmp expect-third file
 '
 
 test_expect_success 'same, but do not remove trailing spaces' '
@@ -120,7 +120,7 @@ test_expect_success 'at beginning of file' '
        done >> file &&
        git commit -m more file &&
        git rebase --whitespace=fix HEAD^^ &&
-       test_cmp file expect-beginning
+       test_cmp expect-beginning file
 '
 
 test_done
index fbdc47cfbdae6e3cec7d6762faa4a8e13554d803..5f911bb5290f33126380c45f3f5874d03f78a677 100755 (executable)
@@ -11,17 +11,14 @@ test_expect_success setup '
        test_tick &&
        git commit -m "first" &&
 
-       git checkout -b empty-branch &&
-       test_tick &&
-       git commit --allow-empty -m "empty" &&
-
+       git checkout -b empty-message-branch &&
        echo third >> file1 &&
        git add file1 &&
        test_tick &&
        git commit --allow-empty-message -m "" &&
 
        git checkout master &&
-       git checkout -b empty-branch2 &&
+       git checkout -b empty-change-branch &&
        test_tick &&
        git commit --allow-empty -m "empty"
 
@@ -29,7 +26,7 @@ test_expect_success setup '
 
 test_expect_success 'cherry-pick an empty commit' '
        git checkout master &&
-       test_expect_code 1 git cherry-pick empty-branch^
+       test_expect_code 1 git cherry-pick empty-change-branch
 '
 
 test_expect_success 'index lockfile was removed' '
@@ -37,8 +34,9 @@ test_expect_success 'index lockfile was removed' '
 '
 
 test_expect_success 'cherry-pick a commit with an empty message' '
+       test_when_finished "git reset --hard empty-message-branch~1" &&
        git checkout master &&
-       test_expect_code 1 git cherry-pick empty-branch
+       git cherry-pick empty-message-branch
 '
 
 test_expect_success 'index lockfile was removed' '
@@ -47,7 +45,7 @@ test_expect_success 'index lockfile was removed' '
 
 test_expect_success 'cherry-pick a commit with an empty message with --allow-empty-message' '
        git checkout -f master &&
-       git cherry-pick --allow-empty-message empty-branch
+       git cherry-pick --allow-empty-message empty-message-branch
 '
 
 test_expect_success 'cherry pick an empty non-ff commit without --allow-empty' '
@@ -55,12 +53,12 @@ test_expect_success 'cherry pick an empty non-ff commit without --allow-empty' '
        echo fourth >>file2 &&
        git add file2 &&
        git commit -m "fourth" &&
-       test_must_fail git cherry-pick empty-branch2
+       test_must_fail git cherry-pick empty-change-branch
 '
 
 test_expect_success 'cherry pick an empty non-ff commit with --allow-empty' '
        git checkout master &&
-       git cherry-pick --allow-empty empty-branch2
+       git cherry-pick --allow-empty empty-change-branch
 '
 
 test_expect_success 'cherry pick with --keep-redundant-commits' '
index 609fbfdc317137e420fab48e394f0095b0a1df69..65dfbc033a027df1a590cbfaf75ae47f4ed9c547 100755 (executable)
@@ -540,7 +540,7 @@ test_expect_success 'add -p does not expand argument lists' '
        # update it, but we want to be sure that our "." pathspec
        # was not expanded into the argument list of any command.
        # So look only for "not-changed".
-       ! grep not-changed trace.out
+       ! grep -E "^trace: (built-in|exec|run_command): .*not-changed" trace.out
 '
 
 test_expect_success 'hunk-editing handles custom comment char' '
index c6af7f82b58f606b3f6c7e2612931a1fc2af15c7..6c676645d837477077e9e349bf01398f3aa52b5f 100755 (executable)
@@ -110,10 +110,10 @@ test_expect_success 'add -e' '
        cp second-part file &&
        git add -e &&
        test_cmp second-part file &&
-       test_cmp orig-patch expected-patch &&
+       test_cmp expected-patch orig-patch &&
        git diff --cached >actual &&
        grep -v index actual >out &&
-       test_cmp out expected
+       test_cmp expected out
 
 '
 
index 6450bc669860f0f3e7d809514fc073013ffbf0b8..cd216655b97b852eec963916e796d11d32eb976a 100755 (executable)
@@ -36,7 +36,7 @@ EOF
 test_expect_success 'parents of stash' '
        test $(git rev-parse stash^) = $(git rev-parse HEAD) &&
        git diff stash^2..stash > output &&
-       test_cmp output expect
+       test_cmp expect output
 '
 
 test_expect_success 'applying bogus stash does nothing' '
@@ -210,9 +210,9 @@ test_expect_success 'stash branch' '
        test refs/heads/stashbranch = $(git symbolic-ref HEAD) &&
        test $(git rev-parse HEAD) = $(git rev-parse master^) &&
        git diff --cached > output &&
-       test_cmp output expect &&
+       test_cmp expect output &&
        git diff > output &&
-       test_cmp output expect1 &&
+       test_cmp expect1 output &&
        git add file &&
        git commit -m alternate\ second &&
        git diff master..stashbranch > output &&
@@ -710,7 +710,7 @@ test_expect_success 'stash where working directory contains "HEAD" file' '
        git diff-index --cached --quiet HEAD &&
        test "$(git rev-parse stash^)" = "$(git rev-parse HEAD)" &&
        git diff stash^..stash > output &&
-       test_cmp output expect
+       test_cmp expect output
 '
 
 test_expect_success 'store called with invalid commit' '
index 597b0637d160be0cab4d3114aace598616e371fd..cc1c8a7bb2da508e570621ddf9e08694007425d4 100755 (executable)
@@ -142,7 +142,7 @@ test_expect_success 'stash save --include-untracked removed files' '
        rm -f file &&
        git stash save --include-untracked &&
        echo 1 > expect &&
-       test_cmp file expect
+       test_cmp expect file
 '
 
 rm -f expect
index 53880da7bbe85521112ced3ff94ead05587c7e81..909c743c134c5e18e0a80db3eae2dfb6e8210e6a 100755 (executable)
@@ -1717,4 +1717,38 @@ test_expect_success 'format-patch --pretty=mboxrd' '
        test_cmp expect actual
 '
 
+test_expect_success 'interdiff: setup' '
+       git checkout -b boop master &&
+       test_commit fnorp blorp &&
+       test_commit fleep blorp
+'
+
+test_expect_success 'interdiff: cover-letter' '
+       sed "y/q/ /" >expect <<-\EOF &&
+       +fleep
+       --q
+       EOF
+       git format-patch --cover-letter --interdiff=boop~2 -1 boop &&
+       test_i18ngrep "^Interdiff:$" 0000-cover-letter.patch &&
+       test_i18ngrep ! "^Interdiff:$" 0001-fleep.patch &&
+       sed "1,/^@@ /d; /^-- $/q" <0000-cover-letter.patch >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'interdiff: reroll-count' '
+       git format-patch --cover-letter --interdiff=boop~2 -v2 -1 boop &&
+       test_i18ngrep "^Interdiff ..* v1:$" v2-0000-cover-letter.patch
+'
+
+test_expect_success 'interdiff: solo-patch' '
+       cat >expect <<-\EOF &&
+         +fleep
+
+       EOF
+       git format-patch --interdiff=boop~2 -1 boop &&
+       test_i18ngrep "^Interdiff:$" 0001-fleep.patch &&
+       sed "1,/^  @@ /d; /^$/q" <0001-fleep.patch >actual &&
+       test_cmp expect actual
+'
+
 test_done
index fa44e788695c6f3226ad17cb6e8f2b8e256814ed..35578f2bb91dab8a00f40b5fbb26bd0a31ca7d58 100755 (executable)
@@ -37,7 +37,7 @@ test_expect_success 'hunk header truncation with an overly long line' '
                echo " A $N$N$N$N$N$N$N$N$N2" &&
                echo " L  $N$N$N$N$N$N$N$N$N1"
        ) >expected &&
-       test_cmp actual expected
+       test_cmp expected actual
 
 '
 
index d80187de9446c3129e1c69d3b67278f2fd40c9a0..f7de6f077a6e055b79dfc2ae572f1552c40d2eb3 100755 (executable)
@@ -72,7 +72,7 @@ test_expect_success 'apply with --reject should fail but update the file' '
        rm -f file1.rej file2.rej &&
 
        test_must_fail git apply --reject patch.1 &&
-       test_cmp file1 expected &&
+       test_cmp expected file1 &&
 
        cat file1.rej &&
        test_path_is_missing file2.rej
@@ -85,7 +85,7 @@ test_expect_success 'apply with --reject should fail but update the file' '
 
        test_must_fail git apply --reject patch.2 >rejects &&
        test_path_is_missing file1 &&
-       test_cmp file2 expected &&
+       test_cmp expected file2 &&
 
        cat file2.rej &&
        test_path_is_missing file1.rej
@@ -99,7 +99,7 @@ test_expect_success 'the same test with --verbose' '
 
        test_must_fail git apply --reject --verbose patch.2 >rejects &&
        test_path_is_missing file1 &&
-       test_cmp file2 expected &&
+       test_cmp expected file2 &&
 
        cat file2.rej &&
        test_path_is_missing file1.rej
index 7e32237a2ae9a21e370d501caa8adfe561c6bc6b..ff51e9e78914e4b9c7a801b4f88f3691b5f2844d 100755 (executable)
@@ -313,9 +313,9 @@ test_expect_success 'applying beyond EOF requires one non-blank context line' '
        { echo a; echo; } >one &&
        cp one expect &&
        test_must_fail git apply --whitespace=fix patch &&
-       test_cmp one expect &&
+       test_cmp expect one &&
        test_must_fail git apply --ignore-space-change --whitespace=fix patch &&
-       test_cmp one expect
+       test_cmp expect one
 '
 
 test_expect_success 'tons of blanks at EOF should not apply' '
@@ -342,10 +342,10 @@ test_expect_success 'missing blank line at end with --whitespace=fix' '
        cp one saved-one &&
        test_must_fail git apply patch &&
        git apply --whitespace=fix patch &&
-       test_cmp one expect &&
+       test_cmp expect one &&
        mv saved-one one &&
        git apply --ignore-space-change --whitespace=fix patch &&
-       test_cmp one expect
+       test_cmp expect one
 '
 
 test_expect_success 'two missing blank lines at end with --whitespace=fix' '
@@ -360,11 +360,11 @@ test_expect_success 'two missing blank lines at end with --whitespace=fix' '
        cp no-blank-lines one &&
        test_must_fail git apply patch &&
        git apply --whitespace=fix patch &&
-       test_cmp one expect &&
+       test_cmp expect one &&
        mv no-blank-lines one &&
        test_must_fail git apply patch &&
        git apply --ignore-space-change --whitespace=fix patch &&
-       test_cmp one expect
+       test_cmp expect one
 '
 
 test_expect_success 'missing blank line at end, insert before end, --whitespace=fix' '
@@ -376,7 +376,7 @@ test_expect_success 'missing blank line at end, insert before end, --whitespace=
        echo a >one &&
        test_must_fail git apply patch &&
        git apply --whitespace=fix patch &&
-       test_cmp one expect
+       test_cmp expect one
 '
 
 test_expect_success 'shrink file with tons of missing blanks at end of file' '
@@ -392,10 +392,10 @@ test_expect_success 'shrink file with tons of missing blanks at end of file' '
        cp no-blank-lines one &&
        test_must_fail git apply patch &&
        git apply --whitespace=fix patch &&
-       test_cmp one expect &&
+       test_cmp expect one &&
        mv no-blank-lines one &&
        git apply --ignore-space-change --whitespace=fix patch &&
-       test_cmp one expect
+       test_cmp expect one
 '
 
 test_expect_success 'missing blanks at EOF must only match blank lines' '
@@ -427,7 +427,7 @@ test_expect_success 'missing blank line should match context line with spaces' '
        git add one &&
 
        git apply --whitespace=fix patch &&
-       test_cmp one expect
+       test_cmp expect one
 '
 
 sed -e's/Z//' >one <<EOF
@@ -447,7 +447,7 @@ test_expect_success 'same, but with the --ignore-space-option' '
 
        git checkout-index -f one &&
        git apply --ignore-space-change --whitespace=fix patch &&
-       test_cmp one expect
+       test_cmp expect one
 '
 
 test_expect_success 'same, but with CR-LF line endings && cr-at-eol set' '
@@ -464,7 +464,7 @@ test_expect_success 'same, but with CR-LF line endings && cr-at-eol set' '
        mv save-one one &&
 
        git apply --ignore-space-change --whitespace=fix patch &&
-       test_cmp one expect
+       test_cmp expect one
 '
 
 test_expect_success 'CR-LF line endings && add line && text=auto' '
@@ -478,7 +478,7 @@ test_expect_success 'CR-LF line endings && add line && text=auto' '
        mv save-one one &&
        echo "one text=auto" >.gitattributes &&
        git apply patch &&
-       test_cmp one expect
+       test_cmp expect one
 '
 
 test_expect_success 'CR-LF line endings && change line && text=auto' '
@@ -491,7 +491,7 @@ test_expect_success 'CR-LF line endings && change line && text=auto' '
        mv save-one one &&
        echo "one text=auto" >.gitattributes &&
        git apply patch &&
-       test_cmp one expect
+       test_cmp expect one
 '
 
 test_expect_success 'LF in repo, CRLF in worktree && change line && text=auto' '
@@ -503,7 +503,7 @@ test_expect_success 'LF in repo, CRLF in worktree && change line && text=auto' '
        echo "one text=auto" >.gitattributes &&
        git -c core.eol=CRLF apply patch &&
        printf "b\r\n" >expect &&
-       test_cmp one expect
+       test_cmp expect one
 '
 
 test_expect_success 'whitespace=fix to expand' '
index 0ffe33fbefdc72c664d3914da234f7d4052ed462..3b636a63a3ef048c4568442d4354c7ad4d6a73f0 100755 (executable)
@@ -114,7 +114,7 @@ for t in 1 2 3 4
 do
        test_expect_success 'apply with ws expansion (t=$t)' '
                git apply patch$t.patch &&
-               test_cmp test-$t expect-$t
+               test_cmp expect-$t test-$t
        '
 done
 
index 65da74c76683c0ccd109fdec63766ab6bdfb08f0..55b7750ade1c5fd225f830bbbfcb42938abcbe37 100755 (executable)
@@ -577,4 +577,98 @@ test_expect_success 'multiple identical conflicts' '
        count_pre_post 0 0
 '
 
+test_expect_success 'rerere with unexpected conflict markers does not crash' '
+       git reset --hard &&
+
+       git checkout -b branch-1 master &&
+       echo "bar" >test &&
+       git add test &&
+       git commit -q -m two &&
+
+       git reset --hard &&
+       git checkout -b branch-2 master &&
+       echo "foo" >test &&
+       git add test &&
+       git commit -q -a -m one &&
+
+       test_must_fail git merge branch-1 &&
+       echo "<<<<<<< a" >test &&
+       git rerere &&
+
+       git rerere clear
+'
+
+test_expect_success 'rerere with inner conflict markers' '
+       git reset --hard &&
+
+       git checkout -b A master &&
+       echo "bar" >test &&
+       git add test &&
+       git commit -q -m two &&
+       echo "baz" >test &&
+       git add test &&
+       git commit -q -m three &&
+
+       git reset --hard &&
+       git checkout -b B master &&
+       echo "foo" >test &&
+       git add test &&
+       git commit -q -a -m one &&
+
+       test_must_fail git merge A~ &&
+       git add test &&
+       git commit -q -m "will solve conflicts later" &&
+       test_must_fail git merge A &&
+
+       echo "resolved" >test &&
+       git add test &&
+       git commit -q -m "solved conflict" &&
+
+       echo "resolved" >expect &&
+
+       git reset --hard HEAD~~ &&
+       test_must_fail git merge A~ &&
+       git add test &&
+       git commit -q -m "will solve conflicts later" &&
+       test_must_fail git merge A &&
+       cat test >actual &&
+       test_cmp expect actual &&
+
+       git add test &&
+       git commit -m "rerere solved conflict" &&
+       git reset --hard HEAD~ &&
+       test_must_fail git merge A &&
+       cat test >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'setup simple stage 1 handling' '
+       test_create_repo stage_1_handling &&
+       (
+               cd stage_1_handling &&
+
+               test_seq 1 10 >original &&
+               git add original &&
+               git commit -m original &&
+
+               git checkout -b A master &&
+               git mv original A &&
+               git commit -m "rename to A" &&
+
+               git checkout -b B master &&
+               git mv original B &&
+               git commit -m "rename to B"
+       )
+'
+
+test_expect_success 'test simple stage 1 handling' '
+       (
+               cd stage_1_handling &&
+
+               git config rerere.enabled true &&
+               git checkout A^0 &&
+               test_must_fail git merge B^0
+       )
+'
+
 test_done
index 2052cadb1109d3644b0c3f82b707f8e8bb35f945..978a8a66ff05055ad1967b5cc25f36880230a5e4 100755 (executable)
@@ -598,4 +598,27 @@ test_expect_success ':only and :unfold work together' '
        test_cmp expect actual
 '
 
+test_expect_success 'trailer parsing not fooled by --- line' '
+       git commit --allow-empty -F - <<-\EOF &&
+       this is the subject
+
+       This is the body. The message has a "---" line which would confuse a
+       message+patch parser. But here we know we have only a commit message,
+       so we get it right.
+
+       trailer: wrong
+       ---
+       This is more body.
+
+       trailer: right
+       EOF
+
+       {
+               echo "trailer: right" &&
+               echo
+       } >expect &&
+       git log --no-walk --format="%(trailers)" >actual &&
+       test_cmp expect actual
+'
+
 test_done
diff --git a/t/t4256-am-format-flowed.sh b/t/t4256-am-format-flowed.sh
new file mode 100755 (executable)
index 0000000..6340310
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+test_description='test format=flowed support of git am'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+       cp "$TEST_DIRECTORY/t4256/1/mailinfo.c.orig" mailinfo.c &&
+       git add mailinfo.c &&
+       git commit -m initial
+'
+
+test_expect_success 'am with format=flowed' '
+       git am <"$TEST_DIRECTORY/t4256/1/patch" >stdout 2>stderr &&
+       test_i18ngrep "warning: Patch sent with format=flowed" stderr &&
+       test_cmp "$TEST_DIRECTORY/t4256/1/mailinfo.c" mailinfo.c
+'
+
+test_done
diff --git a/t/t4256/1/mailinfo.c b/t/t4256/1/mailinfo.c
new file mode 100644 (file)
index 0000000..b395adb
--- /dev/null
@@ -0,0 +1,1245 @@
+#include "cache.h"
+#include "config.h"
+#include "utf8.h"
+#include "strbuf.h"
+#include "mailinfo.h"
+
+static void cleanup_space(struct strbuf *sb)
+{
+       size_t pos, cnt;
+       for (pos = 0; pos < sb->len; pos++) {
+               if (isspace(sb->buf[pos])) {
+                       sb->buf[pos] = ' ';
+                       for (cnt = 0; isspace(sb->buf[pos + cnt + 1]); cnt++);
+                       strbuf_remove(sb, pos + 1, cnt);
+               }
+       }
+}
+
+static void get_sane_name(struct strbuf *out, struct strbuf *name, struct strbuf *email)
+{
+       struct strbuf *src = name;
+       if (name->len < 3 || 60 < name->len || strchr(name->buf, '@') ||
+               strchr(name->buf, '<') || strchr(name->buf, '>'))
+               src = email;
+       else if (name == out)
+               return;
+       strbuf_reset(out);
+       strbuf_addbuf(out, src);
+}
+
+static void parse_bogus_from(struct mailinfo *mi, const struct strbuf *line)
+{
+       /* John Doe <johndoe> */
+
+       char *bra, *ket;
+       /* This is fallback, so do not bother if we already have an
+        * e-mail address.
+        */
+       if (mi->email.len)
+               return;
+
+       bra = strchr(line->buf, '<');
+       if (!bra)
+               return;
+       ket = strchr(bra, '>');
+       if (!ket)
+               return;
+
+       strbuf_reset(&mi->email);
+       strbuf_add(&mi->email, bra + 1, ket - bra - 1);
+
+       strbuf_reset(&mi->name);
+       strbuf_add(&mi->name, line->buf, bra - line->buf);
+       strbuf_trim(&mi->name);
+       get_sane_name(&mi->name, &mi->name, &mi->email);
+}
+
+static const char *unquote_comment(struct strbuf *outbuf, const char *in)
+{
+       int c;
+       int take_next_literally = 0;
+
+       strbuf_addch(outbuf, '(');
+
+       while ((c = *in++) != 0) {
+               if (take_next_literally == 1) {
+                       take_next_literally = 0;
+               } else {
+                       switch (c) {
+                       case '\\':
+                               take_next_literally = 1;
+                               continue;
+                       case '(':
+                               in = unquote_comment(outbuf, in);
+                               continue;
+                       case ')':
+                               strbuf_addch(outbuf, ')');
+                               return in;
+                       }
+               }
+
+               strbuf_addch(outbuf, c);
+       }
+
+       return in;
+}
+
+static const char *unquote_quoted_string(struct strbuf *outbuf, const char *in)
+{
+       int c;
+       int take_next_literally = 0;
+
+       while ((c = *in++) != 0) {
+               if (take_next_literally == 1) {
+                       take_next_literally = 0;
+               } else {
+                       switch (c) {
+                       case '\\':
+                               take_next_literally = 1;
+                               continue;
+                       case '"':
+                               return in;
+                       }
+               }
+
+               strbuf_addch(outbuf, c);
+       }
+
+       return in;
+}
+
+static void unquote_quoted_pair(struct strbuf *line)
+{
+       struct strbuf outbuf;
+       const char *in = line->buf;
+       int c;
+
+       strbuf_init(&outbuf, line->len);
+
+       while ((c = *in++) != 0) {
+               switch (c) {
+               case '"':
+                       in = unquote_quoted_string(&outbuf, in);
+                       continue;
+               case '(':
+                       in = unquote_comment(&outbuf, in);
+                       continue;
+               }
+
+               strbuf_addch(&outbuf, c);
+       }
+
+       strbuf_swap(&outbuf, line);
+       strbuf_release(&outbuf);
+
+}
+
+static void handle_from(struct mailinfo *mi, const struct strbuf *from)
+{
+       char *at;
+       size_t el;
+       struct strbuf f;
+
+       strbuf_init(&f, from->len);
+       strbuf_addbuf(&f, from);
+
+       unquote_quoted_pair(&f);
+
+       at = strchr(f.buf, '@');
+       if (!at) {
+               parse_bogus_from(mi, from);
+               goto out;
+       }
+
+       /*
+        * If we already have one email, don't take any confusing lines
+        */
+       if (mi->email.len && strchr(at + 1, '@'))
+               goto out;
+
+       /* Pick up the string around '@', possibly delimited with <>
+        * pair; that is the email part.
+        */
+       while (at > f.buf) {
+               char c = at[-1];
+               if (isspace(c))
+                       break;
+               if (c == '<') {
+                       at[-1] = ' ';
+                       break;
+               }
+               at--;
+       }
+       el = strcspn(at, " \n\t\r\v\f>");
+       strbuf_reset(&mi->email);
+       strbuf_add(&mi->email, at, el);
+       strbuf_remove(&f, at - f.buf, el + (at[el] ? 1 : 0));
+
+       /* The remainder is name.  It could be
+        *
+        * - "John Doe <john.doe@xz>"                   (a), or
+        * - "john.doe@xz (John Doe)"                   (b), or
+        * - "John (zzz) Doe <john.doe@xz> (Comment)"   (c)
+        *
+        * but we have removed the email part, so
+        *
+        * - remove extra spaces which could stay after email (case 'c'), and
+        * - trim from both ends, possibly removing the () pair at the end
+        *   (cases 'a' and 'b').
+        */
+       cleanup_space(&f);
+       strbuf_trim(&f);
+       if (f.buf[0] == '(' && f.len && f.buf[f.len - 1] == ')') {
+               strbuf_remove(&f, 0, 1);
+               strbuf_setlen(&f, f.len - 1);
+       }
+
+       get_sane_name(&mi->name, &f, &mi->email);
+out:
+       strbuf_release(&f);
+}
+
+static void handle_header(struct strbuf **out, const struct strbuf *line)
+{
+       if (!*out) {
+               *out = xmalloc(sizeof(struct strbuf));
+               strbuf_init(*out, line->len);
+       } else
+               strbuf_reset(*out);
+
+       strbuf_addbuf(*out, line);
+}
+
+/* NOTE NOTE NOTE.  We do not claim we do full MIME.  We just attempt
+ * to have enough heuristics to grok MIME encoded patches often found
+ * on our mailing lists.  For example, we do not even treat header lines
+ * case insensitively.
+ */
+
+static int slurp_attr(const char *line, const char *name, struct strbuf *attr)
+{
+       const char *ends, *ap = strcasestr(line, name);
+       size_t sz;
+
+       strbuf_setlen(attr, 0);
+       if (!ap)
+               return 0;
+       ap += strlen(name);
+       if (*ap == '"') {
+               ap++;
+               ends = "\"";
+       }
+       else
+               ends = "; \t";
+       sz = strcspn(ap, ends);
+       strbuf_add(attr, ap, sz);
+       return 1;
+}
+
+static int has_attr_value(const char *line, const char *name, const char *value)
+{
+       struct strbuf sb = STRBUF_INIT;
+       int rc = slurp_attr(line, name, &sb) && !strcasecmp(sb.buf, value);
+       strbuf_release(&sb);
+       return rc;
+}
+
+static void handle_content_type(struct mailinfo *mi, struct strbuf *line)
+{
+       struct strbuf *boundary = xmalloc(sizeof(struct strbuf));
+       strbuf_init(boundary, line->len);
+
+       mi->format_flowed = has_attr_value(line->buf, "format=", "flowed");
+       mi->delsp = has_attr_value(line->buf, "delsp=", "yes");
+
+       if (slurp_attr(line->buf, "boundary=", boundary)) {
+               strbuf_insert(boundary, 0, "--", 2);
+               if (++mi->content_top >= &mi->content[MAX_BOUNDARIES]) {
+                       error("Too many boundaries to handle");
+                       mi->input_error = -1;
+                       mi->content_top = &mi->content[MAX_BOUNDARIES] - 1;
+                       return;
+               }
+               *(mi->content_top) = boundary;
+               boundary = NULL;
+       }
+       slurp_attr(line->buf, "charset=", &mi->charset);
+
+       if (boundary) {
+               strbuf_release(boundary);
+               free(boundary);
+       }
+}
+
+static void handle_content_transfer_encoding(struct mailinfo *mi,
+                                            const struct strbuf *line)
+{
+       if (strcasestr(line->buf, "base64"))
+               mi->transfer_encoding = TE_BASE64;
+       else if (strcasestr(line->buf, "quoted-printable"))
+               mi->transfer_encoding = TE_QP;
+       else
+               mi->transfer_encoding = TE_DONTCARE;
+}
+
+static int is_multipart_boundary(struct mailinfo *mi, const struct strbuf *line)
+{
+       struct strbuf *content_top = *(mi->content_top);
+
+       return ((content_top->len <= line->len) &&
+               !memcmp(line->buf, content_top->buf, content_top->len));
+}
+
+static void cleanup_subject(struct mailinfo *mi, struct strbuf *subject)
+{
+       size_t at = 0;
+
+       while (at < subject->len) {
+               char *pos;
+               size_t remove;
+
+               switch (subject->buf[at]) {
+               case 'r': case 'R':
+                       if (subject->len <= at + 3)
+                               break;
+                       if ((subject->buf[at + 1] == 'e' ||
+                            subject->buf[at + 1] == 'E') &&
+                           subject->buf[at + 2] == ':') {
+                               strbuf_remove(subject, at, 3);
+                               continue;
+                       }
+                       at++;
+                       break;
+               case ' ': case '\t': case ':':
+                       strbuf_remove(subject, at, 1);
+                       continue;
+               case '[':
+                       pos = strchr(subject->buf + at, ']');
+                       if (!pos)
+                               break;
+                       remove = pos - subject->buf + at + 1;
+                       if (!mi->keep_non_patch_brackets_in_subject ||
+                           (7 <= remove &&
+                            memmem(subject->buf + at, remove, "PATCH", 5)))
+                               strbuf_remove(subject, at, remove);
+                       else {
+                               at += remove;
+                               /*
+                                * If the input had a space after the ], keep
+                                * it.  We don't bother with finding the end of
+                                * the space, since we later normalize it
+                                * anyway.
+                                */
+                               if (isspace(subject->buf[at]))
+                                       at += 1;
+                       }
+                       continue;
+               }
+               break;
+       }
+       strbuf_trim(subject);
+}
+
+#define MAX_HDR_PARSED 10
+static const char *header[MAX_HDR_PARSED] = {
+       "From","Subject","Date",
+};
+
+static inline int cmp_header(const struct strbuf *line, const char *hdr)
+{
+       int len = strlen(hdr);
+       return !strncasecmp(line->buf, hdr, len) && line->len > len &&
+                       line->buf[len] == ':' && isspace(line->buf[len + 1]);
+}
+
+static int is_format_patch_separator(const char *line, int len)
+{
+       static const char SAMPLE[] =
+               "From e6807f3efca28b30decfecb1732a56c7db1137ee Mon Sep 17 00:00:00 2001\n";
+       const char *cp;
+
+       if (len != strlen(SAMPLE))
+               return 0;
+       if (!skip_prefix(line, "From ", &cp))
+               return 0;
+       if (strspn(cp, "0123456789abcdef") != 40)
+               return 0;
+       cp += 40;
+       return !memcmp(SAMPLE + (cp - line), cp, strlen(SAMPLE) - (cp - line));
+}
+
+static struct strbuf *decode_q_segment(const struct strbuf *q_seg, int rfc2047)
+{
+       const char *in = q_seg->buf;
+       int c;
+       struct strbuf *out = xmalloc(sizeof(struct strbuf));
+       strbuf_init(out, q_seg->len);
+
+       while ((c = *in++) != 0) {
+               if (c == '=') {
+                       int ch, d = *in;
+                       if (d == '\n' || !d)
+                               break; /* drop trailing newline */
+                       ch = hex2chr(in);
+                       if (ch >= 0) {
+                               strbuf_addch(out, ch);
+                               in += 2;
+                               continue;
+                       }
+                       /* garbage -- fall through */
+               }
+               if (rfc2047 && c == '_') /* rfc2047 4.2 (2) */
+                       c = 0x20;
+               strbuf_addch(out, c);
+       }
+       return out;
+}
+
+static struct strbuf *decode_b_segment(const struct strbuf *b_seg)
+{
+       /* Decode in..ep, possibly in-place to ot */
+       int c, pos = 0, acc = 0;
+       const char *in = b_seg->buf;
+       struct strbuf *out = xmalloc(sizeof(struct strbuf));
+       strbuf_init(out, b_seg->len);
+
+       while ((c = *in++) != 0) {
+               if (c == '+')
+                       c = 62;
+               else if (c == '/')
+                       c = 63;
+               else if ('A' <= c && c <= 'Z')
+                       c -= 'A';
+               else if ('a' <= c && c <= 'z')
+                       c -= 'a' - 26;
+               else if ('0' <= c && c <= '9')
+                       c -= '0' - 52;
+               else
+                       continue; /* garbage */
+               switch (pos++) {
+               case 0:
+                       acc = (c << 2);
+                       break;
+               case 1:
+                       strbuf_addch(out, (acc | (c >> 4)));
+                       acc = (c & 15) << 4;
+                       break;
+               case 2:
+                       strbuf_addch(out, (acc | (c >> 2)));
+                       acc = (c & 3) << 6;
+                       break;
+               case 3:
+                       strbuf_addch(out, (acc | c));
+                       acc = pos = 0;
+                       break;
+               }
+       }
+       return out;
+}
+
+static int convert_to_utf8(struct mailinfo *mi,
+                          struct strbuf *line, const char *charset)
+{
+       char *out;
+
+       if (!mi->metainfo_charset || !charset || !*charset)
+               return 0;
+
+       if (same_encoding(mi->metainfo_charset, charset))
+               return 0;
+       out = reencode_string(line->buf, mi->metainfo_charset, charset);
+       if (!out) {
+               mi->input_error = -1;
+               return error("cannot convert from %s to %s",
+                            charset, mi->metainfo_charset);
+       }
+       strbuf_attach(line, out, strlen(out), strlen(out));
+       return 0;
+}
+
+static void decode_header(struct mailinfo *mi, struct strbuf *it)
+{
+       char *in, *ep, *cp;
+       struct strbuf outbuf = STRBUF_INIT, *dec;
+       struct strbuf charset_q = STRBUF_INIT, piecebuf = STRBUF_INIT;
+       int found_error = 1; /* pessimism */
+
+       in = it->buf;
+       while (in - it->buf <= it->len && (ep = strstr(in, "=?")) != NULL) {
+               int encoding;
+               strbuf_reset(&charset_q);
+               strbuf_reset(&piecebuf);
+
+               if (in != ep) {
+                       /*
+                        * We are about to process an encoded-word
+                        * that begins at ep, but there is something
+                        * before the encoded word.
+                        */
+                       char *scan;
+                       for (scan = in; scan < ep; scan++)
+                               if (!isspace(*scan))
+                                       break;
+
+                       if (scan != ep || in == it->buf) {
+                               /*
+                                * We should not lose that "something",
+                                * unless we have just processed an
+                                * encoded-word, and there is only LWS
+                                * before the one we are about to process.
+                                */
+                               strbuf_add(&outbuf, in, ep - in);
+                       }
+               }
+               /* E.g.
+                * ep : "=?iso-2022-jp?B?GyR...?= foo"
+                * ep : "=?ISO-8859-1?Q?Foo=FCbar?= baz"
+                */
+               ep += 2;
+
+               if (ep - it->buf >= it->len || !(cp = strchr(ep, '?')))
+                       goto release_return;
+
+               if (cp + 3 - it->buf > it->len)
+                       goto release_return;
+               strbuf_add(&charset_q, ep, cp - ep);
+
+               encoding = cp[1];
+               if (!encoding || cp[2] != '?')
+                       goto release_return;
+               ep = strstr(cp + 3, "?=");
+               if (!ep)
+                       goto release_return;
+               strbuf_add(&piecebuf, cp + 3, ep - cp - 3);
+               switch (tolower(encoding)) {
+               default:
+                       goto release_return;
+               case 'b':
+                       dec = decode_b_segment(&piecebuf);
+                       break;
+               case 'q':
+                       dec = decode_q_segment(&piecebuf, 1);
+                       break;
+               }
+               if (convert_to_utf8(mi, dec, charset_q.buf))
+                       goto release_return;
+
+               strbuf_addbuf(&outbuf, dec);
+               strbuf_release(dec);
+               free(dec);
+               in = ep + 2;
+       }
+       strbuf_addstr(&outbuf, in);
+       strbuf_reset(it);
+       strbuf_addbuf(it, &outbuf);
+       found_error = 0;
+release_return:
+       strbuf_release(&outbuf);
+       strbuf_release(&charset_q);
+       strbuf_release(&piecebuf);
+
+       if (found_error)
+               mi->input_error = -1;
+}
+
+static int check_header(struct mailinfo *mi,
+                       const struct strbuf *line,
+                       struct strbuf *hdr_data[], int overwrite)
+{
+       int i, ret = 0, len;
+       struct strbuf sb = STRBUF_INIT;
+
+       /* search for the interesting parts */
+       for (i = 0; header[i]; i++) {
+               int len = strlen(header[i]);
+               if ((!hdr_data[i] || overwrite) && cmp_header(line, header[i])) {
+                       /* Unwrap inline B and Q encoding, and optionally
+                        * normalize the meta information to utf8.
+                        */
+                       strbuf_add(&sb, line->buf + len + 2, line->len - len - 2);
+                       decode_header(mi, &sb);
+                       handle_header(&hdr_data[i], &sb);
+                       ret = 1;
+                       goto check_header_out;
+               }
+       }
+
+       /* Content stuff */
+       if (cmp_header(line, "Content-Type")) {
+               len = strlen("Content-Type: ");
+               strbuf_add(&sb, line->buf + len, line->len - len);
+               decode_header(mi, &sb);
+               strbuf_insert(&sb, 0, "Content-Type: ", len);
+               handle_content_type(mi, &sb);
+               ret = 1;
+               goto check_header_out;
+       }
+       if (cmp_header(line, "Content-Transfer-Encoding")) {
+               len = strlen("Content-Transfer-Encoding: ");
+               strbuf_add(&sb, line->buf + len, line->len - len);
+               decode_header(mi, &sb);
+               handle_content_transfer_encoding(mi, &sb);
+               ret = 1;
+               goto check_header_out;
+       }
+       if (cmp_header(line, "Message-Id")) {
+               len = strlen("Message-Id: ");
+               strbuf_add(&sb, line->buf + len, line->len - len);
+               decode_header(mi, &sb);
+               if (mi->add_message_id)
+                       mi->message_id = strbuf_detach(&sb, NULL);
+               ret = 1;
+               goto check_header_out;
+       }
+
+check_header_out:
+       strbuf_release(&sb);
+       return ret;
+}
+
+/*
+ * Returns 1 if the given line or any line beginning with the given line is an
+ * in-body header (that is, check_header will succeed when passed
+ * mi->s_hdr_data).
+ */
+static int is_inbody_header(const struct mailinfo *mi,
+                           const struct strbuf *line)
+{
+       int i;
+       for (i = 0; header[i]; i++)
+               if (!mi->s_hdr_data[i] && cmp_header(line, header[i]))
+                       return 1;
+       return 0;
+}
+
+static void decode_transfer_encoding(struct mailinfo *mi, struct strbuf *line)
+{
+       struct strbuf *ret;
+
+       switch (mi->transfer_encoding) {
+       case TE_QP:
+               ret = decode_q_segment(line, 0);
+               break;
+       case TE_BASE64:
+               ret = decode_b_segment(line);
+               break;
+       case TE_DONTCARE:
+       default:
+               return;
+       }
+       strbuf_reset(line);
+       strbuf_addbuf(line, ret);
+       strbuf_release(ret);
+       free(ret);
+}
+
+static inline int patchbreak(const struct strbuf *line)
+{
+       size_t i;
+
+       /* Beginning of a "diff -" header? */
+       if (starts_with(line->buf, "diff -"))
+               return 1;
+
+       /* CVS "Index: " line? */
+       if (starts_with(line->buf, "Index: "))
+               return 1;
+
+       /*
+        * "--- <filename>" starts patches without headers
+        * "---<sp>*" is a manual separator
+        */
+       if (line->len < 4)
+               return 0;
+
+       if (starts_with(line->buf, "---")) {
+               /* space followed by a filename? */
+               if (line->buf[3] == ' ' && !isspace(line->buf[4]))
+                       return 1;
+               /* Just whitespace? */
+               for (i = 3; i < line->len; i++) {
+                       unsigned char c = line->buf[i];
+                       if (c == '\n')
+                               return 1;
+                       if (!isspace(c))
+                               break;
+               }
+               return 0;
+       }
+       return 0;
+}
+
+static int is_scissors_line(const char *line)
+{
+       const char *c;
+       int scissors = 0, gap = 0;
+       const char *first_nonblank = NULL, *last_nonblank = NULL;
+       int visible, perforation = 0, in_perforation = 0;
+
+       for (c = line; *c; c++) {
+               if (isspace(*c)) {
+                       if (in_perforation) {
+                               perforation++;
+                               gap++;
+                       }
+                       continue;
+               }
+               last_nonblank = c;
+               if (first_nonblank == NULL)
+                       first_nonblank = c;
+               if (*c == '-') {
+                       in_perforation = 1;
+                       perforation++;
+                       continue;
+               }
+               if ((!memcmp(c, ">8", 2) || !memcmp(c, "8<", 2) ||
+                    !memcmp(c, ">%", 2) || !memcmp(c, "%<", 2))) {
+                       in_perforation = 1;
+                       perforation += 2;
+                       scissors += 2;
+                       c++;
+                       continue;
+               }
+               in_perforation = 0;
+       }
+
+       /*
+        * The mark must be at least 8 bytes long (e.g. "-- >8 --").
+        * Even though there can be arbitrary cruft on the same line
+        * (e.g. "cut here"), in order to avoid misidentification, the
+        * perforation must occupy more than a third of the visible
+        * width of the line, and dashes and scissors must occupy more
+        * than half of the perforation.
+        */
+
+       if (first_nonblank && last_nonblank)
+               visible = last_nonblank - first_nonblank + 1;
+       else
+               visible = 0;
+       return (scissors && 8 <= visible &&
+               visible < perforation * 3 &&
+               gap * 2 < perforation);
+}
+
+static void flush_inbody_header_accum(struct mailinfo *mi)
+{
+       if (!mi->inbody_header_accum.len)
+               return;
+       if (!check_header(mi, &mi->inbody_header_accum, mi->s_hdr_data, 0))
+               BUG("inbody_header_accum, if not empty, must always contain a valid in-body header");
+       strbuf_reset(&mi->inbody_header_accum);
+}
+
+static int check_inbody_header(struct mailinfo *mi, const struct strbuf *line)
+{
+       if (mi->inbody_header_accum.len &&
+           (line->buf[0] == ' ' || line->buf[0] == '\t')) {
+               if (mi->use_scissors && is_scissors_line(line->buf)) {
+                       /*
+                        * This is a scissors line; do not consider this line
+                        * as a header continuation line.
+                        */
+                       flush_inbody_header_accum(mi);
+                       return 0;
+               }
+               strbuf_strip_suffix(&mi->inbody_header_accum, "\n");
+               strbuf_addbuf(&mi->inbody_header_accum, line);
+               return 1;
+       }
+
+       flush_inbody_header_accum(mi);
+
+       if (starts_with(line->buf, ">From") && isspace(line->buf[5]))
+               return is_format_patch_separator(line->buf + 1, line->len - 1);
+       if (starts_with(line->buf, "[PATCH]") && isspace(line->buf[7])) {
+               int i;
+               for (i = 0; header[i]; i++)
+                       if (!strcmp("Subject", header[i])) {
+                               handle_header(&mi->s_hdr_data[i], line);
+                               return 1;
+                       }
+               return 0;
+       }
+       if (is_inbody_header(mi, line)) {
+               strbuf_addbuf(&mi->inbody_header_accum, line);
+               return 1;
+       }
+       return 0;
+}
+
+static int handle_commit_msg(struct mailinfo *mi, struct strbuf *line)
+{
+       assert(!mi->filter_stage);
+
+       if (mi->header_stage) {
+               if (!line->len || (line->len == 1 && line->buf[0] == '\n')) {
+                       if (mi->inbody_header_accum.len) {
+                               flush_inbody_header_accum(mi);
+                               mi->header_stage = 0;
+                       }
+                       return 0;
+               }
+       }
+
+       if (mi->use_inbody_headers && mi->header_stage) {
+               mi->header_stage = check_inbody_header(mi, line);
+               if (mi->header_stage)
+                       return 0;
+       } else
+               /* Only trim the first (blank) line of the commit message
+                * when ignoring in-body headers.
+                */
+               mi->header_stage = 0;
+
+       /* normalize the log message to UTF-8. */
+       if (convert_to_utf8(mi, line, mi->charset.buf))
+               return 0; /* mi->input_error already set */
+
+       if (mi->use_scissors && is_scissors_line(line->buf)) {
+               int i;
+
+               strbuf_setlen(&mi->log_message, 0);
+               mi->header_stage = 1;
+
+               /*
+                * We may have already read "secondary headers"; purge
+                * them to give ourselves a clean restart.
+                */
+               for (i = 0; header[i]; i++) {
+                       if (mi->s_hdr_data[i])
+                               strbuf_release(mi->s_hdr_data[i]);
+                       mi->s_hdr_data[i] = NULL;
+               }
+               return 0;
+       }
+
+       if (patchbreak(line)) {
+               if (mi->message_id)
+                       strbuf_addf(&mi->log_message,
+                                   "Message-Id: %s\n", mi->message_id);
+               return 1;
+       }
+
+       strbuf_addbuf(&mi->log_message, line);
+       return 0;
+}
+
+static void handle_patch(struct mailinfo *mi, const struct strbuf *line)
+{
+       fwrite(line->buf, 1, line->len, mi->patchfile);
+       mi->patch_lines++;
+}
+
+static void handle_filter(struct mailinfo *mi, struct strbuf *line)
+{
+       switch (mi->filter_stage) {
+       case 0:
+               if (!handle_commit_msg(mi, line))
+                       break;
+               mi->filter_stage++;
+               /* fallthrough */
+       case 1:
+               handle_patch(mi, line);
+               break;
+       }
+}
+
+static int is_rfc2822_header(const struct strbuf *line)
+{
+       /*
+        * The section that defines the loosest possible
+        * field name is "3.6.8 Optional fields".
+        *
+        * optional-field = field-name ":" unstructured CRLF
+        * field-name = 1*ftext
+        * ftext = %d33-57 / %59-126
+        */
+       int ch;
+       char *cp = line->buf;
+
+       /* Count mbox From headers as headers */
+       if (starts_with(cp, "From ") || starts_with(cp, ">From "))
+               return 1;
+
+       while ((ch = *cp++)) {
+               if (ch == ':')
+                       return 1;
+               if ((33 <= ch && ch <= 57) ||
+                   (59 <= ch && ch <= 126))
+                       continue;
+               break;
+       }
+       return 0;
+}
+
+static int read_one_header_line(struct strbuf *line, FILE *in)
+{
+       struct strbuf continuation = STRBUF_INIT;
+
+       /* Get the first part of the line. */
+       if (strbuf_getline_lf(line, in))
+               return 0;
+
+       /*
+        * Is it an empty line or not a valid rfc2822 header?
+        * If so, stop here, and return false ("not a header")
+        */
+       strbuf_rtrim(line);
+       if (!line->len || !is_rfc2822_header(line)) {
+               /* Re-add the newline */
+               strbuf_addch(line, '\n');
+               return 0;
+       }
+
+       /*
+        * Now we need to eat all the continuation lines..
+        * Yuck, 2822 header "folding"
+        */
+       for (;;) {
+               int peek;
+
+               peek = fgetc(in);
+               if (peek == EOF)
+                       break;
+               ungetc(peek, in);
+               if (peek != ' ' && peek != '\t')
+                       break;
+               if (strbuf_getline_lf(&continuation, in))
+                       break;
+               continuation.buf[0] = ' ';
+               strbuf_rtrim(&continuation);
+               strbuf_addbuf(line, &continuation);
+       }
+       strbuf_release(&continuation);
+
+       return 1;
+}
+
+static int find_boundary(struct mailinfo *mi, struct strbuf *line)
+{
+       while (!strbuf_getline_lf(line, mi->input)) {
+               if (*(mi->content_top) && is_multipart_boundary(mi, line))
+                       return 1;
+       }
+       return 0;
+}
+
+static int handle_boundary(struct mailinfo *mi, struct strbuf *line)
+{
+       struct strbuf newline = STRBUF_INIT;
+
+       strbuf_addch(&newline, '\n');
+again:
+       if (line->len >= (*(mi->content_top))->len + 2 &&
+           !memcmp(line->buf + (*(mi->content_top))->len, "--", 2)) {
+               /* we hit an end boundary */
+               /* pop the current boundary off the stack */
+               strbuf_release(*(mi->content_top));
+               FREE_AND_NULL(*(mi->content_top));
+
+               /* technically won't happen as is_multipart_boundary()
+                  will fail first.  But just in case..
+                */
+               if (--mi->content_top < mi->content) {
+                       error("Detected mismatched boundaries, can't recover");
+                       mi->input_error = -1;
+                       mi->content_top = mi->content;
+                       strbuf_release(&newline);
+                       return 0;
+               }
+               handle_filter(mi, &newline);
+               strbuf_release(&newline);
+               if (mi->input_error)
+                       return 0;
+
+               /* skip to the next boundary */
+               if (!find_boundary(mi, line))
+                       return 0;
+               goto again;
+       }
+
+       /* set some defaults */
+       mi->transfer_encoding = TE_DONTCARE;
+       strbuf_reset(&mi->charset);
+
+       /* slurp in this section's info */
+       while (read_one_header_line(line, mi->input))
+               check_header(mi, line, mi->p_hdr_data, 0);
+
+       strbuf_release(&newline);
+       /* replenish line */
+       if (strbuf_getline_lf(line, mi->input))
+               return 0;
+       strbuf_addch(line, '\n');
+       return 1;
+}
+
+static void handle_filter_flowed(struct mailinfo *mi, struct strbuf *line,
+                                struct strbuf *prev)
+{
+       size_t len = line->len;
+       const char *rest;
+
+       if (!mi->format_flowed) {
+               handle_filter(mi, line);
+               return;
+       }
+
+       if (line->buf[len - 1] == '\n') {
+               len--;
+               if (len && line->buf[len - 1] == '\r')
+                       len--;
+       }
+
+       /* Keep signature separator as-is. */
+       if (skip_prefix(line->buf, "-- ", &rest) && rest - line->buf == len) {
+               if (prev->len) {
+                       handle_filter(mi, prev);
+                       strbuf_reset(prev);
+               }
+               handle_filter(mi, line);
+               return;
+       }
+
+       /* Unstuff space-stuffed line. */
+       if (len && line->buf[0] == ' ') {
+               strbuf_remove(line, 0, 1);
+               len--;
+       }
+
+       /* Save flowed line for later, but without the soft line break. */
+       if (len && line->buf[len - 1] == ' ') {
+               strbuf_add(prev, line->buf, len - !!mi->delsp);
+               return;
+       }
+
+       /* Prepend any previous partial lines */
+       strbuf_insert(line, 0, prev->buf, prev->len);
+       strbuf_reset(prev);
+
+       handle_filter(mi, line);
+}
+
+static void handle_body(struct mailinfo *mi, struct strbuf *line)
+{
+       struct strbuf prev = STRBUF_INIT;
+
+       /* Skip up to the first boundary */
+       if (*(mi->content_top)) {
+               if (!find_boundary(mi, line))
+                       goto handle_body_out;
+       }
+
+       do {
+               /* process any boundary lines */
+               if (*(mi->content_top) && is_multipart_boundary(mi, line)) {
+                       /* flush any leftover */
+                       if (prev.len) {
+                               handle_filter(mi, &prev);
+                               strbuf_reset(&prev);
+                       }
+                       if (!handle_boundary(mi, line))
+                               goto handle_body_out;
+               }
+
+               /* Unwrap transfer encoding */
+               decode_transfer_encoding(mi, line);
+
+               switch (mi->transfer_encoding) {
+               case TE_BASE64:
+               case TE_QP:
+               {
+                       struct strbuf **lines, **it, *sb;
+
+                       /* Prepend any previous partial lines */
+                       strbuf_insert(line, 0, prev.buf, prev.len);
+                       strbuf_reset(&prev);
+
+                       /*
+                        * This is a decoded line that may contain
+                        * multiple new lines.  Pass only one chunk
+                        * at a time to handle_filter()
+                        */
+                       lines = strbuf_split(line, '\n');
+                       for (it = lines; (sb = *it); it++) {
+                               if (*(it + 1) == NULL) /* The last line */
+                                       if (sb->buf[sb->len - 1] != '\n') {
+                                               /* Partial line, save it for later. */
+                                               strbuf_addbuf(&prev, sb);
+                                               break;
+                                       }
+                               handle_filter_flowed(mi, sb, &prev);
+                       }
+                       /*
+                        * The partial chunk is saved in "prev" and will be
+                        * prepended to the next line read by strbuf_getwholeline().
+                        */
+                       strbuf_list_free(lines);
+                       break;
+               }
+               default:
+                       handle_filter_flowed(mi, line, &prev);
+               }
+
+               if (mi->input_error)
+                       break;
+       } while (!strbuf_getwholeline(line, mi->input, '\n'));
+
+       if (prev.len)
+               handle_filter(mi, &prev);
+
+       flush_inbody_header_accum(mi);
+
+handle_body_out:
+       strbuf_release(&prev);
+}
+
+static void output_header_lines(FILE *fout, const char *hdr, const struct strbuf *data)
+{
+       const char *sp = data->buf;
+       while (1) {
+               char *ep = strchr(sp, '\n');
+               int len;
+               if (!ep)
+                       len = strlen(sp);
+               else
+                       len = ep - sp;
+               fprintf(fout, "%s: %.*s\n", hdr, len, sp);
+               if (!ep)
+                       break;
+               sp = ep + 1;
+       }
+}
+
+static void handle_info(struct mailinfo *mi)
+{
+       struct strbuf *hdr;
+       int i;
+
+       for (i = 0; header[i]; i++) {
+               /* only print inbody headers if we output a patch file */
+               if (mi->patch_lines && mi->s_hdr_data[i])
+                       hdr = mi->s_hdr_data[i];
+               else if (mi->p_hdr_data[i])
+                       hdr = mi->p_hdr_data[i];
+               else
+                       continue;
+
+               if (!strcmp(header[i], "Subject")) {
+                       if (!mi->keep_subject) {
+                               cleanup_subject(mi, hdr);
+                               cleanup_space(hdr);
+                       }
+                       output_header_lines(mi->output, "Subject", hdr);
+               } else if (!strcmp(header[i], "From")) {
+                       cleanup_space(hdr);
+                       handle_from(mi, hdr);
+                       fprintf(mi->output, "Author: %s\n", mi->name.buf);
+                       fprintf(mi->output, "Email: %s\n", mi->email.buf);
+               } else {
+                       cleanup_space(hdr);
+                       fprintf(mi->output, "%s: %s\n", header[i], hdr->buf);
+               }
+       }
+       fprintf(mi->output, "\n");
+}
+
+int mailinfo(struct mailinfo *mi, const char *msg, const char *patch)
+{
+       FILE *cmitmsg;
+       int peek;
+       struct strbuf line = STRBUF_INIT;
+
+       cmitmsg = fopen(msg, "w");
+       if (!cmitmsg) {
+               perror(msg);
+               return -1;
+       }
+       mi->patchfile = fopen(patch, "w");
+       if (!mi->patchfile) {
+               perror(patch);
+               fclose(cmitmsg);
+               return -1;
+       }
+
+       mi->p_hdr_data = xcalloc(MAX_HDR_PARSED, sizeof(*(mi->p_hdr_data)));
+       mi->s_hdr_data = xcalloc(MAX_HDR_PARSED, sizeof(*(mi->s_hdr_data)));
+
+       do {
+               peek = fgetc(mi->input);
+               if (peek == EOF) {
+                       fclose(cmitmsg);
+                       return error("empty patch: '%s'", patch);
+               }
+       } while (isspace(peek));
+       ungetc(peek, mi->input);
+
+       /* process the email header */
+       while (read_one_header_line(&line, mi->input))
+               check_header(mi, &line, mi->p_hdr_data, 1);
+
+       handle_body(mi, &line);
+       fwrite(mi->log_message.buf, 1, mi->log_message.len, cmitmsg);
+       fclose(cmitmsg);
+       fclose(mi->patchfile);
+
+       handle_info(mi);
+       strbuf_release(&line);
+       return mi->input_error;
+}
+
+static int git_mailinfo_config(const char *var, const char *value, void *mi_)
+{
+       struct mailinfo *mi = mi_;
+
+       if (!starts_with(var, "mailinfo."))
+               return git_default_config(var, value, NULL);
+       if (!strcmp(var, "mailinfo.scissors")) {
+               mi->use_scissors = git_config_bool(var, value);
+               return 0;
+       }
+       /* perhaps others here */
+       return 0;
+}
+
+void setup_mailinfo(struct mailinfo *mi)
+{
+       memset(mi, 0, sizeof(*mi));
+       strbuf_init(&mi->name, 0);
+       strbuf_init(&mi->email, 0);
+       strbuf_init(&mi->charset, 0);
+       strbuf_init(&mi->log_message, 0);
+       strbuf_init(&mi->inbody_header_accum, 0);
+       mi->header_stage = 1;
+       mi->use_inbody_headers = 1;
+       mi->content_top = mi->content;
+       git_config(git_mailinfo_config, mi);
+}
+
+void clear_mailinfo(struct mailinfo *mi)
+{
+       int i;
+
+       strbuf_release(&mi->name);
+       strbuf_release(&mi->email);
+       strbuf_release(&mi->charset);
+       strbuf_release(&mi->inbody_header_accum);
+       free(mi->message_id);
+
+       if (mi->p_hdr_data)
+               for (i = 0; mi->p_hdr_data[i]; i++)
+                       strbuf_release(mi->p_hdr_data[i]);
+       free(mi->p_hdr_data);
+       if (mi->s_hdr_data)
+               for (i = 0; mi->s_hdr_data[i]; i++)
+                       strbuf_release(mi->s_hdr_data[i]);
+       free(mi->s_hdr_data);
+
+       while (mi->content < mi->content_top) {
+               free(*(mi->content_top));
+               mi->content_top--;
+       }
+
+       strbuf_release(&mi->log_message);
+}
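
The handle_filter_flowed() code above follows the RFC 3676 reassembly rules: a
leading space is stuffing and is dropped, a line whose last character before
the newline is a space is a soft break and is joined with the following line
(with delsp=yes the marker space itself is deleted), and the "-- " signature
separator is flushed and passed through untouched.  A minimal standalone
sketch of those joining rules (unflow() is illustrative only and not part of
the patch) could look like this:

#include <stdio.h>
#include <string.h>

/*
 * Minimal sketch of the RFC 3676 joining rules applied by
 * handle_filter_flowed(); for illustration only.
 */
static void unflow(const char **lines, int n, int delsp)
{
        char out[1024] = "";
        int i;

        for (i = 0; i < n; i++) {
                const char *p = lines[i];
                size_t len = strlen(p);

                if (!strcmp(p, "-- ")) {
                        /* signature separator: flush and keep as-is */
                        if (*out) {
                                printf("%s\n", out);
                                out[0] = '\0';
                        }
                        printf("%s\n", p);
                        continue;
                }
                if (len && *p == ' ') {
                        /* drop space-stuffing */
                        p++;
                        len--;
                }
                if (len && p[len - 1] == ' ') {
                        /* soft break: join with the next line */
                        strncat(out, p, delsp ? len - 1 : len);
                        continue;
                }
                /* hard break: flush the reassembled paragraph */
                strcat(out, p);
                printf("%s\n", out);
                out[0] = '\0';
        }
}

int main(void)
{
        const char *flowed[] = { "This is a ", "flowed ", "paragraph.", "-- ", "A U Thor" };
        const char *split[] = { "Lor ", "em ", "ipsum" };

        unflow(flowed, 5, 0);   /* "This is a flowed paragraph." + signature */
        unflow(split, 3, 1);    /* delsp=yes deletes the marker space: "Loremipsum" */
        return 0;
}

In the real code the pending chunk lives in the "prev" strbuf threaded through
handle_body(), which is why prev is flushed before a signature separator or a
hard-broken line is handed to handle_filter().
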
diff --git a/t/t4256/1/mailinfo.c.orig b/t/t4256/1/mailinfo.c.orig
new file mode 100644 (file)
index 0000000..3281a37
--- /dev/null
@@ -0,0 +1,1185 @@
+#include "cache.h"
+#include "config.h"
+#include "utf8.h"
+#include "strbuf.h"
+#include "mailinfo.h"
+
+static void cleanup_space(struct strbuf *sb)
+{
+       size_t pos, cnt;
+       for (pos = 0; pos < sb->len; pos++) {
+               if (isspace(sb->buf[pos])) {
+                       sb->buf[pos] = ' ';
+                       for (cnt = 0; isspace(sb->buf[pos + cnt + 1]); cnt++);
+                       strbuf_remove(sb, pos + 1, cnt);
+               }
+       }
+}
+
+static void get_sane_name(struct strbuf *out, struct strbuf *name, struct strbuf *email)
+{
+       struct strbuf *src = name;
+       if (name->len < 3 || 60 < name->len || strchr(name->buf, '@') ||
+               strchr(name->buf, '<') || strchr(name->buf, '>'))
+               src = email;
+       else if (name == out)
+               return;
+       strbuf_reset(out);
+       strbuf_addbuf(out, src);
+}
+
+static void parse_bogus_from(struct mailinfo *mi, const struct strbuf *line)
+{
+       /* John Doe <johndoe> */
+
+       char *bra, *ket;
+       /* This is a fallback, so do not bother if we already have an
+        * e-mail address.
+        */
+       if (mi->email.len)
+               return;
+
+       bra = strchr(line->buf, '<');
+       if (!bra)
+               return;
+       ket = strchr(bra, '>');
+       if (!ket)
+               return;
+
+       strbuf_reset(&mi->email);
+       strbuf_add(&mi->email, bra + 1, ket - bra - 1);
+
+       strbuf_reset(&mi->name);
+       strbuf_add(&mi->name, line->buf, bra - line->buf);
+       strbuf_trim(&mi->name);
+       get_sane_name(&mi->name, &mi->name, &mi->email);
+}
+
+static const char *unquote_comment(struct strbuf *outbuf, const char *in)
+{
+       int c;
+       int take_next_literally = 0;
+
+       strbuf_addch(outbuf, '(');
+
+       while ((c = *in++) != 0) {
+               if (take_next_literally == 1) {
+                       take_next_literally = 0;
+               } else {
+                       switch (c) {
+                       case '\\':
+                               take_next_literally = 1;
+                               continue;
+                       case '(':
+                               in = unquote_comment(outbuf, in);
+                               continue;
+                       case ')':
+                               strbuf_addch(outbuf, ')');
+                               return in;
+                       }
+               }
+
+               strbuf_addch(outbuf, c);
+       }
+
+       return in;
+}
+
+static const char *unquote_quoted_string(struct strbuf *outbuf, const char *in)
+{
+       int c;
+       int take_next_literally = 0;
+
+       while ((c = *in++) != 0) {
+               if (take_next_literally == 1) {
+                       take_next_literally = 0;
+               } else {
+                       switch (c) {
+                       case '\\':
+                               take_next_literally = 1;
+                               continue;
+                       case '"':
+                               return in;
+                       }
+               }
+
+               strbuf_addch(outbuf, c);
+       }
+
+       return in;
+}
+
+static void unquote_quoted_pair(struct strbuf *line)
+{
+       struct strbuf outbuf;
+       const char *in = line->buf;
+       int c;
+
+       strbuf_init(&outbuf, line->len);
+
+       while ((c = *in++) != 0) {
+               switch (c) {
+               case '"':
+                       in = unquote_quoted_string(&outbuf, in);
+                       continue;
+               case '(':
+                       in = unquote_comment(&outbuf, in);
+                       continue;
+               }
+
+               strbuf_addch(&outbuf, c);
+       }
+
+       strbuf_swap(&outbuf, line);
+       strbuf_release(&outbuf);
+
+}
+
+static void handle_from(struct mailinfo *mi, const struct strbuf *from)
+{
+       char *at;
+       size_t el;
+       struct strbuf f;
+
+       strbuf_init(&f, from->len);
+       strbuf_addbuf(&f, from);
+
+       unquote_quoted_pair(&f);
+
+       at = strchr(f.buf, '@');
+       if (!at) {
+               parse_bogus_from(mi, from);
+               goto out;
+       }
+
+       /*
+        * If we already have one email, don't take any confusing lines
+        */
+       if (mi->email.len && strchr(at + 1, '@'))
+               goto out;
+
+       /* Pick up the string around '@', possibly delimited with <>
+        * pair; that is the email part.
+        */
+       while (at > f.buf) {
+               char c = at[-1];
+               if (isspace(c))
+                       break;
+               if (c == '<') {
+                       at[-1] = ' ';
+                       break;
+               }
+               at--;
+       }
+       el = strcspn(at, " \n\t\r\v\f>");
+       strbuf_reset(&mi->email);
+       strbuf_add(&mi->email, at, el);
+       strbuf_remove(&f, at - f.buf, el + (at[el] ? 1 : 0));
+
+       /* The remainder is name.  It could be
+        *
+        * - "John Doe <john.doe@xz>"                   (a), or
+        * - "john.doe@xz (John Doe)"                   (b), or
+        * - "John (zzz) Doe <john.doe@xz> (Comment)"   (c)
+        *
+        * but we have removed the email part, so
+        *
+        * - remove extra spaces which could stay after email (case 'c'), and
+        * - trim from both ends, possibly removing the () pair at the end
+        *   (cases 'a' and 'b').
+        */
+       cleanup_space(&f);
+       strbuf_trim(&f);
+       if (f.buf[0] == '(' && f.len && f.buf[f.len - 1] == ')') {
+               strbuf_remove(&f, 0, 1);
+               strbuf_setlen(&f, f.len - 1);
+       }
+
+       get_sane_name(&mi->name, &f, &mi->email);
+out:
+       strbuf_release(&f);
+}
+
+static void handle_header(struct strbuf **out, const struct strbuf *line)
+{
+       if (!*out) {
+               *out = xmalloc(sizeof(struct strbuf));
+               strbuf_init(*out, line->len);
+       } else
+               strbuf_reset(*out);
+
+       strbuf_addbuf(*out, line);
+}
+
+/* NOTE NOTE NOTE.  We do not claim we do full MIME.  We just attempt
+ * to have enough heuristics to grok MIME encoded patches often found
+ * on our mailing lists.  For example, we do not even treat header lines
+ * case insensitively.
+ */
+
+static int slurp_attr(const char *line, const char *name, struct strbuf *attr)
+{
+       const char *ends, *ap = strcasestr(line, name);
+       size_t sz;
+
+       strbuf_setlen(attr, 0);
+       if (!ap)
+               return 0;
+       ap += strlen(name);
+       if (*ap == '"') {
+               ap++;
+               ends = "\"";
+       }
+       else
+               ends = "; \t";
+       sz = strcspn(ap, ends);
+       strbuf_add(attr, ap, sz);
+       return 1;
+}
+
+static void handle_content_type(struct mailinfo *mi, struct strbuf *line)
+{
+       struct strbuf *boundary = xmalloc(sizeof(struct strbuf));
+       strbuf_init(boundary, line->len);
+
+       if (slurp_attr(line->buf, "boundary=", boundary)) {
+               strbuf_insert(boundary, 0, "--", 2);
+               if (++mi->content_top >= &mi->content[MAX_BOUNDARIES]) {
+                       error("Too many boundaries to handle");
+                       mi->input_error = -1;
+                       mi->content_top = &mi->content[MAX_BOUNDARIES] - 1;
+                       return;
+               }
+               *(mi->content_top) = boundary;
+               boundary = NULL;
+       }
+       slurp_attr(line->buf, "charset=", &mi->charset);
+
+       if (boundary) {
+               strbuf_release(boundary);
+               free(boundary);
+       }
+}
+
+static void handle_content_transfer_encoding(struct mailinfo *mi,
+                                            const struct strbuf *line)
+{
+       if (strcasestr(line->buf, "base64"))
+               mi->transfer_encoding = TE_BASE64;
+       else if (strcasestr(line->buf, "quoted-printable"))
+               mi->transfer_encoding = TE_QP;
+       else
+               mi->transfer_encoding = TE_DONTCARE;
+}
+
+static int is_multipart_boundary(struct mailinfo *mi, const struct strbuf *line)
+{
+       struct strbuf *content_top = *(mi->content_top);
+
+       return ((content_top->len <= line->len) &&
+               !memcmp(line->buf, content_top->buf, content_top->len));
+}
+
+static void cleanup_subject(struct mailinfo *mi, struct strbuf *subject)
+{
+       size_t at = 0;
+
+       while (at < subject->len) {
+               char *pos;
+               size_t remove;
+
+               switch (subject->buf[at]) {
+               case 'r': case 'R':
+                       if (subject->len <= at + 3)
+                               break;
+                       if ((subject->buf[at + 1] == 'e' ||
+                            subject->buf[at + 1] == 'E') &&
+                           subject->buf[at + 2] == ':') {
+                               strbuf_remove(subject, at, 3);
+                               continue;
+                       }
+                       at++;
+                       break;
+               case ' ': case '\t': case ':':
+                       strbuf_remove(subject, at, 1);
+                       continue;
+               case '[':
+                       pos = strchr(subject->buf + at, ']');
+                       if (!pos)
+                               break;
+                       remove = pos - subject->buf + at + 1;
+                       if (!mi->keep_non_patch_brackets_in_subject ||
+                           (7 <= remove &&
+                            memmem(subject->buf + at, remove, "PATCH", 5)))
+                               strbuf_remove(subject, at, remove);
+                       else {
+                               at += remove;
+                               /*
+                                * If the input had a space after the ], keep
+                                * it.  We don't bother with finding the end of
+                                * the space, since we later normalize it
+                                * anyway.
+                                */
+                               if (isspace(subject->buf[at]))
+                                       at += 1;
+                       }
+                       continue;
+               }
+               break;
+       }
+       strbuf_trim(subject);
+}
+
+#define MAX_HDR_PARSED 10
+static const char *header[MAX_HDR_PARSED] = {
+       "From","Subject","Date",
+};
+
+static inline int cmp_header(const struct strbuf *line, const char *hdr)
+{
+       int len = strlen(hdr);
+       return !strncasecmp(line->buf, hdr, len) && line->len > len &&
+                       line->buf[len] == ':' && isspace(line->buf[len + 1]);
+}
+
+static int is_format_patch_separator(const char *line, int len)
+{
+       static const char SAMPLE[] =
+               "From e6807f3efca28b30decfecb1732a56c7db1137ee Mon Sep 17 00:00:00 2001\n";
+       const char *cp;
+
+       if (len != strlen(SAMPLE))
+               return 0;
+       if (!skip_prefix(line, "From ", &cp))
+               return 0;
+       if (strspn(cp, "0123456789abcdef") != 40)
+               return 0;
+       cp += 40;
+       return !memcmp(SAMPLE + (cp - line), cp, strlen(SAMPLE) - (cp - line));
+}
+
+static struct strbuf *decode_q_segment(const struct strbuf *q_seg, int rfc2047)
+{
+       const char *in = q_seg->buf;
+       int c;
+       struct strbuf *out = xmalloc(sizeof(struct strbuf));
+       strbuf_init(out, q_seg->len);
+
+       while ((c = *in++) != 0) {
+               if (c == '=') {
+                       int ch, d = *in;
+                       if (d == '\n' || !d)
+                               break; /* drop trailing newline */
+                       ch = hex2chr(in);
+                       if (ch >= 0) {
+                               strbuf_addch(out, ch);
+                               in += 2;
+                               continue;
+                       }
+                       /* garbage -- fall through */
+               }
+               if (rfc2047 && c == '_') /* rfc2047 4.2 (2) */
+                       c = 0x20;
+               strbuf_addch(out, c);
+       }
+       return out;
+}
+
+static struct strbuf *decode_b_segment(const struct strbuf *b_seg)
+{
+       /* Decode the base64 text in b_seg into a newly allocated strbuf */
+       int c, pos = 0, acc = 0;
+       const char *in = b_seg->buf;
+       struct strbuf *out = xmalloc(sizeof(struct strbuf));
+       strbuf_init(out, b_seg->len);
+
+       while ((c = *in++) != 0) {
+               if (c == '+')
+                       c = 62;
+               else if (c == '/')
+                       c = 63;
+               else if ('A' <= c && c <= 'Z')
+                       c -= 'A';
+               else if ('a' <= c && c <= 'z')
+                       c -= 'a' - 26;
+               else if ('0' <= c && c <= '9')
+                       c -= '0' - 52;
+               else
+                       continue; /* garbage */
+               switch (pos++) {
+               case 0:
+                       acc = (c << 2);
+                       break;
+               case 1:
+                       strbuf_addch(out, (acc | (c >> 4)));
+                       acc = (c & 15) << 4;
+                       break;
+               case 2:
+                       strbuf_addch(out, (acc | (c >> 2)));
+                       acc = (c & 3) << 6;
+                       break;
+               case 3:
+                       strbuf_addch(out, (acc | c));
+                       acc = pos = 0;
+                       break;
+               }
+       }
+       return out;
+}
+
+static int convert_to_utf8(struct mailinfo *mi,
+                          struct strbuf *line, const char *charset)
+{
+       char *out;
+
+       if (!mi->metainfo_charset || !charset || !*charset)
+               return 0;
+
+       if (same_encoding(mi->metainfo_charset, charset))
+               return 0;
+       out = reencode_string(line->buf, mi->metainfo_charset, charset);
+       if (!out) {
+               mi->input_error = -1;
+               return error("cannot convert from %s to %s",
+                            charset, mi->metainfo_charset);
+       }
+       strbuf_attach(line, out, strlen(out), strlen(out));
+       return 0;
+}
+
+static void decode_header(struct mailinfo *mi, struct strbuf *it)
+{
+       char *in, *ep, *cp;
+       struct strbuf outbuf = STRBUF_INIT, *dec;
+       struct strbuf charset_q = STRBUF_INIT, piecebuf = STRBUF_INIT;
+       int found_error = 1; /* pessimism */
+
+       in = it->buf;
+       while (in - it->buf <= it->len && (ep = strstr(in, "=?")) != NULL) {
+               int encoding;
+               strbuf_reset(&charset_q);
+               strbuf_reset(&piecebuf);
+
+               if (in != ep) {
+                       /*
+                        * We are about to process an encoded-word
+                        * that begins at ep, but there is something
+                        * before the encoded word.
+                        */
+                       char *scan;
+                       for (scan = in; scan < ep; scan++)
+                               if (!isspace(*scan))
+                                       break;
+
+                       if (scan != ep || in == it->buf) {
+                               /*
+                                * We should not lose that "something",
+                                * unless we have just processed an
+                                * encoded-word, and there is only LWS
+                                * before the one we are about to process.
+                                */
+                               strbuf_add(&outbuf, in, ep - in);
+                       }
+               }
+               /* E.g.
+                * ep : "=?iso-2022-jp?B?GyR...?= foo"
+                * ep : "=?ISO-8859-1?Q?Foo=FCbar?= baz"
+                */
+               ep += 2;
+
+               if (ep - it->buf >= it->len || !(cp = strchr(ep, '?')))
+                       goto release_return;
+
+               if (cp + 3 - it->buf > it->len)
+                       goto release_return;
+               strbuf_add(&charset_q, ep, cp - ep);
+
+               encoding = cp[1];
+               if (!encoding || cp[2] != '?')
+                       goto release_return;
+               ep = strstr(cp + 3, "?=");
+               if (!ep)
+                       goto release_return;
+               strbuf_add(&piecebuf, cp + 3, ep - cp - 3);
+               switch (tolower(encoding)) {
+               default:
+                       goto release_return;
+               case 'b':
+                       dec = decode_b_segment(&piecebuf);
+                       break;
+               case 'q':
+                       dec = decode_q_segment(&piecebuf, 1);
+                       break;
+               }
+               if (convert_to_utf8(mi, dec, charset_q.buf))
+                       goto release_return;
+
+               strbuf_addbuf(&outbuf, dec);
+               strbuf_release(dec);
+               free(dec);
+               in = ep + 2;
+       }
+       strbuf_addstr(&outbuf, in);
+       strbuf_reset(it);
+       strbuf_addbuf(it, &outbuf);
+       found_error = 0;
+release_return:
+       strbuf_release(&outbuf);
+       strbuf_release(&charset_q);
+       strbuf_release(&piecebuf);
+
+       if (found_error)
+               mi->input_error = -1;
+}
+
+static int check_header(struct mailinfo *mi,
+                       const struct strbuf *line,
+                       struct strbuf *hdr_data[], int overwrite)
+{
+       int i, ret = 0, len;
+       struct strbuf sb = STRBUF_INIT;
+
+       /* search for the interesting parts */
+       for (i = 0; header[i]; i++) {
+               int len = strlen(header[i]);
+               if ((!hdr_data[i] || overwrite) && cmp_header(line, header[i])) {
+                       /* Unwrap inline B and Q encoding, and optionally
+                        * normalize the meta information to utf8.
+                        */
+                       strbuf_add(&sb, line->buf + len + 2, line->len - len - 2);
+                       decode_header(mi, &sb);
+                       handle_header(&hdr_data[i], &sb);
+                       ret = 1;
+                       goto check_header_out;
+               }
+       }
+
+       /* Content stuff */
+       if (cmp_header(line, "Content-Type")) {
+               len = strlen("Content-Type: ");
+               strbuf_add(&sb, line->buf + len, line->len - len);
+               decode_header(mi, &sb);
+               strbuf_insert(&sb, 0, "Content-Type: ", len);
+               handle_content_type(mi, &sb);
+               ret = 1;
+               goto check_header_out;
+       }
+       if (cmp_header(line, "Content-Transfer-Encoding")) {
+               len = strlen("Content-Transfer-Encoding: ");
+               strbuf_add(&sb, line->buf + len, line->len - len);
+               decode_header(mi, &sb);
+               handle_content_transfer_encoding(mi, &sb);
+               ret = 1;
+               goto check_header_out;
+       }
+       if (cmp_header(line, "Message-Id")) {
+               len = strlen("Message-Id: ");
+               strbuf_add(&sb, line->buf + len, line->len - len);
+               decode_header(mi, &sb);
+               if (mi->add_message_id)
+                       mi->message_id = strbuf_detach(&sb, NULL);
+               ret = 1;
+               goto check_header_out;
+       }
+
+check_header_out:
+       strbuf_release(&sb);
+       return ret;
+}
+
+/*
+ * Returns 1 if the given line or any line beginning with the given line is an
+ * in-body header (that is, check_header will succeed when passed
+ * mi->s_hdr_data).
+ */
+static int is_inbody_header(const struct mailinfo *mi,
+                           const struct strbuf *line)
+{
+       int i;
+       for (i = 0; header[i]; i++)
+               if (!mi->s_hdr_data[i] && cmp_header(line, header[i]))
+                       return 1;
+       return 0;
+}
+
+static void decode_transfer_encoding(struct mailinfo *mi, struct strbuf *line)
+{
+       struct strbuf *ret;
+
+       switch (mi->transfer_encoding) {
+       case TE_QP:
+               ret = decode_q_segment(line, 0);
+               break;
+       case TE_BASE64:
+               ret = decode_b_segment(line);
+               break;
+       case TE_DONTCARE:
+       default:
+               return;
+       }
+       strbuf_reset(line);
+       strbuf_addbuf(line, ret);
+       strbuf_release(ret);
+       free(ret);
+}
+
+static inline int patchbreak(const struct strbuf *line)
+{
+       size_t i;
+
+       /* Beginning of a "diff -" header? */
+       if (starts_with(line->buf, "diff -"))
+               return 1;
+
+       /* CVS "Index: " line? */
+       if (starts_with(line->buf, "Index: "))
+               return 1;
+
+       /*
+        * "--- <filename>" starts patches without headers
+        * "---<sp>*" is a manual separator
+        */
+       if (line->len < 4)
+               return 0;
+
+       if (starts_with(line->buf, "---")) {
+               /* space followed by a filename? */
+               if (line->buf[3] == ' ' && !isspace(line->buf[4]))
+                       return 1;
+               /* Just whitespace? */
+               for (i = 3; i < line->len; i++) {
+                       unsigned char c = line->buf[i];
+                       if (c == '\n')
+                               return 1;
+                       if (!isspace(c))
+                               break;
+               }
+               return 0;
+       }
+       return 0;
+}
+
+static int is_scissors_line(const char *line)
+{
+       const char *c;
+       int scissors = 0, gap = 0;
+       const char *first_nonblank = NULL, *last_nonblank = NULL;
+       int visible, perforation = 0, in_perforation = 0;
+
+       for (c = line; *c; c++) {
+               if (isspace(*c)) {
+                       if (in_perforation) {
+                               perforation++;
+                               gap++;
+                       }
+                       continue;
+               }
+               last_nonblank = c;
+               if (first_nonblank == NULL)
+                       first_nonblank = c;
+               if (*c == '-') {
+                       in_perforation = 1;
+                       perforation++;
+                       continue;
+               }
+               if ((!memcmp(c, ">8", 2) || !memcmp(c, "8<", 2) ||
+                    !memcmp(c, ">%", 2) || !memcmp(c, "%<", 2))) {
+                       in_perforation = 1;
+                       perforation += 2;
+                       scissors += 2;
+                       c++;
+                       continue;
+               }
+               in_perforation = 0;
+       }
+
+       /*
+        * The mark must be at least 8 bytes long (e.g. "-- >8 --").
+        * Even though there can be arbitrary cruft on the same line
+        * (e.g. "cut here"), in order to avoid misidentification, the
+        * perforation must occupy more than a third of the visible
+        * width of the line, and dashes and scissors must occupy more
+        * than half of the perforation.
+        */
+
+       if (first_nonblank && last_nonblank)
+               visible = last_nonblank - first_nonblank + 1;
+       else
+               visible = 0;
+       return (scissors && 8 <= visible &&
+               visible < perforation * 3 &&
+               gap * 2 < perforation);
+}
+
+static void flush_inbody_header_accum(struct mailinfo *mi)
+{
+       if (!mi->inbody_header_accum.len)
+               return;
+       if (!check_header(mi, &mi->inbody_header_accum, mi->s_hdr_data, 0))
+               BUG("inbody_header_accum, if not empty, must always contain a valid in-body header");
+       strbuf_reset(&mi->inbody_header_accum);
+}
+
+static int check_inbody_header(struct mailinfo *mi, const struct strbuf *line)
+{
+       if (mi->inbody_header_accum.len &&
+           (line->buf[0] == ' ' || line->buf[0] == '\t')) {
+               if (mi->use_scissors && is_scissors_line(line->buf)) {
+                       /*
+                        * This is a scissors line; do not consider this line
+                        * as a header continuation line.
+                        */
+                       flush_inbody_header_accum(mi);
+                       return 0;
+               }
+               strbuf_strip_suffix(&mi->inbody_header_accum, "\n");
+               strbuf_addbuf(&mi->inbody_header_accum, line);
+               return 1;
+       }
+
+       flush_inbody_header_accum(mi);
+
+       if (starts_with(line->buf, ">From") && isspace(line->buf[5]))
+               return is_format_patch_separator(line->buf + 1, line->len - 1);
+       if (starts_with(line->buf, "[PATCH]") && isspace(line->buf[7])) {
+               int i;
+               for (i = 0; header[i]; i++)
+                       if (!strcmp("Subject", header[i])) {
+                               handle_header(&mi->s_hdr_data[i], line);
+                               return 1;
+                       }
+               return 0;
+       }
+       if (is_inbody_header(mi, line)) {
+               strbuf_addbuf(&mi->inbody_header_accum, line);
+               return 1;
+       }
+       return 0;
+}
+
+static int handle_commit_msg(struct mailinfo *mi, struct strbuf *line)
+{
+       assert(!mi->filter_stage);
+
+       if (mi->header_stage) {
+               if (!line->len || (line->len == 1 && line->buf[0] == '\n')) {
+                       if (mi->inbody_header_accum.len) {
+                               flush_inbody_header_accum(mi);
+                               mi->header_stage = 0;
+                       }
+                       return 0;
+               }
+       }
+
+       if (mi->use_inbody_headers && mi->header_stage) {
+               mi->header_stage = check_inbody_header(mi, line);
+               if (mi->header_stage)
+                       return 0;
+       } else
+               /* Only trim the first (blank) line of the commit message
+                * when ignoring in-body headers.
+                */
+               mi->header_stage = 0;
+
+       /* normalize the log message to UTF-8. */
+       if (convert_to_utf8(mi, line, mi->charset.buf))
+               return 0; /* mi->input_error already set */
+
+       if (mi->use_scissors && is_scissors_line(line->buf)) {
+               int i;
+
+               strbuf_setlen(&mi->log_message, 0);
+               mi->header_stage = 1;
+
+               /*
+                * We may have already read "secondary headers"; purge
+                * them to give ourselves a clean restart.
+                */
+               for (i = 0; header[i]; i++) {
+                       if (mi->s_hdr_data[i])
+                               strbuf_release(mi->s_hdr_data[i]);
+                       mi->s_hdr_data[i] = NULL;
+               }
+               return 0;
+       }
+
+       if (patchbreak(line)) {
+               if (mi->message_id)
+                       strbuf_addf(&mi->log_message,
+                                   "Message-Id: %s\n", mi->message_id);
+               return 1;
+       }
+
+       strbuf_addbuf(&mi->log_message, line);
+       return 0;
+}
+
+static void handle_patch(struct mailinfo *mi, const struct strbuf *line)
+{
+       fwrite(line->buf, 1, line->len, mi->patchfile);
+       mi->patch_lines++;
+}
+
+static void handle_filter(struct mailinfo *mi, struct strbuf *line)
+{
+       switch (mi->filter_stage) {
+       case 0:
+               if (!handle_commit_msg(mi, line))
+                       break;
+               mi->filter_stage++;
+               /* fallthrough */
+       case 1:
+               handle_patch(mi, line);
+               break;
+       }
+}
+
+static int is_rfc2822_header(const struct strbuf *line)
+{
+       /*
+        * The section that defines the loosest possible
+        * field name is "3.6.8 Optional fields".
+        *
+        * optional-field = field-name ":" unstructured CRLF
+        * field-name = 1*ftext
+        * ftext = %d33-57 / %59-126
+        */
+       int ch;
+       char *cp = line->buf;
+
+       /* Count mbox From headers as headers */
+       if (starts_with(cp, "From ") || starts_with(cp, ">From "))
+               return 1;
+
+       while ((ch = *cp++)) {
+               if (ch == ':')
+                       return 1;
+               if ((33 <= ch && ch <= 57) ||
+                   (59 <= ch && ch <= 126))
+                       continue;
+               break;
+       }
+       return 0;
+}
+
+static int read_one_header_line(struct strbuf *line, FILE *in)
+{
+       struct strbuf continuation = STRBUF_INIT;
+
+       /* Get the first part of the line. */
+       if (strbuf_getline_lf(line, in))
+               return 0;
+
+       /*
+        * Is it an empty line or not a valid rfc2822 header?
+        * If so, stop here, and return false ("not a header")
+        */
+       strbuf_rtrim(line);
+       if (!line->len || !is_rfc2822_header(line)) {
+               /* Re-add the newline */
+               strbuf_addch(line, '\n');
+               return 0;
+       }
+
+       /*
+        * Now we need to eat all the continuation lines..
+        * Yuck, 2822 header "folding"
+        */
+       for (;;) {
+               int peek;
+
+               peek = fgetc(in);
+               if (peek == EOF)
+                       break;
+               ungetc(peek, in);
+               if (peek != ' ' && peek != '\t')
+                       break;
+               if (strbuf_getline_lf(&continuation, in))
+                       break;
+               continuation.buf[0] = ' ';
+               strbuf_rtrim(&continuation);
+               strbuf_addbuf(line, &continuation);
+       }
+       strbuf_release(&continuation);
+
+       return 1;
+}
+
+static int find_boundary(struct mailinfo *mi, struct strbuf *line)
+{
+       while (!strbuf_getline_lf(line, mi->input)) {
+               if (*(mi->content_top) && is_multipart_boundary(mi, line))
+                       return 1;
+       }
+       return 0;
+}
+
+static int handle_boundary(struct mailinfo *mi, struct strbuf *line)
+{
+       struct strbuf newline = STRBUF_INIT;
+
+       strbuf_addch(&newline, '\n');
+again:
+       if (line->len >= (*(mi->content_top))->len + 2 &&
+           !memcmp(line->buf + (*(mi->content_top))->len, "--", 2)) {
+               /* we hit an end boundary */
+               /* pop the current boundary off the stack */
+               strbuf_release(*(mi->content_top));
+               FREE_AND_NULL(*(mi->content_top));
+
+               /* technically won't happen as is_multipart_boundary()
+                  will fail first.  But just in case..
+                */
+               if (--mi->content_top < mi->content) {
+                       error("Detected mismatched boundaries, can't recover");
+                       mi->input_error = -1;
+                       mi->content_top = mi->content;
+                       strbuf_release(&newline);
+                       return 0;
+               }
+               handle_filter(mi, &newline);
+               strbuf_release(&newline);
+               if (mi->input_error)
+                       return 0;
+
+               /* skip to the next boundary */
+               if (!find_boundary(mi, line))
+                       return 0;
+               goto again;
+       }
+
+       /* set some defaults */
+       mi->transfer_encoding = TE_DONTCARE;
+       strbuf_reset(&mi->charset);
+
+       /* slurp in this section's info */
+       while (read_one_header_line(line, mi->input))
+               check_header(mi, line, mi->p_hdr_data, 0);
+
+       strbuf_release(&newline);
+       /* replenish line */
+       if (strbuf_getline_lf(line, mi->input))
+               return 0;
+       strbuf_addch(line, '\n');
+       return 1;
+}
+
+static void handle_body(struct mailinfo *mi, struct strbuf *line)
+{
+       struct strbuf prev = STRBUF_INIT;
+
+       /* Skip up to the first boundary */
+       if (*(mi->content_top)) {
+               if (!find_boundary(mi, line))
+                       goto handle_body_out;
+       }
+
+       do {
+               /* process any boundary lines */
+               if (*(mi->content_top) && is_multipart_boundary(mi, line)) {
+                       /* flush any leftover */
+                       if (prev.len) {
+                               handle_filter(mi, &prev);
+                               strbuf_reset(&prev);
+                       }
+                       if (!handle_boundary(mi, line))
+                               goto handle_body_out;
+               }
+
+               /* Unwrap transfer encoding */
+               decode_transfer_encoding(mi, line);
+
+               switch (mi->transfer_encoding) {
+               case TE_BASE64:
+               case TE_QP:
+               {
+                       struct strbuf **lines, **it, *sb;
+
+                       /* Prepend any previous partial lines */
+                       strbuf_insert(line, 0, prev.buf, prev.len);
+                       strbuf_reset(&prev);
+
+                       /*
+                        * This is a decoded line that may contain
+                        * multiple new lines.  Pass only one chunk
+                        * at a time to handle_filter()
+                        */
+                       lines = strbuf_split(line, '\n');
+                       for (it = lines; (sb = *it); it++) {
+                               if (*(it + 1) == NULL) /* The last line */
+                                       if (sb->buf[sb->len - 1] != '\n') {
+                                               /* Partial line, save it for later. */
+                                               strbuf_addbuf(&prev, sb);
+                                               break;
+                                       }
+                               handle_filter(mi, sb);
+                       }
+                       /*
+                        * The partial chunk is saved in "prev" and will be
+                        * prepended to the next line read by strbuf_getwholeline().
+                        */
+                       strbuf_list_free(lines);
+                       break;
+               }
+               default:
+                       handle_filter(mi, line);
+               }
+
+               if (mi->input_error)
+                       break;
+       } while (!strbuf_getwholeline(line, mi->input, '\n'));
+
+       flush_inbody_header_accum(mi);
+
+handle_body_out:
+       strbuf_release(&prev);
+}
+
+static void output_header_lines(FILE *fout, const char *hdr, const struct strbuf *data)
+{
+       const char *sp = data->buf;
+       while (1) {
+               char *ep = strchr(sp, '\n');
+               int len;
+               if (!ep)
+                       len = strlen(sp);
+               else
+                       len = ep - sp;
+               fprintf(fout, "%s: %.*s\n", hdr, len, sp);
+               if (!ep)
+                       break;
+               sp = ep + 1;
+       }
+}
+
+static void handle_info(struct mailinfo *mi)
+{
+       struct strbuf *hdr;
+       int i;
+
+       for (i = 0; header[i]; i++) {
+               /* only print inbody headers if we output a patch file */
+               if (mi->patch_lines && mi->s_hdr_data[i])
+                       hdr = mi->s_hdr_data[i];
+               else if (mi->p_hdr_data[i])
+                       hdr = mi->p_hdr_data[i];
+               else
+                       continue;
+
+               if (!strcmp(header[i], "Subject")) {
+                       if (!mi->keep_subject) {
+                               cleanup_subject(mi, hdr);
+                               cleanup_space(hdr);
+                       }
+                       output_header_lines(mi->output, "Subject", hdr);
+               } else if (!strcmp(header[i], "From")) {
+                       cleanup_space(hdr);
+                       handle_from(mi, hdr);
+                       fprintf(mi->output, "Author: %s\n", mi->name.buf);
+                       fprintf(mi->output, "Email: %s\n", mi->email.buf);
+               } else {
+                       cleanup_space(hdr);
+                       fprintf(mi->output, "%s: %s\n", header[i], hdr->buf);
+               }
+       }
+       fprintf(mi->output, "\n");
+}
+
+int mailinfo(struct mailinfo *mi, const char *msg, const char *patch)
+{
+       FILE *cmitmsg;
+       int peek;
+       struct strbuf line = STRBUF_INIT;
+
+       cmitmsg = fopen(msg, "w");
+       if (!cmitmsg) {
+               perror(msg);
+               return -1;
+       }
+       mi->patchfile = fopen(patch, "w");
+       if (!mi->patchfile) {
+               perror(patch);
+               fclose(cmitmsg);
+               return -1;
+       }
+
+       mi->p_hdr_data = xcalloc(MAX_HDR_PARSED, sizeof(*(mi->p_hdr_data)));
+       mi->s_hdr_data = xcalloc(MAX_HDR_PARSED, sizeof(*(mi->s_hdr_data)));
+
+       do {
+               peek = fgetc(mi->input);
+               if (peek == EOF) {
+                       fclose(cmitmsg);
+                       return error("empty patch: '%s'", patch);
+               }
+       } while (isspace(peek));
+       ungetc(peek, mi->input);
+
+       /* process the email header */
+       while (read_one_header_line(&line, mi->input))
+               check_header(mi, &line, mi->p_hdr_data, 1);
+
+       handle_body(mi, &line);
+       fwrite(mi->log_message.buf, 1, mi->log_message.len, cmitmsg);
+       fclose(cmitmsg);
+       fclose(mi->patchfile);
+
+       handle_info(mi);
+       strbuf_release(&line);
+       return mi->input_error;
+}
+
+static int git_mailinfo_config(const char *var, const char *value, void *mi_)
+{
+       struct mailinfo *mi = mi_;
+
+       if (!starts_with(var, "mailinfo."))
+               return git_default_config(var, value, NULL);
+       if (!strcmp(var, "mailinfo.scissors")) {
+               mi->use_scissors = git_config_bool(var, value);
+               return 0;
+       }
+       /* perhaps others here */
+       return 0;
+}
+
+void setup_mailinfo(struct mailinfo *mi)
+{
+       memset(mi, 0, sizeof(*mi));
+       strbuf_init(&mi->name, 0);
+       strbuf_init(&mi->email, 0);
+       strbuf_init(&mi->charset, 0);
+       strbuf_init(&mi->log_message, 0);
+       strbuf_init(&mi->inbody_header_accum, 0);
+       mi->header_stage = 1;
+       mi->use_inbody_headers = 1;
+       mi->content_top = mi->content;
+       git_config(git_mailinfo_config, mi);
+}
+
+void clear_mailinfo(struct mailinfo *mi)
+{
+       int i;
+
+       strbuf_release(&mi->name);
+       strbuf_release(&mi->email);
+       strbuf_release(&mi->charset);
+       strbuf_release(&mi->inbody_header_accum);
+       free(mi->message_id);
+
+       if (mi->p_hdr_data)
+               for (i = 0; mi->p_hdr_data[i]; i++)
+                       strbuf_release(mi->p_hdr_data[i]);
+       free(mi->p_hdr_data);
+       if (mi->s_hdr_data)
+               for (i = 0; mi->s_hdr_data[i]; i++)
+                       strbuf_release(mi->s_hdr_data[i]);
+       free(mi->s_hdr_data);
+
+       while (mi->content < mi->content_top) {
+               free(*(mi->content_top));
+               mi->content_top--;
+       }
+
+       strbuf_release(&mi->log_message);
+}
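
decode_header() above recognizes RFC 2047 encoded-words such as
"=?ISO-8859-1?Q?Foo=FCbar?=" and hands the payload to decode_q_segment() or
decode_b_segment().  As a small, self-contained illustration of just the "Q"
rules it applies ("=XX" hex escapes, "_" standing for a space per rfc2047
4.2 (2)), and not the quoted-printable soft-break handling used for bodies,
a hypothetical decode_q() might look like:

#include <stdio.h>

static int hexval(int c)
{
        if ('0' <= c && c <= '9')
                return c - '0';
        if ('a' <= c && c <= 'f')
                return c - 'a' + 10;
        if ('A' <= c && c <= 'F')
                return c - 'A' + 10;
        return -1;
}

/* Decode RFC 2047 "Q" text; illustrative only, not part of the patch. */
static void decode_q(const char *in, char *out)
{
        while (*in) {
                if (*in == '=' && hexval(in[1]) >= 0 && hexval(in[2]) >= 0) {
                        *out++ = (char)(hexval(in[1]) * 16 + hexval(in[2]));
                        in += 3;
                } else if (*in == '_') {
                        /* rfc2047 4.2 (2): underscore encodes a space */
                        *out++ = ' ';
                        in++;
                } else {
                        *out++ = *in++;
                }
        }
        *out = '\0';
}

int main(void)
{
        char buf[64];

        decode_q("Keld_J=F8rn_Simonsen", buf);
        printf("%s\n", buf);    /* ISO-8859-1 bytes for "Keld Jørn Simonsen" */
        return 0;
}
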
diff --git a/t/t4256/1/patch b/t/t4256/1/patch
new file mode 100644 (file)
index 0000000..bd0d8b0
--- /dev/null
@@ -0,0 +1,129 @@
+From: A <author@example.com>
+Subject: [PATCH] mailinfo: support format=flowed
+Message-ID: <aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa@example.com>
+Date: Sat, 25 Aug 2018 22:04:50 +0200
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:60.0) Gecko/20100101
+ Thunderbird/60.0
+MIME-Version: 1.0
+Content-Type: text/plain; charset=utf-8; format=flowed
+Content-Language: en-US
+Content-Transfer-Encoding: 7bit
+
+---
+  mailinfo.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
+  1 file changed, 62 insertions(+), 2 deletions(-)
+
+diff --git a/mailinfo.c b/mailinfo.c
+index 3281a37d51..b395adbdf2 100644
+--- a/mailinfo.c
++++ b/mailinfo.c
+@@ -237,11 +237,22 @@ static int slurp_attr(const char *line, const char 
+*name, struct strbuf *attr)
+       return 1;
+  }
+
++static int has_attr_value(const char *line, const char *name, const 
+char *value)
++{
++      struct strbuf sb = STRBUF_INIT;
++      int rc = slurp_attr(line, name, &sb) && !strcasecmp(sb.buf, value);
++      strbuf_release(&sb);
++      return rc;
++}
++
+  static void handle_content_type(struct mailinfo *mi, struct strbuf *line)
+  {
+       struct strbuf *boundary = xmalloc(sizeof(struct strbuf));
+       strbuf_init(boundary, line->len);
+
++      mi->format_flowed = has_attr_value(line->buf, "format=", "flowed");
++      mi->delsp = has_attr_value(line->buf, "delsp=", "yes");
++
+       if (slurp_attr(line->buf, "boundary=", boundary)) {
+               strbuf_insert(boundary, 0, "--", 2);
+               if (++mi->content_top >= &mi->content[MAX_BOUNDARIES]) {
+@@ -964,6 +975,52 @@ static int handle_boundary(struct mailinfo *mi, 
+struct strbuf *line)
+       return 1;
+  }
+
++static void handle_filter_flowed(struct mailinfo *mi, struct strbuf *line,
++                               struct strbuf *prev)
++{
++      size_t len = line->len;
++      const char *rest;
++
++      if (!mi->format_flowed) {
++              handle_filter(mi, line);
++              return;
++      }
++
++      if (line->buf[len - 1] == '\n') {
++              len--;
++              if (len && line->buf[len - 1] == '\r')
++                      len--;
++      }
++
++      /* Keep signature separator as-is. */
++      if (skip_prefix(line->buf, "-- ", &rest) && rest - line->buf == len) {
++              if (prev->len) {
++                      handle_filter(mi, prev);
++                      strbuf_reset(prev);
++              }
++              handle_filter(mi, line);
++              return;
++      }
++
++      /* Unstuff space-stuffed line. */
++      if (len && line->buf[0] == ' ') {
++              strbuf_remove(line, 0, 1);
++              len--;
++      }
++
++      /* Save flowed line for later, but without the soft line break. */
++      if (len && line->buf[len - 1] == ' ') {
++              strbuf_add(prev, line->buf, len - !!mi->delsp);
++              return;
++      }
++
++      /* Prepend any previous partial lines */
++      strbuf_insert(line, 0, prev->buf, prev->len);
++      strbuf_reset(prev);
++
++      handle_filter(mi, line);
++}
++
+  static void handle_body(struct mailinfo *mi, struct strbuf *line)
+  {
+       struct strbuf prev = STRBUF_INIT;
+@@ -1012,7 +1069,7 @@ static void handle_body(struct mailinfo *mi, 
+struct strbuf *line)
+                                               strbuf_addbuf(&prev, sb);
+                                               break;
+                                       }
+-                              handle_filter(mi, sb);
++                              handle_filter_flowed(mi, sb, &prev);
+                       }
+                       /*
+                        * The partial chunk is saved in "prev" and will be
+@@ -1022,13 +1079,16 @@ static void handle_body(struct mailinfo *mi, 
+struct strbuf *line)
+                       break;
+               }
+               default:
+-                      handle_filter(mi, line);
++                      handle_filter_flowed(mi, line, &prev);
+               }
+
+               if (mi->input_error)
+                       break;
+       } while (!strbuf_getwholeline(line, mi->input, '\n'));
+
++      if (prev.len)
++              handle_filter(mi, &prev);
++
+       flush_inbody_header_accum(mi);
+
+  handle_body_out:
+-- 
+2.18.0
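The patch text added above is itself wrapped in format=flowed style (note the trailing spaces and continuation lines in its hunk headers), and the new handle_filter_flowed() code it carries is what rejoins such soft line breaks. A minimal stand-alone sketch of that joining behavior, assuming a scratch repository and a git built with this change (the file names mail, msg and patch are arbitrary):

# The first body line ends with a space, i.e. a format=flowed soft line break.
printf 'From: A U Thor <author@example.com>\n' >mail &&
printf 'Subject: flowed demo\n' >>mail &&
printf 'Content-Type: text/plain; format=flowed\n\n' >>mail &&
printf 'This is a \nsingle flowed paragraph.\n' >>mail &&
git mailinfo msg patch <mail &&
grep 'This is a single flowed paragraph' msg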
index 3634e258f8bf66c2c1917c1598f6f21486c08684..41e6dc4dcfc5163c8db4d7875567162ea7cc1828 100755 (executable)
@@ -311,4 +311,93 @@ test_expect_success \
      test_must_fail git cat-file blob $blob_2 > /dev/null &&
      test_must_fail git cat-file blob $blob_3 > /dev/null'
 
+# \0 - empty base
+# \1 - one byte in result
+# \1 - one literal byte (X)
+test_expect_success \
+    'apply good minimal delta' \
+    'printf "\0\1\1X" > minimal_delta &&
+     test-tool delta -p /dev/null minimal_delta /dev/null'
+
+# \0 - empty base
+# \1 - 1 byte in result
+# \2 - two literal bytes (one too many)
+test_expect_success \
+    'apply delta with too many literal bytes' \
+    'printf "\0\1\2XX" > too_big_literal &&
+     test_must_fail test-tool delta -p /dev/null too_big_literal /dev/null'
+
+# \4 - four bytes in base
+# \1 - one byte in result
+# \221 - copy, one byte offset, one byte size
+#   \0 - copy from offset 0
+#   \2 - copy two bytes (one too many)
+test_expect_success \
+    'apply delta with too many copied bytes' \
+    'printf "\4\1\221\0\2" > too_big_copy &&
+     printf base >base &&
+     test_must_fail test-tool delta -p base too_big_copy /dev/null'
+
+# \0 - empty base
+# \2 - two bytes in result
+# \2 - two literal bytes (we are short one)
+test_expect_success \
+    'apply delta with too few literal bytes' \
+    'printf "\0\2\2X" > truncated_delta &&
+     test_must_fail test-tool delta -p /dev/null truncated_delta /dev/null'
+
+# \0 - empty base
+# \1 - one byte in result
+# \221 - copy, one byte offset, one byte size
+#   \0 - copy from offset 0
+#   \1 - copy one byte (we are short one)
+test_expect_success \
+    'apply delta with too few bytes in base' \
+    'printf "\0\1\221\0\1" > truncated_base &&
+     test_must_fail test-tool delta -p /dev/null truncated_base /dev/null'
+
+# \4 - four bytes in base
+# \2 - two bytes in result
+# \1 - one literal byte (X)
+# \221 - copy, one byte offset, one byte size
+#        (offset/size missing)
+#
+# Note that the literal byte is necessary to get past the uninteresting minimum
+# delta size check.
+test_expect_success \
+    'apply delta with truncated copy parameters' \
+    'printf "\4\2\1X\221" > truncated_copy_delta &&
+     printf base >base &&
+     test_must_fail test-tool delta -p base truncated_copy_delta /dev/null'
+
+# \0 - empty base
+# \1 - one byte in result
+# \1 - one literal byte (X)
+# \1 - trailing garbage command
+test_expect_success \
+    'apply delta with trailing garbage literal' \
+    'printf "\0\1\1X\1" > tail_garbage_literal &&
+     test_must_fail test-tool delta -p /dev/null tail_garbage_literal /dev/null'
+
+# \4 - four bytes in base
+# \1 - one byte in result
+# \1 - one literal byte (X)
+# \221 - copy, one byte offset, one byte size
+#   \0 - copy from offset 0
+#   \1 - copy 1 byte
+test_expect_success \
+    'apply delta with trailing garbage copy' \
+    'printf "\4\1\1X\221\0\1" > tail_garbage_copy &&
+     printf base >base &&
+     test_must_fail test-tool delta -p /dev/null tail_garbage_copy /dev/null'
+
+# \0 - empty base
+# \1 - one byte in result
+# \1 - one literal byte (X)
+# \0 - bogus opcode
+test_expect_success \
+    'apply delta with trailing garbage opcode' \
+    'printf "\0\1\1X\0" > tail_garbage_opcode &&
+     test_must_fail test-tool delta -p /dev/null tail_garbage_opcode /dev/null'
+
 test_done
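The tests above deliberately truncate or overrun each field of the delta encoding. For contrast, here is a hedged sketch of a well-formed delta that exercises both a copy and a literal opcode (assuming the t/helper test-tool is available, as it is inside the test suite):

# \4 - four bytes in base ("base")
# \5 - five bytes in result
# \221 - copy, one byte offset, one byte size
#   \0 - copy from offset 0
#   \4 - copy four bytes
# \1 - one literal byte (X)
printf "base" >base &&
printf "\4\5\221\0\4\1X" >copy_delta &&
test-tool delta -p base copy_delta result &&
test "$(cat result)" = baseX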
index ae52a1882df65a05c2916450ff6101012b12ee6d..dacb440b2750e40b393a200229a9ee968f12ccd3 100755 (executable)
@@ -24,11 +24,11 @@ test_expect_success 'check corruption' '
 '
 
 test_expect_success 'rev-list notices corruption (1)' '
-       test_must_fail git rev-list HEAD
+       test_must_fail env GIT_TEST_COMMIT_GRAPH=0 git rev-list HEAD
 '
 
 test_expect_success 'rev-list notices corruption (2)' '
-       test_must_fail git rev-list --objects HEAD
+       test_must_fail env GIT_TEST_COMMIT_GRAPH=0 git rev-list --objects HEAD
 '
 
 test_expect_success 'pack-objects notices corruption' '
index 7bff7923f2a91165b5a0b5251351798519d63270..1be3459c5b8aa331a646e3ead24f7422110cdc08 100755 (executable)
@@ -342,4 +342,97 @@ test_expect_success 'truncated bitmap fails gracefully' '
        test_i18ngrep corrupt stderr
 '
 
+# have_delta <obj> <expected_base>
+#
+# Note that because this relies on cat-file, it might find _any_ copy of an
+# object in the repository. The caller is responsible for making sure
+# there's only one (e.g., via "repack -ad", or having just fetched a copy).
+have_delta () {
+       echo $2 >expect &&
+       echo $1 | git cat-file --batch-check="%(deltabase)" >actual &&
+       test_cmp expect actual
+}
+
+# Create a state of history with these properties:
+#
+#  - refs that allow a client to fetch some new history, while sharing some old
+#    history with the server; we use branches delta-reuse-old and
+#    delta-reuse-new here
+#
+#  - the new history contains an object that is stored on the server as a delta
+#    against a base that is in the old history
+#
+#  - the base object is not immediately reachable from the tip of the old
+#    history; finding it would involve digging down through history we know the
+#    other side has
+#
+# This should result in a state where fetching from old->new would not
+# traditionally reuse the on-disk delta (because we'd have to dig to realize
+# that the client has it), but we will do so if bitmaps can tell us cheaply
+# that the other side has it.
+test_expect_success 'set up thin delta-reuse parent' '
+       # This first commit contains the buried base object.
+       test-tool genrandom delta 16384 >file &&
+       git add file &&
+       git commit -m "delta base" &&
+       base=$(git rev-parse --verify HEAD:file) &&
+
+       # These intermediate commits bury the base back in history.
+       # This becomes the "old" state.
+       for i in 1 2 3 4 5
+       do
+               echo $i >file &&
+               git commit -am "intermediate $i" || return 1
+       done &&
+       git branch delta-reuse-old &&
+
+       # And now our new history has a delta against the buried base. Note
+       # that this must be smaller than the original file, since pack-objects
+       # prefers to create deltas from smaller objects to larger.
+       test-tool genrandom delta 16300 >file &&
+       git commit -am "delta result" &&
+       delta=$(git rev-parse --verify HEAD:file) &&
+       git branch delta-reuse-new &&
+
+       # Repack with bitmaps and double check that we have the expected delta
+       # relationship.
+       git repack -adb &&
+       have_delta $delta $base
+'
+
+# Now we can sanity-check the non-bitmap behavior (that the server is not able
+# to reuse the delta). This isn't strictly something we care about, so this
+# test could be scrapped in the future. But it makes sure that the next test is
+# actually triggering the feature we want.
+#
+# Note that our tools for working with on-the-wire "thin" packs are limited. So
+# we actually perform the fetch, retain the resulting pack, and inspect the
+# result.
+test_expect_success 'fetch without bitmaps ignores delta against old base' '
+       test_config pack.usebitmaps false &&
+       test_when_finished "rm -rf client.git" &&
+       git init --bare client.git &&
+       (
+               cd client.git &&
+               git config transfer.unpackLimit 1 &&
+               git fetch .. delta-reuse-old:delta-reuse-old &&
+               git fetch .. delta-reuse-new:delta-reuse-new &&
+               have_delta $delta $ZERO_OID
+       )
+'
+
+# And do the same for the bitmap case, where we do expect to find the delta.
+test_expect_success 'fetch with bitmaps can reuse old base' '
+       test_config pack.usebitmaps true &&
+       test_when_finished "rm -rf client.git" &&
+       git init --bare client.git &&
+       (
+               cd client.git &&
+               git config transfer.unpackLimit 1 &&
+               git fetch .. delta-reuse-old:delta-reuse-old &&
+               git fetch .. delta-reuse-new:delta-reuse-new &&
+               have_delta $delta $base
+       )
+'
+
 test_done
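The have_delta helper above builds on cat-file's %(deltabase) format atom. A stand-alone sketch of that query, with a placeholder object name and a freshly repacked repository assumed:

# Prints the all-zero object ID when the object is stored whole, or the
# OID of its delta base when it is stored as a delta.
oid=$(git rev-parse HEAD:file) &&
echo $oid | git cat-file --batch-check='%(deltabase)'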
index 6710c8bc8c4699035bec418258fcaec8a634855c..2e718f0bdede397d399399536707743f9f7cff34 100755 (executable)
@@ -21,17 +21,21 @@ test_expect_success 'setup r1' '
 
 test_expect_success 'verify blob count in normal packfile' '
        git -C r1 ls-files -s file.1 file.2 file.3 file.4 file.5 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+               >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        git -C r1 pack-objects --rev --stdout >all.pack <<-EOF &&
        HEAD
        EOF
        git -C r1 index-pack ../all.pack &&
-       git -C r1 verify-pack -v ../all.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+
+       git -C r1 verify-pack -v ../all.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify blob:none packfile has no blobs' '
@@ -39,24 +43,28 @@ test_expect_success 'verify blob:none packfile has no blobs' '
        HEAD
        EOF
        git -C r1 index-pack ../filter.pack &&
-       git -C r1 verify-pack -v ../filter.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
+
+       git -C r1 verify-pack -v ../filter.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
        nr=$(wc -l <observed) &&
        test 0 -eq $nr
 '
 
 test_expect_success 'verify normal and blob:none packfiles have same commits/trees' '
-       git -C r1 verify-pack -v ../all.pack \
-               | grep -E "commit|tree" \
-               | awk -f print_1.awk \
-               | sort >expected &&
-       git -C r1 verify-pack -v ../filter.pack \
-               | grep -E "commit|tree" \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+       git -C r1 verify-pack -v ../all.pack >verify_result &&
+       grep -E "commit|tree" verify_result |
+       awk -f print_1.awk |
+       sort >expected &&
+
+       git -C r1 verify-pack -v ../filter.pack >verify_result &&
+       grep -E "commit|tree" verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 # Test blob:limit=<n>[kmg] filter.
@@ -75,18 +83,21 @@ test_expect_success 'setup r2' '
 '
 
 test_expect_success 'verify blob count in normal packfile' '
-       git -C r2 ls-files -s large.1000 large.10000 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+       git -C r2 ls-files -s large.1000 large.10000 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        git -C r2 pack-objects --rev --stdout >all.pack <<-EOF &&
        HEAD
        EOF
        git -C r2 index-pack ../all.pack &&
-       git -C r2 verify-pack -v ../all.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+
+       git -C r2 verify-pack -v ../all.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify blob:limit=500 omits all blobs' '
@@ -94,10 +105,12 @@ test_expect_success 'verify blob:limit=500 omits all blobs' '
        HEAD
        EOF
        git -C r2 index-pack ../filter.pack &&
-       git -C r2 verify-pack -v ../filter.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
+
+       git -C r2 verify-pack -v ../filter.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
        nr=$(wc -l <observed) &&
        test 0 -eq $nr
 '
@@ -107,100 +120,119 @@ test_expect_success 'verify blob:limit=1000' '
        HEAD
        EOF
        git -C r2 index-pack ../filter.pack &&
-       git -C r2 verify-pack -v ../filter.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
+
+       git -C r2 verify-pack -v ../filter.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
        nr=$(wc -l <observed) &&
        test 0 -eq $nr
 '
 
 test_expect_success 'verify blob:limit=1001' '
-       git -C r2 ls-files -s large.1000 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+       git -C r2 ls-files -s large.1000 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        git -C r2 pack-objects --rev --stdout --filter=blob:limit=1001 >filter.pack <<-EOF &&
        HEAD
        EOF
        git -C r2 index-pack ../filter.pack &&
-       git -C r2 verify-pack -v ../filter.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+
+       git -C r2 verify-pack -v ../filter.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify blob:limit=10001' '
-       git -C r2 ls-files -s large.1000 large.10000 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+       git -C r2 ls-files -s large.1000 large.10000 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        git -C r2 pack-objects --rev --stdout --filter=blob:limit=10001 >filter.pack <<-EOF &&
        HEAD
        EOF
        git -C r2 index-pack ../filter.pack &&
-       git -C r2 verify-pack -v ../filter.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+
+       git -C r2 verify-pack -v ../filter.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify blob:limit=1k' '
-       git -C r2 ls-files -s large.1000 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+       git -C r2 ls-files -s large.1000 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        git -C r2 pack-objects --rev --stdout --filter=blob:limit=1k >filter.pack <<-EOF &&
        HEAD
        EOF
        git -C r2 index-pack ../filter.pack &&
-       git -C r2 verify-pack -v ../filter.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+
+       git -C r2 verify-pack -v ../filter.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify explicitly specifying oversized blob in input' '
-       git -C r2 ls-files -s large.1000 large.10000 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+       git -C r2 ls-files -s large.1000 large.10000 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        git -C r2 pack-objects --rev --stdout --filter=blob:limit=1k >filter.pack <<-EOF &&
        HEAD
        $(git -C r2 rev-parse HEAD:large.10000)
        EOF
        git -C r2 index-pack ../filter.pack &&
-       git -C r2 verify-pack -v ../filter.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+
+       git -C r2 verify-pack -v ../filter.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify blob:limit=1m' '
-       git -C r2 ls-files -s large.1000 large.10000 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+       git -C r2 ls-files -s large.1000 large.10000 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        git -C r2 pack-objects --rev --stdout --filter=blob:limit=1m >filter.pack <<-EOF &&
        HEAD
        EOF
        git -C r2 index-pack ../filter.pack &&
-       git -C r2 verify-pack -v ../filter.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+
+       git -C r2 verify-pack -v ../filter.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify normal and blob:limit packfiles have same commits/trees' '
-       git -C r2 verify-pack -v ../all.pack \
-               | grep -E "commit|tree" \
-               | awk -f print_1.awk \
-               | sort >expected &&
-       git -C r2 verify-pack -v ../filter.pack \
-               | grep -E "commit|tree" \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+       git -C r2 verify-pack -v ../all.pack >verify_result &&
+       grep -E "commit|tree" verify_result |
+       awk -f print_1.awk |
+       sort >expected &&
+
+       git -C r2 verify-pack -v ../filter.pack >verify_result &&
+       grep -E "commit|tree" verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 # Test sparse:path=<path> filter.
@@ -225,71 +257,85 @@ test_expect_success 'setup r3' '
 
 test_expect_success 'verify blob count in normal packfile' '
        git -C r3 ls-files -s sparse1 sparse2 dir1/sparse1 dir1/sparse2 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+               >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        git -C r3 pack-objects --rev --stdout >all.pack <<-EOF &&
        HEAD
        EOF
        git -C r3 index-pack ../all.pack &&
-       git -C r3 verify-pack -v ../all.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+
+       git -C r3 verify-pack -v ../all.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify sparse:path=pattern1' '
-       git -C r3 ls-files -s dir1/sparse1 dir1/sparse2 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+       git -C r3 ls-files -s dir1/sparse1 dir1/sparse2 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        git -C r3 pack-objects --rev --stdout --filter=sparse:path=../pattern1 >filter.pack <<-EOF &&
        HEAD
        EOF
        git -C r3 index-pack ../filter.pack &&
-       git -C r3 verify-pack -v ../filter.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+
+       git -C r3 verify-pack -v ../filter.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify normal and sparse:path=pattern1 packfiles have same commits/trees' '
-       git -C r3 verify-pack -v ../all.pack \
-               | grep -E "commit|tree" \
-               | awk -f print_1.awk \
-               | sort >expected &&
-       git -C r3 verify-pack -v ../filter.pack \
-               | grep -E "commit|tree" \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+       git -C r3 verify-pack -v ../all.pack >verify_result &&
+       grep -E "commit|tree" verify_result |
+       awk -f print_1.awk |
+       sort >expected &&
+
+       git -C r3 verify-pack -v ../filter.pack >verify_result &&
+       grep -E "commit|tree" verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify sparse:path=pattern2' '
-       git -C r3 ls-files -s sparse1 dir1/sparse1 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+       git -C r3 ls-files -s sparse1 dir1/sparse1 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        git -C r3 pack-objects --rev --stdout --filter=sparse:path=../pattern2 >filter.pack <<-EOF &&
        HEAD
        EOF
        git -C r3 index-pack ../filter.pack &&
-       git -C r3 verify-pack -v ../filter.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+
+       git -C r3 verify-pack -v ../filter.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify normal and sparse:path=pattern2 packfiles have same commits/trees' '
-       git -C r3 verify-pack -v ../all.pack \
-               | grep -E "commit|tree" \
-               | awk -f print_1.awk \
-               | sort >expected &&
-       git -C r3 verify-pack -v ../filter.pack \
-               | grep -E "commit|tree" \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+       git -C r3 verify-pack -v ../all.pack >verify_result &&
+       grep -E "commit|tree" verify_result |
+       awk -f print_1.awk |
+       sort >expected &&
+
+       git -C r3 verify-pack -v ../filter.pack >verify_result &&
+       grep -E "commit|tree" verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 # Test sparse:oid=<oid-ish> filter.
@@ -313,48 +359,58 @@ test_expect_success 'setup r4' '
 
 test_expect_success 'verify blob count in normal packfile' '
        git -C r4 ls-files -s pattern sparse1 sparse2 dir1/sparse1 dir1/sparse2 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+               >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        git -C r4 pack-objects --rev --stdout >all.pack <<-EOF &&
        HEAD
        EOF
        git -C r4 index-pack ../all.pack &&
-       git -C r4 verify-pack -v ../all.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+
+       git -C r4 verify-pack -v ../all.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify sparse:oid=OID' '
-       git -C r4 ls-files -s dir1/sparse1 dir1/sparse2 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+       git -C r4 ls-files -s dir1/sparse1 dir1/sparse2 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        oid=$(git -C r4 ls-files -s pattern | awk -f print_2.awk) &&
        git -C r4 pack-objects --rev --stdout --filter=sparse:oid=$oid >filter.pack <<-EOF &&
        HEAD
        EOF
        git -C r4 index-pack ../filter.pack &&
-       git -C r4 verify-pack -v ../filter.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+
+       git -C r4 verify-pack -v ../filter.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify sparse:oid=oid-ish' '
-       git -C r4 ls-files -s dir1/sparse1 dir1/sparse2 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+       git -C r4 ls-files -s dir1/sparse1 dir1/sparse2 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        git -C r4 pack-objects --rev --stdout --filter=sparse:oid=master:pattern >filter.pack <<-EOF &&
        HEAD
        EOF
        git -C r4 index-pack ../filter.pack &&
-       git -C r4 verify-pack -v ../filter.pack \
-               | grep blob \
-               | awk -f print_1.awk \
-               | sort >observed &&
-       test_cmp observed expected
+
+       git -C r4 verify-pack -v ../filter.pack >verify_result &&
+       grep blob verify_result |
+       awk -f print_1.awk |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 # Delete some loose objects and use pack-objects, but WITHOUT any filtering.
@@ -362,8 +418,10 @@ test_expect_success 'verify sparse:oid=oid-ish' '
 
 test_expect_success 'setup r1 - delete loose blobs' '
        git -C r1 ls-files -s file.1 file.2 file.3 file.4 file.5 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+               >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        for id in `cat expected | sed "s|..|&/|"`
        do
                rm r1/.git/objects/$id
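The conversions in this file all follow one pattern: git commands are moved off the upstream end of a pipe and their output captured in a temporary file, so a failing git invocation is no longer masked by the pipeline's exit status. A minimal hedged sketch of the before/after shape (file names are illustrative):

# before: only sort's exit status is visible to the && chain
#   git verify-pack -v ../all.pack | grep blob | sort >observed
# after: verify-pack's exit status is checked explicitly
git verify-pack -v ../all.pack >verify_result &&
grep blob verify_result | sort >observed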
index 0c500f7ca2641a2752f5d4819bb11efcf5f588bf..5fe21db99f43aa79dc36ab36bf6d40ca2f47264d 100755 (executable)
@@ -8,7 +8,8 @@ test_expect_success 'setup full repo' '
        cd "$TRASH_DIRECTORY/full" &&
        git init &&
        git config core.commitGraph true &&
-       objdir=".git/objects"
+       objdir=".git/objects" &&
+       test_oid_init
 '
 
 test_expect_success 'verify graph with no graph file' '
@@ -35,7 +36,7 @@ test_expect_success 'create commits and repack' '
 graph_git_two_modes() {
        git -c core.commitGraph=true $1 >output
        git -c core.commitGraph=false $1 >expect
-       test_cmp output expect
+       test_cmp expect output
 }
 
 graph_git_behavior() {
@@ -259,6 +260,66 @@ test_expect_success 'check that gc computes commit-graph' '
        test_cmp_bin commit-graph-after-gc $objdir/info/commit-graph
 '
 
+test_expect_success 'replace-objects invalidates commit-graph' '
+       cd "$TRASH_DIRECTORY" &&
+       test_when_finished rm -rf replace &&
+       git clone full replace &&
+       (
+               cd replace &&
+               git commit-graph write --reachable &&
+               test_path_is_file .git/objects/info/commit-graph &&
+               git replace HEAD~1 HEAD~2 &&
+               git -c core.commitGraph=false log >expect &&
+               git -c core.commitGraph=true log >actual &&
+               test_cmp expect actual &&
+               git commit-graph write --reachable &&
+               git -c core.commitGraph=false --no-replace-objects log >expect &&
+               git -c core.commitGraph=true --no-replace-objects log >actual &&
+               test_cmp expect actual &&
+               rm -rf .git/objects/info/commit-graph &&
+               git commit-graph write --reachable &&
+               test_path_is_file .git/objects/info/commit-graph
+       )
+'
+
+test_expect_success 'commit grafts invalidate commit-graph' '
+       cd "$TRASH_DIRECTORY" &&
+       test_when_finished rm -rf graft &&
+       git clone full graft &&
+       (
+               cd graft &&
+               git commit-graph write --reachable &&
+               test_path_is_file .git/objects/info/commit-graph &&
+               H1=$(git rev-parse --verify HEAD~1) &&
+               H3=$(git rev-parse --verify HEAD~3) &&
+               echo "$H1 $H3" >.git/info/grafts &&
+               git -c core.commitGraph=false log >expect &&
+               git -c core.commitGraph=true log >actual &&
+               test_cmp expect actual &&
+               git commit-graph write --reachable &&
+               git -c core.commitGraph=false --no-replace-objects log >expect &&
+               git -c core.commitGraph=true --no-replace-objects log >actual &&
+               test_cmp expect actual &&
+               rm -rf .git/objects/info/commit-graph &&
+               git commit-graph write --reachable &&
+               test_path_is_missing .git/objects/info/commit-graph
+       )
+'
+
+test_expect_success 'commit-graph is not written for shallow clones' '
+       cd "$TRASH_DIRECTORY" &&
+       test_when_finished rm -rf shallow &&
+       git clone --depth 2 "file://$TRASH_DIRECTORY/full" shallow &&
+       (
+               cd shallow &&
+               git commit-graph write --reachable &&
+               test_path_is_missing .git/objects/info/commit-graph &&
+               git fetch origin --unshallow &&
+               git commit-graph write --reachable &&
+               test_path_is_file .git/objects/info/commit-graph
+       )
+'
+
 # the verify tests below expect the commit-graph to contain
 # exactly the commits reachable from the commits/8 branch.
 # If the file changes the set of commits in the list, then the
@@ -273,7 +334,7 @@ test_expect_success 'git commit-graph verify' '
 
 NUM_COMMITS=9
 NUM_OCTOPUS_EDGES=2
-HASH_LEN=20
+HASH_LEN="$(test_oid rawsz)"
 GRAPH_BYTE_VERSION=4
 GRAPH_BYTE_HASH=5
 GRAPH_BYTE_CHUNK_COUNT=6
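Replacing the hardcoded HASH_LEN=20 with "$(test_oid rawsz)" lets the GRAPH_BYTE_* offsets above track whatever hash size the test framework reports. A hedged sketch of the helper's use (test_oid_init and test_oid are provided by the test library):

test_oid_init &&
HASH_LEN=$(test_oid rawsz) &&
echo $HASH_LEN   # 20 under SHA-1, 32 under SHA-256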
diff --git a/t/t5319-multi-pack-index.sh b/t/t5319-multi-pack-index.sh
new file mode 100755 (executable)
index 0000000..bd8e841
--- /dev/null
@@ -0,0 +1,351 @@
+#!/bin/sh
+
+test_description='multi-pack-indexes'
+. ./test-lib.sh
+
+objdir=.git/objects
+
+midx_read_expect () {
+       NUM_PACKS=$1
+       NUM_OBJECTS=$2
+       NUM_CHUNKS=$3
+       OBJECT_DIR=$4
+       EXTRA_CHUNKS="$5"
+       {
+               cat <<-EOF &&
+               header: 4d494458 1 $NUM_CHUNKS $NUM_PACKS
+               chunks: pack-names oid-fanout oid-lookup object-offsets$EXTRA_CHUNKS
+               num_objects: $NUM_OBJECTS
+               packs:
+               EOF
+               if test $NUM_PACKS -ge 1
+               then
+                       ls $OBJECT_DIR/pack/ | grep idx | sort
+               fi &&
+               printf "object-dir: $OBJECT_DIR\n"
+       } >expect &&
+       test-tool read-midx $OBJECT_DIR >actual &&
+       test_cmp expect actual
+}
+
+test_expect_success 'write midx with no packs' '
+       test_when_finished rm -f pack/multi-pack-index &&
+       git multi-pack-index --object-dir=. write &&
+       midx_read_expect 0 0 4 .
+'
+
+generate_objects () {
+       i=$1
+       iii=$(printf '%03i' $i)
+       {
+               test-tool genrandom "bar" 200 &&
+               test-tool genrandom "baz $iii" 50
+       } >wide_delta_$iii &&
+       {
+               test-tool genrandom "foo"$i 100 &&
+               test-tool genrandom "foo"$(( $i + 1 )) 100 &&
+               test-tool genrandom "foo"$(( $i + 2 )) 100
+       } >deep_delta_$iii &&
+       {
+               echo $iii &&
+               test-tool genrandom "$iii" 8192
+       } >file_$iii &&
+       git update-index --add file_$iii deep_delta_$iii wide_delta_$iii
+}
+
+commit_and_list_objects () {
+       {
+               echo 101 &&
+               test-tool genrandom 100 8192;
+       } >file_101 &&
+       git update-index --add file_101 &&
+       tree=$(git write-tree) &&
+       commit=$(git commit-tree $tree -p HEAD</dev/null) &&
+       {
+               echo $tree &&
+               git ls-tree $tree | sed -e "s/.* \\([0-9a-f]*\\)        .*/\\1/"
+       } >obj-list &&
+       git reset --hard $commit
+}
+
+test_expect_success 'create objects' '
+       test_commit initial &&
+       for i in $(test_seq 1 5)
+       do
+               generate_objects $i
+       done &&
+       commit_and_list_objects
+'
+
+test_expect_success 'write midx with one v1 pack' '
+       pack=$(git pack-objects --index-version=1 $objdir/pack/test <obj-list) &&
+       test_when_finished rm $objdir/pack/test-$pack.pack \
+               $objdir/pack/test-$pack.idx $objdir/pack/multi-pack-index &&
+       git multi-pack-index --object-dir=$objdir write &&
+       midx_read_expect 1 18 4 $objdir
+'
+
+midx_git_two_modes () {
+       if [ "$2" = "sorted" ]
+       then
+               git -c core.multiPackIndex=false $1 | sort >expect &&
+               git -c core.multiPackIndex=true $1 | sort >actual
+       else
+               git -c core.multiPackIndex=false $1 >expect &&
+               git -c core.multiPackIndex=true $1 >actual
+       fi &&
+       test_cmp expect actual
+}
+
+compare_results_with_midx () {
+       MSG=$1
+       test_expect_success "check normal git operations: $MSG" '
+               midx_git_two_modes "rev-list --objects --all" &&
+               midx_git_two_modes "log --raw" &&
+               midx_git_two_modes "count-objects --verbose" &&
+               midx_git_two_modes "cat-file --batch-all-objects --buffer --batch-check" &&
+               midx_git_two_modes "cat-file --batch-all-objects --buffer --batch-check --unordered" sorted
+       '
+}
+
+test_expect_success 'write midx with one v2 pack' '
+       git pack-objects --index-version=2,0x40 $objdir/pack/test <obj-list &&
+       git multi-pack-index --object-dir=$objdir write &&
+       midx_read_expect 1 18 4 $objdir
+'
+
+compare_results_with_midx "one v2 pack"
+
+test_expect_success 'add more objects' '
+       for i in $(test_seq 6 10)
+       do
+               generate_objects $i
+       done &&
+       commit_and_list_objects
+'
+
+test_expect_success 'write midx with two packs' '
+       git pack-objects --index-version=1 $objdir/pack/test-2 <obj-list &&
+       git multi-pack-index --object-dir=$objdir write &&
+       midx_read_expect 2 34 4 $objdir
+'
+
+compare_results_with_midx "two packs"
+
+test_expect_success 'add more packs' '
+       for j in $(test_seq 11 20)
+       do
+               generate_objects $j &&
+               commit_and_list_objects &&
+               git pack-objects --index-version=2 $objdir/pack/test-pack <obj-list
+       done
+'
+
+compare_results_with_midx "mixed mode (two packs + extra)"
+
+test_expect_success 'write midx with twelve packs' '
+       git multi-pack-index --object-dir=$objdir write &&
+       midx_read_expect 12 74 4 $objdir
+'
+
+compare_results_with_midx "twelve packs"
+
+test_expect_success 'verify multi-pack-index success' '
+       git multi-pack-index verify --object-dir=$objdir
+'
+
+# usage: corrupt_midx_and_verify <pos> <data> <objdir> <string>
+corrupt_midx_and_verify() {
+       POS=$1 &&
+       DATA="${2:-\0}" &&
+       OBJDIR=$3 &&
+       GREPSTR="$4" &&
+       COMMAND="$5" &&
+       if test -z "$COMMAND"
+       then
+               COMMAND="git multi-pack-index verify --object-dir=$OBJDIR"
+       fi &&
+       FILE=$OBJDIR/pack/multi-pack-index &&
+       chmod a+w $FILE &&
+       test_when_finished mv midx-backup $FILE &&
+       cp $FILE midx-backup &&
+       printf "$DATA" | dd of="$FILE" bs=1 seek="$POS" conv=notrunc &&
+       test_must_fail $COMMAND 2>test_err &&
+       grep -v "^+" test_err >err &&
+       test_i18ngrep "$GREPSTR" err
+}
+
+test_expect_success 'verify bad signature' '
+       corrupt_midx_and_verify 0 "\00" $objdir \
+               "multi-pack-index signature"
+'
+
+HASH_LEN=20
+NUM_OBJECTS=74
+MIDX_BYTE_VERSION=4
+MIDX_BYTE_OID_VERSION=5
+MIDX_BYTE_CHUNK_COUNT=6
+MIDX_HEADER_SIZE=12
+MIDX_BYTE_CHUNK_ID=$MIDX_HEADER_SIZE
+MIDX_BYTE_CHUNK_OFFSET=$(($MIDX_HEADER_SIZE + 4))
+MIDX_NUM_CHUNKS=5
+MIDX_CHUNK_LOOKUP_WIDTH=12
+MIDX_OFFSET_PACKNAMES=$(($MIDX_HEADER_SIZE + \
+                        $MIDX_NUM_CHUNKS * $MIDX_CHUNK_LOOKUP_WIDTH))
+MIDX_BYTE_PACKNAME_ORDER=$(($MIDX_OFFSET_PACKNAMES + 2))
+MIDX_OFFSET_OID_FANOUT=$(($MIDX_OFFSET_PACKNAMES + 652))
+MIDX_OID_FANOUT_WIDTH=4
+MIDX_BYTE_OID_FANOUT_ORDER=$((MIDX_OFFSET_OID_FANOUT + 250 * $MIDX_OID_FANOUT_WIDTH + 1))
+MIDX_OFFSET_OID_LOOKUP=$(($MIDX_OFFSET_OID_FANOUT + 256 * $MIDX_OID_FANOUT_WIDTH))
+MIDX_BYTE_OID_LOOKUP=$(($MIDX_OFFSET_OID_LOOKUP + 16 * $HASH_LEN))
+MIDX_OFFSET_OBJECT_OFFSETS=$(($MIDX_OFFSET_OID_LOOKUP + $NUM_OBJECTS * $HASH_LEN))
+MIDX_OFFSET_WIDTH=8
+MIDX_BYTE_PACK_INT_ID=$(($MIDX_OFFSET_OBJECT_OFFSETS + 16 * $MIDX_OFFSET_WIDTH + 2))
+MIDX_BYTE_OFFSET=$(($MIDX_OFFSET_OBJECT_OFFSETS + 16 * $MIDX_OFFSET_WIDTH + 6))
+
+test_expect_success 'verify bad version' '
+       corrupt_midx_and_verify $MIDX_BYTE_VERSION "\00" $objdir \
+               "multi-pack-index version"
+'
+
+test_expect_success 'verify bad OID version' '
+       corrupt_midx_and_verify $MIDX_BYTE_OID_VERSION "\02" $objdir \
+               "hash version"
+'
+
+test_expect_success 'verify truncated chunk count' '
+       corrupt_midx_and_verify $MIDX_BYTE_CHUNK_COUNT "\01" $objdir \
+               "missing required"
+'
+
+test_expect_success 'verify extended chunk count' '
+       corrupt_midx_and_verify $MIDX_BYTE_CHUNK_COUNT "\07" $objdir \
+               "terminating multi-pack-index chunk id appears earlier than expected"
+'
+
+test_expect_success 'verify missing required chunk' '
+       corrupt_midx_and_verify $MIDX_BYTE_CHUNK_ID "\01" $objdir \
+               "missing required"
+'
+
+test_expect_success 'verify invalid chunk offset' '
+       corrupt_midx_and_verify $MIDX_BYTE_CHUNK_OFFSET "\01" $objdir \
+               "invalid chunk offset (too large)"
+'
+
+test_expect_success 'verify packnames out of order' '
+       corrupt_midx_and_verify $MIDX_BYTE_PACKNAME_ORDER "z" $objdir \
+               "pack names out of order"
+'
+
+test_expect_success 'verify packnames out of order' '
+       corrupt_midx_and_verify $MIDX_BYTE_PACKNAME_ORDER "a" $objdir \
+               "failed to load pack"
+'
+
+test_expect_success 'verify oid fanout out of order' '
+       corrupt_midx_and_verify $MIDX_BYTE_OID_FANOUT_ORDER "\01" $objdir \
+               "oid fanout out of order"
+'
+
+test_expect_success 'verify oid lookup out of order' '
+       corrupt_midx_and_verify $MIDX_BYTE_OID_LOOKUP "\00" $objdir \
+               "oid lookup out of order"
+'
+
+test_expect_success 'verify incorrect pack-int-id' '
+       corrupt_midx_and_verify $MIDX_BYTE_PACK_INT_ID "\07" $objdir \
+               "bad pack-int-id"
+'
+
+test_expect_success 'verify incorrect offset' '
+       corrupt_midx_and_verify $MIDX_BYTE_OFFSET "\07" $objdir \
+               "incorrect object offset"
+'
+
+test_expect_success 'git-fsck incorrect offset' '
+       corrupt_midx_and_verify $MIDX_BYTE_OFFSET "\07" $objdir \
+               "incorrect object offset" \
+               "git -c core.multipackindex=true fsck"
+'
+
+test_expect_success 'repack removes multi-pack-index' '
+       test_path_is_file $objdir/pack/multi-pack-index &&
+       git repack -adf &&
+       test_path_is_missing $objdir/pack/multi-pack-index
+'
+
+compare_results_with_midx "after repack"
+
+test_expect_success 'multi-pack-index and pack-bitmap' '
+       git -c repack.writeBitmaps=true repack -ad &&
+       git multi-pack-index write &&
+       git rev-list --test-bitmap HEAD
+'
+
+test_expect_success 'multi-pack-index and alternates' '
+       git init --bare alt.git &&
+       echo $(pwd)/alt.git/objects >.git/objects/info/alternates &&
+       echo content1 >file1 &&
+       altblob=$(GIT_DIR=alt.git git hash-object -w file1) &&
+       git cat-file blob $altblob &&
+       git rev-list --all
+'
+
+compare_results_with_midx "with alternate (local midx)"
+
+test_expect_success 'multi-pack-index in an alternate' '
+       mv .git/objects/pack/* alt.git/objects/pack &&
+       test_commit add_local_objects &&
+       git repack --local &&
+       git multi-pack-index write &&
+       midx_read_expect 1 3 4 $objdir &&
+       git reset --hard HEAD~1 &&
+       rm -f .git/objects/pack/*
+'
+
+compare_results_with_midx "with alternate (remote midx)"
+
+# usage: corrupt_data <file> <pos> [<data>]
+corrupt_data () {
+       file=$1
+       pos=$2
+       data="${3:-\0}"
+       printf "$data" | dd of="$file" bs=1 seek="$pos" conv=notrunc
+}
+
+# Force 64-bit offsets by manipulating the idx file.
+# This makes the IDX file _incorrect_ so be careful to clean up after!
+test_expect_success 'force some 64-bit offsets with pack-objects' '
+       mkdir objects64 &&
+       mkdir objects64/pack &&
+       for i in $(test_seq 1 11)
+       do
+               generate_objects 11
+       done &&
+       commit_and_list_objects &&
+       pack64=$(git pack-objects --index-version=2,0x40 objects64/pack/test-64 <obj-list) &&
+       idx64=objects64/pack/test-64-$pack64.idx &&
+       chmod u+w $idx64 &&
+       corrupt_data $idx64 2999 "\02" &&
+       midx64=$(git multi-pack-index --object-dir=objects64 write) &&
+       midx_read_expect 1 63 5 objects64 " large-offsets"
+'
+
+test_expect_success 'verify multi-pack-index with 64-bit offsets' '
+       git multi-pack-index verify --object-dir=objects64
+'
+
+NUM_OBJECTS=63
+MIDX_OFFSET_OID_FANOUT=$((MIDX_OFFSET_PACKNAMES + 54))
+MIDX_OFFSET_OID_LOOKUP=$((MIDX_OFFSET_OID_FANOUT + 256 * $MIDX_OID_FANOUT_WIDTH))
+MIDX_OFFSET_OBJECT_OFFSETS=$(($MIDX_OFFSET_OID_LOOKUP + $NUM_OBJECTS * $HASH_LEN))
+MIDX_OFFSET_LARGE_OFFSETS=$(($MIDX_OFFSET_OBJECT_OFFSETS + $NUM_OBJECTS * $MIDX_OFFSET_WIDTH))
+MIDX_BYTE_LARGE_OFFSET=$(($MIDX_OFFSET_LARGE_OFFSETS + 3))
+
+test_expect_success 'verify incorrect 64-bit offset' '
+       corrupt_midx_and_verify $MIDX_BYTE_LARGE_OFFSET "\07" objects64 \
+               "incorrect object offset"
+'
+
+test_done
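midx_read_expect above compares against a header line of "4d494458 1 <chunks> <packs>": the leading word is the file signature, which is simply the ASCII bytes of "MIDX". A hedged way to confirm that directly on disk, once a multi-pack-index has been written:

od -An -tx1 -N4 .git/objects/pack/multi-pack-index
# prints: 4d 49 44 58  (i.e. "MIDX"), followed in the file by the 1-byte
# version, hash version and chunk count probed by the MIDX_BYTE_* offsets above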
diff --git a/t/t5320-delta-islands.sh b/t/t5320-delta-islands.sh
new file mode 100755 (executable)
index 0000000..fea92a5
--- /dev/null
@@ -0,0 +1,143 @@
+#!/bin/sh
+
+test_description='exercise delta islands'
+. ./test-lib.sh
+
+# returns true iff $1 is a delta based on $2
+is_delta_base () {
+       delta_base=$(echo "$1" | git cat-file --batch-check='%(deltabase)') &&
+       echo >&2 "$1 has base $delta_base" &&
+       test "$delta_base" = "$2"
+}
+
+# generate a commit on branch $1 with a single file, "file", whose
+# content is mostly based on the seed $2, but with a unique bit
+# of content $3 appended. This should allow us to see whether
+# blobs of different refs delta against each other.
+commit() {
+       blob=$({ test-tool genrandom "$2" 10240 && echo "$3"; } |
+              git hash-object -w --stdin) &&
+       tree=$(printf '100644 blob %s\tfile\n' "$blob" | git mktree) &&
+       commit=$(echo "$2-$3" | git commit-tree "$tree" ${4:+-p "$4"}) &&
+       git update-ref "refs/heads/$1" "$commit" &&
+       eval "$1"'=$(git rev-parse $1:file)' &&
+       eval "echo >&2 $1=\$$1"
+}
+
+test_expect_success 'setup commits' '
+       commit one seed 1 &&
+       commit two seed 12
+'
+
+# Note: This is heavily dependent on the "prefer larger objects as base"
+# heuristic.
+test_expect_success 'vanilla repack deltas one against two' '
+       git repack -adf &&
+       is_delta_base $one $two
+'
+
+test_expect_success 'island repack with no island definition is vanilla' '
+       git repack -adfi &&
+       is_delta_base $one $two
+'
+
+test_expect_success 'island repack with no matches is vanilla' '
+       git -c "pack.island=refs/foo" repack -adfi &&
+       is_delta_base $one $two
+'
+
+test_expect_success 'separate islands disallows delta' '
+       git -c "pack.island=refs/heads/(.*)" repack -adfi &&
+       ! is_delta_base $one $two &&
+       ! is_delta_base $two $one
+'
+
+test_expect_success 'same island allows delta' '
+       git -c "pack.island=refs/heads" repack -adfi &&
+       is_delta_base $one $two
+'
+
+test_expect_success 'coalesce same-named islands' '
+       git \
+               -c "pack.island=refs/(.*)/one" \
+               -c "pack.island=refs/(.*)/two" \
+               repack -adfi &&
+       is_delta_base $one $two
+'
+
+test_expect_success 'island restrictions drop reused deltas' '
+       git repack -adfi &&
+       is_delta_base $one $two &&
+       git -c "pack.island=refs/heads/(.*)" repack -adi &&
+       ! is_delta_base $one $two &&
+       ! is_delta_base $two $one
+'
+
+test_expect_success 'island regexes are left-anchored' '
+       git -c "pack.island=heads/(.*)" repack -adfi &&
+       is_delta_base $one $two
+'
+
+test_expect_success 'island regexes follow last-one-wins scheme' '
+       git \
+               -c "pack.island=refs/heads/(.*)" \
+               -c "pack.island=refs/heads/" \
+               repack -adfi &&
+       is_delta_base $one $two
+'
+
+test_expect_success 'setup shared history' '
+       commit root shared root &&
+       commit one shared 1 root &&
+       commit two shared 12-long root
+'
+
+# We know that $two will be preferred as a base from $one,
+# because we can transform it with a pure deletion.
+#
+# We also expect $root as a delta against $two by the "longest is base" rule.
+test_expect_success 'vanilla delta goes between branches' '
+       git repack -adf &&
+       is_delta_base $one $two &&
+       is_delta_base $root $two
+'
+
+# Here we should allow $one to base itself on $root; even though
+# they are in different islands, the objects in $root are in a superset
+# of islands compared to those in $one.
+#
+# Similarly, $two can delta against $root by our rules. And unlike $one,
+# in which we are just allowing it, the island rules actually put $root
+# as a possible base for $two, which it would not otherwise be (due to the size
+# sorting).
+test_expect_success 'deltas allowed against superset islands' '
+       git -c "pack.island=refs/heads/(.*)" repack -adfi &&
+       is_delta_base $one $root &&
+       is_delta_base $two $root
+'
+
+# We are going to test the packfile order here, so we again have to make some
+# assumptions. We assume that "$root", as part of our core "one", must come
+# before "$two". This should be guaranteed by the island code. However, for
+# this test to fail without islands, we are also assuming that it would not
+# otherwise do so. This is true by the current write order, which will put
+# commits (and their contents) before their parents.
+test_expect_success 'island core places core objects first' '
+       cat >expect <<-EOF &&
+       $root
+       $two
+       EOF
+       git -c "pack.island=refs/heads/(.*)" \
+           -c "pack.islandcore=one" \
+           repack -adfi &&
+       git verify-pack -v .git/objects/pack/*.pack |
+       cut -d" " -f1 |
+       egrep "$root|$two" >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'unmatched island core is not fatal' '
+       git -c "pack.islandcore=one" repack -adfi
+'
+
+test_done
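The island tests above pass pack.island as one-shot -c options. A hedged sketch of the equivalent persistent configuration (the regex and the core island name are illustrative):

# Each capture group in pack.island names an island; a delta base is only
# allowed if it lives in every island its dependent object is in.
# pack.islandCore names one island whose objects are written first.
git config --add pack.island "refs/heads/(.*)" &&
git config pack.islandCore master &&
git repack -adfi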
index 1b5a4a6d380f37b0e616e5ca6c5fd5b732e6383d..086f2c40f68ffaf666a09d88b86709133559fc91 100755 (executable)
@@ -50,8 +50,11 @@ pull_to_client () {
                        case "$heads" in *B*)
                            git update-ref refs/heads/B "$BTIP";;
                        esac &&
-                       git symbolic-ref HEAD refs/heads/$(echo $heads \
-                               | sed -e "s/^\(.\).*$/\1/") &&
+
+                       git symbolic-ref HEAD refs/heads/$(
+                               echo $heads |
+                               sed -e "s/^\(.\).*$/\1/"
+                       ) &&
 
                        git fsck --full &&
 
index 62f35698912d0655e44d53d50c7197b8200d2710..7bc706873c5b2341f6a0922cf2e4f79d34eee97c 100755 (executable)
@@ -133,6 +133,34 @@ committer Bugs Bunny <bugs@bun.ni> 1234567890 +0000
 This commit object intentionally broken
 EOF
 
+test_expect_success 'setup bogus commit' '
+       commit="$(git hash-object -t commit -w --stdin <bogus-commit)"
+'
+
+test_expect_success 'fsck with no skipList input' '
+       test_must_fail git fsck 2>err &&
+       test_i18ngrep "missingEmail" err
+'
+
+test_expect_success 'setup sorted and unsorted skipLists' '
+       cat >SKIP.unsorted <<-EOF &&
+       0000000000000000000000000000000000000004
+       0000000000000000000000000000000000000002
+       $commit
+       0000000000000000000000000000000000000001
+       0000000000000000000000000000000000000003
+       EOF
+       sort SKIP.unsorted >SKIP.sorted
+'
+
+test_expect_success 'fsck with sorted skipList' '
+       git -c fsck.skipList=SKIP.sorted fsck
+'
+
+test_expect_success 'fsck with unsorted skipList' '
+       git -c fsck.skipList=SKIP.unsorted fsck
+'
+
 test_expect_success 'fsck with invalid or bogus skipList input' '
        git -c fsck.skipList=/dev/null -c fsck.missingEmail=ignore fsck &&
        test_must_fail git -c fsck.skipList=does-not-exist -c fsck.missingEmail=ignore fsck 2>err &&
@@ -141,8 +169,47 @@ test_expect_success 'fsck with invalid or bogus skipList input' '
        test_i18ngrep "Invalid SHA-1: \[core\]" err
 '
 
+test_expect_success 'fsck with other accepted skipList input (comments & empty lines)' '
+       cat >SKIP.with-comment <<-EOF &&
+       # Some bad commit
+       0000000000000000000000000000000000000001
+       EOF
+       test_must_fail git -c fsck.skipList=SKIP.with-comment fsck 2>err-with-comment &&
+       test_i18ngrep "missingEmail" err-with-comment &&
+       cat >SKIP.with-empty-line <<-EOF &&
+       0000000000000000000000000000000000000001
+
+       0000000000000000000000000000000000000002
+       EOF
+       test_must_fail git -c fsck.skipList=SKIP.with-empty-line fsck 2>err-with-empty-line &&
+       test_i18ngrep "missingEmail" err-with-empty-line
+'
+
+test_expect_success 'fsck no garbage output from comments & empty lines errors' '
+       test_line_count = 1 err-with-comment &&
+       test_line_count = 1 err-with-empty-line
+'
+
+test_expect_success 'fsck with invalid abbreviated skipList input' '
+       echo $commit | test_copy_bytes 20 >SKIP.abbreviated &&
+       test_must_fail git -c fsck.skipList=SKIP.abbreviated fsck 2>err-abbreviated &&
+       test_i18ngrep "^fatal: Invalid SHA-1: " err-abbreviated
+'
+
+test_expect_success 'fsck with exhaustive accepted skipList input (various types of comments etc.)' '
+       >SKIP.exhaustive &&
+       echo "# A commented line" >>SKIP.exhaustive &&
+       echo "" >>SKIP.exhaustive &&
+       echo " " >>SKIP.exhaustive &&
+       echo " # Comment after whitespace" >>SKIP.exhaustive &&
+       echo "$commit # Our bad commit (with leading whitespace and trailing comment)" >>SKIP.exhaustive &&
+       echo "# Some bad commit (leading whitespace)" >>SKIP.exhaustive &&
+       echo "  0000000000000000000000000000000000000001" >>SKIP.exhaustive &&
+       git -c fsck.skipList=SKIP.exhaustive fsck 2>err &&
+       test_must_be_empty err
+'
+
 test_expect_success 'push with receive.fsck.skipList' '
-       commit="$(git hash-object -t commit -w --stdin <bogus-commit)" &&
        git push . $commit:refs/heads/bogus &&
        rm -rf dst &&
        git init dst &&
@@ -169,7 +236,6 @@ test_expect_success 'push with receive.fsck.skipList' '
 '
 
 test_expect_success 'fetch with fetch.fsck.skipList' '
-       commit="$(git hash-object -t commit -w --stdin <bogus-commit)" &&
        refspec=refs/heads/bogus:refs/heads/bogus &&
        git push . $commit:refs/heads/bogus &&
        rm -rf dst &&
@@ -204,7 +270,6 @@ test_expect_success 'fsck.<unknownmsg-id> dies' '
 '
 
 test_expect_success 'push with receive.fsck.missingEmail=warn' '
-       commit="$(git hash-object -t commit -w --stdin <bogus-commit)" &&
        git push . $commit:refs/heads/bogus &&
        rm -rf dst &&
        git init dst &&
@@ -232,7 +297,6 @@ test_expect_success 'push with receive.fsck.missingEmail=warn' '
 '
 
 test_expect_success 'fetch with fetch.fsck.missingEmail=warn' '
-       commit="$(git hash-object -t commit -w --stdin <bogus-commit)" &&
        refspec=refs/heads/bogus:refs/heads/bogus &&
        git push . $commit:refs/heads/bogus &&
        rm -rf dst &&
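The accepted-input tests above pin down the skipList syntax: full object names, one per line, with blank lines, leading whitespace and #-comments (including trailing ones) ignored. A stand-alone hedged sketch of such a file (the path and object IDs are placeholders):

cat >.git/fsck-skip <<-\EOF &&
# objects fsck should not report

0000000000000000000000000000000000000001
0000000000000000000000000000000000000002 # trailing comment
EOF
git -c fsck.skipList=.git/fsck-skip fsck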
index 241e6a319df4cefa612b6744c79e72e5cf929462..d2a2cdd453396b1dba5525f32b6325ac8a10d95d 100755 (executable)
@@ -145,7 +145,7 @@ test_expect_success 'remove remote protects local branches' '
 test_expect_success 'remove errors out early when deleting non-existent branch' '
        (
                cd test &&
-               echo "fatal: No such remote: foo" >expect &&
+               echo "fatal: No such remote: '\''foo'\''" >expect &&
                test_must_fail git remote rm foo 2>actual &&
                test_i18ncmp expect actual
        )
@@ -173,7 +173,7 @@ test_expect_success 'remove remote with a branch without configured merge' '
 test_expect_success 'rename errors out early when deleting non-existent branch' '
        (
                cd test &&
-               echo "fatal: No such remote: foo" >expect &&
+               echo "fatal: No such remote: '\''foo'\''" >expect &&
                test_must_fail git remote rename foo bar 2>actual &&
                test_i18ncmp expect actual
        )
index 539c25aadafdcf6aa9fbcce0988631702d3fb02d..7a8f56db53eb6c3b869d525501c186bd2df1ba0a 100755 (executable)
@@ -969,7 +969,7 @@ test_force_push_tag () {
        tag_type_description=$1
        tag_args=$2
 
-       test_expect_success 'force pushing required to update lightweight tag' "
+       test_expect_success "force pushing required to update $tag_type_description" "
                mk_test testrepo heads/master &&
                mk_child testrepo child1 &&
                mk_child testrepo child2 &&
@@ -1009,7 +1009,32 @@ test_force_push_tag () {
 }
 
 test_force_push_tag "lightweight tag" "-f"
-test_force_push_tag "annotated tag" "-f -a -m'msg'"
+test_force_push_tag "annotated tag" "-f -a -m'tag message'"
+
+test_force_fetch_tag () {
+       tag_type_description=$1
+       tag_args=$2
+
+       test_expect_success "fetch will not clobber an existing $tag_type_description without --force" "
+               mk_test testrepo heads/master &&
+               mk_child testrepo child1 &&
+               mk_child testrepo child2 &&
+               (
+                       cd testrepo &&
+                       git tag testTag &&
+                       git -C ../child1 fetch origin tag testTag &&
+                       >file1 &&
+                       git add file1 &&
+                       git commit -m 'file1' &&
+                       git tag $tag_args testTag &&
+                       test_must_fail git -C ../child1 fetch origin tag testTag &&
+                       git -C ../child1 fetch origin '+refs/tags/*:refs/tags/*'
+               )
+       "
+}
+
+test_force_fetch_tag "lightweight tag" "-f"
+test_force_fetch_tag "annotated tag" "-f -a -m'tag message'"
 
 test_expect_success 'push --porcelain' '
        mk_empty testrepo &&
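The test_force_fetch_tag checks added above contrast a plain tag-following fetch, which now refuses to clobber a tag that has moved upstream, with an explicitly forced refspec. A hedged sketch of the two invocations (remote and tag names are illustrative):

git fetch origin tag testTag                  # fails if testTag changed upstream
git fetch origin "+refs/tags/*:refs/tags/*"   # the leading + forces the update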
index 771f36f9ff473d31507af374f1c96fac03192048..3dc8f8ecec2c03c63788bc5b0e9db88e55deb823 100755 (executable)
@@ -23,26 +23,26 @@ test_expect_success 'create http-accessible bare repository' '
 
 setup_askpass_helper
 
-cat >exp <<EOF
-> GET /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1
-> Accept: */*
-> Accept-Encoding: ENCODINGS
-> Pragma: no-cache
-< HTTP/1.1 200 OK
-< Pragma: no-cache
-< Cache-Control: no-cache, max-age=0, must-revalidate
-< Content-Type: application/x-git-upload-pack-advertisement
-> POST /smart/repo.git/git-upload-pack HTTP/1.1
-> Accept-Encoding: ENCODINGS
-> Content-Type: application/x-git-upload-pack-request
-> Accept: application/x-git-upload-pack-result
-> Content-Length: xxx
-< HTTP/1.1 200 OK
-< Pragma: no-cache
-< Cache-Control: no-cache, max-age=0, must-revalidate
-< Content-Type: application/x-git-upload-pack-result
-EOF
 test_expect_success 'clone http repository' '
+       cat >exp <<-\EOF &&
+       > GET /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1
+       > Accept: */*
+       > Accept-Encoding: ENCODINGS
+       > Pragma: no-cache
+       < HTTP/1.1 200 OK
+       < Pragma: no-cache
+       < Cache-Control: no-cache, max-age=0, must-revalidate
+       < Content-Type: application/x-git-upload-pack-advertisement
+       > POST /smart/repo.git/git-upload-pack HTTP/1.1
+       > Accept-Encoding: ENCODINGS
+       > Content-Type: application/x-git-upload-pack-request
+       > Accept: application/x-git-upload-pack-result
+       > Content-Length: xxx
+       < HTTP/1.1 200 OK
+       < Pragma: no-cache
+       < Cache-Control: no-cache, max-age=0, must-revalidate
+       < Content-Type: application/x-git-upload-pack-result
+       EOF
        GIT_TRACE_CURL=true git clone --quiet $HTTPD_URL/smart/repo.git clone 2>err &&
        test_cmp file clone/file &&
        tr '\''\015'\'' Q <err |
@@ -96,13 +96,13 @@ test_expect_success 'fetch changes via http' '
        test_cmp file clone/file
 '
 
-cat >exp <<EOF
-GET  /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
-POST /smart/repo.git/git-upload-pack HTTP/1.1 200
-GET  /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
-POST /smart/repo.git/git-upload-pack HTTP/1.1 200
-EOF
 test_expect_success 'used upload-pack service' '
+       cat >exp <<-\EOF &&
+       GET  /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
+       POST /smart/repo.git/git-upload-pack HTTP/1.1 200
+       GET  /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
+       POST /smart/repo.git/git-upload-pack HTTP/1.1 200
+       EOF
        check_access_log exp
 '
 
@@ -203,19 +203,19 @@ test_expect_success 'dumb clone via http-backend respects namespace' '
        test_cmp expect actual
 '
 
-cat >cookies.txt <<EOF
-127.0.0.1      FALSE   /smart_cookies/ FALSE   0       othername       othervalue
-EOF
-cat >expect_cookies.txt <<EOF
-
-127.0.0.1      FALSE   /smart_cookies/ FALSE   0       othername       othervalue
-127.0.0.1      FALSE   /smart_cookies/repo.git/info/   FALSE   0       name    value
-EOF
 test_expect_success 'cookies stored in http.cookiefile when http.savecookies set' '
+       cat >cookies.txt <<-\EOF &&
+       127.0.0.1       FALSE   /smart_cookies/ FALSE   0       othername       othervalue
+       EOF
+       sort >expect_cookies.txt <<-\EOF &&
+
+       127.0.0.1       FALSE   /smart_cookies/ FALSE   0       othername       othervalue
+       127.0.0.1       FALSE   /smart_cookies/repo.git/info/   FALSE   0       name    value
+       EOF
        git config http.cookiefile cookies.txt &&
        git config http.savecookies true &&
        git ls-remote $HTTPD_URL/smart_cookies/repo.git master &&
-       tail -3 cookies.txt >cookies_tail.txt &&
+       tail -3 cookies.txt | sort >cookies_tail.txt &&
        test_cmp expect_cookies.txt cookies_tail.txt
 '
 
index f94d01f69e5cb9565540411de2c09df92262b9c3..b24d8b05a498674612870a133e1a4fc4a37bd5d3 100755 (executable)
@@ -155,8 +155,8 @@ test_expect_success 'CONTENT_LENGTH overflow ssite_t' '
 
 test_expect_success 'empty CONTENT_LENGTH' '
        env \
-               QUERY_STRING=/repo.git/HEAD \
-               PATH_TRANSLATED="$PWD"/.git/HEAD \
+               QUERY_STRING="service=git-receive-pack" \
+               PATH_TRANSLATED="$PWD"/.git/info/refs \
                GIT_HTTP_EXPORT_ALL=TRUE \
                REQUEST_METHOD=GET \
                CONTENT_LENGTH="" \
index ddaa96ac4f44a4e4799aa509b3bd69bc28628d60..f1a49e94f5fe56a0c175b0e5354ce1d5300cb382 100755 (executable)
@@ -624,10 +624,16 @@ test_expect_success 'clone on case-insensitive fs' '
                        git hash-object -w -t tree --stdin) &&
                c=$(git commit-tree -m bogus $t) &&
                git update-ref refs/heads/bogus $c &&
-               git clone -b bogus . bogus
+               git clone -b bogus . bogus 2>warning
        )
 '
 
+test_expect_success !MINGW,!CYGWIN,CASE_INSENSITIVE_FS 'colliding file detection' '
+       grep X icasefs/warning &&
+       grep x icasefs/warning &&
+       test_i18ngrep "the following paths have collided" icasefs/warning
+'
+
 partial_clone () {
               SERVER="$1" &&
               URL="$2" &&
index 5582b3d5fd7118398bad00e43864d9e73c055f01..e36ac01661d1b5cd26b0ae4fe615b9ccf44e02c8 100755 (executable)
@@ -103,7 +103,7 @@ test_expect_success 'clone with --no-tags' '
 test_expect_success '--single-branch while HEAD pointing at master' '
        (
                cd dir_master &&
-               git fetch &&
+               git fetch --force &&
                git for-each-ref refs/remotes/origin |
                sed -e "/HEAD$/d" \
                    -e "s|/remotes/origin/|/heads/|" >../actual
@@ -114,7 +114,7 @@ test_expect_success '--single-branch while HEAD pointing at master' '
        test_cmp expect actual &&
        (
                cd dir_master &&
-               git fetch --tags &&
+               git fetch --tags --force &&
                git for-each-ref refs/tags >../actual
        ) &&
        git for-each-ref refs/tags >expect &&
index bbbe7537dfd315a567c6dc5583804325e6f10235..6ff6146923d945d9514ad5871b845969c2efb269 100755 (executable)
@@ -34,10 +34,12 @@ test_expect_success 'setup bare clone for server' '
 # confirm partial clone was registered in the local config.
 test_expect_success 'do partial clone 1' '
        git clone --no-checkout --filter=blob:none "file://$(pwd)/srv.bare" pc1 &&
-       git -C pc1 rev-list HEAD --quiet --objects --missing=print \
-               | awk -f print_1.awk \
-               | sed "s/?//" \
-               | sort >observed.oids &&
+
+       git -C pc1 rev-list --quiet --objects --missing=print HEAD >revs &&
+       awk -f print_1.awk revs |
+       sed "s/?//" |
+       sort >observed.oids &&
+
        test_cmp expect_1.oids observed.oids &&
        test "$(git -C pc1 config --local core.repositoryformatversion)" = "1" &&
        test "$(git -C pc1 config --local extensions.partialclone)" = "origin" &&
@@ -46,10 +48,10 @@ test_expect_success 'do partial clone 1' '
 
 # checkout master to force dynamic object fetch of blobs at HEAD.
 test_expect_success 'verify checkout with dynamic object fetch' '
-       git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed &&
+       git -C pc1 rev-list --quiet --objects --missing=print HEAD >observed &&
        test_line_count = 4 observed &&
        git -C pc1 checkout master &&
-       git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed &&
+       git -C pc1 rev-list --quiet --objects --missing=print HEAD >observed &&
        test_line_count = 0 observed
 '
 
@@ -72,7 +74,8 @@ test_expect_success 'push new commits to server' '
 # have the new blobs.
 test_expect_success 'partial fetch inherits filter settings' '
        git -C pc1 fetch origin &&
-       git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+       git -C pc1 rev-list --quiet --objects --missing=print \
+               master..origin/master >observed &&
        test_line_count = 5 observed
 '
 
@@ -80,7 +83,8 @@ test_expect_success 'partial fetch inherits filter settings' '
 # we should only get 1 new blob (for the file in origin/master).
 test_expect_success 'verify diff causes dynamic object fetch' '
        git -C pc1 diff master..origin/master -- file.1.txt &&
-       git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+       git -C pc1 rev-list --quiet --objects --missing=print \
+                master..origin/master >observed &&
        test_line_count = 4 observed
 '
 
@@ -89,7 +93,8 @@ test_expect_success 'verify diff causes dynamic object fetch' '
 test_expect_success 'verify blame causes dynamic object fetch' '
        git -C pc1 blame origin/master -- file.1.txt >observed.blame &&
        test_cmp expect.blame observed.blame &&
-       git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+       git -C pc1 rev-list --quiet --objects --missing=print \
+               master..origin/master >observed &&
        test_line_count = 0 observed
 '
 
@@ -109,7 +114,8 @@ test_expect_success 'push new commits to server for file.2.txt' '
 # Verify we have all the new blobs.
 test_expect_success 'override inherited filter-spec using --no-filter' '
        git -C pc1 fetch --no-filter origin &&
-       git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+       git -C pc1 rev-list --quiet --objects --missing=print \
+               master..origin/master >observed &&
        test_line_count = 0 observed
 '
 
@@ -130,16 +136,22 @@ test_expect_success 'push new commits to server for file.3.txt' '
 # perhaps combined with a command in dry-run mode.
 test_expect_success 'manual prefetch of missing objects' '
        git -C pc1 fetch --filter=blob:none origin &&
-       git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \
-               | awk -f print_1.awk \
-               | sed "s/?//" \
-               | sort >observed.oids &&
+
+       git -C pc1 rev-list --quiet --objects --missing=print \
+                master..origin/master >revs &&
+       awk -f print_1.awk revs |
+       sed "s/?//" |
+       sort >observed.oids &&
+
        test_line_count = 6 observed.oids &&
        git -C pc1 fetch-pack --stdin "file://$(pwd)/srv.bare" <observed.oids &&
-       git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \
-               | awk -f print_1.awk \
-               | sed "s/?//" \
-               | sort >observed.oids &&
+
+       git -C pc1 rev-list --quiet --objects --missing=print \
+               master..origin/master >revs &&
+       awk -f print_1.awk revs |
+       sed "s/?//" |
+       sort >observed.oids &&
+
        test_line_count = 0 observed.oids
 '
 
@@ -194,7 +206,7 @@ test_expect_success 'upon cloning, check that all refs point to objects' '
 
        # Craft a packfile not including that blob.
        git -C "$SERVER" rev-parse HEAD |
-               git -C "$SERVER" pack-objects --stdout >incomplete.pack &&
+       git -C "$SERVER" pack-objects --stdout >incomplete.pack &&
 
        # Replace the existing packfile with the crafted one. The protocol
        # requires that the packfile be sent in sideband 1, hence the extra
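A recurring change in this file is taking git itself off the upstream side of a pipeline: rev-list output is written to an intermediate revs file and only the non-git filters stay piped, so a failing git command can no longer be masked by the pipeline's exit status. Schematically (paths illustrative):

	# before: a rev-list failure is hidden by the pipe
	git -C pc1 rev-list --objects HEAD | awk -f print_1.awk | sort >observed.oids

	# after: rev-list's exit code participates in the &&-chain
	git -C pc1 rev-list --objects HEAD >revs &&
	awk -f print_1.awk revs | sort >observed.oids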
index 75ec79e6cb839e71f6830d9301d1982d0fc68925..ae79c6bbc0d66e9a7a184b2d1726a316a0affbbe 100755 (executable)
@@ -15,13 +15,13 @@ test_expect_success 'test capability advertisement' '
        EOF
 
        git serve --advertise-capabilities >out &&
-       test-pkt-line unpack <out >actual &&
-       test_cmp actual expect
+       test-tool pkt-line unpack <out >actual &&
+       test_cmp expect actual
 '
 
 test_expect_success 'stateless-rpc flag does not list capabilities' '
        # Empty request
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        0000
        EOF
        git serve --stateless-rpc >out <in &&
@@ -33,7 +33,7 @@ test_expect_success 'stateless-rpc flag does not list capabilities' '
 '
 
 test_expect_success 'request invalid capability' '
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        foobar
        0000
        EOF
@@ -42,7 +42,7 @@ test_expect_success 'request invalid capability' '
 '
 
 test_expect_success 'request with no command' '
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        agent=git/test
        0000
        EOF
@@ -51,7 +51,7 @@ test_expect_success 'request with no command' '
 '
 
 test_expect_success 'request invalid command' '
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        command=foo
        agent=git/test
        0000
@@ -71,7 +71,7 @@ test_expect_success 'setup some refs and tags' '
 '
 
 test_expect_success 'basics of ls-refs' '
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        command=ls-refs
        0000
        EOF
@@ -88,12 +88,12 @@ test_expect_success 'basics of ls-refs' '
        EOF
 
        git serve --stateless-rpc <in >out &&
-       test-pkt-line unpack <out >actual &&
-       test_cmp actual expect
+       test-tool pkt-line unpack <out >actual &&
+       test_cmp expect actual
 '
 
 test_expect_success 'basic ref-prefixes' '
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        command=ls-refs
        0001
        ref-prefix refs/heads/master
@@ -108,12 +108,12 @@ test_expect_success 'basic ref-prefixes' '
        EOF
 
        git serve --stateless-rpc <in >out &&
-       test-pkt-line unpack <out >actual &&
-       test_cmp actual expect
+       test-tool pkt-line unpack <out >actual &&
+       test_cmp expect actual
 '
 
 test_expect_success 'refs/heads prefix' '
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        command=ls-refs
        0001
        ref-prefix refs/heads/
@@ -128,12 +128,12 @@ test_expect_success 'refs/heads prefix' '
        EOF
 
        git serve --stateless-rpc <in >out &&
-       test-pkt-line unpack <out >actual &&
-       test_cmp actual expect
+       test-tool pkt-line unpack <out >actual &&
+       test_cmp expect actual
 '
 
 test_expect_success 'peel parameter' '
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        command=ls-refs
        0001
        peel
@@ -149,12 +149,12 @@ test_expect_success 'peel parameter' '
        EOF
 
        git serve --stateless-rpc <in >out &&
-       test-pkt-line unpack <out >actual &&
-       test_cmp actual expect
+       test-tool pkt-line unpack <out >actual &&
+       test_cmp expect actual
 '
 
 test_expect_success 'symrefs parameter' '
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        command=ls-refs
        0001
        symrefs
@@ -170,12 +170,12 @@ test_expect_success 'symrefs parameter' '
        EOF
 
        git serve --stateless-rpc <in >out &&
-       test-pkt-line unpack <out >actual &&
-       test_cmp actual expect
+       test-tool pkt-line unpack <out >actual &&
+       test_cmp expect actual
 '
 
 test_expect_success 'sending server-options' '
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        command=ls-refs
        server-option=hello
        server-option=world
@@ -190,14 +190,14 @@ test_expect_success 'sending server-options' '
        EOF
 
        git serve --stateless-rpc <in >out &&
-       test-pkt-line unpack <out >actual &&
-       test_cmp actual expect
+       test-tool pkt-line unpack <out >actual &&
+       test_cmp expect actual
 '
 
 test_expect_success 'unexpected lines are not allowed in fetch request' '
        git init server &&
 
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        command=fetch
        0001
        this-is-not-a-command
index 3beeed4546cee6c7793de94ba2735b0e6d63b073..98fbf39da325ab574f9717a03c6a6588b15f0ffa 100755 (executable)
@@ -29,7 +29,7 @@ test_expect_success 'list refs with git:// using protocol v2' '
        grep "git< version 2" log &&
 
        git ls-remote --symref "$GIT_DAEMON_URL/parent" >expect &&
-       test_cmp actual expect
+       test_cmp expect actual
 '
 
 test_expect_success 'ref advertisment is filtered with ls-remote using protocol v2' '
@@ -42,7 +42,7 @@ test_expect_success 'ref advertisment is filtered with ls-remote using protocol
        $(git -C "$daemon_parent" rev-parse refs/heads/master)$(printf "\t")refs/heads/master
        EOF
 
-       test_cmp actual expect
+       test_cmp expect actual
 '
 
 test_expect_success 'clone with git:// using protocol v2' '
@@ -138,7 +138,7 @@ test_expect_success 'list refs with file:// using protocol v2' '
        grep "git< version 2" log &&
 
        git ls-remote --symref "file://$(pwd)/file_parent" >expect &&
-       test_cmp actual expect
+       test_cmp expect actual
 '
 
 test_expect_success 'ref advertisment is filtered with ls-remote using protocol v2' '
@@ -151,7 +151,7 @@ test_expect_success 'ref advertisment is filtered with ls-remote using protocol
        $(git -C file_parent rev-parse refs/heads/master)$(printf "\t")refs/heads/master
        EOF
 
-       test_cmp actual expect
+       test_cmp expect actual
 '
 
 test_expect_success 'server-options are sent when using ls-remote' '
@@ -164,7 +164,7 @@ test_expect_success 'server-options are sent when using ls-remote' '
        $(git -C file_parent rev-parse refs/heads/master)$(printf "\t")refs/heads/master
        EOF
 
-       test_cmp actual expect &&
+       test_cmp expect actual &&
        grep "server-option=hello" log &&
        grep "server-option=world" log
 '
@@ -271,7 +271,7 @@ test_expect_success 'partial clone' '
        grep "version 2" trace &&
 
        # Ensure that the old version of the file is missing
-       git -C client rev-list master --quiet --objects --missing=print \
+       git -C client rev-list --quiet --objects --missing=print master \
                >observed.oids &&
        grep "$(git -C server rev-parse message1:a.txt)" observed.oids &&
 
@@ -297,7 +297,7 @@ test_expect_success 'partial fetch' '
        grep "version 2" trace &&
 
        # Ensure that the old version of the file is missing
-       git -C client rev-list other --quiet --objects --missing=print \
+       git -C client rev-list --quiet --objects --missing=print other \
                >observed.oids &&
        grep "$(git -C server rev-parse message1:a.txt)" observed.oids &&
 
@@ -334,7 +334,7 @@ test_expect_success 'even with handcrafted request, filter does not work if not
        git -C server config uploadpack.allowfilter 0 &&
 
        # Custom request that tries to filter even though it is not advertised.
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        command=fetch
        0001
        want $(git -C server rev-parse master)
index d1ccc22331338b016646de6af5f15387b3119665..3f58f05cbb49d3b4a7f50e946e0f46bbc953140a 100755 (executable)
@@ -9,14 +9,14 @@ get_actual_refs () {
                /wanted-refs/d
                /0001/d
                p
-               }' <out | test-pkt-line unpack >actual_refs
+               }' <out | test-tool pkt-line unpack >actual_refs
 }
 
 get_actual_commits () {
        sed -n -e '/packfile/,/0000/{
                /packfile/d
                p
-               }' <out | test-pkt-line unpack-sideband >o.pack &&
+               }' <out | test-tool pkt-line unpack-sideband >o.pack &&
        git index-pack o.pack &&
        git verify-pack -v o.idx | grep commit | cut -c-40 | sort >actual_commits
 }
@@ -61,7 +61,7 @@ test_expect_success 'config controls ref-in-want advertisement' '
 '
 
 test_expect_success 'invalid want-ref line' '
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        command=fetch
        0001
        no-progress
@@ -80,7 +80,7 @@ test_expect_success 'basic want-ref' '
        EOF
        git rev-parse f | sort >expected_commits &&
 
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        command=fetch
        0001
        no-progress
@@ -101,7 +101,7 @@ test_expect_success 'multiple want-ref lines' '
        EOF
        git rev-parse c d | sort >expected_commits &&
 
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        command=fetch
        0001
        no-progress
@@ -122,7 +122,7 @@ test_expect_success 'mix want and want-ref' '
        EOF
        git rev-parse e f | sort >expected_commits &&
 
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        command=fetch
        0001
        no-progress
@@ -143,7 +143,7 @@ test_expect_success 'want-ref with ref we already have commit for' '
        EOF
        >expected_commits &&
 
-       test-pkt-line pack >in <<-EOF &&
+       test-tool pkt-line pack >in <<-EOF &&
        command=fetch
        0001
        no-progress
index e51eb41f4b9575d2b51d8d4d255ff5ab7a0889ad..545b461e51d4fd5df39c7d27953fb0f26f73e052 100755 (executable)
@@ -41,10 +41,9 @@ test_expect_success 'corrupt second commit object' \
    test_must_fail git fsck --full
    '
 
-test_expect_success 'rev-list should fail' \
-   '
-   test_must_fail git rev-list --all > /dev/null
-   '
+test_expect_success 'rev-list should fail' '
+       test_must_fail env GIT_TEST_COMMIT_GRAPH=0 git rev-list --all > /dev/null
+'
 
 test_expect_success 'git repack _MUST_ fail' \
    '
index 0bf10d0686982418682ff03e3a42ef3a16e2cfb4..db8a7834d88e6d01b3cc5106af749ede7b75866a 100755 (executable)
@@ -255,7 +255,7 @@ test_expect_success 'rev-list accumulates multiple --exclude' '
        compare rev-list "--exclude=refs/remotes/* --exclude=refs/tags/* --all" --branches
 '
 
-test_expect_failure 'rev-list should succeed with empty output on empty stdin' '
+test_expect_success 'rev-list should succeed with empty output on empty stdin' '
        git rev-list --stdin </dev/null >actual &&
        test_must_be_empty actual
 '
index 20aee43f95ab3cc38b8671a233089873fe374777..51ee887a77639adca91b19c73fb297e33bc34f96 100755 (executable)
@@ -99,7 +99,7 @@ EOF
 printf "propter nomen suum." >> expect.txt
 
 test_expect_success "merge does not add LF away of change" \
-       "test_cmp test3.txt expect.txt"
+       "test_cmp expect.txt test3.txt"
 
 cp test.txt backup.txt
 test_expect_success "merge with conflicts" \
@@ -122,7 +122,7 @@ non timebo mala, quoniam tu mecum es:
 virga tua et baculus tuus ipsa me consolata sunt.
 EOF
 
-test_expect_success "expected conflict markers" "test_cmp test.txt expect.txt"
+test_expect_success "expected conflict markers" "test_cmp expect.txt test.txt"
 
 cp backup.txt test.txt
 
@@ -138,7 +138,7 @@ non timebo mala, quoniam tu mecum es:
 virga tua et baculus tuus ipsa me consolata sunt.
 EOF
 test_expect_success "merge conflicting with --ours" \
-       "git merge-file --ours test.txt orig.txt new3.txt && test_cmp test.txt expect.txt"
+       "git merge-file --ours test.txt orig.txt new3.txt && test_cmp expect.txt test.txt"
 cp backup.txt test.txt
 
 cat > expect.txt << EOF
@@ -154,7 +154,7 @@ non timebo mala, quoniam tu mecum es:
 virga tua et baculus tuus ipsa me consolata sunt.
 EOF
 test_expect_success "merge conflicting with --theirs" \
-       "git merge-file --theirs test.txt orig.txt new3.txt && test_cmp test.txt expect.txt"
+       "git merge-file --theirs test.txt orig.txt new3.txt && test_cmp expect.txt test.txt"
 cp backup.txt test.txt
 
 cat > expect.txt << EOF
@@ -171,7 +171,7 @@ non timebo mala, quoniam tu mecum es:
 virga tua et baculus tuus ipsa me consolata sunt.
 EOF
 test_expect_success "merge conflicting with --union" \
-       "git merge-file --union test.txt orig.txt new3.txt && test_cmp test.txt expect.txt"
+       "git merge-file --union test.txt orig.txt new3.txt && test_cmp expect.txt test.txt"
 cp backup.txt test.txt
 
 test_expect_success "merge with conflicts, using -L" \
@@ -195,7 +195,7 @@ virga tua et baculus tuus ipsa me consolata sunt.
 EOF
 
 test_expect_success "expected conflict markers, with -L" \
-       "test_cmp test.txt expect.txt"
+       "test_cmp expect.txt test.txt"
 
 sed "s/ tu / TU /" < new1.txt > new5.txt
 test_expect_success "conflict in removed tail" \
index 3f59e58dfb5171d2e5ef53399c2f8d24fff1e94e..27c7de90ce64aa17594ca8cd43d87e35534aeb01 100755 (executable)
@@ -60,9 +60,9 @@ git update-index a1 &&
 GIT_AUTHOR_DATE="2006-12-12 23:00:08" git commit -m F
 '
 
-test_expect_success "combined merge conflicts" "
-       test_must_fail git merge -m final G
-"
+test_expect_success 'combined merge conflicts' '
+       test_must_fail env GIT_TEST_COMMIT_GRAPH=0 git merge -m final G
+'
 
 cat > expect << EOF
 <<<<<<< HEAD
index 07735410b9536ba7639134c7ca0cda4f486f8291..4e6c7cb77e7dc42c9f21b8ca84f8a64b1ed7d3af 100755 (executable)
@@ -45,7 +45,7 @@ test_expect_success resolve '
                false
        else
                git ls-files -s >current
-               test_cmp current expect
+               test_cmp expect current
        fi
 '
 
@@ -60,7 +60,7 @@ test_expect_success recursive '
                false
        else
                git ls-files -s >current
-               test_cmp current expect
+               test_cmp expect current
        fi
 '
 
index 7d06461f1396583f7ec568c9bd5bb17735938e34..87741efad319237477541823637a8bf47c1a254d 100755 (executable)
@@ -61,7 +61,7 @@ do_both_modes () {
                git checkout -f a2 &&
                test_must_fail git merge -s $strategy b2 &&
                git ls-files -u >actual &&
-               test_cmp actual expect &&
+               test_cmp expect actual &&
                git ls-files -s file2 | grep ^100755
        '
 
index d4ff0b3bef61b5d3de1ab0fad4695949d80dc8fd..53975c572465314957aaef360dea040a5be2db22 100755 (executable)
@@ -21,24 +21,31 @@ test_expect_success 'setup r1' '
 
 test_expect_success 'verify blob:none omits all 5 blobs' '
        git -C r1 ls-files -s file.1 file.2 file.3 file.4 file.5 \
-               | awk -f print_2.awk \
-               | sort >expected &&
-       git -C r1 rev-list HEAD --quiet --objects --filter-print-omitted --filter=blob:none \
-               | awk -f print_1.awk \
-               | sed "s/~//" \
-               | sort >observed &&
-       test_cmp observed expected
+               >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
+       git -C r1 rev-list --quiet --objects --filter-print-omitted \
+               --filter=blob:none HEAD >revs &&
+       awk -f print_1.awk revs |
+       sed "s/~//" |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify emitted+omitted == all' '
-       git -C r1 rev-list HEAD --objects \
-               | awk -f print_1.awk \
-               | sort >expected &&
-       git -C r1 rev-list HEAD --objects --filter-print-omitted --filter=blob:none \
-               | awk -f print_1.awk \
-               | sed "s/~//" \
-               | sort >observed &&
-       test_cmp observed expected
+       git -C r1 rev-list --objects HEAD >revs &&
+       awk -f print_1.awk revs |
+       sort >expected &&
+
+       git -C r1 rev-list --objects --filter-print-omitted --filter=blob:none \
+               HEAD >revs &&
+       awk -f print_1.awk revs |
+       sed "s/~//" |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 
@@ -58,65 +65,82 @@ test_expect_success 'setup r2' '
 '
 
 test_expect_success 'verify blob:limit=500 omits all blobs' '
-       git -C r2 ls-files -s large.1000 large.10000 \
-               | awk -f print_2.awk \
-               | sort >expected &&
-       git -C r2 rev-list HEAD --quiet --objects --filter-print-omitted --filter=blob:limit=500 \
-               | awk -f print_1.awk \
-               | sed "s/~//" \
-               | sort >observed &&
-       test_cmp observed expected
+       git -C r2 ls-files -s large.1000 large.10000 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
+       git -C r2 rev-list --quiet --objects --filter-print-omitted \
+               --filter=blob:limit=500 HEAD >revs &&
+       awk -f print_1.awk revs |
+       sed "s/~//" |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify emitted+omitted == all' '
-       git -C r2 rev-list HEAD --objects \
-               | awk -f print_1.awk \
-               | sort >expected &&
-       git -C r2 rev-list HEAD --objects --filter-print-omitted --filter=blob:limit=500 \
-               | awk -f print_1.awk \
-               | sed "s/~//" \
-               | sort >observed &&
-       test_cmp observed expected
+       git -C r2 rev-list --objects HEAD >revs &&
+       awk -f print_1.awk revs |
+       sort >expected &&
+
+       git -C r2 rev-list --objects --filter-print-omitted \
+               --filter=blob:limit=500 HEAD >revs &&
+       awk -f print_1.awk revs |
+       sed "s/~//" |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify blob:limit=1000' '
-       git -C r2 ls-files -s large.1000 large.10000 \
-               | awk -f print_2.awk \
-               | sort >expected &&
-       git -C r2 rev-list HEAD --quiet --objects --filter-print-omitted --filter=blob:limit=1000 \
-               | awk -f print_1.awk \
-               | sed "s/~//" \
-               | sort >observed &&
-       test_cmp observed expected
+       git -C r2 ls-files -s large.1000 large.10000 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
+       git -C r2 rev-list --quiet --objects --filter-print-omitted \
+               --filter=blob:limit=1000 HEAD >revs &&
+       awk -f print_1.awk revs |
+       sed "s/~//" |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify blob:limit=1001' '
-       git -C r2 ls-files -s large.10000 \
-               | awk -f print_2.awk \
-               | sort >expected &&
-       git -C r2 rev-list HEAD --quiet --objects --filter-print-omitted --filter=blob:limit=1001 \
-               | awk -f print_1.awk \
-               | sed "s/~//" \
-               | sort >observed &&
-       test_cmp observed expected
+       git -C r2 ls-files -s large.10000 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
+       git -C r2 rev-list --quiet --objects --filter-print-omitted \
+               --filter=blob:limit=1001 HEAD >revs &&
+       awk -f print_1.awk revs |
+       sed "s/~//" |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify blob:limit=1k' '
-       git -C r2 ls-files -s large.10000 \
-               | awk -f print_2.awk \
-               | sort >expected &&
-       git -C r2 rev-list HEAD --quiet --objects --filter-print-omitted --filter=blob:limit=1k \
-               | awk -f print_1.awk \
-               | sed "s/~//" \
-               | sort >observed &&
-       test_cmp observed expected
+       git -C r2 ls-files -s large.10000 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
+       git -C r2 rev-list --quiet --objects --filter-print-omitted \
+               --filter=blob:limit=1k HEAD >revs &&
+       awk -f print_1.awk revs |
+       sed "s/~//" |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify blob:limit=1m' '
-       git -C r2 rev-list HEAD --quiet --objects --filter-print-omitted --filter=blob:limit=1m \
-               | awk -f print_1.awk \
-               | sed "s/~//" \
-               | sort >observed &&
+       git -C r2 rev-list --quiet --objects --filter-print-omitted \
+               --filter=blob:limit=1m HEAD >revs &&
+       awk -f print_1.awk revs |
+       sed "s/~//" |
+       sort >observed &&
+
        test_must_be_empty observed
 '
 
@@ -141,25 +165,31 @@ test_expect_success 'setup r3' '
 '
 
 test_expect_success 'verify sparse:path=pattern1 omits top-level files' '
-       git -C r3 ls-files -s sparse1 sparse2 \
-               | awk -f print_2.awk \
-               | sort >expected &&
-       git -C r3 rev-list HEAD --quiet --objects --filter-print-omitted --filter=sparse:path=../pattern1 \
-               | awk -f print_1.awk \
-               | sed "s/~//" \
-               | sort >observed &&
-       test_cmp observed expected
+       git -C r3 ls-files -s sparse1 sparse2 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
+       git -C r3 rev-list --quiet --objects --filter-print-omitted \
+               --filter=sparse:path=../pattern1 HEAD >revs &&
+       awk -f print_1.awk revs |
+       sed "s/~//" |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify sparse:path=pattern2 omits both sparse2 files' '
-       git -C r3 ls-files -s sparse2 dir1/sparse2 \
-               | awk -f print_2.awk \
-               | sort >expected &&
-       git -C r3 rev-list HEAD --quiet --objects --filter-print-omitted --filter=sparse:path=../pattern2 \
-               | awk -f print_1.awk \
-               | sed "s/~//" \
-               | sort >observed &&
-       test_cmp observed expected
+       git -C r3 ls-files -s sparse2 dir1/sparse2 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
+       git -C r3 rev-list --quiet --objects --filter-print-omitted \
+               --filter=sparse:path=../pattern2 HEAD >revs &&
+       awk -f print_1.awk revs |
+       sed "s/~//" |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 # Test sparse:oid=<oid-ish> filter.
@@ -173,26 +203,33 @@ test_expect_success 'setup r3 part 2' '
 '
 
 test_expect_success 'verify sparse:oid=OID omits top-level files' '
-       git -C r3 ls-files -s pattern sparse1 sparse2 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+       git -C r3 ls-files -s pattern sparse1 sparse2 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        oid=$(git -C r3 ls-files -s pattern | awk -f print_2.awk) &&
-       git -C r3 rev-list HEAD --quiet --objects --filter-print-omitted --filter=sparse:oid=$oid \
-               | awk -f print_1.awk \
-               | sed "s/~//" \
-               | sort >observed &&
-       test_cmp observed expected
+
+       git -C r3 rev-list --quiet --objects --filter-print-omitted \
+               --filter=sparse:oid=$oid HEAD >revs &&
+       awk -f print_1.awk revs |
+       sed "s/~//" |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'verify sparse:oid=oid-ish omits top-level files' '
-       git -C r3 ls-files -s pattern sparse1 sparse2 \
-               | awk -f print_2.awk \
-               | sort >expected &&
-       git -C r3 rev-list HEAD --quiet --objects --filter-print-omitted --filter=sparse:oid=master:pattern \
-               | awk -f print_1.awk \
-               | sed "s/~//" \
-               | sort >observed &&
-       test_cmp observed expected
+       git -C r3 ls-files -s pattern sparse1 sparse2 >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
+       git -C r3 rev-list --quiet --objects --filter-print-omitted \
+               --filter=sparse:oid=master:pattern HEAD >revs &&
+       awk -f print_1.awk revs |
+       sed "s/~//" |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 # Delete some loose objects and use rev-list, but WITHOUT any filtering.
@@ -200,17 +237,21 @@ test_expect_success 'verify sparse:oid=oid-ish omits top-level files' '
 
 test_expect_success 'rev-list W/ --missing=print' '
        git -C r1 ls-files -s file.1 file.2 file.3 file.4 file.5 \
-               | awk -f print_2.awk \
-               | sort >expected &&
+               >ls_files_result &&
+       awk -f print_2.awk ls_files_result |
+       sort >expected &&
+
        for id in `cat expected | sed "s|..|&/|"`
        do
                rm r1/.git/objects/$id
        done &&
-       git -C r1 rev-list --quiet HEAD --missing=print --objects \
-               | awk -f print_1.awk \
-               | sed "s/?//" \
-               | sort >observed &&
-       test_cmp observed expected
+
+       git -C r1 rev-list --quiet --missing=print --objects HEAD >revs &&
+       awk -f print_1.awk revs |
+       sed "s/?//" |
+       sort >observed &&
+
+       test_cmp expected observed
 '
 
 test_expect_success 'rev-list W/O --missing fails' '
index 77b8cef66198a870f669441857207af7142c0fbb..e436a7396241e6aad71a5f62500ffe0b9d7c0b7e 100755 (executable)
@@ -166,7 +166,7 @@ test_expect_success 'fail if attr magic is used places not implemented' '
        # though, but git-add is convenient as it has its own internal pathspec
        # parsing.
        test_must_fail git add ":(attr:labelB)" 2>actual &&
-       test_i18ngrep "unsupported magic" actual
+       test_i18ngrep "magic not supported" actual
 '
 
 test_expect_success 'abort on giving invalid label on the command line' '
index 024f8c06f7c58a424204d6fca69021ace2de3cbf..97bfbee6e8d69e46bd1ef1c94dae32d64be977b2 100755 (executable)
@@ -715,6 +715,29 @@ test_expect_success 'basic atom: head contents:trailers' '
        test_cmp expect actual.clean
 '
 
+test_expect_success 'trailer parsing not fooled by --- line' '
+       git commit --allow-empty -F - <<-\EOF &&
+       this is the subject
+
+       This is the body. The message has a "---" line which would confuse a
+       message+patch parser. But here we know we have only a commit message,
+       so we get it right.
+
+       trailer: wrong
+       ---
+       This is more body.
+
+       trailer: right
+       EOF
+
+       {
+               echo "trailer: right" &&
+               echo
+       } >expect &&
+       git for-each-ref --format="%(trailers)" refs/heads/master >actual &&
+       test_cmp expect actual
+'
+
 test_expect_success 'Add symbolic ref for the following tests' '
        git symbolic-ref refs/heads/sym refs/heads/master
 '
index 818435f04e49b50965cd674bf046eb3d494fa939..4684d06552ac2eeafff19cfd696fd36d20cef5d4 100755 (executable)
@@ -4,6 +4,7 @@ test_description='basic git gc tests
 '
 
 . ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-terminal.sh
 
 test_expect_success 'setup' '
        # do not let the amount of physical memory affects gc
@@ -99,6 +100,26 @@ test_expect_success 'auto gc with too many loose objects does not attempt to cre
        test_line_count = 2 new # There is one new pack and its .idx
 '
 
+test_expect_success 'gc --no-quiet' '
+       git -c gc.writeCommitGraph=true gc --no-quiet >stdout 2>stderr &&
+       test_must_be_empty stdout &&
+       test_line_count = 1 stderr &&
+       test_i18ngrep "Computing commit graph generation numbers" stderr
+'
+
+test_expect_success TTY 'with TTY: gc --no-quiet' '
+       test_terminal git -c gc.writeCommitGraph=true gc --no-quiet >stdout 2>stderr &&
+       test_must_be_empty stdout &&
+       test_i18ngrep "Enumerating objects" stderr &&
+       test_i18ngrep "Computing commit graph generation numbers" stderr
+'
+
+test_expect_success 'gc --quiet' '
+       git -c gc.writeCommitGraph=true gc --quiet >stdout 2>stderr &&
+       test_must_be_empty stdout &&
+       test_must_be_empty stderr
+'
+
 run_and_wait_for_auto_gc () {
        # We read stdout from gc for the side effect of waiting until the
        # background gc process exits, closing its fd 9.  Furthermore, the
@@ -116,11 +137,11 @@ test_expect_success 'background auto gc does not run if gc.log is present and re
        test_config gc.autopacklimit 1 &&
        test_config gc.autodetach true &&
        echo fleem >.git/gc.log &&
-       test_must_fail git gc --auto 2>err &&
-       test_i18ngrep "^error:" err &&
+       git gc --auto 2>err &&
+       test_i18ngrep "^warning:" err &&
        test_config gc.logexpiry 5.days &&
        test-tool chmtime =-345600 .git/gc.log &&
-       test_must_fail git gc --auto &&
+       git gc --auto &&
        test_config gc.logexpiry 2.days &&
        run_and_wait_for_auto_gc &&
        ls .git/objects/pack/pack-*.pack >packs &&
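The last hunk reflects "git gc --auto" downgrading a leftover gc.log from a hard error to a warning: the command now exits successfully and reports the old log contents with a "warning:" prefix, and once the log is older than gc.logexpiry, auto gc proceeds as before. Roughly what the updated expectations check:

	echo fleem >.git/gc.log &&
	git gc --auto 2>err &&            # succeeds where it previously failed
	test_i18ngrep "^warning:" err     # prior message surfaced as a warning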
diff --git a/t/t6600-test-reach.sh b/t/t6600-test-reach.sh
new file mode 100755 (executable)
index 0000000..ae94b27
--- /dev/null
@@ -0,0 +1,268 @@
+#!/bin/sh
+
+test_description='basic commit reachability tests'
+
+. ./test-lib.sh
+
+# Construct a grid-like commit graph with points (x,y)
+# with 1 <= x <= 10, 1 <= y <= 10, where (x,y) has
+# parents (x-1, y) and (x, y-1), keeping in mind that
+# we drop a parent if a coordinate is nonpositive.
+#
+#             (10,10)
+#            /       \
+#         (10,9)    (9,10)
+#        /     \   /      \
+#    (10,8)    (9,9)      (8,10)
+#   /     \    /   \      /    \
+#         ( continued...)
+#   \     /    \   /      \    /
+#    (3,1)     (2,2)      (1,3)
+#        \     /    \     /
+#         (2,1)      (1,2)
+#              \    /
+#              (1,1)
+#
+# We use branch 'commit-x-y' to refer to (x,y).
+# This grid allows interesting reachability and
+# non-reachability queries: (x,y) can reach (x',y')
+# if and only if x' <= x and y' <= y.
+test_expect_success 'setup' '
+       for i in $(test_seq 1 10)
+       do
+               test_commit "1-$i" &&
+               git branch -f commit-1-$i &&
+               git tag -a -m "1-$i" tag-1-$i commit-1-$i
+       done &&
+       for j in $(test_seq 1 9)
+       do
+               git reset --hard commit-$j-1 &&
+               x=$(($j + 1)) &&
+               test_commit "$x-1" &&
+               git branch -f commit-$x-1 &&
+               git tag -a -m "$x-1" tag-$x-1 commit-$x-1 &&
+
+               for i in $(test_seq 2 10)
+               do
+                       git merge commit-$j-$i -m "$x-$i" &&
+                       git branch -f commit-$x-$i &&
+                       git tag -a -m "$x-$i" tag-$x-$i commit-$x-$i
+               done
+       done &&
+       git commit-graph write --reachable &&
+       mv .git/objects/info/commit-graph commit-graph-full &&
+       git show-ref -s commit-5-5 | git commit-graph write --stdin-commits &&
+       mv .git/objects/info/commit-graph commit-graph-half &&
+       git config core.commitGraph true
+'
+
+test_three_modes () {
+       test_when_finished rm -rf .git/objects/info/commit-graph &&
+       test-tool reach $1 <input >actual &&
+       test_cmp expect actual &&
+       cp commit-graph-full .git/objects/info/commit-graph &&
+       test-tool reach $1 <input >actual &&
+       test_cmp expect actual &&
+       cp commit-graph-half .git/objects/info/commit-graph &&
+       test-tool reach $1 <input >actual &&
+       test_cmp expect actual
+}
+
+test_expect_success 'ref_newer:miss' '
+       cat >input <<-\EOF &&
+       A:commit-5-7
+       B:commit-4-9
+       EOF
+       echo "ref_newer(A,B):0" >expect &&
+       test_three_modes ref_newer
+'
+
+test_expect_success 'ref_newer:hit' '
+       cat >input <<-\EOF &&
+       A:commit-5-7
+       B:commit-2-3
+       EOF
+       echo "ref_newer(A,B):1" >expect &&
+       test_three_modes ref_newer
+'
+
+test_expect_success 'in_merge_bases:hit' '
+       cat >input <<-\EOF &&
+       A:commit-5-7
+       B:commit-8-8
+       EOF
+       echo "in_merge_bases(A,B):1" >expect &&
+       test_three_modes in_merge_bases
+'
+
+test_expect_success 'in_merge_bases:miss' '
+       cat >input <<-\EOF &&
+       A:commit-6-8
+       B:commit-5-9
+       EOF
+       echo "in_merge_bases(A,B):0" >expect &&
+       test_three_modes in_merge_bases
+'
+
+test_expect_success 'is_descendant_of:hit' '
+       cat >input <<-\EOF &&
+       A:commit-5-7
+       X:commit-4-8
+       X:commit-6-6
+       X:commit-1-1
+       EOF
+       echo "is_descendant_of(A,X):1" >expect &&
+       test_three_modes is_descendant_of
+'
+
+test_expect_success 'is_descendant_of:miss' '
+       cat >input <<-\EOF &&
+       A:commit-6-8
+       X:commit-5-9
+       X:commit-4-10
+       X:commit-7-6
+       EOF
+       echo "is_descendant_of(A,X):0" >expect &&
+       test_three_modes is_descendant_of
+'
+
+test_expect_success 'get_merge_bases_many' '
+       cat >input <<-\EOF &&
+       A:commit-5-7
+       X:commit-4-8
+       X:commit-6-6
+       X:commit-8-3
+       EOF
+       {
+               echo "get_merge_bases_many(A,X):" &&
+               git rev-parse commit-5-6 \
+                             commit-4-7 | sort
+       } >expect &&
+       test_three_modes get_merge_bases_many
+'
+
+test_expect_success 'reduce_heads' '
+       cat >input <<-\EOF &&
+       X:commit-1-10
+       X:commit-2-8
+       X:commit-3-6
+       X:commit-4-4
+       X:commit-1-7
+       X:commit-2-5
+       X:commit-3-3
+       X:commit-5-1
+       EOF
+       {
+               echo "reduce_heads(X):" &&
+               git rev-parse commit-5-1 \
+                             commit-4-4 \
+                             commit-3-6 \
+                             commit-2-8 \
+                             commit-1-10 | sort
+       } >expect &&
+       test_three_modes reduce_heads
+'
+
+test_expect_success 'can_all_from_reach:hit' '
+       cat >input <<-\EOF &&
+       X:commit-2-10
+       X:commit-3-9
+       X:commit-4-8
+       X:commit-5-7
+       X:commit-6-6
+       X:commit-7-5
+       X:commit-8-4
+       X:commit-9-3
+       Y:commit-1-9
+       Y:commit-2-8
+       Y:commit-3-7
+       Y:commit-4-6
+       Y:commit-5-5
+       Y:commit-6-4
+       Y:commit-7-3
+       Y:commit-8-1
+       EOF
+       echo "can_all_from_reach(X,Y):1" >expect &&
+       test_three_modes can_all_from_reach
+'
+
+test_expect_success 'can_all_from_reach:miss' '
+       cat >input <<-\EOF &&
+       X:commit-2-10
+       X:commit-3-9
+       X:commit-4-8
+       X:commit-5-7
+       X:commit-6-6
+       X:commit-7-5
+       X:commit-8-4
+       X:commit-9-3
+       Y:commit-1-9
+       Y:commit-2-8
+       Y:commit-3-7
+       Y:commit-4-6
+       Y:commit-5-5
+       Y:commit-6-4
+       Y:commit-8-5
+       EOF
+       echo "can_all_from_reach(X,Y):0" >expect &&
+       test_three_modes can_all_from_reach
+'
+
+test_expect_success 'can_all_from_reach_with_flag: tags case' '
+       cat >input <<-\EOF &&
+       X:tag-2-10
+       X:tag-3-9
+       X:tag-4-8
+       X:commit-5-7
+       X:commit-6-6
+       X:commit-7-5
+       X:commit-8-4
+       X:commit-9-3
+       Y:tag-1-9
+       Y:tag-2-8
+       Y:tag-3-7
+       Y:commit-4-6
+       Y:commit-5-5
+       Y:commit-6-4
+       Y:commit-7-3
+       Y:commit-8-1
+       EOF
+       echo "can_all_from_reach_with_flag(X,_,_,0,0):1" >expect &&
+       test_three_modes can_all_from_reach_with_flag
+'
+
+test_expect_success 'commit_contains:hit' '
+       cat >input <<-\EOF &&
+       A:commit-7-7
+       X:commit-2-10
+       X:commit-3-9
+       X:commit-4-8
+       X:commit-5-7
+       X:commit-6-6
+       X:commit-7-5
+       X:commit-8-4
+       X:commit-9-3
+       EOF
+       echo "commit_contains(_,A,X,_):1" >expect &&
+       test_three_modes commit_contains &&
+       test_three_modes commit_contains --tag
+'
+
+test_expect_success 'commit_contains:miss' '
+       cat >input <<-\EOF &&
+       A:commit-6-5
+       X:commit-2-10
+       X:commit-3-9
+       X:commit-4-8
+       X:commit-5-7
+       X:commit-6-6
+       X:commit-7-5
+       X:commit-8-4
+       X:commit-9-3
+       EOF
+       echo "commit_contains(_,A,X,_):0" >expect &&
+       test_three_modes commit_contains &&
+       test_three_modes commit_contains --tag
+'
+
+test_done
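test_three_modes runs the same "test-tool reach" query three times: with no commit-graph file, with the full graph written from --reachable, and with the partial graph covering only commits reachable from commit-5-5, insisting on identical answers in each configuration. Additional queries follow the same shape; for instance (not part of the patch, with values chosen by the grid rule that (x,y) reaches (x',y') iff x' <= x and y' <= y):

	cat >input <<-\EOF &&
	A:commit-9-9
	B:commit-3-3
	EOF
	echo "ref_newer(A,B):1" >expect &&
	test_three_modes ref_newer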
index 2da57fce7b12bd9a2cfac38ba4d98fc9c4af1a4c..190ae149cf3cb6daa0a89d50a5a44ccafdd2aaec 100755 (executable)
@@ -55,7 +55,7 @@ test_expect_success 'setup' '
 '
 
 test_expect_success 'untracked cache is empty' '
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        cat >../expect-empty <<EOF &&
 info/exclude 0000000000000000000000000000000000000000
 core.excludesfile 0000000000000000000000000000000000000000
@@ -106,7 +106,7 @@ EOF
 '
 
 test_expect_success 'untracked cache after first status' '
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        test_cmp ../dump.expect ../actual
 '
 
@@ -126,7 +126,7 @@ EOF
 '
 
 test_expect_success 'untracked cache after second status' '
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        test_cmp ../dump.expect ../actual
 '
 
@@ -157,7 +157,7 @@ EOF
 '
 
 test_expect_success 'verify untracked cache dump' '
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        cat >../expect <<EOF &&
 info/exclude $EMPTY_BLOB
 core.excludesfile 0000000000000000000000000000000000000000
@@ -204,7 +204,7 @@ EOF
 '
 
 test_expect_success 'verify untracked cache dump' '
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        cat >../expect <<EOF &&
 info/exclude $EMPTY_BLOB
 core.excludesfile 0000000000000000000000000000000000000000
@@ -248,7 +248,7 @@ EOF
 '
 
 test_expect_success 'verify untracked cache dump' '
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        cat >../expect <<EOF &&
 info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0
 core.excludesfile 0000000000000000000000000000000000000000
@@ -267,7 +267,7 @@ EOF
 
 test_expect_success 'move two from tracked to untracked' '
        git rm --cached two &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        cat >../expect <<EOF &&
 info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0
 core.excludesfile 0000000000000000000000000000000000000000
@@ -304,7 +304,7 @@ EOF
 '
 
 test_expect_success 'verify untracked cache dump' '
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        cat >../expect <<EOF &&
 info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0
 core.excludesfile 0000000000000000000000000000000000000000
@@ -324,7 +324,7 @@ EOF
 
 test_expect_success 'move two from untracked to tracked' '
        git add two &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        cat >../expect <<EOF &&
 info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0
 core.excludesfile 0000000000000000000000000000000000000000
@@ -361,7 +361,7 @@ EOF
 '
 
 test_expect_success 'verify untracked cache dump' '
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        cat >../expect <<EOF &&
 info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0
 core.excludesfile 0000000000000000000000000000000000000000
@@ -405,7 +405,7 @@ EOF
 '
 
 test_expect_success 'untracked cache correct after commit' '
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        cat >../expect <<EOF &&
 info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0
 core.excludesfile 0000000000000000000000000000000000000000
@@ -464,7 +464,7 @@ EOF
 '
 
 test_expect_success 'untracked cache correct after status' '
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        cat >../expect <<EOF &&
 info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0
 core.excludesfile 0000000000000000000000000000000000000000
@@ -532,7 +532,7 @@ EOF
 '
 
 test_expect_success 'verify untracked cache dump (sparse/subdirs)' '
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        cat >../expect-from-test-dump <<EOF &&
 info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0
 core.excludesfile 0000000000000000000000000000000000000000
@@ -598,66 +598,66 @@ EOF
 
 test_expect_success '--no-untracked-cache removes the cache' '
        git update-index --no-untracked-cache &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        echo "no untracked cache" >../expect-no-uc &&
        test_cmp ../expect-no-uc ../actual
 '
 
 test_expect_success 'git status does not change anything' '
        git status &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        test_cmp ../expect-no-uc ../actual
 '
 
 test_expect_success 'setting core.untrackedCache to true and using git status creates the cache' '
        git config core.untrackedCache true &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        test_cmp ../expect-no-uc ../actual &&
        git status &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        test_cmp ../expect-from-test-dump ../actual
 '
 
 test_expect_success 'using --no-untracked-cache does not fail when core.untrackedCache is true' '
        git update-index --no-untracked-cache &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        test_cmp ../expect-no-uc ../actual &&
        git update-index --untracked-cache &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        test_cmp ../expect-empty ../actual
 '
 
 test_expect_success 'setting core.untrackedCache to false and using git status removes the cache' '
        git config core.untrackedCache false &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        test_cmp ../expect-empty ../actual &&
        git status &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        test_cmp ../expect-no-uc ../actual
 '
 
 test_expect_success 'using --untracked-cache does not fail when core.untrackedCache is false' '
        git update-index --untracked-cache &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        test_cmp ../expect-empty ../actual
 '
 
 test_expect_success 'setting core.untrackedCache to keep' '
        git config core.untrackedCache keep &&
        git update-index --untracked-cache &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        test_cmp ../expect-empty ../actual &&
        git status &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        test_cmp ../expect-from-test-dump ../actual &&
        git update-index --no-untracked-cache &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        test_cmp ../expect-no-uc ../actual &&
        git update-index --force-untracked-cache &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        test_cmp ../expect-empty ../actual &&
        git status &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        test_cmp ../expect-from-test-dump ../actual
 '
 
@@ -671,23 +671,23 @@ test_expect_success 'test ident field is working' '
 
 test_expect_success 'untracked cache survives a checkout' '
        git commit --allow-empty -m empty &&
-       test-dump-untracked-cache >../before &&
+       test-tool dump-untracked-cache >../before &&
        test_when_finished  "git checkout master" &&
        git checkout -b other_branch &&
-       test-dump-untracked-cache >../after &&
+       test-tool dump-untracked-cache >../after &&
        test_cmp ../before ../after &&
        test_commit test &&
-       test-dump-untracked-cache >../before &&
+       test-tool dump-untracked-cache >../before &&
        git checkout master &&
-       test-dump-untracked-cache >../after &&
+       test-tool dump-untracked-cache >../after &&
        test_cmp ../before ../after
 '
 
 test_expect_success 'untracked cache survives a commit' '
-       test-dump-untracked-cache >../before &&
+       test-tool dump-untracked-cache >../before &&
        git add done/two &&
        git commit -m commit &&
-       test-dump-untracked-cache >../after &&
+       test-tool dump-untracked-cache >../after &&
        test_cmp ../before ../after
 '
 
@@ -751,7 +751,7 @@ test_expect_success '"status" after file replacement should be clean with UC=tru
        git checkout master &&
        avoid_racy &&
        status_is_clean &&
-       test-dump-untracked-cache >../actual &&
+       test-tool dump-untracked-cache >../actual &&
        grep -F "recurse valid" ../actual >../actual.grep &&
        cat >../expect.grep <<EOF &&
 / 0000000000000000000000000000000000000000 recurse valid
index 324933acfe94153e166783e9810e98e69da1fc7e..826987ca804fbd74bc06467446c99b7b4a27ea3e 100755 (executable)
@@ -160,7 +160,7 @@ test_expect_success 'checkout -m with merge conflict' '
        git diff master:one :3:uno |
        sed -e "1,/^@@/d" -e "/^ /d" -e "s/^-/d/" -e "s/^+/a/" >current &&
        fill d2 aT d7 aS >expect &&
-       test_cmp current expect &&
+       test_cmp expect current &&
        git diff --cached two >current &&
        test_must_be_empty current
 '
@@ -174,7 +174,7 @@ test_expect_success 'format of merge conflict from checkout -m' '
 
        git ls-files >current &&
        fill same two two two >expect &&
-       test_cmp current expect &&
+       test_cmp expect current &&
 
        cat <<-EOF >expect &&
        <<<<<<< simple
index 10dc91620a69870ab5dac213d032ae5d9938128c..e87164aa8ffdba169ba6f91af99ae3d28f28459b 100755 (executable)
@@ -789,7 +789,7 @@ test_expect_success 'submodule add places git-dir in superprojects git-dir' '
         (cd .git/modules/deeper/submodule &&
          git log > ../../../../actual
         ) &&
-        test_cmp actual expected
+        test_cmp expected actual
        )
 '
 
@@ -807,7 +807,7 @@ test_expect_success 'submodule update places git-dir in superprojects git-dir' '
         (cd .git/modules/deeper/submodule &&
          git log > ../../../../actual
         ) &&
-        test_cmp actual expected
+        test_cmp expected actual
        )
 '
 
@@ -827,7 +827,7 @@ test_expect_success 'submodule add places git-dir in superprojects git-dir recur
         git add deeper/submodule &&
         git commit -m "update submodule" &&
         git push origin : &&
-        test_cmp actual expected
+        test_cmp expected actual
        )
 '
 
@@ -874,7 +874,7 @@ test_expect_success 'submodule update places git-dir in superprojects git-dir re
         (cd .git/modules/submodule/modules/subsubmodule &&
          git log > ../../../../../actual
         ) &&
-        test_cmp actual expected
+        test_cmp expected actual
        )
 '
 
diff --git a/t/t7416-submodule-dash-url.sh b/t/t7416-submodule-dash-url.sh
new file mode 100755 (executable)
index 0000000..1cd2c1c
--- /dev/null
@@ -0,0 +1,49 @@
+#!/bin/sh
+
+test_description='check handling of .gitmodule url with dash'
+. ./test-lib.sh
+
+test_expect_success 'create submodule with protected dash in url' '
+       git init upstream &&
+       git -C upstream commit --allow-empty -m base &&
+       mv upstream ./-upstream &&
+       git submodule add ./-upstream sub &&
+       git add sub .gitmodules &&
+       git commit -m submodule
+'
+
+test_expect_success 'clone can recurse submodule' '
+       test_when_finished "rm -rf dst" &&
+       git clone --recurse-submodules . dst &&
+       echo base >expect &&
+       git -C dst/sub log -1 --format=%s >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'fsck accepts protected dash' '
+       test_when_finished "rm -rf dst" &&
+       git init --bare dst &&
+       git -C dst config transfer.fsckObjects true &&
+       git push dst HEAD
+'
+
+test_expect_success 'remove ./ protection from .gitmodules url' '
+       perl -i -pe "s{\./}{}" .gitmodules &&
+       git commit -am "drop protection"
+'
+
+test_expect_success 'clone rejects unprotected dash' '
+       test_when_finished "rm -rf dst" &&
+       test_must_fail git clone --recurse-submodules . dst 2>err &&
+       test_i18ngrep ignoring err
+'
+
+test_expect_success 'fsck rejects unprotected dash' '
+       test_when_finished "rm -rf dst" &&
+       git init --bare dst &&
+       git -C dst config transfer.fsckObjects true &&
+       test_must_fail git push dst HEAD 2>err &&
+       grep gitmodulesUrl err
+'
+
+test_done
diff --git a/t/t7417-submodule-path-url.sh b/t/t7417-submodule-path-url.sh
new file mode 100755 (executable)
index 0000000..756af8c
--- /dev/null
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+test_description='check handling of .gitmodule path with dash'
+. ./test-lib.sh
+
+test_expect_success 'create submodule with dash in path' '
+       git init upstream &&
+       git -C upstream commit --allow-empty -m base &&
+       git submodule add ./upstream sub &&
+       git mv sub ./-sub &&
+       git commit -m submodule
+'
+
+test_expect_success 'clone rejects unprotected dash' '
+       test_when_finished "rm -rf dst" &&
+       git clone --recurse-submodules . dst 2>err &&
+       test_i18ngrep ignoring err
+'
+
+test_expect_success 'fsck rejects unprotected dash' '
+       test_when_finished "rm -rf dst" &&
+       git init --bare dst &&
+       git -C dst config transfer.fsckObjects true &&
+       test_must_fail git push dst HEAD 2>err &&
+       grep gitmodulesPath err
+'
+
+test_done
index 4cae92804d11f75f24bdd6f6517a88c834e7cfc7..1a6773ee6889939a0046664bd8a7cdcc4f21fe01 100755 (executable)
@@ -517,6 +517,22 @@ Myfooter: x" &&
        test_cmp expected actual
 '
 
+test_expect_success 'signoff not confused by ---' '
+       cat >expected <<-EOF &&
+               subject
+
+               body
+               ---
+               these dashes confuse the parser!
+
+               Signed-off-by: $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL>
+       EOF
+       # should be a noop, since we already signed
+       git commit --allow-empty --signoff -F expected &&
+       git log -1 --pretty=format:%B >actual &&
+       test_cmp expected actual
+'
+
 test_expect_success 'multiple -m' '
 
        >negative &&
index 164719d1c9d3e76a08bbbeb968aaf535370db7d1..c44186133147838d7f17c4d42f8cb96a5df73b28 100755 (executable)
@@ -1417,4 +1417,46 @@ test_expect_success 'unfold' '
        test_cmp expected actual
 '
 
+test_expect_success 'handling of --- lines in input' '
+       echo "real-trailer: just right" >expected &&
+
+       git interpret-trailers --parse >actual <<-\EOF &&
+       subject
+
+       body
+
+       not-a-trailer: too soon
+       ------ this is just a line in the commit message with a bunch of
+       ------ dashes; it does not have any syntactic meaning.
+
+       real-trailer: just right
+       ---
+       below the dashed line may be a patch, etc.
+
+       not-a-trailer: too late
+       EOF
+
+       test_cmp expected actual
+'
+
+test_expect_success 'suppress --- handling' '
+       echo "real-trailer: just right" >expected &&
+
+       git interpret-trailers --parse --no-divider >actual <<-\EOF &&
+       subject
+
+       This commit message has a "---" in it, but because we tell
+       interpret-trailers not to respect that, it has no effect.
+
+       not-a-trailer: too soon
+       ---
+
+       This is still the commit message body.
+
+       real-trailer: just right
+       EOF
+
+       test_cmp expected actual
+'
+
 test_done
index 756beb0d8eb466d78b235af363b6a36dde37c79e..8384ad258c359572d83bb38fd69f61ec6fe612e7 100755 (executable)
@@ -84,21 +84,21 @@ test_expect_success 'setup' '
 
 # test that the fsmonitor extension is off by default
 test_expect_success 'fsmonitor extension is off by default' '
-       test-dump-fsmonitor >actual &&
+       test-tool dump-fsmonitor >actual &&
        grep "^no fsmonitor" actual
 '
 
 # test that "update-index --fsmonitor" adds the fsmonitor extension
 test_expect_success 'update-index --fsmonitor" adds the fsmonitor extension' '
        git update-index --fsmonitor &&
-       test-dump-fsmonitor >actual &&
+       test-tool dump-fsmonitor >actual &&
        grep "^fsmonitor last update" actual
 '
 
 # test that "update-index --no-fsmonitor" removes the fsmonitor extension
 test_expect_success 'update-index --no-fsmonitor" removes the fsmonitor extension' '
        git update-index --no-fsmonitor &&
-       test-dump-fsmonitor >actual &&
+       test-tool dump-fsmonitor >actual &&
        grep "^no fsmonitor" actual
 '
 
@@ -307,9 +307,9 @@ test_expect_success 'splitting the index results in the same state' '
        dirty_repo &&
        git update-index --fsmonitor  &&
        git ls-files -f >expect &&
-       test-dump-fsmonitor >&2 && echo &&
+       test-tool dump-fsmonitor >&2 && echo &&
        git update-index --fsmonitor --split-index &&
-       test-dump-fsmonitor >&2 && echo &&
+       test-tool dump-fsmonitor >&2 && echo &&
        git ls-files -f >actual &&
        test_cmp expect actual
 '
@@ -333,7 +333,7 @@ test_expect_success UNTRACKED_CACHE 'ignore .git changes when invalidating UNTR'
                git update-index --fsmonitor &&
                GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-before" \
                git status &&
-               test-dump-untracked-cache >../before
+               test-tool dump-untracked-cache >../before
        ) &&
        cat >>dot-git/.git/hooks/fsmonitor-test <<-\EOF &&
        printf ".git\0"
@@ -345,7 +345,7 @@ test_expect_success UNTRACKED_CACHE 'ignore .git changes when invalidating UNTR'
                cd dot-git &&
                GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-after" \
                git status &&
-               test-dump-untracked-cache >../after
+               test-tool dump-untracked-cache >../after
        ) &&
        grep "directory invalidation" trace-before >>before &&
        grep "directory invalidation" trace-after >>after &&
index 668bbee73c8dc58c6ba61c4a8002ed68206d1445..562bd215a5315757dcc7c10e311106f2c7b0ff72 100755 (executable)
@@ -557,7 +557,7 @@ test_expect_success SYMLINKS 'difftool --dir-diff --symlink without unstaged cha
        EOF
        git difftool --dir-diff --symlink \
                --extcmd "./.git/CHECK_SYMLINKS" branch HEAD &&
-       test_cmp actual expect
+       test_cmp expect actual
 '
 
 write_script modify-right-file <<\EOF
index 9af60788443a9e69f3ffa7eca079a15f5bafc457..2c309a57d98898e99b76289d2eb298a2bce8cb00 100755 (executable)
@@ -221,7 +221,7 @@ tree d667270a1f7b109f5eb3aaea21ede14b56bfdd6e
 tree 8f51f74cf0163afc9ad68a4b1537288c4558b5a4
 EOF
 
-test_expect_success POSIXPERM,SYMLINKS "$name" "test_cmp a expected"
+test_expect_success POSIXPERM,SYMLINKS "$name" "test_cmp expected a"
 
 test_expect_success 'exit if remote refs are ambigious' '
         git config --add svn-remote.svn.fetch \
index 8a5c8dc1aad6d036964776d9e3fe2ecd0a893305..c26c4b0927916fca6902eb54600a13aee5e7dea9 100755 (executable)
@@ -174,7 +174,8 @@ test_expect_success 'test create-ignore' "
        cmp ./deeply/.gitignore create-ignore.expect &&
        cmp ./deeply/nested/.gitignore create-ignore.expect &&
        cmp ./deeply/nested/directory/.gitignore create-ignore.expect &&
-       git ls-files -s | grep gitignore | cmp - create-ignore-index.expect
+       git ls-files -s >ls_files_result &&
+       grep gitignore ls_files_result | cmp - create-ignore-index.expect
        "
 
 cat >prop.expect <<\EOF
@@ -189,17 +190,21 @@ EOF
 # This test can be improved: since all the svn:ignore contain the same
 # pattern, it can pass even though the propget did not execute on the
 # right directory.
-test_expect_success 'test propget' "
-       git svn propget svn:ignore . | cmp - prop.expect &&
+test_expect_success 'test propget' '
+       test_propget () {
+               git svn propget $1 $2 >actual &&
+               cmp $3 actual
+       } &&
+       test_propget svn:ignore . prop.expect &&
        cd deeply &&
-       git svn propget svn:ignore . | cmp - ../prop.expect &&
-       git svn propget svn:entry:committed-rev nested/directory/.keep \
-         | cmp - ../prop2.expect &&
-       git svn propget svn:ignore .. | cmp - ../prop.expect &&
-       git svn propget svn:ignore nested/ | cmp - ../prop.expect &&
-       git svn propget svn:ignore ./nested | cmp - ../prop.expect &&
-       git svn propget svn:ignore .././deeply/nested | cmp - ../prop.expect
-       "
+       test_propget svn:ignore . ../prop.expect &&
+       test_propget svn:entry:committed-rev nested/directory/.keep \
+               ../prop2.expect &&
+       test_propget svn:ignore .. ../prop.expect &&
+       test_propget svn:ignore nested/ ../prop.expect &&
+       test_propget svn:ignore ./nested ../prop.expect &&
+       test_propget svn:ignore .././deeply/nested ../prop.expect
+       '
 
 cat >prop.expect <<\EOF
 Properties on '.':
@@ -218,8 +223,11 @@ Properties on 'nested/directory/.keep':
 EOF
 
 test_expect_success 'test proplist' "
-       git svn proplist . | cmp - prop.expect &&
-       git svn proplist nested/directory/.keep | cmp - prop2.expect
+       git svn proplist . >actual &&
+       cmp prop.expect actual &&
+
+       git svn proplist nested/directory/.keep >actual &&
+       cmp prop2.expect actual
        "
 
 test_done
index f3c30e63b7f584cddb91793b0170f004d866530d..f894860867a1398555876ce0586479157189a376 100755 (executable)
@@ -45,7 +45,7 @@ test_expect_success 'update git svn-cloned repo' '
                git svn rebase &&
                echo a > expect &&
                echo b >> expect &&
-               test_cmp a expect &&
+               test_cmp expect a &&
                rm expect
        )
 '
@@ -69,7 +69,7 @@ test_expect_success 'update git svn-cloned repo' '
                git svn rebase &&
                echo a > expect &&
                echo b >> expect &&
-               test_cmp a expect &&
+               test_cmp expect a &&
                rm expect
        )
 '
@@ -93,7 +93,7 @@ test_expect_success 'update git svn-cloned repo again' '
                echo a > expect &&
                echo b >> expect &&
                echo c >> expect &&
-               test_cmp a expect &&
+               test_cmp expect a &&
                rm expect
        )
 '
index 5dfee07d9add00d4a68e70c89c8711604f218ef1..251fdd66c47b5e378cf091a47ba1f804eb74c487 100755 (executable)
@@ -148,7 +148,7 @@ test_expect_success PERL 'import from a CVS working tree' '
                git cvsimport -a -z0 &&
                echo 1 >expect &&
                git log -1 --pretty=format:%s%n >actual &&
-               test_cmp actual expect
+               test_cmp expect actual
        )
 
 '
index c4c3c4954612ff317a72bc342741bee8f3c628b0..3e64b11eac5ebb7b0c01487c5f11bf39cd9855c0 100755 (executable)
@@ -29,11 +29,11 @@ test_expect_failure PERL 'import with criss cross times on revisions' '
 Rev 3
 Rev 2
 Rev 1" > expect-master &&
-    test_cmp actual-master expect-master &&
+    test_cmp expect-master actual-master &&
 
     echo "Rev 5 Branch A Wed Mar 11 19:09:10 2009 +0000
 Rev 4 Branch A Wed Mar 11 19:03:52 2009 +0000" > expect-A &&
-    test_cmp actual-A expect-A
+    test_cmp expect-A actual-A
 '
 
 test_done
index a4b3db24bd9afee29de1679b47a5371a10c9863c..2ff4aa932df44e3afed6447acceb20e8c599c4ea 100755 (executable)
@@ -31,7 +31,7 @@ test_expect_success PERL 'check timestamps are UTC (TZ=CST6CDT)' '
        Rev  2 2005-02-01 00:00:00 +0000
        Rev  1 2005-01-01 00:00:00 +0000
        EOF
-       test_cmp actual-1 expect-1
+       test_cmp expect-1 actual-1
 '
 
 test_expect_success PERL 'check timestamps with author-specific timezones' '
@@ -65,7 +65,7 @@ test_expect_success PERL 'check timestamps with author-specific timezones' '
        Rev  2 2005-01-31 18:00:00 -0600 User Two
        Rev  1 2005-01-01 00:00:00 +0000 User One
        EOF
-       test_cmp actual-2 expect-2
+       test_cmp expect-2 actual-2
 '
 
 test_done
index 4207af40777c69365dc395e800a7e4214beab076..78d8c3783bd301a3dd2bf7061f04e08368d7ca37 100644 (file)
@@ -42,6 +42,8 @@ test_decode_color () {
                function name(n) {
                        if (n == 0) return "RESET";
                        if (n == 1) return "BOLD";
+                       if (n == 2) return "FAINT";
+                       if (n == 3) return "ITALIC";
                        if (n == 7) return "REVERSE";
                        if (n == 30) return "BLACK";
                        if (n == 31) return "RED";
@@ -1155,3 +1157,72 @@ depacketize () {
                }
        '
 }
+
+# Set the hash algorithm in use to $1.  Only useful when testing the testsuite.
+test_set_hash () {
+       test_hash_algo="$1"
+}
+
+# Detect the hash algorithm in use.
+test_detect_hash () {
+       # Currently we only support SHA-1, but in the future this function will
+       # actually detect the algorithm in use.
+       test_hash_algo='sha1'
+}
+
+# Load common hash metadata and common placeholder object IDs for use with
+# test_oid.
+test_oid_init () {
+       test -n "$test_hash_algo" || test_detect_hash &&
+       test_oid_cache <"$TEST_DIRECTORY/oid-info/hash-info" &&
+       test_oid_cache <"$TEST_DIRECTORY/oid-info/oid"
+}
+
+# Load key-value pairs from stdin suitable for use with test_oid.  Blank lines
+# and lines starting with "#" are ignored.  Keys must be shell identifier
+# characters.
+#
+# Examples:
+# rawsz sha1:20
+# rawsz sha256:32
+test_oid_cache () {
+       local tag rest k v &&
+
+       { test -n "$test_hash_algo" || test_detect_hash; } &&
+       while read tag rest
+       do
+               case $tag in
+               \#*)
+                       continue;;
+               ?*)
+                       # non-empty
+                       ;;
+               *)
+                       # blank line
+                       continue;;
+               esac &&
+
+               k="${rest%:*}" &&
+               v="${rest#*:}" &&
+
+               if ! expr "$k" : '[a-z0-9][a-z0-9]*$' >/dev/null
+               then
+                       error 'bug in the test script: bad hash algorithm'
+               fi &&
+               eval "test_oid_${k}_$tag=\"\$v\""
+       done
+}
+
+# Look up a per-hash value based on a key ($1).  The value must have been loaded
+# by test_oid_init or test_oid_cache.
+test_oid () {
+       local var="test_oid_${test_hash_algo}_$1" &&
+
+       # If the variable is unset, we must be missing an entry for this
+       # key-hash pair, so exit with an error.
+       if eval "test -z \"\${$var+set}\""
+       then
+               error "bug in the test script: undefined key '$1'" >&2
+       fi &&
+       eval "printf '%s' \"\${$var}\""
+}
index 44288cbb598435a5dfff05e8e895cca85e53e804..3f95bfda605f7ad1f9b0b0385ffdc91e5cca415e 100644 (file)
@@ -1083,6 +1083,12 @@ else
        test_set_prereq C_LOCALE_OUTPUT
 fi
 
+if test -z "$GIT_TEST_CHECK_CACHE_TREE"
+then
+       GIT_TEST_CHECK_CACHE_TREE=true
+       export GIT_TEST_CHECK_CACHE_TREE
+fi
+
 test_lazy_prereq PIPE '
        # test whether the filesystem supports FIFOs
        test_have_prereq !MINGW,!CYGWIN &&
index 139ecd97f8eb88b597aab50c2eb2b171a11ef3ef..d43ad8c1912d977183270fabe7ac76c5bbb7a1a3 100644 (file)
@@ -279,7 +279,7 @@ int reopen_tempfile(struct tempfile *tempfile)
                BUG("reopen_tempfile called for an inactive object");
        if (0 <= tempfile->fd)
                BUG("reopen_tempfile called for an open object");
-       tempfile->fd = open(tempfile->filename.buf, O_WRONLY);
+       tempfile->fd = open(tempfile->filename.buf, O_WRONLY|O_TRUNC);
        return tempfile->fd;
 }
 
index 36434eb6fa64721bcafdb40db5f9e7d0400a50b4..61d8dc4d1bb2fc446ac7b452661064e615b4b5c9 100644 (file)
@@ -236,8 +236,8 @@ extern int close_tempfile_gently(struct tempfile *tempfile);
  *   it (and nobody else) to inspect or even modify the file's
  *   contents.
  *
- * * `reopen_tempfile()` to reopen the temporary file. Make further
- *   updates to the contents.
+ * * `reopen_tempfile()` to reopen the temporary file, truncating the existing
+ *   contents. Write out the new contents.
  *
  * * `rename_tempfile()` to move the file to its permanent location.
  */
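
The comment above documents the create/close/reopen/rename cycle; the behavioural change in this patch is that reopen_tempfile() now opens with O_TRUNC, so a rewrite fully replaces the earlier contents instead of overlaying them. A minimal sketch of that cycle follows, assuming git's existing tempfile helpers (create_tempfile(), get_tempfile_fd(), rename_tempfile()); it is illustration only, not code taken from this series.

    #include "cache.h"
    #include "tempfile.h"

    /*
     * Sketch only: write a file twice through the tempfile API.  Because
     * reopen_tempfile() now uses O_WRONLY|O_TRUNC, the second write
     * replaces the first instead of partially overwriting it in place.
     */
    static int write_twice(const char *path)
    {
            struct tempfile *t = create_tempfile(path);

            if (!t)
                    return -1;
            if (write_in_full(get_tempfile_fd(t), "first\n", 6) < 0 ||
                close_tempfile_gently(t) < 0)
                    return -1;

            if (reopen_tempfile(t) < 0)
                    return -1;
            if (write_in_full(get_tempfile_fd(t), "longer second version\n", 22) < 0)
                    return -1;

            return rename_tempfile(&t, path);
    }
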
diff --git a/trace.c b/trace.c
index fc623e91fdd7ed8268922ae0460cfbd6903f3800..fa4a2e7120e405f3cf2d12422802b785f9e37fad 100644 (file)
--- a/trace.c
+++ b/trace.c
@@ -176,10 +176,30 @@ void trace_strbuf_fl(const char *file, int line, struct trace_key *key,
        strbuf_release(&buf);
 }
 
+static uint64_t perf_start_times[10];
+static int perf_indent;
+
+uint64_t trace_performance_enter(void)
+{
+       uint64_t now;
+
+       if (!trace_want(&trace_perf_key))
+               return 0;
+
+       now = getnanotime();
+       perf_start_times[perf_indent] = now;
+       if (perf_indent + 1 < ARRAY_SIZE(perf_start_times))
+               perf_indent++;
+       else
+               BUG("Too deep indentation");
+       return now;
+}
+
 static void trace_performance_vprintf_fl(const char *file, int line,
                                         uint64_t nanos, const char *format,
                                         va_list ap)
 {
+       static const char space[] = "          ";
        struct strbuf buf = STRBUF_INIT;
 
        if (!prepare_trace_line(file, line, &trace_perf_key, &buf))
@@ -188,7 +208,10 @@ static void trace_performance_vprintf_fl(const char *file, int line,
        strbuf_addf(&buf, "performance: %.9f s", (double) nanos / 1000000000);
 
        if (format && *format) {
-               strbuf_addstr(&buf, ": ");
+               if (perf_indent >= strlen(space))
+                       BUG("Too deep indentation");
+
+               strbuf_addf(&buf, ":%.*s ", perf_indent, space);
                strbuf_vaddf(&buf, format, ap);
        }
 
@@ -244,6 +267,24 @@ void trace_performance_since(uint64_t start, const char *format, ...)
        va_end(ap);
 }
 
+void trace_performance_leave(const char *format, ...)
+{
+       va_list ap;
+       uint64_t since;
+
+       if (perf_indent)
+               perf_indent--;
+
+       if (!format) /* Allow callers to leave without tracing anything */
+               return;
+
+       since = perf_start_times[perf_indent];
+       va_start(ap, format);
+       trace_performance_vprintf_fl(NULL, 0, getnanotime() - since,
+                                    format, ap);
+       va_end(ap);
+}
+
 #else
 
 void trace_printf_key_fl(const char *file, int line, struct trace_key *key,
@@ -273,6 +314,24 @@ void trace_performance_fl(const char *file, int line, uint64_t nanos,
        va_end(ap);
 }
 
+void trace_performance_leave_fl(const char *file, int line,
+                               uint64_t nanos, const char *format, ...)
+{
+       va_list ap;
+       uint64_t since;
+
+       if (perf_indent)
+               perf_indent--;
+
+       if (!format) /* Allow callers to leave without tracing anything */
+               return;
+
+       since = perf_start_times[perf_indent];
+       va_start(ap, format);
+       trace_performance_vprintf_fl(file, line, nanos - since, format, ap);
+       va_end(ap);
+}
+
 #endif /* HAVE_VARIADIC_MACROS */
 
 
@@ -411,13 +470,11 @@ uint64_t getnanotime(void)
        }
 }
 
-static uint64_t command_start_time;
 static struct strbuf command_line = STRBUF_INIT;
 
 static void print_command_performance_atexit(void)
 {
-       trace_performance_since(command_start_time, "git command:%s",
-                               command_line.buf);
+       trace_performance_leave("git command:%s", command_line.buf);
 }
 
 void trace_command_performance(const char **argv)
@@ -425,10 +482,10 @@ void trace_command_performance(const char **argv)
        if (!trace_want(&trace_perf_key))
                return;
 
-       if (!command_start_time)
+       if (!command_line.len)
                atexit(print_command_performance_atexit);
 
        strbuf_reset(&command_line);
        sq_quote_argv_pretty(&command_line, argv);
-       command_start_time = getnanotime();
+       trace_performance_enter();
 }
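
The new enter/leave pair replaces the old single start-time bookkeeping and indents nested regions in GIT_TRACE_PERFORMANCE output. A minimal usage sketch (the function names below are hypothetical; the real callers added in this series are in unpack-trees.c and the atexit handler above):

    #include "cache.h"
    #include "trace.h"

    /*
     * Sketch only: each trace_performance_enter() pushes a start time and
     * bumps the indent; each trace_performance_leave() pops it and prints
     * an indented "performance:" line for the elapsed region.
     */
    static void do_inner_work(void)
    {
            trace_performance_enter();
            /* ... real work would go here ... */
            trace_performance_leave("inner step");
    }

    static void do_outer_work(void)
    {
            trace_performance_enter();
            do_inner_work();
            trace_performance_leave("outer step");
    }
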
diff --git a/trace.h b/trace.h
index 2b6a1bc17c2cc1a8642d8c7bd460808638f28d77..171b256d261c771927541cf4d431b926b3ba102b 100644 (file)
--- a/trace.h
+++ b/trace.h
@@ -23,6 +23,7 @@ extern void trace_disable(struct trace_key *key);
 extern uint64_t getnanotime(void);
 extern void trace_command_performance(const char **argv);
 extern void trace_verbatim(struct trace_key *key, const void *buf, unsigned len);
+uint64_t trace_performance_enter(void);
 
 #ifndef HAVE_VARIADIC_MACROS
 
@@ -45,6 +46,9 @@ extern void trace_performance(uint64_t nanos, const char *format, ...);
 __attribute__((format (printf, 2, 3)))
 extern void trace_performance_since(uint64_t start, const char *format, ...);
 
+__attribute__((format (printf, 1, 2)))
+void trace_performance_leave(const char *format, ...);
+
 #else
 
 /*
@@ -118,6 +122,14 @@ extern void trace_performance_since(uint64_t start, const char *format, ...);
                                             __VA_ARGS__);                  \
        } while (0)
 
+#define trace_performance_leave(...)                                       \
+       do {                                                                \
+               if (trace_pass_fl(&trace_perf_key))                         \
+                       trace_performance_leave_fl(TRACE_CONTEXT, __LINE__, \
+                                                  getnanotime(),           \
+                                                  __VA_ARGS__);            \
+       } while (0)
+
 /* backend functions, use non-*fl macros instead */
 __attribute__((format (printf, 4, 5)))
 extern void trace_printf_key_fl(const char *file, int line, struct trace_key *key,
@@ -130,6 +142,9 @@ extern void trace_strbuf_fl(const char *file, int line, struct trace_key *key,
 __attribute__((format (printf, 4, 5)))
 extern void trace_performance_fl(const char *file, int line,
                                 uint64_t nanos, const char *fmt, ...);
+__attribute__((format (printf, 4, 5)))
+extern void trace_performance_leave_fl(const char *file, int line,
+                                      uint64_t nanos, const char *fmt, ...);
 static inline int trace_pass_fl(struct trace_key *key)
 {
        return key->fd || !key->initialized;
index 4e309460d1367a7b35b61c2391525a165a3645e3..0796f326b36bac334cbf7ca5162cb22c4b62d1e8 100644 (file)
--- a/trailer.c
+++ b/trailer.c
@@ -585,7 +585,7 @@ static const char *token_from_item(struct arg_item *item, char *tok)
        return item->conf.name;
 }
 
-static int token_matches_item(const char *tok, struct arg_item *item, int tok_len)
+static int token_matches_item(const char *tok, struct arg_item *item, size_t tok_len)
 {
        if (!strncasecmp(tok, item->conf.name, tok_len))
                return 1;
@@ -603,7 +603,7 @@ static int token_matches_item(const char *tok, struct arg_item *item, int tok_le
  * distinguished from the non-well-formed-line case (in which this function
  * returns -1) because some callers of this function need such a distinction.
  */
-static int find_separator(const char *line, const char *separators)
+static ssize_t find_separator(const char *line, const char *separators)
 {
        int whitespace_found = 0;
        const char *c;
@@ -630,10 +630,10 @@ static int find_separator(const char *line, const char *separators)
  */
 static void parse_trailer(struct strbuf *tok, struct strbuf *val,
                         const struct conf_info **conf, const char *trailer,
-                        int separator_pos)
+                        ssize_t separator_pos)
 {
        struct arg_item *item;
-       int tok_len;
+       size_t tok_len;
        struct list_head *pos;
 
        if (separator_pos != -1) {
@@ -721,7 +721,7 @@ static void process_command_line_args(struct list_head *arg_head,
        list_for_each(pos, new_trailer_head) {
                struct new_trailer_item *tr =
                        list_entry(pos, struct new_trailer_item, list);
-               int separator_pos = find_separator(tr->text, cl_separators);
+               ssize_t separator_pos = find_separator(tr->text, cl_separators);
 
                if (separator_pos == 0) {
                        struct strbuf sb = STRBUF_INIT;
@@ -763,9 +763,9 @@ static const char *next_line(const char *str)
 /*
  * Return the position of the start of the last line. If len is 0, return -1.
  */
-static int last_line(const char *buf, size_t len)
+static ssize_t last_line(const char *buf, size_t len)
 {
-       int i;
+       ssize_t i;
        if (len == 0)
                return -1;
        if (len == 1)
@@ -788,12 +788,14 @@ static int last_line(const char *buf, size_t len)
  * Return the position of the start of the patch or the length of str if there
  * is no patch in the message.
  */
-static int find_patch_start(const char *str)
+static size_t find_patch_start(const char *str)
 {
        const char *s;
 
        for (s = str; *s; s = next_line(s)) {
-               if (starts_with(s, "---"))
+               const char *v;
+
+               if (skip_prefix(s, "---", &v) && isspace(*v))
                        return s - str;
        }
 
@@ -804,10 +806,11 @@ static int find_patch_start(const char *str)
  * Return the position of the first trailer line or len if there are no
  * trailers.
  */
-static int find_trailer_start(const char *buf, size_t len)
+static size_t find_trailer_start(const char *buf, size_t len)
 {
        const char *s;
-       int end_of_title, l, only_spaces = 1;
+       ssize_t end_of_title, l;
+       int only_spaces = 1;
        int recognized_prefix = 0, trailer_lines = 0, non_trailer_lines = 0;
        /*
         * Number of possible continuation lines encountered. This will be
@@ -838,7 +841,7 @@ static int find_trailer_start(const char *buf, size_t len)
             l = last_line(buf, l)) {
                const char *bol = buf + l;
                const char **p;
-               int separator_pos;
+               ssize_t separator_pos;
 
                if (bol[0] == comment_line_char) {
                        non_trailer_lines += possible_continuation_lines;
@@ -899,14 +902,14 @@ static int find_trailer_start(const char *buf, size_t len)
 }
 
 /* Return the position of the end of the trailers. */
-static int find_trailer_end(const char *buf, size_t len)
+static size_t find_trailer_end(const char *buf, size_t len)
 {
        return len - ignore_non_trailer(buf, len);
 }
 
 static int ends_with_blank_line(const char *buf, size_t len)
 {
-       int ll = last_line(buf, len);
+       ssize_t ll = last_line(buf, len);
        if (ll < 0)
                return 0;
        return is_blank_line(buf + ll);
@@ -939,17 +942,17 @@ static void unfold_value(struct strbuf *val)
        strbuf_release(&out);
 }
 
-static int process_input_file(FILE *outfile,
-                             const char *str,
-                             struct list_head *head,
-                             const struct process_trailer_options *opts)
+static size_t process_input_file(FILE *outfile,
+                                const char *str,
+                                struct list_head *head,
+                                const struct process_trailer_options *opts)
 {
        struct trailer_info info;
        struct strbuf tok = STRBUF_INIT;
        struct strbuf val = STRBUF_INIT;
-       int i;
+       size_t i;
 
-       trailer_info_get(&info, str);
+       trailer_info_get(&info, str, opts);
 
        /* Print lines before the trailers as is */
        if (!opts->only_trailers)
@@ -1032,7 +1035,7 @@ void process_trailers(const char *file,
 {
        LIST_HEAD(head);
        struct strbuf sb = STRBUF_INIT;
-       int trailer_end;
+       size_t trailer_end;
        FILE *outfile = stdout;
 
        ensure_configured();
@@ -1066,7 +1069,8 @@ void process_trailers(const char *file,
        strbuf_release(&sb);
 }
 
-void trailer_info_get(struct trailer_info *info, const char *str)
+void trailer_info_get(struct trailer_info *info, const char *str,
+                     const struct process_trailer_options *opts)
 {
        int patch_start, trailer_end, trailer_start;
        struct strbuf **trailer_lines, **ptr;
@@ -1076,7 +1080,11 @@ void trailer_info_get(struct trailer_info *info, const char *str)
 
        ensure_configured();
 
-       patch_start = find_patch_start(str);
+       if (opts->no_divider)
+               patch_start = strlen(str);
+       else
+               patch_start = find_patch_start(str);
+
        trailer_end = find_trailer_end(str, patch_start);
        trailer_start = find_trailer_start(str, trailer_end);
 
@@ -1111,7 +1119,7 @@ void trailer_info_get(struct trailer_info *info, const char *str)
 
 void trailer_info_release(struct trailer_info *info)
 {
-       int i;
+       size_t i;
        for (i = 0; i < info->trailer_nr; i++)
                free(info->trailers[i]);
        free(info->trailers);
@@ -1121,7 +1129,7 @@ static void format_trailer_info(struct strbuf *out,
                                const struct trailer_info *info,
                                const struct process_trailer_options *opts)
 {
-       int i;
+       size_t i;
 
        /* If we want the whole block untouched, we can take the fast path. */
        if (!opts->only_trailers && !opts->unfold) {
@@ -1132,7 +1140,7 @@ static void format_trailer_info(struct strbuf *out,
 
        for (i = 0; i < info->trailer_nr; i++) {
                char *trailer = info->trailers[i];
-               int separator_pos = find_separator(trailer, separators);
+               ssize_t separator_pos = find_separator(trailer, separators);
 
                if (separator_pos >= 1) {
                        struct strbuf tok = STRBUF_INIT;
@@ -1158,7 +1166,7 @@ void format_trailers_from_commit(struct strbuf *out, const char *msg,
 {
        struct trailer_info info;
 
-       trailer_info_get(&info, msg);
+       trailer_info_get(&info, msg, opts);
        format_trailer_info(out, &info, opts);
        trailer_info_release(&info);
 }
index 9c10026c358326ce0c0098ca52341ce8160c5bbe..b997739649a37e8791e8448c2e7daefe8023bb71 100644 (file)
--- a/trailer.h
+++ b/trailer.h
@@ -71,6 +71,7 @@ struct process_trailer_options {
        int only_trailers;
        int only_input;
        int unfold;
+       int no_divider;
 };
 
 #define PROCESS_TRAILER_OPTIONS_INIT {0}
@@ -79,7 +80,8 @@ void process_trailers(const char *file,
                      const struct process_trailer_options *opts,
                      struct list_head *new_trailer_head);
 
-void trailer_info_get(struct trailer_info *info, const char *str);
+void trailer_info_get(struct trailer_info *info, const char *str,
+                     const struct process_trailer_options *opts);
 
 void trailer_info_release(struct trailer_info *info);
 
index 06ffea277460d68694aa4700b269601b56dd46ce..1c76d64aba9fac7d62dfc8e95ab2b32f45dd95a5 100644 (file)
@@ -1228,7 +1228,7 @@ int transport_fetch_refs(struct transport *transport, struct ref *refs)
                nr_refs++;
                if (rm->peer_ref &&
                    !is_null_oid(&rm->old_oid) &&
-                   !oidcmp(&rm->peer_ref->old_oid, &rm->old_oid))
+                   oideq(&rm->peer_ref->old_oid, &rm->old_oid))
                        continue;
                ALLOC_GROW(heads, nr_heads + 1, nr_alloc);
                heads[nr_heads++] = rm;
index 553bc0e63ae37ada1c3e19ae748d1228561f00f6..425668e1e0b468d55e87dba69169acd53bba2c31 100644 (file)
@@ -491,7 +491,7 @@ static struct combine_diff_path *ll_diff_tree_paths(
                                                continue;
 
                                        /* diff(t,pi) != ø */
-                                       if (oidcmp(t.entry.oid, tp[i].entry.oid) ||
+                                       if (!oideq(t.entry.oid, tp[i].entry.oid) ||
                                            (t.entry.mode != tp[i].entry.mode))
                                                continue;
 
index f25089b878a8b0842a9d6407cb6b1821867a737c..51bfac6aa0bea246cdf6415490654b7ca2e4b9c2 100644 (file)
@@ -336,6 +336,46 @@ static struct progress *get_progress(struct unpack_trees_options *o)
        return start_delayed_progress(_("Checking out files"), total);
 }
 
+static void setup_collided_checkout_detection(struct checkout *state,
+                                             struct index_state *index)
+{
+       int i;
+
+       state->clone = 1;
+       for (i = 0; i < index->cache_nr; i++)
+               index->cache[i]->ce_flags &= ~CE_MATCHED;
+}
+
+static void report_collided_checkout(struct index_state *index)
+{
+       struct string_list list = STRING_LIST_INIT_NODUP;
+       int i;
+
+       for (i = 0; i < index->cache_nr; i++) {
+               struct cache_entry *ce = index->cache[i];
+
+               if (!(ce->ce_flags & CE_MATCHED))
+                       continue;
+
+               string_list_append(&list, ce->name);
+               ce->ce_flags &= ~CE_MATCHED;
+       }
+
+       list.cmp = fspathcmp;
+       string_list_sort(&list);
+
+       if (list.nr) {
+               warning(_("the following paths have collided (e.g. case-sensitive paths\n"
+                         "on a case-insensitive filesystem) and only one from the same\n"
+                         "colliding group is in the working tree:\n"));
+
+               for (i = 0; i < list.nr; i++)
+                       fprintf(stderr, "  '%s'\n", list.items[i].string);
+       }
+
+       string_list_clear(&list, 0);
+}
+
 static int check_updates(struct unpack_trees_options *o)
 {
        unsigned cnt = 0;
@@ -345,11 +385,15 @@ static int check_updates(struct unpack_trees_options *o)
        struct checkout state = CHECKOUT_INIT;
        int i;
 
+       trace_performance_enter();
        state.force = 1;
        state.quiet = 1;
        state.refresh_cache = 1;
        state.istate = index;
 
+       if (o->clone)
+               setup_collided_checkout_detection(&state, index);
+
        progress = get_progress(o);
 
        if (o->update)
@@ -392,7 +436,7 @@ static int check_updates(struct unpack_trees_options *o)
                }
                if (to_fetch.nr)
                        fetch_objects(repository_format_partial_clone,
-                                     &to_fetch);
+                                     to_fetch.oid, to_fetch.nr);
                fetch_if_missing = fetch_if_missing_store;
                oid_array_clear(&to_fetch);
        }
@@ -414,6 +458,11 @@ static int check_updates(struct unpack_trees_options *o)
        errs |= finish_delayed_checkout(&state);
        if (o->update)
                git_attr_set_direction(GIT_ATTR_CHECKIN);
+
+       if (o->clone)
+               report_collided_checkout(index);
+
+       trace_performance_leave("check_updates");
        return errs != 0;
 }
 
@@ -630,7 +679,114 @@ static int switch_cache_bottom(struct traverse_info *info)
 
 static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k)
 {
-       return name_j->oid && name_k->oid && !oidcmp(name_j->oid, name_k->oid);
+       return name_j->oid && name_k->oid && oideq(name_j->oid, name_k->oid);
+}
+
+static int all_trees_same_as_cache_tree(int n, unsigned long dirmask,
+                                       struct name_entry *names,
+                                       struct traverse_info *info)
+{
+       struct unpack_trees_options *o = info->data;
+       int i;
+
+       if (!o->merge || dirmask != ((1 << n) - 1))
+               return 0;
+
+       for (i = 1; i < n; i++)
+               if (!are_same_oid(names, names + i))
+                       return 0;
+
+       return cache_tree_matches_traversal(o->src_index->cache_tree, names, info);
+}
+
+static int index_pos_by_traverse_info(struct name_entry *names,
+                                     struct traverse_info *info)
+{
+       struct unpack_trees_options *o = info->data;
+       int len = traverse_path_len(info, names);
+       char *name = xmalloc(len + 1 /* slash */ + 1 /* NUL */);
+       int pos;
+
+       make_traverse_path(name, info, names);
+       name[len++] = '/';
+       name[len] = '\0';
+       pos = index_name_pos(o->src_index, name, len);
+       if (pos >= 0)
+               BUG("This is a directory and should not exist in index");
+       pos = -pos - 1;
+       if (!starts_with(o->src_index->cache[pos]->name, name) ||
+           (pos > 0 && starts_with(o->src_index->cache[pos-1]->name, name)))
+               BUG("pos must point at the first entry in this directory");
+       free(name);
+       return pos;
+}
+
+/*
+ * Fast path if we detect that all trees are the same as cache-tree at this
+ * path. We'll walk these trees in an iterative loop using cache-tree/index
+ * instead of ODB since we already know what these trees contain.
+ */
+static int traverse_by_cache_tree(int pos, int nr_entries, int nr_names,
+                                 struct name_entry *names,
+                                 struct traverse_info *info)
+{
+       struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
+       struct unpack_trees_options *o = info->data;
+       struct cache_entry *tree_ce = NULL;
+       int ce_len = 0;
+       int i, d;
+
+       if (!o->merge)
+               BUG("We need cache-tree to do this optimization");
+
+       /*
+        * Do what unpack_callback() and unpack_nondirectories() normally
+        * do. But we walk all paths in an iterative loop instead.
+        *
+        * D/F conflicts and higher stage entries are not a concern
+        * because cache-tree would be invalidated and we would never
+        * get here in the first place.
+        */
+       for (i = 0; i < nr_entries; i++) {
+               int new_ce_len, len, rc;
+
+               src[0] = o->src_index->cache[pos + i];
+
+               len = ce_namelen(src[0]);
+               new_ce_len = cache_entry_size(len);
+
+               if (new_ce_len > ce_len) {
+                       new_ce_len <<= 1;
+                       tree_ce = xrealloc(tree_ce, new_ce_len);
+                       memset(tree_ce, 0, new_ce_len);
+                       ce_len = new_ce_len;
+
+                       tree_ce->ce_flags = create_ce_flags(0);
+
+                       for (d = 1; d <= nr_names; d++)
+                               src[d] = tree_ce;
+               }
+
+               tree_ce->ce_mode = src[0]->ce_mode;
+               tree_ce->ce_namelen = len;
+               oidcpy(&tree_ce->oid, &src[0]->oid);
+               memcpy(tree_ce->name, src[0]->name, len + 1);
+
+               rc = call_unpack_fn((const struct cache_entry * const *)src, o);
+               if (rc < 0) {
+                       free(tree_ce);
+                       return rc;
+               }
+
+               mark_ce_used(src[0], o);
+       }
+       free(tree_ce);
+       if (o->debug_unpack)
+               printf("Unpacked %d entries from %s to %s using cache-tree\n",
+                      nr_entries,
+                      o->src_index->cache[pos]->name,
+                      o->src_index->cache[pos + nr_entries - 1]->name);
+       return 0;
 }
 
 static int traverse_trees_recursive(int n, unsigned long dirmask,
@@ -644,6 +800,27 @@ static int traverse_trees_recursive(int n, unsigned long dirmask,
        void *buf[MAX_UNPACK_TREES];
        struct traverse_info newinfo;
        struct name_entry *p;
+       int nr_entries;
+
+       nr_entries = all_trees_same_as_cache_tree(n, dirmask, names, info);
+       if (nr_entries > 0) {
+               struct unpack_trees_options *o = info->data;
+               int pos = index_pos_by_traverse_info(names, info);
+
+               if (!o->merge || df_conflicts)
+                       BUG("Wrong condition to get here buddy");
+
+               /*
+                * All entries up to 'pos' must have been processed
+                * (i.e. marked CE_UNPACKED) at this point. But to be safe,
+                * save and restore cache_bottom anyway to not miss
+                * unprocessed entries before 'pos'.
+                */
+               bottom = o->cache_bottom;
+               ret = traverse_by_cache_tree(pos, nr_entries, n, names, info);
+               o->cache_bottom = bottom;
+               return ret;
+       }
 
        p = names;
        while (!p->mode)
@@ -810,6 +987,11 @@ static struct cache_entry *create_ce_entry(const struct traverse_info *info,
        return ce;
 }
 
+/*
+ * Note that traverse_by_cache_tree() duplicates some logic in this function
+ * without actually calling it. If you change the logic here you may need to
+ * check and change there as well.
+ */
 static int unpack_nondirectories(int n, unsigned long mask,
                                 unsigned long dirmask,
                                 struct cache_entry **src,
@@ -1002,6 +1184,11 @@ static void debug_unpack_callback(int n,
                debug_name_entry(i, names + i);
 }
 
+/*
+ * Note that traverse_by_cache_tree() duplicates some logic in this function
+ * without actually calling it. If you change the logic here you may need to
+ * check and change there as well.
+ */
 static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
 {
        struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
@@ -1289,6 +1476,7 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
        if (len > MAX_UNPACK_TREES)
                die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);
 
+       trace_performance_enter();
        memset(&el, 0, sizeof(el));
        if (!core_apply_sparse_checkout || !o->update)
                o->skip_sparse_checkout = 1;
@@ -1361,7 +1549,10 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
                        }
                }
 
-               if (traverse_trees(len, t, &info) < 0)
+               trace_performance_enter();
+               ret = traverse_trees(len, t, &info);
+               trace_performance_leave("traverse_trees");
+               if (ret < 0)
                        goto return_failed;
        }
 
@@ -1436,7 +1627,10 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
 
        ret = check_updates(o) ? (-2) : 0;
        if (o->dst_index) {
+               move_index_extensions(&o->result, o->src_index);
                if (!ret) {
+                       if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
+                               cache_tree_verify(&o->result);
                        if (!o->result.cache_tree)
                                o->result.cache_tree = cache_tree();
                        if (!cache_tree_fully_valid(o->result.cache_tree))
@@ -1444,7 +1638,6 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
                                                  WRITE_TREE_SILENT |
                                                  WRITE_TREE_REPAIR);
                }
-               move_index_extensions(&o->result, o->src_index);
                discard_index(o->dst_index);
                *o->dst_index = o->result;
        } else {
@@ -1453,6 +1646,7 @@ int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options
        o->src_index = NULL;
 
 done:
+       trace_performance_leave("unpack_trees");
        clear_exclude_list(&el);
        return ret;
 
@@ -1484,7 +1678,7 @@ static int same(const struct cache_entry *a, const struct cache_entry *b)
        if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)
                return 0;
        return a->ce_mode == b->ce_mode &&
-              !oidcmp(&a->oid, &b->oid);
+              oideq(&a->oid, &b->oid);
 }
 
 
@@ -1616,7 +1810,7 @@ static int verify_clean_subdirectory(const struct cache_entry *ce,
                 * If we are not going to update the submodule, then
                 * we don't care.
                 */
-               if (!sub_head && !oidcmp(&oid, &ce->oid))
+               if (!sub_head && oideq(&oid, &ce->oid))
                        return 0;
                return verify_clean_submodule(sub_head ? NULL : oid_to_hex(&oid),
                                              ce, error_type, o);
@@ -1644,6 +1838,7 @@ static int verify_clean_subdirectory(const struct cache_entry *ce,
                        if (verify_uptodate(ce2, o))
                                return -1;
                        add_entry(o, ce2, CE_REMOVE, 0);
+                       invalidate_ce_path(ce, o);
                        mark_ce_used(ce2, o);
                }
                cnt++;
@@ -1903,6 +2098,8 @@ static int keep_entry(const struct cache_entry *ce,
                      struct unpack_trees_options *o)
 {
        add_entry(o, ce, 0, 0);
+       if (ce_stage(ce))
+               invalidate_ce_path(ce, o);
        return 1;
 }
 
index 847f217dbaecea678f48d5086e7099c3c24a53c4..0135080a7b4e91713b66f68669e57a2f5e4edbdd 100644 (file)
@@ -45,6 +45,7 @@ struct unpack_trees_options {
        unsigned int reset,
                     merge,
                     update,
+                    clone,
                     index_only,
                     nontrivial_merge,
                     trivial_merges_only,
index 82b393ec31917c0c2bcd904668a60b5aaef00633..540778d1ddb85b494a92bce75993c4d13d47ef82 100644 (file)
 #include "quote.h"
 #include "upload-pack.h"
 #include "serve.h"
+#include "commit-graph.h"
+#include "commit-reach.h"
 
 /* Remember to update object flag allocation in object.h */
 #define THEY_HAVE      (1u << 11)
 #define OUR_REF                (1u << 12)
 #define WANTED         (1u << 13)
 #define COMMON_KNOWN   (1u << 14)
-#define REACHABLE      (1u << 15)
 
 #define SHALLOW                (1u << 16)
 #define NOT_SHALLOW    (1u << 17)
@@ -337,64 +338,16 @@ static int got_oid(const char *hex, struct object_id *oid)
        return 0;
 }
 
-static int reachable(struct commit *want)
-{
-       struct prio_queue work = { compare_commits_by_commit_date };
-
-       prio_queue_put(&work, want);
-       while (work.nr) {
-               struct commit_list *list;
-               struct commit *commit = prio_queue_get(&work);
-
-               if (commit->object.flags & THEY_HAVE) {
-                       want->object.flags |= COMMON_KNOWN;
-                       break;
-               }
-               if (!commit->object.parsed)
-                       parse_object(the_repository, &commit->object.oid);
-               if (commit->object.flags & REACHABLE)
-                       continue;
-               commit->object.flags |= REACHABLE;
-               if (commit->date < oldest_have)
-                       continue;
-               for (list = commit->parents; list; list = list->next) {
-                       struct commit *parent = list->item;
-                       if (!(parent->object.flags & REACHABLE))
-                               prio_queue_put(&work, parent);
-               }
-       }
-       want->object.flags |= REACHABLE;
-       clear_commit_marks(want, REACHABLE);
-       clear_prio_queue(&work);
-       return (want->object.flags & COMMON_KNOWN);
-}
-
 static int ok_to_give_up(void)
 {
-       int i;
+       uint32_t min_generation = GENERATION_NUMBER_ZERO;
 
        if (!have_obj.nr)
                return 0;
 
-       for (i = 0; i < want_obj.nr; i++) {
-               struct object *want = want_obj.objects[i].item;
-
-               if (want->flags & COMMON_KNOWN)
-                       continue;
-               want = deref_tag(the_repository, want, "a want line", 0);
-               if (!want || want->type != OBJ_COMMIT) {
-                       /* no way to tell if this is reachable by
-                        * looking at the ancestry chain alone, so
-                        * leave a note to ourselves not to worry about
-                        * this object anymore.
-                        */
-                       want_obj.objects[i].item->flags |= COMMON_KNOWN;
-                       continue;
-               }
-               if (!reachable((struct commit *)want))
-                       return 0;
-       }
-       return 1;
+       return can_all_from_reach_with_flag(&want_obj, THEY_HAVE,
+                                           COMMON_KNOWN, oldest_have,
+                                           min_generation);
 }
 
 static int get_common_commits(void)
@@ -740,6 +693,7 @@ static void deepen_by_rev_list(int ac, const char **av,
 {
        struct commit_list *result;
 
+       close_commit_graph(the_repository);
        result = get_shallow_commits_by_rev_list(ac, av, SHALLOW, NOT_SHALLOW);
        send_shallow(result);
        free_commit_list(result);
index f3f4be579c9810d0fcf94badd4bf21770b99c91b..f565f6731d1336c761b527a8ce8a62a21b453dcc 100644 (file)
@@ -278,8 +278,7 @@ struct userdiff_driver *userdiff_find_by_path(const char *path)
                check = attr_check_initl("diff", NULL);
        if (!path)
                return NULL;
-       if (git_check_attr(&the_index, path, check))
-               return NULL;
+       git_check_attr(&the_index, path, check);
 
        if (ATTR_TRUE(check->items[0].value))
                return &driver_true;
index 97cda5f97bbc10b2f5690b945ab42d2beef74e2e..b0d0b5426da0d1cbe8d7b6ff569c7511569cf0a3 100644 (file)
@@ -217,7 +217,11 @@ struct worktree *find_worktree(struct worktree **list,
 
        if (prefix)
                arg = to_free = prefix_filename(prefix, arg);
-       path = real_pathdup(arg, 1);
+       path = real_pathdup(arg, 0);
+       if (!path) {
+               free(to_free);
+               return NULL;
+       }
        for (; *list; list++)
                if (!fspathcmp(path, real_path((*list)->path)))
                        break;
diff --git a/ws.c b/ws.c
index 5b67b426e7b41d92f9b11bd82fd7e9bd09c89d79..a64ab51e09a99e190e5f1912de19b1e093feb360 100644 (file)
--- a/ws.c
+++ b/ws.c
@@ -74,35 +74,31 @@ unsigned parse_whitespace_rule(const char *string)
 unsigned whitespace_rule(const char *pathname)
 {
        static struct attr_check *attr_whitespace_rule;
+       const char *value;
 
        if (!attr_whitespace_rule)
                attr_whitespace_rule = attr_check_initl("whitespace", NULL);
 
-       if (!git_check_attr(&the_index, pathname, attr_whitespace_rule)) {
-               const char *value;
-
-               value = attr_whitespace_rule->items[0].value;
-               if (ATTR_TRUE(value)) {
-                       /* true (whitespace) */
-                       unsigned all_rule = ws_tab_width(whitespace_rule_cfg);
-                       int i;
-                       for (i = 0; i < ARRAY_SIZE(whitespace_rule_names); i++)
-                               if (!whitespace_rule_names[i].loosens_error &&
-                                   !whitespace_rule_names[i].exclude_default)
-                                       all_rule |= whitespace_rule_names[i].rule_bits;
-                       return all_rule;
-               } else if (ATTR_FALSE(value)) {
-                       /* false (-whitespace) */
-                       return ws_tab_width(whitespace_rule_cfg);
-               } else if (ATTR_UNSET(value)) {
-                       /* reset to default (!whitespace) */
-                       return whitespace_rule_cfg;
-               } else {
-                       /* string */
-                       return parse_whitespace_rule(value);
-               }
-       } else {
+       git_check_attr(&the_index, pathname, attr_whitespace_rule);
+       value = attr_whitespace_rule->items[0].value;
+       if (ATTR_TRUE(value)) {
+               /* true (whitespace) */
+               unsigned all_rule = ws_tab_width(whitespace_rule_cfg);
+               int i;
+               for (i = 0; i < ARRAY_SIZE(whitespace_rule_names); i++)
+                       if (!whitespace_rule_names[i].loosens_error &&
+                           !whitespace_rule_names[i].exclude_default)
+                               all_rule |= whitespace_rule_names[i].rule_bits;
+               return all_rule;
+       } else if (ATTR_FALSE(value)) {
+               /* false (-whitespace) */
+               return ws_tab_width(whitespace_rule_cfg);
+       } else if (ATTR_UNSET(value)) {
+               /* reset to default (!whitespace) */
                return whitespace_rule_cfg;
+       } else {
+               /* string */
+               return parse_whitespace_rule(value);
        }
 }
 
index 5ffab61015da908ed36b7ca6455f53fd42f1e292..1c8746d0ea7d02f6f7899fa5418597b584d8cfb8 100644 (file)
@@ -453,8 +453,8 @@ static void wt_status_collect_changed_cb(struct diff_queue_struct *q,
                        d->worktree_status = p->status;
                if (S_ISGITLINK(p->two->mode)) {
                        d->dirty_submodule = p->two->dirty_submodule;
-                       d->new_submodule_commits = !!oidcmp(&p->one->oid,
-                                                           &p->two->oid);
+                       d->new_submodule_commits = !oideq(&p->one->oid,
+                                                         &p->two->oid);
                        if (s->status_format == STATUS_FORMAT_SHORT)
                                d->worktree_status = short_submodule_status(d);
                }
@@ -1487,10 +1487,10 @@ static void wt_status_get_detached_from(struct wt_status_state *state)
 
        if (dwim_ref(cb.buf.buf, cb.buf.len, &oid, &ref) == 1 &&
            /* sha1 is a commit? match without further lookup */
-           (!oidcmp(&cb.noid, &oid) ||
+           (oideq(&cb.noid, &oid) ||
             /* perhaps sha1 is a tag, try to dereference to a commit */
             ((commit = lookup_commit_reference_gently(the_repository, &oid, 1)) != NULL &&
-             !oidcmp(&cb.noid, &commit->object.oid)))) {
+             oideq(&cb.noid, &commit->object.oid)))) {
                const char *from = ref;
                if (!skip_prefix(from, "refs/tags/", &from))
                        skip_prefix(from, "refs/remotes/", &from);
@@ -1500,7 +1500,7 @@ static void wt_status_get_detached_from(struct wt_status_state *state)
                        xstrdup(find_unique_abbrev(&cb.noid, DEFAULT_ABBREV));
        oidcpy(&state->detached_oid, &cb.noid);
        state->detached_at = !get_oid("HEAD", &oid) &&
-                            !oidcmp(&oid, &state->detached_oid);
+                            oideq(&oid, &state->detached_oid);
 
        free(ref);
        strbuf_release(&cb.buf);
index ec6e574e4aa07414b9a17bb99ddee26fd44497de..e7af95db8654a88359b9abd31bc018fb1cb66b5f 100644 (file)
@@ -186,7 +186,7 @@ void read_mmblob(mmfile_t *ptr, const struct object_id *oid)
        unsigned long size;
        enum object_type type;
 
-       if (!oidcmp(oid, &null_oid)) {
+       if (oideq(oid, &null_oid)) {
                ptr->ptr = xstrdup("");
                ptr->size = 0;
                return;