Benoit Sigoure <tsunanet@gmail.com> <tsuna@lrde.epita.fr>
Bernt Hansen <bernt@norang.ca> <bernt@alumni.uwaterloo.ca>
Brandon Casey <drafnel@gmail.com> <casey@nrlssc.navy.mil>
+Brandon Williams <bwilliams.eng@gmail.com> <bmwill@google.com>
brian m. carlson <sandals@crustytoothpaste.net>
brian m. carlson <sandals@crustytoothpaste.net> <sandals@crustytoothpaste.ath.cx>
Bryan Larsen <bryan@larsen.st> <bryan.larsen@gmail.com>
MAN7_TXT += gittutorial.txt
MAN7_TXT += gitworkflows.txt
+ifdef MAN_FILTER
+MAN_TXT = $(filter $(MAN_FILTER),$(MAN1_TXT) $(MAN5_TXT) $(MAN7_TXT))
+else
MAN_TXT = $(MAN1_TXT) $(MAN5_TXT) $(MAN7_TXT)
+MAN_FILTER = $(MAN_TXT)
+endif
+
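+# For example (an illustrative invocation, assuming the usual manpage
+# targets), a single page can be rebuilt with
+#     make MAN_FILTER=git-status.txt man
+# Leaving MAN_FILTER (and HTML_FILTER below) unset builds the full set,
+# as before.
+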
MAN_XML = $(patsubst %.txt,%.xml,$(MAN_TXT))
MAN_HTML = $(patsubst %.txt,%.html,$(MAN_TXT))
GIT_MAN_REF = master
OBSOLETE_HTML += everyday.html
OBSOLETE_HTML += git-remote-helpers.html
-DOC_HTML = $(MAN_HTML) $(OBSOLETE_HTML)
ARTICLES += howto-index
ARTICLES += git-tools
SP_ARTICLES += $(TECH_DOCS)
SP_ARTICLES += technical/api-index
-DOC_HTML += $(patsubst %,%.html,$(ARTICLES) $(SP_ARTICLES))
+ARTICLES_HTML += $(patsubst %,%.html,$(ARTICLES) $(SP_ARTICLES))
+HTML_FILTER ?= $(ARTICLES_HTML) $(OBSOLETE_HTML)
+DOC_HTML = $(MAN_HTML) $(filter $(HTML_FILTER),$(ARTICLES_HTML) $(OBSOLETE_HTML))
-DOC_MAN1 = $(patsubst %.txt,%.1,$(MAN1_TXT))
-DOC_MAN5 = $(patsubst %.txt,%.5,$(MAN5_TXT))
-DOC_MAN7 = $(patsubst %.txt,%.7,$(MAN7_TXT))
+DOC_MAN1 = $(patsubst %.txt,%.1,$(filter $(MAN_FILTER),$(MAN1_TXT)))
+DOC_MAN5 = $(patsubst %.txt,%.5,$(filter $(MAN_FILTER),$(MAN5_TXT)))
+DOC_MAN7 = $(patsubst %.txt,%.7,$(filter $(MAN_FILTER),$(MAN7_TXT)))
prefix ?= $(HOME)
bindir ?= $(prefix)/bin
lint-docs::
$(QUIET_LINT)$(PERL_PATH) lint-gitlink.perl
+ifeq ($(wildcard po/Makefile),po/Makefile)
+doc-l10n install-l10n::
+ $(MAKE) -C po $@
+endif
+
.PHONY: FORCE
object into account (e.g. a tag object would want to go under
refs/tags/).
+ * "git checkout [<tree-ish>] path..." learned to report the number of
+ paths that have been checked out of the index or the tree-ish,
+   which gives it the same degree of verbosity as the case in which
+ the command checks out a branch.
+
+ * "git quiltimport" learned "--keep-non-patch" option.
+
+ * "git worktree remove" and "git worktree move" refused to work when
+   a submodule was involved. This has been loosened to ignore
+ uninitialized submodules.
+
+ * "git cherry-pick -m1" was forbidden when picking a non-merge
+ commit, even though there _is_ parent number 1 for such a commit.
+ This was done to avoid mistakes back when "cherry-pick" was about
+ picking a single commit, but is no longer useful with "cherry-pick"
+ that can pick a range of commits. Now the "-m$num" option is
+ allowed when picking any commit, as long as $num names an existing
+ parent of the commit.
+
+ * Update "git multimail" from the upstream.
+
+ * "git p4" update.
+
+ * The "--format=<placeholder>" option of for-each-ref, branch and tag
+   learned to show a few more traits of objects that can be obtained
+   via the object_info API.
+
Performance, Internal Implementation, Development Support etc.
* More codepaths become aware of working with in-core repository
instance other than the default "the_repository".
+ * The "strncat()" function is now among the banned functions.
+
+ * Portability updates for the HPE NonStop platform.
+
+ * Earlier we added "-Wformat-security" to developer builds, assuming
+ that "-Wall" (which includes "-Wformat" which in turn is required
+ to use "-Wformat-security") is always in effect. This is not true
+ when config.mak.autogen is in use, unfortunately. This has been
+ fixed by unconditionally adding "-Wall" to developer builds.
+
+ * The loose object cache used to optimize existence look-up has been
+ updated.
+
+ * Flaky tests can now be repeatedly run under load with the
+ "--stress" option.
+ (merge fb7d1e3ac8 sg/stress-test later to maint).
+
Fixes since v2.20
-----------------
which has been corrected.
(merge 02818a98d7 mk/http-backend-kill-children-before-exit later to maint).
+ * "git rev-list --exclude-promisor-objects" had to take an object
+ that does not exist locally (and is lazily available) from the
+ command line without barfing, but the code dereferenced NULL.
+ (merge 4cf67869b2 md/list-lazy-objects-fix later to maint).
+
+ * The traversal over tree objects has learned to honor
+ ":(attr:label)" pathspec match, which has been implemented only for
+ enumerating paths on the filesystem.
+ (merge 5a0b97b34c nd/attr-pathspec-in-tree-walk later to maint).
+
+ * BSD port updates.
+ (merge 4e3ecbd439 cb/openbsd-allows-reading-directory later to maint).
+ (merge b6bdc2a0f5 cb/t5004-empty-tar-archive-fix later to maint).
+ (merge 82cbc8cde2 cb/test-lint-cp-a later to maint).
+
+ * Lines that come over the wire and begin with certain keywords, as
+ well as lines that consist only of one of these keywords, ought to
+ be painted in color for easier eyeballing, but the latter was
+ broken ever since the feature was introduced in 2.19, which has
+ been corrected.
+ (merge 1f67290450 hn/highlight-sideband-keywords later to maint).
+
+ * "git log -G<regex>" looked for a hunk in the "git log -p" patch
+ output that contained a string that matches the given pattern.
+ Optimize this code to ignore binary files, which by default will
+ not show any hunk that would match any pattern (unless textconv or
+ the --text option is in effect, that is).
+ (merge e0e7cb8080 tb/log-G-binary later to maint).
+
+ * "git submodule update" ought to use a single job unless asked, but
+ by mistake used multiple jobs, which has been fixed.
+ (merge e3a9d1aca9 sb/submodule-fetchjobs-default-to-one later to maint).
+
+ * "git stripspace" should be usable outside a git repository, but
+ under the "-s" or "-c" mode, it didn't.
+ (merge 957da75802 jn/stripspace-wo-repository later to maint).
+
+ * Some of the documentation pages were formatted incorrectly by
+   Asciidoctor; these have been fixed.
+ (merge b62eb1d2f4 ma/asciidoctor later to maint).
+
+ * The core.worktree setting in a submodule repository should not be
+ pointing at a directory when the submodule loses its working tree
+ (e.g. getting deinit'ed), but the code did not properly maintain
+ this invariant.
+
+ * With zsh, "git cmd path<TAB>" was completed to "git cmd path name"
+ when the completed path has a special character like SP in it,
+ without any attempt to keep "path name" a single filename. This
+ has been fixed to complete it to "git cmd path\ name" just like
+ Bash completion does.
+
+ * The test suite tried to see if it is run under bash, but the check
+ itself failed under some other implementations of shell (notably
+ under NetBSD). This has been corrected.
+ (merge 54ea72f09c sg/test-bash-version-fix later to maint).
+
+ * "git gc" and "git repack" did not close the open packfiles that
+ they found unneeded before removing them, which didn't work on a
+ platform incapable of removing an open file. This has been
+ corrected.
+ (merge 5bdece0d70 js/gc-repack-close-before-remove later to maint).
+
* Code cleanup, docfix, build fix, etc.
+ (merge 89ba9a79ae hb/t0061-dot-in-path-fix later to maint).
+ (merge d173e799ea sb/diff-color-moved-config-option-fixup later to maint).
+ (merge a8f5a59067 en/directory-renames-nothanks-doc-update later to maint).
+ (merge ec36c42a63 nd/indentation-fix later to maint).
+ (merge f116ee21cd do/gitweb-strict-export-conf-doc later to maint).
+ (merge 112ea42663 fd/gitweb-snapshot-conf-doc-fix later to maint).
+ (merge 1cadad6f65 tb/use-common-win32-pathfuncs-on-cygwin later to maint).
+ (merge 57e9dcaa65 km/rebase-doc-typofix later to maint).
+ (merge b8b4cb27e6 ds/gc-doc-typofix later to maint).
+ (merge 3b3357626e nd/style-opening-brace later to maint).
+ (merge b4583d5595 es/doc-worktree-guessremote-config later to maint).
+ (merge cce99cd8c6 ds/commit-graph-assert-missing-parents later to maint).
+ (merge 0650614982 cy/completion-typofix later to maint).
+ (merge 6881925ef5 rs/sha1-file-close-mapped-file-on-error later to maint).
+ (merge bd8d6f0def en/show-ref-doc-fix later to maint).
-------------------------------------------
+
Defaults to false.
+
+rebase.rescheduleFailedExec::
+ Automatically reschedule `exec` commands that failed. This only makes
+ sense in interactive mode (or when an `--exec` option was provided).
+ This is the same as specifying the `--reschedule-failed-exec` option.
worktree.guessRemote::
- With `add`, if no branch argument, and neither of `-b` nor
- `-B` nor `--detach` are given, the command defaults to
+ If no branch is specified and neither `-b` nor `-B` nor
+ `--detach` is used, then `git worktree add` defaults to
creating a new branch from HEAD. If `worktree.guessRemote` is
set to true, `worktree add` tries to find a remote-tracking
branch whose name uniquely matches the new branch name. If
`dimmed_zebra` is a deprecated synonym.
--
+--no-color-moved::
+ Turn off move detection. This can be used to override configuration
+ settings. It is the same as `--color-moved=no`.
+
--color-moved-ws=<modes>::
- This configures how white spaces are ignored when performing the
+ This configures how whitespace is ignored when performing the
move detection for `--color-moved`.
ifdef::git-diff[]
It can be set by the `diff.colorMovedWS` configuration setting.
These modes can be given as a comma separated list:
+
--
+no::
+ Do not ignore whitespace when performing move detection.
ignore-space-at-eol::
Ignore changes in whitespace at EOL.
ignore-space-change::
Ignore whitespace when comparing lines. This ignores differences
even if one line has whitespace where the other line has none.
allow-indentation-change::
- Initially ignore any white spaces in the move detection, then
+ Initially ignore any whitespace in the move detection, then
group the moved code blocks only into a block if the change in
whitespace is the same per line. This is incompatible with the
other modes.
--
+--no-color-moved-ws::
+ Do not ignore whitespace when performing move detection. This can be
+ used to override configuration settings. It is the same as
+ `--color-moved-ws=no`.
+
--word-diff[=<mode>]::
Show a word diff, using the <mode> to delimit changed words.
By default, words are delimited by whitespace; see
came into being: use the feature iteratively to feed the interesting
block in the preimage back into `-S`, and keep going until you get the
very first version of the block.
++
+Binary files are searched as well.
-G<regex>::
Look for differences whose patch text contains added/removed
-S"regexec\(regexp" --pickaxe-regex` will not (because the number of
occurrences of that string did not change).
+
+Unless `--text` is supplied, patches of binary files without a textconv
+filter will be ignored.
++
See the 'pickaxe' entry in linkgit:gitdiffcore[7] for more
information.
stdin, and the SHA-1, type, and size of each object is printed on stdout. The
output format can be overridden using the optional `<format>` argument. If
either `--textconv` or `--filters` was specified, the input is expected to
-list the object names followed by the path name, separated by a single white
-space, so that the appropriate drivers can be determined.
+list the object names followed by the path name, separated by a single
+whitespace, so that the appropriate drivers can be determined.
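+
+For example, with `--textconv --batch` each input line names the object
+and then the path (the blob and path below are illustrative, and the
+output depends on the textconv configuration in effect):
+
+------------
+echo "HEAD:README README" | git cat-file --textconv --batch
+------------
+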
OPTIONS
-------
Print object information and contents for each object provided
on stdin. May not be combined with any other options or arguments
except `--textconv` or `--filters`, in which case the input lines
- also need to specify the path, separated by white space. See the
+ also need to specify the path, separated by whitespace. See the
section `BATCH OUTPUT` below for details.
--batch-check::
Print object information for each object provided on stdin. May
not be combined with any other options or arguments except
`--textconv` or `--filters`, in which case the input lines also
- need to specify the path, separated by white space. See the
+ need to specify the path, separated by whitespace. See the
section `BATCH OUTPUT` below for details.
--batch-all-objects::
The number of spaces between columns. One space by default.
EXAMPLES
-------
+--------
Format data by columns:
------------
objectsize::
The size of the object (the same as 'git cat-file -s' reports).
-
+ Append `:disk` to get the size, in bytes, that the object takes up on
+ disk. See the note about on-disk sizes in the `CAVEATS` section below.
objectname::
The object name (aka SHA-1).
For a non-ambiguous abbreviation of the object name append `:short`.
For an abbreviation of the object name with desired length append
`:short=<length>`, where the minimum length is MINIMUM_ABBREV. The
length may be exceeded to ensure unique object names.
+deltabase::
+ This expands to the object name of the delta base for the
+ given object, if it is stored as a delta. Otherwise it
+ expands to the null object name (all zeroes).
upstream::
The name of a local ref which can be considered ``upstream''
git for-each-ref --format="%(refname)%(if)%(authorname)%(then) Authored by: %(authorname)%(end)"
------------
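+
+The `objectsize:disk` and `deltabase` atoms introduced above can be used
+the same way; a minimal sketch (the `refs/tags` pattern is only an
+example):
+
+------------
+git for-each-ref --format='%(objectname) %(objectsize:disk) %(deltabase) %(refname)' refs/tags
+------------
+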
+CAVEATS
+-------
+
+Note that the sizes of objects on disk are reported accurately, but care
+should be taken in drawing conclusions about which refs or objects are
+responsible for disk usage. The size of a packed non-delta object may be
+much larger than the size of objects which delta against it, but the
+choice of which object is the base and which is the delta is arbitrary
+and is subject to change during a repack.
+
+Note also that multiple copies of an object may be present in the object
+database; in this case, it is undefined which copy's size or delta base
+will be reported.
+
SEE ALSO
--------
linkgit:git-show-ref[1]
it within all non-bare repos or it can be set to a boolean value.
This defaults to true.
-The optional configuration variable `gc.commitGraph` determines if
+The optional configuration variable `gc.writeCommitGraph` determines if
'git gc' should run 'git commit-graph write'. This can be set to a
boolean value. This defaults to false.
OPTIONS
-------
---
-
-q::
--quiet::
If you provide a 'directory', the command is run inside it. If this directory
does not exist, it will be created.
---
-
TEMPLATE DIRECTORY
------------------
--------
[verse]
'git quiltimport' [--dry-run | -n] [--author <author>] [--patches <dir>]
- [--series <file>]
+ [--series <file>] [--keep-non-patch]
DESCRIPTION
or the value of the `$QUILT_SERIES` environment
variable.
+--keep-non-patch::
+ Pass `-b` flag to 'git mailinfo' (see linkgit:git-mailinfo[1]).
+
GIT
---
Part of the linkgit:git[1] suite
+
See also INCOMPATIBLE OPTIONS below.
+-y <cmd>::
+ This is the same as passing `--reschedule-failed-exec` before
+ `-x <cmd>`, i.e. it appends the specified `exec` command and
+ turns on the mode where failed `exec` commands are automatically
+ rescheduled.
+
--root::
Rebase all commits reachable from <branch>, instead of
limiting them with an <upstream>. This allows you to rebase
with care: the final stash application after a successful
rebase might result in non-trivial conflicts.
+--reschedule-failed-exec::
+--no-reschedule-failed-exec::
+ Automatically reschedule `exec` commands that failed. This only makes
+ sense in interactive mode (or when an `--exec` option was provided).
+
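+
+As an illustration (the `make test` command and the `master` upstream
+are placeholders, not requirements), the option pairs naturally with
+`--exec`:
+
+------------
+git rebase -i --reschedule-failed-exec -x "make test" master
+------------
+
+With the `-y` shorthand described above, the same can be spelled as
+`git rebase -i -y "make test" master`.
+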
INCOMPATIBLE OPTIONS
--------------------
Directory rename detection
~~~~~~~~~~~~~~~~~~~~~~~~~~
-The merge and interactive backends work fine with
-directory rename detection. The am backend sometimes does not.
+Directory rename heuristics are enabled in the merge and interactive
+backends. Due to the lack of accurate tree information, directory
+rename detection is disabled in the am backend.
include::merge-strategies.txt[]
At this time, the `merge` command will *always* use the `recursive`
merge strategy for regular merges, and `octopus` for octopus merges,
-strategy, with no way to choose a different one. To work around
+with no way to choose a different one. To work around
this, an `exec` command can be used to call `git merge` explicitly,
using the fact that the labels are worktree-local refs (the ref
`refs/rewritten/onto` would correspond to the label `onto`, for example).
Show the HEAD reference, even if it would normally be filtered out.
---tags::
--heads::
+--tags::
Limit to "refs/heads" and "refs/tags", respectively. These options
are not mutually exclusive; when given both, references stored in
Ignored files are not listed, unless `--ignored` option is in effect,
in which case `XY` are `!!`.
- X Y Meaning
- -------------------------------------------------
- [AMD] not updated
- M [ MD] updated in index
- A [ MD] added to index
- D deleted from index
- R [ MD] renamed in index
- C [ MD] copied in index
- [MARC] index and work tree matches
- [ MARC] M work tree changed since index
- [ MARC] D deleted in work tree
- [ D] R renamed in work tree
- [ D] C copied in work tree
- -------------------------------------------------
- D D unmerged, both deleted
- A U unmerged, added by us
- U D unmerged, deleted by them
- U A unmerged, added by them
- D U unmerged, deleted by us
- A A unmerged, both added
- U U unmerged, both modified
- -------------------------------------------------
- ? ? untracked
- ! ! ignored
- -------------------------------------------------
+....
+X Y Meaning
+-------------------------------------------------
+ [AMD] not updated
+M [ MD] updated in index
+A [ MD] added to index
+D deleted from index
+R [ MD] renamed in index
+C [ MD] copied in index
+[MARC] index and work tree matches
+[ MARC] M work tree changed since index
+[ MARC] D deleted in work tree
+[ D] R renamed in work tree
+[ D] C copied in work tree
+-------------------------------------------------
+D D unmerged, both deleted
+A U unmerged, added by us
+U D unmerged, deleted by them
+U A unmerged, added by them
+D U unmerged, deleted by us
+A A unmerged, both added
+U U unmerged, both modified
+-------------------------------------------------
+? ? untracked
+! ! ignored
+-------------------------------------------------
+....
Submodules have more state and instead report
M the submodule has a different HEAD than
If `--branch` is given, a series of header lines are printed with
information about the current branch.
- Line Notes
- ------------------------------------------------------------
- # branch.oid <commit> | (initial) Current commit.
- # branch.head <branch> | (detached) Current branch.
- # branch.upstream <upstream_branch> If upstream is set.
- # branch.ab +<ahead> -<behind> If upstream is set and
- the commit is present.
- ------------------------------------------------------------
+....
+Line Notes
+------------------------------------------------------------
+# branch.oid <commit> | (initial) Current commit.
+# branch.head <branch> | (detached) Current branch.
+# branch.upstream <upstream_branch> If upstream is set.
+# branch.ab +<ahead> -<behind> If upstream is set and
+ the commit is present.
+------------------------------------------------------------
+....
### Changed Tracked Entries
2 <XY> <sub> <mH> <mI> <mW> <hH> <hI> <X><score> <path><sep><origPath>
- Field Meaning
- --------------------------------------------------------
- <XY> A 2 character field containing the staged and
- unstaged XY values described in the short format,
- with unchanged indicated by a "." rather than
- a space.
- <sub> A 4 character field describing the submodule state.
- "N..." when the entry is not a submodule.
- "S<c><m><u>" when the entry is a submodule.
- <c> is "C" if the commit changed; otherwise ".".
- <m> is "M" if it has tracked changes; otherwise ".".
- <u> is "U" if there are untracked changes; otherwise ".".
- <mH> The octal file mode in HEAD.
- <mI> The octal file mode in the index.
- <mW> The octal file mode in the worktree.
- <hH> The object name in HEAD.
- <hI> The object name in the index.
- <X><score> The rename or copy score (denoting the percentage
- of similarity between the source and target of the
- move or copy). For example "R100" or "C75".
- <path> The pathname. In a renamed/copied entry, this
- is the target path.
- <sep> When the `-z` option is used, the 2 pathnames are separated
- with a NUL (ASCII 0x00) byte; otherwise, a tab (ASCII 0x09)
- byte separates them.
- <origPath> The pathname in the commit at HEAD or in the index.
- This is only present in a renamed/copied entry, and
- tells where the renamed/copied contents came from.
- --------------------------------------------------------
+....
+Field Meaning
+--------------------------------------------------------
+<XY> A 2 character field containing the staged and
+ unstaged XY values described in the short format,
+ with unchanged indicated by a "." rather than
+ a space.
+<sub> A 4 character field describing the submodule state.
+ "N..." when the entry is not a submodule.
+ "S<c><m><u>" when the entry is a submodule.
+ <c> is "C" if the commit changed; otherwise ".".
+ <m> is "M" if it has tracked changes; otherwise ".".
+ <u> is "U" if there are untracked changes; otherwise ".".
+<mH> The octal file mode in HEAD.
+<mI> The octal file mode in the index.
+<mW> The octal file mode in the worktree.
+<hH> The object name in HEAD.
+<hI> The object name in the index.
+<X><score> The rename or copy score (denoting the percentage
+ of similarity between the source and target of the
+ move or copy). For example "R100" or "C75".
+<path> The pathname. In a renamed/copied entry, this
+ is the target path.
+<sep> When the `-z` option is used, the 2 pathnames are separated
+ with a NUL (ASCII 0x00) byte; otherwise, a tab (ASCII 0x09)
+ byte separates them.
+<origPath> The pathname in the commit at HEAD or in the index.
+ This is only present in a renamed/copied entry, and
+ tells where the renamed/copied contents came from.
+--------------------------------------------------------
+....
Unmerged entries have the following format; the first character is
a "u" to distinguish from ordinary changed entries.
u <xy> <sub> <m1> <m2> <m3> <mW> <h1> <h2> <h3> <path>
- Field Meaning
- --------------------------------------------------------
- <XY> A 2 character field describing the conflict type
- as described in the short format.
- <sub> A 4 character field describing the submodule state
- as described above.
- <m1> The octal file mode in stage 1.
- <m2> The octal file mode in stage 2.
- <m3> The octal file mode in stage 3.
- <mW> The octal file mode in the worktree.
- <h1> The object name in stage 1.
- <h2> The object name in stage 2.
- <h3> The object name in stage 3.
- <path> The pathname.
- --------------------------------------------------------
+....
+Field Meaning
+--------------------------------------------------------
+<XY> A 2 character field describing the conflict type
+ as described in the short format.
+<sub> A 4 character field describing the submodule state
+ as described above.
+<m1> The octal file mode in stage 1.
+<m2> The octal file mode in stage 2.
+<m3> The octal file mode in stage 3.
+<mW> The octal file mode in the worktree.
+<h1> The object name in stage 1.
+<h2> The object name in stage 2.
+<h3> The object name in stage 3.
+<path> The pathname.
+--------------------------------------------------------
+....
### Other Items
regular expression. This means that it will detect in-file (or what
rename-detection considers the same file) moves, which is noise. The
implementation runs diff twice and greps, and this can be quite
-expensive.
+expensive. To speed things up, binary files without textconv filters
+will be ignored.
When `-S` or `-G` are used without `--pickaxe-all`, only filepairs
that match their respective criterion are kept in the output. When
$strict_export::
Only allow viewing of repositories also shown on the overview page.
- This for example makes `$gitweb_export_ok` file decide if repository is
- available and not only if it is shown. If `$gitweb_list` points to
+	This for example makes the `$export_ok` file decide whether a repository is
+	available, and not only whether it is shown.  If `$projects_list` points to
	a file with a list of projects, only those repositories listed would be
	available for gitweb.  Can be set during building gitweb via
`GITWEB_STRICT_EXPORT`. By default this variable is not set, which
a definitive list. By default only "tgz" is offered.
+
This feature can be configured on a per-repository basis via
-repository's `gitweb.blame` configuration variable, which contains
+repository's `gitweb.snapshot` configuration variable, which contains
a comma separated list of formats or "none" to disable snapshots.
Unknown values are ignored.
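+
+For instance, a repository could offer gzipped tarballs and zip archives,
+or opt out of snapshots entirely (the formats chosen here are only
+examples):
+
+------------
+git config gitweb.snapshot "tgz,zip"
+git config gitweb.snapshot none
+------------
+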
- "`!ATTR`" requires that the attribute `ATTR` be
unspecified.
+
+Note that when matching against a tree object, attributes are still
+obtained from the working tree, not from the given tree object.
exclude;;
After a path matches any non-exclude pathspec, it will be run
Note that these are applied before commit
ordering and formatting options, such as `--reverse`.
---
-
-<number>::
-n <number>::
--max-count=<number>::
`<header>` text will be printed with each progress update.
endif::git-rev-list[]
---
-
History Simplification
~~~~~~~~~~~~~~~~~~~~~~
is not sorted, this function has the side effect of sorting
it.
+`oid_array_filter`::
+ Apply the callback function `want` to each entry in the array,
+ retaining only the entries for which the function returns true.
+ Preserve the order of the entries that are retained.
+
Examples
--------
that it has, either because the local repository has that object in one of
its promisor packfiles, or because another promisor object refers to it.
+
-When Git encounters a missing object, Git can see if it a promisor object
+When Git encounters a missing object, Git can see if it is a promisor object
and handle it appropriately. If not, Git can report a corruption.
+
This means that there is no need for the client to explicitly maintain an
# in one call to the platform's SHA1_Update(). e.g. APPLE_COMMON_CRYPTO
# wants 'SHA1_MAX_BLOCK_SIZE=1024L*1024L*1024L' defined.
#
+# Define BLK_SHA256 to use the built-in SHA-256 routines.
+#
+# Define GCRYPT_SHA256 to use the SHA-256 routines in libgcrypt.
+#
+# Define OPENSSL_SHA256 to use the SHA-256 routines in OpenSSL.
+#
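+# (Illustrative only: the checks below are plain "ifdef"s, so any
+# non-empty value works, e.g. "make OPENSSL_SHA256=YesPlease" or the
+# equivalent line in config.mak.)
+#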
# Define NEEDS_CRYPTO_WITH_SSL if you need -lcrypto when using -lssl (Darwin).
#
# Define NEEDS_SSL_WITH_CRYPTO if you need -lssl when using -lcrypto (Darwin).
TEST_BUILTINS_OBJS += test-dump-untracked-cache.o
TEST_BUILTINS_OBJS += test-example-decorate.o
TEST_BUILTINS_OBJS += test-genrandom.o
+TEST_BUILTINS_OBJS += test-hash.o
TEST_BUILTINS_OBJS += test-hashmap.o
+TEST_BUILTINS_OBJS += test-hash-speed.o
TEST_BUILTINS_OBJS += test-index-version.o
TEST_BUILTINS_OBJS += test-json-writer.o
TEST_BUILTINS_OBJS += test-lazy-init-name-hash.o
TEST_BUILTINS_OBJS += test-scrap-cache-tree.o
TEST_BUILTINS_OBJS += test-sha1.o
TEST_BUILTINS_OBJS += test-sha1-array.o
+TEST_BUILTINS_OBJS += test-sha256.o
TEST_BUILTINS_OBJS += test-sigchain.o
TEST_BUILTINS_OBJS += test-strcmp-offset.o
TEST_BUILTINS_OBJS += test-string-list.o
endif
endif
+ifdef OPENSSL_SHA256
+ EXTLIBS += $(LIB_4_CRYPTO)
+ BASIC_CFLAGS += -DSHA256_OPENSSL
+else
+ifdef GCRYPT_SHA256
+ BASIC_CFLAGS += -DSHA256_GCRYPT
+ EXTLIBS += -lgcrypt
+else
+ LIB_OBJS += sha256/block/sha256.o
+ BASIC_CFLAGS += -DSHA256_BLK
+endif
+endif
+
ifdef SHA1_MAX_BLOCK_SIZE
LIB_OBJS += compat/sha1-chunked.o
BASIC_CFLAGS += -DSHA1_MAX_BLOCK_SIZE="$(SHA1_MAX_BLOCK_SIZE)"
costate.refresh_cache = 1;
costate.istate = istate;
- if (checkout_entry(ce, &costate, NULL) || lstat(ce->name, st))
+ if (checkout_entry(ce, &costate, NULL, NULL) ||
+ lstat(ce->name, st))
return error(_("cannot checkout %s"), ce->name);
return 0;
}
* string and appends it to a struct strbuf.
*/
static void strbuf_append_ext_header(struct strbuf *sb, const char *keyword,
- const char *value, unsigned int valuelen)
+ const char *value, unsigned int valuelen)
{
int len, tmp;
}
static void format_subst(const struct commit *commit,
- const char *src, size_t len,
- struct strbuf *buf)
+ const char *src, size_t len,
+ struct strbuf *buf)
{
char *to_free = NULL;
struct strbuf fmt = STRBUF_INIT;
git_attr_set_direction(GIT_ATTR_INDEX);
}
- err = read_tree_recursive(args->tree, "", 0, 0, &args->pathspec,
+ err = read_tree_recursive(args->repo, args->tree, "",
+ 0, 0, &args->pathspec,
queue_or_write_archive_entry,
&context);
if (err == READ_TREE_RECURSIVE)
ctx.args = args;
parse_pathspec(&ctx.pathspec, 0, 0, "", paths);
ctx.pathspec.recursive = 1;
- ret = read_tree_recursive(args->tree, "", 0, 0, &ctx.pathspec,
+ ret = read_tree_recursive(args->repo, args->tree, "",
+ 0, 0, &ctx.pathspec,
reject_entry, &ctx);
clear_pathspec(&ctx.pathspec);
return ret != 0;
#define strcat(x,y) BANNED(strcat)
#undef strncpy
#define strncpy(x,y,n) BANNED(strncpy)
+#undef strncat
+#define strncat(x,y,n) BANNED(strncat)
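+/*
+ * Illustration: after the definition above, a call such as
+ * strncat(buf, suffix, n) expands through the BANNED() macro used
+ * above into a reference to an identifier that is never declared,
+ * so offending code fails to build (at compile or link time).
+ */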
#undef sprintf
#undef vsprintf
* is increased by one between each call, but that should not matter
* for this application.
*/
-static unsigned get_prn(unsigned count) {
+static unsigned get_prn(unsigned count)
+{
count = count * 1103515245 + 12345;
return (count/65536) % PRN_MODULO;
}
die(_("pathspec '%s' did not match any files"),
pathspec->items[i].match);
}
- free(seen);
+ free(seen);
}
int run_add_interactive(const char *revision, const char *patch_mode,
continue;
did_checkout = 1;
if (checkout_entry(ce, &state,
- to_tempfile ? topath[ce_stage(ce)] : NULL) < 0)
+ to_tempfile ? topath[ce_stage(ce)] : NULL,
+ NULL) < 0)
errs++;
}
write_tempfile_record(last_ce->name, prefix);
}
if (checkout_entry(ce, &state,
- to_tempfile ? topath[ce_stage(ce)] : NULL) < 0)
+ to_tempfile ? topath[ce_stage(ce)] : NULL,
+ NULL) < 0)
errs++;
last_ce = ce;
}
int ignore_skipworktree;
int ignore_other_worktrees;
int show_progress;
+ int count_checkout_paths;
/*
* If new checkout options are added, skip_merge_working_tree
* should be updated accordingly.
static int read_tree_some(struct tree *tree, const struct pathspec *pathspec)
{
- read_tree_recursive(tree, "", 0, 0, pathspec, update_some, NULL);
+ read_tree_recursive(the_repository, tree, "", 0, 0,
+ pathspec, update_some, NULL);
/* update the index with the given tree's info
* for all args, expanding wildcards, and exit
}
static int checkout_stage(int stage, const struct cache_entry *ce, int pos,
- const struct checkout *state)
+ const struct checkout *state, int *nr_checkouts)
{
while (pos < active_nr &&
!strcmp(active_cache[pos]->name, ce->name)) {
if (ce_stage(active_cache[pos]) == stage)
- return checkout_entry(active_cache[pos], state, NULL);
+ return checkout_entry(active_cache[pos], state,
+ NULL, nr_checkouts);
pos++;
}
if (stage == 2)
return error(_("path '%s' does not have their version"), ce->name);
}
-static int checkout_merged(int pos, const struct checkout *state)
+static int checkout_merged(int pos, const struct checkout *state, int *nr_checkouts)
{
struct cache_entry *ce = active_cache[pos];
const char *path = ce->name;
ce = make_transient_cache_entry(mode, &oid, path, 2);
if (!ce)
die(_("make_cache_entry failed for path '%s'"), path);
- status = checkout_entry(ce, state, NULL);
+ status = checkout_entry(ce, state, NULL, nr_checkouts);
discard_cache_entry(ce);
return status;
}
struct commit *head;
int errs = 0;
struct lock_file lock_file = LOCK_INIT;
+ int nr_checkouts = 0;
if (opts->track != BRANCH_TRACK_UNSPECIFIED)
die(_("'%s' cannot be used with updating paths"), "--track");
struct cache_entry *ce = active_cache[pos];
if (ce->ce_flags & CE_MATCHED) {
if (!ce_stage(ce)) {
- errs |= checkout_entry(ce, &state, NULL);
+ errs |= checkout_entry(ce, &state,
+ NULL, &nr_checkouts);
continue;
}
if (opts->writeout_stage)
- errs |= checkout_stage(opts->writeout_stage, ce, pos, &state);
+ errs |= checkout_stage(opts->writeout_stage,
+ ce, pos,
+ &state, &nr_checkouts);
else if (opts->merge)
- errs |= checkout_merged(pos, &state);
+ errs |= checkout_merged(pos, &state,
+ &nr_checkouts);
pos = skip_same_name(ce, pos) - 1;
}
}
- errs |= finish_delayed_checkout(&state);
+ errs |= finish_delayed_checkout(&state, &nr_checkouts);
+
+ if (opts->count_checkout_paths) {
+ if (opts->source_tree)
+ fprintf_ln(stderr, Q_("Checked out %d path out of %s",
+ "Checked out %d paths out of %s",
+ nr_checkouts),
+ nr_checkouts,
+ find_unique_abbrev(&opts->source_tree->object.oid,
+ DEFAULT_ABBREV));
+ else
+ fprintf_ln(stderr, Q_("Checked out %d path out of the index",
+ "Checked out %d paths out of the index",
+ nr_checkouts),
+ nr_checkouts);
+ }
if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
has_dash_dash = 1; /* case (3) or (1) */
else if (dash_dash_pos >= 2)
die(_("only one reference expected, %d given."), dash_dash_pos);
+ opts->count_checkout_paths = !opts->quiet && !has_dash_dash;
if (!strcmp(arg, "-"))
arg = "@{-1}";
if (write_locked_index(&the_index, &index_lock, 0))
die(_("unable to create temporary index"));
- old_index_env = getenv(INDEX_ENVIRONMENT);
+ old_index_env = xstrdup_or_null(getenv(INDEX_ENVIRONMENT));
setenv(INDEX_ENVIRONMENT, get_lock_file_path(&index_lock), 1);
if (interactive_add(argc, argv, prefix, patch_interactive) != 0)
setenv(INDEX_ENVIRONMENT, old_index_env, 1);
else
unsetenv(INDEX_ENVIRONMENT);
+ FREE_AND_NULL(old_index_env);
discard_cache();
read_cache_from(get_lock_file_path(&index_lock));
usage_with_options(builtin_config_usage, builtin_config_options);
}
-static void check_argc(int argc, int min, int max) {
+static void check_argc(int argc, int min, int max)
+{
if (argc >= min && argc <= max)
return;
if (min == max)
int nongit = !startup_info->have_repository;
char *value;
- given_config_source.file = getenv(CONFIG_ENVIRONMENT);
+ given_config_source.file = xstrdup_or_null(getenv(CONFIG_ENVIRONMENT));
argc = parse_options(argc, argv, prefix, builtin_config_options,
builtin_config_usage,
int ret;
ce = make_transient_cache_entry(mode, oid, path, 0);
- ret = checkout_entry(ce, state, NULL);
+ ret = checkout_entry(ce, state, NULL, NULL);
discard_cache_entry(ce);
return ret;
struct oid_array shallow = OID_ARRAY_INIT;
struct string_list deepen_not = STRING_LIST_INIT_DUP;
struct packet_reader reader;
+ enum protocol_version version;
fetch_if_missing = 0;
PACKET_READ_CHOMP_NEWLINE |
PACKET_READ_GENTLE_ON_EOF);
- switch (discover_version(&reader)) {
+ version = discover_version(&reader);
+ switch (version) {
case protocol_v2:
- die("support for protocol v2 not implemented yet");
+ get_remote_refs(fd[1], &reader, &ref, 0, NULL, NULL);
+ break;
case protocol_v1:
case protocol_v0:
get_remote_heads(&reader, &ref, 0, NULL, &shallow);
}
ref = fetch_pack(&args, fd, conn, ref, dest, sought, nr_sought,
- &shallow, pack_lockfile_ptr, protocol_v0);
+ &shallow, pack_lockfile_ptr, version);
if (pack_lockfile) {
printf("lock %s\n", pack_lockfile);
fflush(stdout);
what = _("[new ref]");
}
- if ((recurse_submodules != RECURSE_SUBMODULES_OFF) &&
- (recurse_submodules != RECURSE_SUBMODULES_ON))
- check_for_new_submodule_commits(&ref->new_oid);
r = s_update_ref(msg, ref, 0);
format_display(display, r ? '!' : '*', what,
r ? _("unable to update local ref") : NULL,
strbuf_add_unique_abbrev(&quickref, ¤t->object.oid, DEFAULT_ABBREV);
strbuf_addstr(&quickref, "..");
strbuf_add_unique_abbrev(&quickref, &ref->new_oid, DEFAULT_ABBREV);
- if ((recurse_submodules != RECURSE_SUBMODULES_OFF) &&
- (recurse_submodules != RECURSE_SUBMODULES_ON))
- check_for_new_submodule_commits(&ref->new_oid);
r = s_update_ref("fast-forward", ref, 1);
format_display(display, r ? '!' : ' ', quickref.buf,
r ? _("unable to update local ref") : NULL,
strbuf_add_unique_abbrev(&quickref, ¤t->object.oid, DEFAULT_ABBREV);
strbuf_addstr(&quickref, "...");
strbuf_add_unique_abbrev(&quickref, &ref->new_oid, DEFAULT_ABBREV);
- if ((recurse_submodules != RECURSE_SUBMODULES_OFF) &&
- (recurse_submodules != RECURSE_SUBMODULES_ON))
- check_for_new_submodule_commits(&ref->new_oid);
r = s_update_ref("forced-update", ref, 1);
format_display(display, r ? '!' : '+', quickref.buf,
r ? _("unable to update local ref") : _("forced update"),
ref->force = rm->peer_ref->force;
}
+ if (recurse_submodules != RECURSE_SUBMODULES_OFF)
+ check_for_new_submodule_commits(&rm->old_oid);
if (!strcmp(rm->name, "HEAD")) {
kind = "";
*/
if (strcmp(remote->name, repository_format_partial_clone)) {
if (filter_options.choice)
- die(_("--filter can only be used with the remote configured in core.partialClone"));
+ die(_("--filter can only be used with the remote "
+ "configured in extensions.partialclone"));
return;
}
result = fetch_one(remote, argc, argv, prune_tags_ok);
} else {
if (filter_options.choice)
- die(_("--filter can only be used with the remote configured in core.partialClone"));
+ die(_("--filter can only be used with the remote "
+ "configured in extensions.partialclone"));
/* TODO should this also die if we have a previous partial-clone? */
result = fetch_multiple(&list);
}
static void add_repack_incremental_option(void)
{
- argv_array_push(&repack, "--no-write-bitmap-index");
+ argv_array_push(&repack, "--no-write-bitmap-index");
}
static int need_to_gc(void)
report_garbage = report_pack_garbage;
reprepare_packed_git(the_repository);
- if (pack_garbage.nr > 0)
+ if (pack_garbage.nr > 0) {
+ close_all_packs(the_repository->objects);
clean_pack_garbage();
+ }
if (gc_write_commit_graph)
write_commit_graph_reachable(get_object_directory(), 0,
const struct object_id *oid,
const char *filename, const char *path)
{
- struct repository submodule;
+ struct repository subrepo;
+ const struct submodule *sub = submodule_from_path(superproject,
+ &null_oid, path);
+
int hit;
/*
return 0;
}
- if (repo_submodule_init(&submodule, superproject, path)) {
+ if (repo_submodule_init(&subrepo, superproject, sub)) {
grep_read_unlock();
return 0;
}
- repo_read_gitmodules(&submodule);
+ repo_read_gitmodules(&subrepo);
/*
* NEEDSWORK: This adds the submodule's object directory to the list of
* store is no longer global and instead is a member of the repository
* object.
*/
- add_to_alternates_memory(submodule.objects->odb->path);
+ add_to_alternates_memory(subrepo.objects->odb->path);
grep_read_unlock();
if (oid) {
init_tree_desc(&tree, data, size);
hit = grep_tree(opt, pathspec, &tree, &base, base.len,
- object->type == OBJ_COMMIT, &submodule);
+ object->type == OBJ_COMMIT, &subrepo);
strbuf_release(&base);
free(data);
} else {
- hit = grep_cache(opt, &submodule, pathspec, 1);
+ hit = grep_cache(opt, &subrepo, pathspec, 1);
}
- repo_clear(&submodule);
+ repo_clear(&subrepo);
return hit;
}
if (match != all_entries_interesting) {
strbuf_addstr(&name, base->buf + tn_len);
- match = tree_entry_interesting(&entry, &name,
+ match = tree_entry_interesting(repo->index,
+ &entry, &name,
0, pathspec);
strbuf_setlen(&name, name_base_len);
* GIT_WORK_TREE makes sense only in conjunction with GIT_DIR
* without --bare. Catch the error early.
*/
- git_dir = getenv(GIT_DIR_ENVIRONMENT);
- work_tree = getenv(GIT_WORK_TREE_ENVIRONMENT);
+ git_dir = xstrdup_or_null(getenv(GIT_DIR_ENVIRONMENT));
+ work_tree = xstrdup_or_null(getenv(GIT_WORK_TREE_ENVIRONMENT));
if ((!git_dir || is_bare_repository_cfg == 1) && work_tree)
die(_("%s (or --work-tree=<directory>) not allowed without "
"specifying %s (or --git-dir=<directory>)"),
}
UNLEAK(real_git_dir);
+ UNLEAK(git_dir);
+ UNLEAK(work_tree);
flags |= INIT_DB_EXIST_OK;
return init_db(git_dir, real_git_dir, template_dir, flags);
diff_get_color_opt(&rev.diffopt, DIFF_COMMIT),
name,
diff_get_color_opt(&rev.diffopt, DIFF_RESET));
- read_tree_recursive((struct tree *)o, "", 0, 0, &match_all,
- show_tree_object, rev.diffopt.file);
+ read_tree_recursive(the_repository, (struct tree *)o, "",
+ 0, 0, &match_all, show_tree_object,
+ rev.diffopt.file);
rev.shown_one = 1;
break;
case OBJ_COMMIT:
static void show_submodule(struct repository *superproject,
struct dir_struct *dir, const char *path)
{
- struct repository submodule;
+ struct repository subrepo;
+ const struct submodule *sub = submodule_from_path(superproject,
+ &null_oid, path);
- if (repo_submodule_init(&submodule, superproject, path))
+ if (repo_submodule_init(&subrepo, superproject, sub))
return;
- if (repo_read_index(&submodule) < 0)
+ if (repo_read_index(&subrepo) < 0)
die("index file corrupt");
- show_files(&submodule, dir);
+ show_files(&subrepo, dir);
- repo_clear(&submodule);
+ repo_clear(&subrepo);
}
static void show_ce(struct repository *repo, struct dir_struct *dir,
PATHSPEC_PREFER_CWD, prefix, matchbuf);
} else
memset(&pathspec, 0, sizeof(pathspec));
- if (read_tree(tree, 1, &pathspec, istate))
+ if (read_tree(the_repository, tree, 1, &pathspec, istate))
die("unable to read tree entries %s", tree_name);
for (i = 0; i < istate->cache_nr; i++) {
tree = parse_tree_indirect(&oid);
if (!tree)
die("not a tree object");
- return !!read_tree_recursive(tree, "", 0, 0, &pathspec, show_tree, NULL);
+ return !!read_tree_recursive(the_repository, tree, "", 0, 0,
+ &pathspec, show_tree, NULL);
}
static const char builtin_merge_recursive_usage[] =
"git %s <base>... -- <head> <remote> ...";
-static const char *better_branch_name(const char *branch)
+static char *better_branch_name(const char *branch)
{
static char githead_env[8 + GIT_MAX_HEXSZ + 1];
char *name;
if (strlen(branch) != the_hash_algo->hexsz)
- return branch;
+ return xstrdup(branch);
xsnprintf(githead_env, sizeof(githead_env), "GITHEAD_%s", branch);
name = getenv(githead_env);
- return name ? name : branch;
+ return xstrdup(name ? name : branch);
}
int cmd_merge_recursive(int argc, const char **argv, const char *prefix)
int i, failed;
struct object_id h1, h2;
struct merge_options o;
+ char *better1, *better2;
struct commit *result;
init_merge_options(&o);
if (get_oid(o.branch2, &h2))
die(_("could not resolve ref '%s'"), o.branch2);
- o.branch1 = better_branch_name(o.branch1);
- o.branch2 = better_branch_name(o.branch2);
+ o.branch1 = better1 = better_branch_name(o.branch1);
+ o.branch2 = better2 = better_branch_name(o.branch2);
if (o.verbosity >= 3)
printf(_("Merging %s with %s\n"), o.branch1, o.branch2);
failed = merge_recursive_generic(&o, &h1, &h2, bases_count, bases, &result);
+
+ free(better1);
+ free(better2);
+
if (failed < 0)
return 128; /* die() error code */
return failed;
setup_traverse_info(&info, base);
info.fn = threeway_callback;
- traverse_trees(3, t, &info);
+ traverse_trees(&the_index, 3, t, &info);
}
static void *get_tree_descriptor(struct tree_desc *desc, const char *rev)
static void get_object_list(int ac, const char **av)
{
struct rev_info revs;
+ struct setup_revision_opt s_r_opt = {
+ .allow_exclude_promisor_objects = 1,
+ };
char line[1000];
int flags = 0;
int save_warning;
repo_init_revisions(the_repository, &revs, NULL);
save_commit_buffer = 0;
- revs.allow_exclude_promisor_objects_opt = 1;
- setup_revisions(ac, av, &revs, NULL);
+ setup_revisions(ac, av, &revs, &s_r_opt);
/* make sure shallows are read */
is_repository_shallow(the_repository);
save_commit_buffer = 0;
read_replace_refs = 0;
ref_paranoia = 1;
- revs.allow_exclude_promisor_objects_opt = 1;
repo_init_revisions(the_repository, &revs, prefix);
argc = parse_options(argc, argv, prefix, options, prune_usage, 0);
return remote->url_nr;
}
-static NORETURN int die_push_simple(struct branch *branch, struct remote *remote) {
+static NORETURN int die_push_simple(struct branch *branch,
+ struct remote *remote)
+{
/*
* There's no point in using shorten_unambiguous_ref here,
* as the ambiguity would be on the remote side, not what
OPT_STRING(0, "onto-name", &onto_name, N_("onto-name"), N_("onto name")),
OPT_STRING(0, "cmd", &cmd, N_("cmd"), N_("the command to run")),
OPT_RERERE_AUTOUPDATE(&opts.allow_rerere_auto),
+ OPT_BOOL(0, "reschedule-failed-exec", &opts.reschedule_failed_exec,
+ N_("automatically re-schedule any `exec` that fails")),
OPT_END()
};
int rebase_merges, rebase_cousins;
char *strategy, *strategy_opts;
struct strbuf git_format_patch_opt;
+ int reschedule_failed_exec;
};
static int is_interactive(struct rebase_options *opts)
argv_array_push(&child.args, opts->gpg_sign_opt);
if (opts->signoff)
argv_array_push(&child.args, "--signoff");
+ if (opts->reschedule_failed_exec)
+ argv_array_push(&child.args, "--reschedule-failed-exec");
status = run_command(&child);
goto finished_rebase;
#define RESET_HEAD_DETACH (1<<0)
#define RESET_HEAD_HARD (1<<1)
+#define RESET_HEAD_RUN_POST_CHECKOUT_HOOK (1<<2)
static int reset_head(struct object_id *oid, const char *action,
const char *switch_to_branch, unsigned flags,
{
unsigned detach_head = flags & RESET_HEAD_DETACH;
unsigned reset_hard = flags & RESET_HEAD_HARD;
+ unsigned run_hook = flags & RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
struct object_id head_oid;
struct tree_desc desc[2] = { { NULL }, { NULL } };
struct lock_file lock = LOCK_INIT;
ret = update_ref(reflog_head, "HEAD", oid, NULL, 0,
UPDATE_REFS_MSG_ON_ERR);
}
+ if (run_hook)
+ run_hook_le(NULL, "post-checkout",
+ oid_to_hex(orig ? orig : &null_oid),
+ oid_to_hex(oid), "1", NULL);
leave_reset_head:
strbuf_release(&msg);
return 0;
}
+ if (!strcmp(var, "rebase.reschedulefailedexec")) {
+ opts->reschedule_failed_exec = git_config_bool(var, value);
+ return 0;
+ }
+
return git_default_config(var, value, data);
}
return 0;
}
+struct opt_y {
+ struct string_list *list;
+ struct rebase_options *options;
+};
+
+static int parse_opt_y(const struct option *opt, const char *arg, int unset)
+{
+ struct opt_y *o = opt->value;
+
+ if (unset || !arg)
+ return -1;
+
+ o->options->reschedule_failed_exec = 1;
+ string_list_append(o->list, arg);
+ return 0;
+}
+
static void NORETURN error_on_missing_default_upstream(void)
{
struct branch *current_branch = branch_get(NULL);
struct string_list strategy_options = STRING_LIST_INIT_NODUP;
struct object_id squash_onto;
char *squash_onto_name = NULL;
+ struct opt_y opt_y = { .list = &exec, .options = &options };
struct option builtin_rebase_options[] = {
OPT_STRING(0, "onto", &options.onto_name,
N_("revision"),
OPT_STRING_LIST('x', "exec", &exec, N_("exec"),
N_("add exec lines after each commit of the "
"editable list")),
+ { OPTION_CALLBACK, 'y', NULL, &opt_y, N_("<cmd>"),
+ N_("same as --reschedule-failed-exec -x <cmd>"),
+ PARSE_OPT_NONEG, parse_opt_y },
OPT_BOOL(0, "allow-empty-message",
&options.allow_empty_message,
N_("allow rebasing commits with empty messages")),
"strategy")),
OPT_BOOL(0, "root", &options.root,
N_("rebase all reachable commits up to the root(s)")),
+ OPT_BOOL(0, "reschedule-failed-exec",
+ &options.reschedule_failed_exec,
+ N_("automatically re-schedule any `exec` that fails")),
OPT_END(),
};
int i;
break;
}
+ if (options.reschedule_failed_exec && !is_interactive(&options))
+ die(_("--reschedule-failed-exec requires an interactive rebase"));
+
if (options.git_am_opts.argc) {
/* all am options except -q are compatible only with --am */
for (i = options.git_am_opts.argc - 1; i >= 0; i--)
options.flags |= REBASE_FORCE;
}
- if (options.type == REBASE_PRESERVE_MERGES)
+ if (options.type == REBASE_PRESERVE_MERGES) {
/*
* Note: incompatibility with --signoff handled in signoff block above
* Note: incompatibility with --interactive is just a strong warning;
die(_("error: cannot combine '--preserve-merges' with "
"'--rebase-merges'"));
+ if (options.reschedule_failed_exec)
+ die(_("error: cannot combine '--preserve-merges' with "
+ "'--reschedule-failed-exec'"));
+ }
+
if (options.rebase_merges) {
if (strategy_options.nr)
die(_("error: cannot combine '--rebase-merges' with "
getenv(GIT_REFLOG_ACTION_ENVIRONMENT),
options.switch_to);
if (reset_head(&oid, "checkout",
- options.head_name, 0,
+ options.head_name,
+ RESET_HEAD_RUN_POST_CHECKOUT_HOOK,
NULL, buf.buf) < 0) {
ret = !!error(_("could not switch to "
"%s"),
strbuf_addf(&msg, "%s: checkout %s",
getenv(GIT_REFLOG_ACTION_ENVIRONMENT), options.onto_name);
if (reset_head(&options.onto->object.oid, "checkout", NULL,
- RESET_HEAD_DETACH, NULL, msg.buf))
+ RESET_HEAD_DETACH | RESET_HEAD_RUN_POST_CHECKOUT_HOOK,
+ NULL, msg.buf))
die(_("Could not detach HEAD"));
strbuf_release(&msg);
if (!names.nr && !po_args.quiet)
printf_ln(_("Nothing new to pack."));
+ close_all_packs(the_repository->objects);
+
/*
* Ok we have prepared all new packfiles.
* First see if there are packs of the same name and if so
{
struct rev_info revs;
struct rev_list_info info;
+ struct setup_revision_opt s_r_opt = {
+ .allow_exclude_promisor_objects = 1,
+ };
int i;
int bisect_list = 0;
int bisect_show_vars = 0;
git_config(git_default_config, NULL);
repo_init_revisions(the_repository, &revs, prefix);
revs.abbrev = DEFAULT_ABBREV;
- revs.allow_exclude_promisor_objects_opt = 1;
revs.commit_format = CMIT_FMT_UNSPECIFIED;
revs.do_not_die_on_missing_tree = 1;
}
}
- argc = setup_revisions(argc, argv, &revs, NULL);
+ argc = setup_revisions(argc, argv, &revs, &s_r_opt);
memset(&info, 0, sizeof(info));
info.revs = &revs;
{
struct strbuf buf = STRBUF_INIT;
enum stripspace_mode mode = STRIP_DEFAULT;
+ int nongit;
const struct option options[] = {
OPT_CMDMODE('s', "strip-comments", &mode,
usage_with_options(stripspace_usage, options);
if (mode == STRIP_COMMENTS || mode == COMMENT_LINES) {
- setup_git_directory_gently(NULL);
+ setup_git_directory_gently(&nongit);
git_config(git_default_config, NULL);
}
if (!(flags & OPT_QUIET))
printf(format, displaypath);
+ submodule_unset_core_worktree(sub);
+
strbuf_release(&sb_rm);
}
#define SUBMODULE_UPDATE_CLONE_INIT {0, MODULE_LIST_INIT, 0, \
SUBMODULE_UPDATE_STRATEGY_INIT, 0, 0, -1, STRING_LIST_INIT_DUP, 0, \
NULL, NULL, NULL, \
- NULL, 0, 0, 0, NULL, 0, 0, 0}
+ NULL, 0, 0, 0, NULL, 0, 0, 1}
static void next_submodule_warn_missing(struct submodule_update_clone *suc,
struct repository subrepo;
if (argc != 2)
- BUG("submodule--helper connect-gitdir-workingtree <name> <path>");
+ BUG("submodule--helper ensure-core-worktree <path>");
path = argv[1];
if (!sub)
BUG("We could get the submodule handle before?");
- if (repo_submodule_init(&subrepo, the_repository, path))
+ if (repo_submodule_init(&subrepo, the_repository, sub))
die(_("could not get a repository handle for submodule '%s'"), path);
if (!repo_config_get_string(&subrepo, "core.worktree", &cw)) {
#include "refs.h"
#include "run-command.h"
#include "sigchain.h"
+#include "submodule.h"
#include "refs.h"
#include "utf8.h"
#include "worktree.h"
static void validate_no_submodules(const struct worktree *wt)
{
struct index_state istate = { NULL };
+ struct strbuf path = STRBUF_INIT;
int i, found_submodules = 0;
- if (read_index_from(&istate, worktree_git_path(wt, "index"),
- get_worktree_git_dir(wt)) > 0) {
+ if (is_directory(worktree_git_path(wt, "modules"))) {
+ /*
+ * There could be false positives, e.g. the "modules"
+ * directory exists but is empty. But it's a rare case and
+ * this simpler check is probably good enough for now.
+ */
+ found_submodules = 1;
+ } else if (read_index_from(&istate, worktree_git_path(wt, "index"),
+ get_worktree_git_dir(wt)) > 0) {
for (i = 0; i < istate.cache_nr; i++) {
struct cache_entry *ce = istate.cache[i];
+ int err;
- if (S_ISGITLINK(ce->ce_mode)) {
- found_submodules = 1;
- break;
- }
+ if (!S_ISGITLINK(ce->ce_mode))
+ continue;
+
+ strbuf_reset(&path);
+ strbuf_addf(&path, "%s/%s", wt->path, ce->name);
+ if (!is_submodule_populated_gently(path.buf, &err))
+ continue;
+
+ found_submodules = 1;
+ break;
}
}
discard_index(&istate);
+ strbuf_release(&path);
if (found_submodules)
die(_("working trees containing submodules cannot be moved or removed"));
}
static void write_one(struct strbuf *buffer, struct cache_tree *it,
- const char *path, int pathlen)
+ const char *path, int pathlen)
{
int i;
/* The length in bytes and in hex digits of an object name (SHA-1 value). */
#define GIT_SHA1_RAWSZ 20
#define GIT_SHA1_HEXSZ (2 * GIT_SHA1_RAWSZ)
+/* The block size of SHA-1. */
+#define GIT_SHA1_BLKSZ 64
+
+/* The length in bytes and in hex digits of an object name (SHA-256 value). */
+#define GIT_SHA256_RAWSZ 32
+#define GIT_SHA256_HEXSZ (2 * GIT_SHA256_RAWSZ)
+/* The block size of SHA-256. */
+#define GIT_SHA256_BLKSZ 64
/* The length in byte and in hex digits of the largest possible hash value. */
-#define GIT_MAX_RAWSZ GIT_SHA1_RAWSZ
-#define GIT_MAX_HEXSZ GIT_SHA1_HEXSZ
+#define GIT_MAX_RAWSZ GIT_SHA256_RAWSZ
+#define GIT_MAX_HEXSZ GIT_SHA256_HEXSZ
+/* The largest possible block size for any supported hash. */
+#define GIT_MAX_BLKSZ GIT_SHA256_BLKSZ
struct object_id {
unsigned char hash[GIT_MAX_RAWSZ];
static inline int hashcmp(const unsigned char *sha1, const unsigned char *sha2)
{
/*
- * This is a temporary optimization hack. By asserting the size here,
- * we let the compiler know that it's always going to be 20, which lets
- * it turn this fixed-size memcmp into a few inline instructions.
- *
- * This will need to be extended or ripped out when we learn about
- * hashes of different sizes.
+ * Teach the compiler that there are only two possibilities of hash size
+ * here, so that it can optimize for this case as much as possible.
*/
- if (the_hash_algo->rawsz != 20)
- BUG("hash size not yet supported by hashcmp");
- return memcmp(sha1, sha2, the_hash_algo->rawsz);
+ if (the_hash_algo->rawsz == GIT_MAX_RAWSZ)
+ return memcmp(sha1, sha2, GIT_MAX_RAWSZ);
+ return memcmp(sha1, sha2, GIT_SHA1_RAWSZ);
}
static inline int oidcmp(const struct object_id *oid1, const struct object_id *oid2)
static inline int hasheq(const unsigned char *sha1, const unsigned char *sha2)
{
- return !hashcmp(sha1, sha2);
+ /*
+ * We write this here instead of deferring to hashcmp so that the
+ * compiler can properly inline it and avoid calling memcmp.
+ */
+ if (the_hash_algo->rawsz == GIT_MAX_RAWSZ)
+ return !memcmp(sha1, sha2, GIT_MAX_RAWSZ);
+ return !memcmp(sha1, sha2, GIT_SHA1_RAWSZ);
}
static inline int oideq(const struct object_id *oid1, const struct object_id *oid2)
extern int hex_to_bytes(unsigned char *binary, const char *hex, size_t len);
/*
- * Convert a binary sha1 to its hex equivalent. The `_r` variant is reentrant,
+ * Convert a binary hash to its hex equivalent. The `_r` variant is reentrant,
* and writes the NUL-terminated output to the buffer `out`, which must be at
- * least `GIT_SHA1_HEXSZ + 1` bytes, and returns a pointer to out for
+ * least `GIT_MAX_HEXSZ + 1` bytes, and returns a pointer to out for
* convenience.
*
* The non-`_r` variant returns a static buffer, but uses a ring of 4
*
* printf("%s -> %s", sha1_to_hex(one), sha1_to_hex(two));
*/
-extern char *sha1_to_hex_r(char *out, const unsigned char *sha1);
-extern char *oid_to_hex_r(char *out, const struct object_id *oid);
-extern char *sha1_to_hex(const unsigned char *sha1); /* static buffer result! */
-extern char *oid_to_hex(const struct object_id *oid); /* same static buffer as sha1_to_hex */
+char *hash_to_hex_algop_r(char *buffer, const unsigned char *hash, const struct git_hash_algo *);
+char *sha1_to_hex_r(char *out, const unsigned char *sha1);
+char *oid_to_hex_r(char *out, const struct object_id *oid);
+char *hash_to_hex_algop(const unsigned char *hash, const struct git_hash_algo *); /* static buffer result! */
+char *sha1_to_hex(const unsigned char *sha1); /* same static buffer */
+char *hash_to_hex(const unsigned char *hash); /* same static buffer */
+char *oid_to_hex(const struct object_id *oid); /* same static buffer */
/*
* Parse a 40-character hexadecimal object ID starting from hex, updating the
#define CHECKOUT_INIT { NULL, "" }
#define TEMPORARY_FILENAME_LENGTH 25
-extern int checkout_entry(struct cache_entry *ce, const struct checkout *state, char *topath);
+extern int checkout_entry(struct cache_entry *ce, const struct checkout *state, char *topath, int *nr_checkouts);
extern void enable_delayed_checkout(struct checkout *state);
-extern int finish_delayed_checkout(struct checkout *state);
+extern int finish_delayed_checkout(struct checkout *state, int *nr_checkouts);
struct cache_def {
struct strbuf path;
#define GRAPH_CHUNKID_DATA 0x43444154 /* "CDAT" */
#define GRAPH_CHUNKID_LARGEEDGES 0x45444745 /* "EDGE" */
-#define GRAPH_DATA_WIDTH 36
+#define GRAPH_DATA_WIDTH (the_hash_algo->rawsz + 16)
#define GRAPH_VERSION_1 0x1
#define GRAPH_VERSION GRAPH_VERSION_1
-#define GRAPH_OID_VERSION_SHA1 1
-#define GRAPH_OID_LEN_SHA1 GIT_SHA1_RAWSZ
-#define GRAPH_OID_VERSION GRAPH_OID_VERSION_SHA1
-#define GRAPH_OID_LEN GRAPH_OID_LEN_SHA1
-
#define GRAPH_OCTOPUS_EDGES_NEEDED 0x80000000
-#define GRAPH_PARENT_MISSING 0x7fffffff
#define GRAPH_EDGE_LAST_MASK 0x7fffffff
#define GRAPH_PARENT_NONE 0x70000000
#define GRAPH_FANOUT_SIZE (4 * 256)
#define GRAPH_CHUNKLOOKUP_WIDTH 12
#define GRAPH_MIN_SIZE (GRAPH_HEADER_SIZE + 4 * GRAPH_CHUNKLOOKUP_WIDTH \
- + GRAPH_FANOUT_SIZE + GRAPH_OID_LEN)
+ + GRAPH_FANOUT_SIZE + the_hash_algo->rawsz)
char *get_commit_graph_filename(const char *obj_dir)
{
return xstrfmt("%s/info/commit-graph", obj_dir);
}
+static uint8_t oid_version(void)
+{
+ return 1;
+}
+
static struct commit_graph *alloc_commit_graph(void)
{
struct commit_graph *g = xcalloc(1, sizeof(*g));
}
hash_version = *(unsigned char*)(data + 5);
- if (hash_version != GRAPH_OID_VERSION) {
+ if (hash_version != oid_version()) {
error(_("hash version %X does not match version %X"),
- hash_version, GRAPH_OID_VERSION);
+ hash_version, oid_version());
goto cleanup_fail;
}
graph = alloc_commit_graph();
- graph->hash_len = GRAPH_OID_LEN;
+ graph->hash_len = the_hash_algo->rawsz;
graph->num_chunks = *(unsigned char*)(data + 6);
graph->graph_fd = fd;
graph->data = graph_map;
chunk_lookup += GRAPH_CHUNKLOOKUP_WIDTH;
- if (chunk_offset > graph_size - GIT_MAX_RAWSZ) {
+ if (chunk_offset > graph_size - the_hash_algo->rawsz) {
error(_("improper chunk offset %08x%08x"), (uint32_t)(chunk_offset >> 32),
(uint32_t)chunk_offset);
goto cleanup_fail;
commit_to_sha1);
if (edge_value < 0)
- edge_value = GRAPH_PARENT_MISSING;
+ BUG("missing parent %s for commit %s",
+ oid_to_hex(&parent->item->object.oid),
+ oid_to_hex(&(*list)->object.oid));
}
hashwrite_be32(f, edge_value);
nr_commits,
commit_to_sha1);
if (edge_value < 0)
- edge_value = GRAPH_PARENT_MISSING;
+ BUG("missing parent %s for commit %s",
+ oid_to_hex(&parent->item->object.oid),
+ oid_to_hex(&(*list)->object.oid));
}
hashwrite_be32(f, edge_value);
commit_to_sha1);
if (edge_value < 0)
- edge_value = GRAPH_PARENT_MISSING;
+ BUG("missing parent %s for commit %s",
+ oid_to_hex(&parent->item->object.oid),
+ oid_to_hex(&(*list)->object.oid));
else if (!parent->next)
edge_value |= GRAPH_LAST_EDGE;
static void close_reachable(struct packed_oid_list *oids, int report_progress)
{
- int i;
+ int i, j;
struct commit *commit;
struct progress *progress = NULL;
- int j = 0;
if (report_progress)
progress = start_delayed_progress(
- _("Annotating commits in commit graph"), 0);
+ _("Loading known commits in commit graph"), j = 0);
for (i = 0; i < oids->nr; i++) {
display_progress(progress, ++j);
commit = lookup_commit(the_repository, &oids->list[i]);
if (commit)
commit->object.flags |= UNINTERESTING;
}
+ stop_progress(&progress);
/*
* As this loop runs, oids->nr may grow, but not more
* than the number of missing commits in the reachable
* closure.
*/
+ if (report_progress)
+ progress = start_delayed_progress(
+ _("Expanding reachable commits in commit graph"), j = 0);
for (i = 0; i < oids->nr; i++) {
display_progress(progress, ++j);
commit = lookup_commit(the_repository, &oids->list[i]);
if (commit && !parse_commit(commit))
add_missing_parents(oids, commit);
}
+ stop_progress(&progress);
+ if (report_progress)
+ progress = start_delayed_progress(
+ _("Clearing commit marks in commit graph"), j = 0);
for (i = 0; i < oids->nr; i++) {
display_progress(progress, ++j);
commit = lookup_commit(the_repository, &oids->list[i]);
int num_extra_edges;
struct commit_list *parent;
struct progress *progress = NULL;
+ const unsigned hashsz = the_hash_algo->rawsz;
if (!commit_graph_compatible(the_repository))
return;
count_distinct++;
}
- if (count_distinct >= GRAPH_PARENT_MISSING)
+ if (count_distinct >= GRAPH_EDGE_LAST_MASK)
die(_("the commit graph format cannot write %d commits"), count_distinct);
commits.nr = 0;
}
num_chunks = num_extra_edges ? 4 : 3;
- if (commits.nr >= GRAPH_PARENT_MISSING)
+ if (commits.nr >= GRAPH_EDGE_LAST_MASK)
die(_("too many commits to write graph"));
compute_generation_numbers(&commits, report_progress);
hashwrite_be32(f, GRAPH_SIGNATURE);
hashwrite_u8(f, GRAPH_VERSION);
- hashwrite_u8(f, GRAPH_OID_VERSION);
+ hashwrite_u8(f, oid_version());
hashwrite_u8(f, num_chunks);
hashwrite_u8(f, 0); /* unused padding byte */
chunk_offsets[0] = 8 + (num_chunks + 1) * GRAPH_CHUNKLOOKUP_WIDTH;
chunk_offsets[1] = chunk_offsets[0] + GRAPH_FANOUT_SIZE;
- chunk_offsets[2] = chunk_offsets[1] + GRAPH_OID_LEN * commits.nr;
- chunk_offsets[3] = chunk_offsets[2] + (GRAPH_OID_LEN + 16) * commits.nr;
+ chunk_offsets[2] = chunk_offsets[1] + hashsz * commits.nr;
+ chunk_offsets[3] = chunk_offsets[2] + (hashsz + 16) * commits.nr;
chunk_offsets[4] = chunk_offsets[3] + 4 * num_extra_edges;
for (i = 0; i <= num_chunks; i++) {
}
write_graph_chunk_fanout(f, commits.list, commits.nr);
- write_graph_chunk_oids(f, GRAPH_OID_LEN, commits.list, commits.nr);
- write_graph_chunk_data(f, GRAPH_OID_LEN, commits.list, commits.nr);
+ write_graph_chunk_oids(f, hashsz, commits.list, commits.nr);
+ write_graph_chunk_data(f, hashsz, commits.list, commits.nr);
write_graph_chunk_large_edges(f, commits.list, commits.nr);
close_commit_graph(the_repository);
+++ /dev/null
-#include "../git-compat-util.h"
-#include "../cache.h"
-
-int cygwin_offset_1st_component(const char *path)
-{
- const char *pos = path;
- /* unc paths */
- if (is_dir_sep(pos[0]) && is_dir_sep(pos[1])) {
- /* skip server name */
- pos = strchr(pos + 2, '/');
- if (!pos)
- return 0; /* Error: malformed unc path */
-
- do {
- pos++;
- } while (*pos && pos[0] != '/');
- }
- return pos + is_dir_sep(*pos) - path;
-}
+++ /dev/null
-int cygwin_offset_1st_component(const char *path);
-#define offset_1st_component cygwin_offset_1st_component
return 0;
/* We cannot use basename(), as it would remove trailing slashes */
- mingw_skip_dos_drive_prefix((char **)&path);
+ win32_skip_dos_drive_prefix((char **)&path);
if (!*path)
return 0;
return -1;
}
-int mingw_skip_dos_drive_prefix(char **path)
-{
- int ret = has_dos_drive_prefix(*path);
- *path += ret;
- return ret;
-}
-
-int mingw_offset_1st_component(const char *path)
-{
- char *pos = (char *)path;
-
- /* unc paths */
- if (!skip_dos_drive_prefix(&pos) &&
- is_dir_sep(pos[0]) && is_dir_sep(pos[1])) {
- /* skip server name */
- pos = strpbrk(pos + 2, "\\/");
- if (!pos)
- return 0; /* Error: malformed unc path */
-
- do {
- pos++;
- } while (*pos && !is_dir_sep(*pos));
- }
-
- return pos + is_dir_sep(*pos) - path;
-}
-
int xutftowcsn(wchar_t *wcs, const char *utfs, size_t wcslen, int utflen)
{
int upos = 0, wpos = 0;
* git specific compatibility
*/
-#define has_dos_drive_prefix(path) \
- (isalpha(*(path)) && (path)[1] == ':' ? 2 : 0)
-int mingw_skip_dos_drive_prefix(char **path);
-#define skip_dos_drive_prefix mingw_skip_dos_drive_prefix
-static inline int mingw_is_dir_sep(int c)
-{
- return c == '/' || c == '\\';
-}
-#define is_dir_sep mingw_is_dir_sep
-static inline char *mingw_find_last_dir_sep(const char *path)
-{
- char *ret = NULL;
- for (; *path; ++path)
- if (is_dir_sep(*path))
- ret = (char *)path;
- return ret;
-}
static inline void convert_slashes(char *path)
{
for (; *path; path++)
if (*path == '\\')
*path = '/';
}
-#define find_last_dir_sep mingw_find_last_dir_sep
-int mingw_offset_1st_component(const char *path);
-#define offset_1st_component mingw_offset_1st_component
#define PATH_SEP ';'
extern char *mingw_query_user_email(void);
#define query_user_email mingw_query_user_email
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
+#if defined __TANDEM
+ /* This is currently duplicated from git-compat-util.h */
+# ifdef NO_INTPTR_T
+ typedef long intptr_t;
+ typedef unsigned long uintptr_t;
+# endif
+#endif
+
static reg_errcode_t re_compile_internal (regex_t *preg, const char * pattern,
size_t length, reg_syntax_t syntax);
static void re_compile_fastmap_iter (regex_t *bufp,
--- /dev/null
+#include "../../git-compat-util.h"
+
+int win32_skip_dos_drive_prefix(char **path)
+{
+ int ret = has_dos_drive_prefix(*path);
+ *path += ret;
+ return ret;
+}
+
+int win32_offset_1st_component(const char *path)
+{
+ char *pos = (char *)path;
+
+ /* unc paths */
+ if (!skip_dos_drive_prefix(&pos) &&
+ is_dir_sep(pos[0]) && is_dir_sep(pos[1])) {
+ /* skip server name */
+ pos = strpbrk(pos + 2, "\\/");
+ if (!pos)
+ return 0; /* Error: malformed unc path */
+
+ do {
+ pos++;
+ } while (*pos && !is_dir_sep(*pos));
+ }
+
+ return pos + is_dir_sep(*pos) - path;
+}
--- /dev/null
+#define has_dos_drive_prefix(path) \
+ (isalpha(*(path)) && (path)[1] == ':' ? 2 : 0)
+int win32_skip_dos_drive_prefix(char **path);
+#define skip_dos_drive_prefix win32_skip_dos_drive_prefix
+static inline int win32_is_dir_sep(int c)
+{
+ return c == '/' || c == '\\';
+}
+#define is_dir_sep win32_is_dir_sep
+static inline char *win32_find_last_dir_sep(const char *path)
+{
+ char *ret = NULL;
+ for (; *path; ++path)
+ if (is_dir_sep(*path))
+ ret = (char *)path;
+ return ret;
+}
+#define find_last_dir_sep win32_find_last_dir_sep
+int win32_offset_1st_component(const char *path);
+#define offset_1st_component win32_offset_1st_component
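
For reference, here is a standalone sketch showing what offset_1st_component() computes for a few representative paths; it inlines copies of the macros from the new header so that it builds outside the Git tree (the test paths and expected offsets are illustrative assumptions):

#include <assert.h>
#include <ctype.h>
#include <string.h>

#define has_dos_drive_prefix(path) \
	(isalpha(*(path)) && (path)[1] == ':' ? 2 : 0)

static int is_dir_sep(int c)
{
	return c == '/' || c == '\\';
}

static int skip_dos_drive_prefix(char **path)
{
	int ret = has_dos_drive_prefix(*path);
	*path += ret;
	return ret;
}

/* Same logic as win32_offset_1st_component() above. */
static int offset_1st_component(const char *path)
{
	char *pos = (char *)path;

	/* unc paths */
	if (!skip_dos_drive_prefix(&pos) &&
	    is_dir_sep(pos[0]) && is_dir_sep(pos[1])) {
		/* skip server name */
		pos = strpbrk(pos + 2, "\\/");
		if (!pos)
			return 0; /* Error: malformed unc path */

		do {
			pos++;
		} while (*pos && !is_dir_sep(*pos));
	}

	return pos + is_dir_sep(*pos) - path;
}

int main(void)
{
	assert(offset_1st_component("relative/path") == 0);	/* no root prefix */
	assert(offset_1st_component("/unix/style") == 1);	/* leading '/' */
	assert(offset_1st_component("C:/repo") == 3);		/* drive + separator */
	assert(offset_1st_component("//server/share/dir") == 15); /* UNC share root */
	return 0;
}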
# don't warn for each N_ use
CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=0
endif
+CFLAGS += -Wall
CFLAGS += -Wdeclaration-after-statement
CFLAGS += -Wformat-security
CFLAGS += -Wno-format-zero-length
UNRELIABLE_FSTAT = UnfortunatelyYes
OBJECT_CREATION_USES_RENAMES = UnfortunatelyNeedsTo
MMAP_PREVENTS_DELETE = UnfortunatelyYes
- COMPAT_OBJS += compat/cygwin.o
+ COMPAT_OBJS += compat/win32/path-utils.o
FREAD_READS_DIRECTORIES = UnfortunatelyYes
endif
ifeq ($(uname_S),FreeBSD)
HAVE_BSD_SYSCTL = YesPlease
HAVE_BSD_KERN_PROC_SYSCTL = YesPlease
PROCFS_EXECUTABLE_PATH = /proc/curproc/file
+ FREAD_READS_DIRECTORIES = UnfortunatelyYes
endif
ifeq ($(uname_S),MirBSD)
NO_STRCASESTR = YesPlease
# INLINE='' would just replace one set of warnings with another and
# still not compile in c89 mode, due to non-const array initializations.
CC = cc -c99
+ # Build down-rev compatible objects that don't use our new getopt_long.
+ ifeq ($(uname_R).$(uname_V),J06.21)
+ CC += -WRVU=J06.20
+ endif
+ ifeq ($(uname_R).$(uname_V),L17.02)
+ CC += -WRVU=L16.05
+ endif
# Disable all optimization, seems to result in bad code, with -O or -O2
# or even -O1 (default), /usr/local/libexec/git-core/git-pack-objects
# abends on "git push". Needs more investigation.
- CFLAGS = -g -O0
+ CFLAGS = -g -O0 -Winline
# We'd want it to be here.
prefix = /usr/local
- # Our's are in ${prefix}/bin (perl might also be in /usr/bin/perl).
- PERL_PATH = ${prefix}/bin/perl
- PYTHON_PATH = ${prefix}/bin/python
-
+ # perl and python must be in /usr/bin on NonStop - supplied by HPE
+ # with the operating system in that managed directory.
+ PERL_PATH = /usr/bin/perl
+ PYTHON_PATH = /usr/bin/python
+ # The current /usr/coreutils/rm at the lowest support level does not work
+ # with the git test structure. Long paths as in
+ # 'trash directory...' cause rm to terminate prematurely without fully
+ # removing the directory at OS releases J06.21 and L17.02.
+ # Default to the older rm until those two releases are deprecated.
+ RM = /bin/rm -f
# As detected by './configure'.
# Missdetected, hence commented out, see below.
#NO_CURL = YesPlease
# Added manually, see above.
+ NEEDS_SSL_WITH_CURL = YesPlease
+ NEEDS_CRYPTO_WITH_SSL = YesPlease
+ HAVE_DEV_TTY = YesPlease
HAVE_LIBCHARSET_H = YesPlease
HAVE_STRINGS_H = YesPlease
NEEDS_LIBICONV = YesPlease
NEEDS_LIBINTL_BEFORE_LIBICONV = YesPlease
NO_SYS_SELECT_H = UnfortunatelyYes
NO_D_TYPE_IN_DIRENT = YesPlease
+ NO_GETTEXT = YesPlease
NO_HSTRERROR = YesPlease
NO_STRCASESTR = YesPlease
NO_MEMMEM = YesPlease
NO_MKDTEMP = YesPlease
# Currently libiconv-1.9.1.
OLD_ICONV = UnfortunatelyYes
- NO_REGEX = YesPlease
+ NO_REGEX = NeedsStartEnd
NO_PTHREADS = UnfortunatelyYes
# Not detected (nor checked for) by './configure'.
COMPAT_CFLAGS += -DNOGDI -Icompat -Icompat/win32
COMPAT_CFLAGS += -DSTRIP_EXTENSION=\".exe\"
COMPAT_OBJS += compat/mingw.o compat/winansi.o \
+ compat/win32/path-utils.o \
compat/win32/pthread.o compat/win32/syslog.o \
compat/win32/dirent.o
BASIC_CFLAGS += -DWIN32 -DPROTECT_NTFS_DEFAULT=1
# Callers must take care of providing only paths that match the current path
# to be completed and adding any prefix path components, if necessary.
# 1: List of newline-separated matching paths, complete with all prefix
-# path componens.
+# path components.
__gitcomp_file_direct ()
{
local IFS=$'\n'
__git_complete_revlist_file ()
{
- local pfx ls ref cur_="$cur"
+ local dequoted_word pfx ls ref cur_="$cur"
case "$cur_" in
*..?*:*)
return
?*:*)
ref="${cur_%%:*}"
cur_="${cur_#*:}"
- case "$cur_" in
+
+ __git_dequote "$cur_"
+
+ case "$dequoted_word" in
?*/*)
- pfx="${cur_%/*}"
- cur_="${cur_##*/}"
+ pfx="${dequoted_word%/*}"
+ cur_="${dequoted_word##*/}"
ls="$ref:$pfx"
pfx="$pfx/"
;;
*)
+ cur_="$dequoted_word"
ls="$ref"
;;
esac
*) pfx="$ref:$pfx" ;;
esac
- __gitcomp_nl "$(__git ls-tree "$ls" \
- | sed '/^100... blob /{
- s,^.* ,,
- s,$, ,
- }
- /^120000 blob /{
- s,^.* ,,
- s,$, ,
- }
- /^040000 tree /{
- s,^.* ,,
- s,$,/,
- }
- s/^.* //')" \
- "$pfx" "$cur_" ""
+ __gitcomp_file "$(__git ls-tree "$ls" \
+ | sed 's/^.* //
+ s/$//')" \
+ "$pfx" "$cur_"
;;
*...*)
pfx="${cur_%...*}..."
local IFS=$'\n'
compset -P '*[=:]'
- compadd -Q -f -- ${=1} && _ret=0
+ compadd -f -- ${=1} && _ret=0
}
__gitcomp_file ()
local IFS=$'\n'
compset -P '*[=:]'
- compadd -Q -p "${2-}" -f -- ${=1} && _ret=0
+ compadd -p "${2-}" -f -- ${=1} && _ret=0
}
_git ()
local IFS=$'\n'
compset -P '*[=:]'
- compadd -Q -f -- ${=1} && _ret=0
+ compadd -f -- ${=1} && _ret=0
}
__gitcomp_file ()
local IFS=$'\n'
compset -P '*[=:]'
- compadd -Q -p "${2-}" -f -- ${=1} && _ret=0
+ compadd -p "${2-}" -f -- ${=1} && _ret=0
}
__git_zsh_bash_func ()
+Release 1.5.0
+=============
+
+Backward-incompatible change
+----------------------------
+
+The environment classes were misnamed `*Environement`.
+They are now named `*Environment`.
+
+New features
+------------
+
+* A Thread-Index header is now added to each email sent (except for
+ combined emails where it would not make sense), so that MS Outlook
+ properly groups messages by threads even though they have a
+ different subject line. Unfortunately, even with this header added,
+ threading still seems to be unreliable, but it is unclear whether
+ this is an issue on our side or on MS Outlook's side (see discussion
+ here: https://github.com/git-multimail/git-multimail/pull/194).
+
+* A new variable multimailhook.ExcludeMergeRevisions was added to send
+ notification emails only for non-merge commits.
+
+* For gitolite environment, it is now possible to specify the mail map
+ in a separate file in addition to gitolite.conf, using the variable
+ multimailhook.MailaddressMap.
+
+Internal changes
+----------------
+
+* The testsuite now uses GIT_PRINT_SHA1_ELLIPSIS where needed for
+ compatibility with recent Git versions. Only tests are affected.
+
+* We don't try to install pyflakes in the continuous integration job
+ for old Python versions where it's no longer available.
+
+* Stop using the deprecated cgi.escape in Python 3.
+
+* New flake8 warnings have been fixed.
+
+* Python 3.6 is now tested on Travis-CI.
+
+* A bunch of lgtm.com warnings have been fixed.
+
+Bug fixes
+---------
+
+* SMTPMailer now logs in only once. It used to log in again for each
+ email sent, which triggered errors for some SMTP servers.
+
+* migrate-mailhook-config was broken by internal refactoring; it
+ should now work again.
+
+This version was tested with Python 2.6 to 3.7. It was tested with Git
+1.7.10.406.gdc801, 2.15.1 and 2.20.1.98.gecbdaf0.
+
Release 1.4.0
=============
git-multimail is an open-source project, built by volunteers. We would
welcome your help!
-The current maintainers are Matthieu Moy
-<matthieu.moy@grenoble-inp.fr> and Michael Haggerty
-<mhagger@alum.mit.edu>.
+The current maintainers are `Matthieu Moy <http://matthieu-moy.fr>`__ and
+`Michael Haggerty <https://github.com/mhagger>`__.
Please note that although a copy of git-multimail is distributed in
the "contrib" section of the main Git project, development takes place
Please CC emails regarding git-multimail to the maintainers so that we
don't overlook them.
+Help needed: testers/maintainer for specific environments/OS
+------------------------------------------------------------
+
+The current maintainer uses and tests git-multimail on Linux with the
+Generic environment. More testers or, better, contributors are needed
+to test git-multimail on other real-life setups:
+
+* Mac OS X, Windows: git-multimail is currently not supported on these
+ platforms. But since we have no external dependencies and try to
+ write code as portable as possible, it is possible that
+ git-multimail already runs there, and if not, it is likely that it
+ could be ported easily.
+
+ Patches to improve support for Windows and OS X are welcome.
+ Ideally, there would be a sub-maintainer for each OS who would test
+ at least once before each release (around twice a year).
+
+* Gerrit, Stash, Gitolite environments: although the testsuite
+ contains tests for these environments, a tester/maintainer for each
+ environment would be welcome to test and report failure (or success)
+ on real-life environments periodically (here also, feedback before
+ each release would be highly appreciated).
+
.. _`git-multimail repository on GitHub`: https://github.com/git-multimail/git-multimail
.. _`Git mailing list`: git@vger.kernel.org
+++ /dev/null
-git-multimail version 1.4.0
-===========================
-
-.. image:: https://travis-ci.org/git-multimail/git-multimail.svg?branch=master
- :target: https://travis-ci.org/git-multimail/git-multimail
-
-git-multimail is a tool for sending notification emails on pushes to a
-Git repository. It includes a Python module called ``git_multimail.py``,
-which can either be used as a hook script directly or can be imported
-as a Python module into another script.
-
-git-multimail is derived from the Git project's old
-contrib/hooks/post-receive-email, and is mostly compatible with that
-script. See README.migrate-from-post-receive-email for details about
-the differences and for how to migrate from post-receive-email to
-git-multimail.
-
-git-multimail, like the rest of the Git project, is licensed under
-GPLv2 (see the COPYING file for details).
-
-Please note: although, as a convenience, git-multimail may be
-distributed along with the main Git project, development of
-git-multimail takes place in its own, separate project. See section
-"Getting involved" below for more information.
-
-
-By default, for each push received by the repository, git-multimail:
-
-1. Outputs one email summarizing each reference that was changed.
- These "reference change" (called "refchange" below) emails describe
- the nature of the change (e.g., was the reference created, deleted,
- fast-forwarded, etc.) and include a one-line summary of each commit
- that was added to the reference.
-
-2. Outputs one email for each new commit that was introduced by the
- reference change. These "commit" emails include a list of the
- files changed by the commit, followed by the diffs of files
- modified by the commit. The commit emails are threaded to the
- corresponding reference change email via "In-Reply-To". This style
- (similar to the "git format-patch" style used on the Git mailing
- list) makes it easy to scan through the emails, jump to patches
- that need further attention, and write comments about specific
- commits. Commits are handled in reverse topological order (i.e.,
- parents shown before children). For example::
-
- [git] branch master updated
- + [git] 01/08: doc: fix xref link from api docs to manual pages
- + [git] 02/08: api-credentials.txt: show the big picture first
- + [git] 03/08: api-credentials.txt: mention credential.helper explicitly
- + [git] 04/08: api-credentials.txt: add "see also" section
- + [git] 05/08: t3510 (cherry-pick-sequence): add missing '&&'
- + [git] 06/08: Merge branch 'rr/maint-t3510-cascade-fix'
- + [git] 07/08: Merge branch 'mm/api-credentials-doc'
- + [git] 08/08: Git 1.7.11-rc2
-
- By default, each commit appears in exactly one commit email, the
- first time that it is pushed to the repository. If a commit is later
- merged into another branch, then a one-line summary of the commit
- is included in the reference change email (as usual), but no
- additional commit email is generated. See
- `multimailhook.refFilter(Inclusion|Exclusion|DoSend|DontSend)Regex`
- below to configure which branches and tags are watched by the hook.
-
- By default, reference change emails have their "Reply-To" field set
- to the person who pushed the change, and commit emails have their
- "Reply-To" field set to the author of the commit.
-
-3. Output one "announce" mail for each new annotated tag, including
- information about the tag and optionally a shortlog describing the
- changes since the previous tag. Such emails might be useful if you
- use annotated tags to mark releases of your project.
-
-
-Requirements
-------------
-
-* Python 2.x, version 2.4 or later. No non-standard Python modules
- are required. git-multimail has preliminary support for Python 3
- (but it has been better tested with Python 2).
-
-* The ``git`` command must be in your PATH. git-multimail is known to
- work with Git versions back to 1.7.1. (Earlier versions have not
- been tested; if you do so, please report your results.)
-
-* To send emails using the default configuration, a standard sendmail
- program must be located at '/usr/sbin/sendmail' or
- '/usr/lib/sendmail' and must be configured correctly to send emails.
- If this is not the case, set multimailhook.sendmailCommand, or see
- the multimailhook.mailer configuration variable below for how to
- configure git-multimail to send emails via an SMTP server.
-
-
-Invocation
-----------
-
-``git_multimail.py`` is designed to be used as a ``post-receive`` hook in a
-Git repository (see githooks(5)). Link or copy it to
-$GIT_DIR/hooks/post-receive within the repository for which email
-notifications are desired. Usually it should be installed on the
-central repository for a project, to which all commits are eventually
-pushed.
-
-For use on pre-v1.5.1 Git servers, ``git_multimail.py`` can also work as
-an ``update`` hook, taking its arguments on the command line. To use
-this script in this manner, link or copy it to $GIT_DIR/hooks/update.
-Please note that the script is not completely reliable in this mode
-[1]_.
-
-Alternatively, ``git_multimail.py`` can be imported as a Python module
-into your own Python post-receive script. This method is a bit more
-work, but allows the behavior of the hook to be customized using
-arbitrary Python code. For example, you can use a custom environment
-(perhaps inheriting from GenericEnvironment or GitoliteEnvironment) to
-
-* change how the user who did the push is determined
-
-* read users' email addresses from an LDAP server or from a database
-
-* decide which users should be notified about which commits based on
- the contents of the commits (e.g., for users who want to be notified
- only about changes affecting particular files or subdirectories)
-
-Or you can change how emails are sent by writing your own Mailer
-class. The ``post-receive`` script in this directory demonstrates how
-to use ``git_multimail.py`` as a Python module. (If you make interesting
-changes of this type, please consider sharing them with the
-community.)
-
-
-Troubleshooting/FAQ
--------------------
-
-Please read `<doc/troubleshooting.rst>`__ for frequently asked
-questions and common issues with git-multimail.
-
-
-Configuration
--------------
-
-By default, git-multimail mostly takes its configuration from the
-following ``git config`` settings:
-
-multimailhook.environment
- This describes the general environment of the repository. In most
- cases, you do not need to specify a value for this variable:
- `git-multimail` will autodetect which environment to use.
- Currently supported values:
-
- generic
- the username of the pusher is read from $USER or $USERNAME and
- the repository name is derived from the repository's path.
-
- gitolite
- Environment to use when ``git-multimail`` is ran as a gitolite_
- hook.
-
- The username of the pusher is read from $GL_USER, the repository
- name is read from $GL_REPO, and the From: header value is
- optionally read from gitolite.conf (see multimailhook.from).
-
- For more information about gitolite and git-multimail, read
- `<doc/gitolite.rst>`__
-
- stash
- Environment to use when ``git-multimail`` is ran as an Atlassian
- BitBucket Server (formerly known as Atlassian Stash) hook.
-
- **Warning:** this mode was provided by a third-party contributor
- and never tested by the git-multimail maintainers. It is
- provided as-is and may or may not work for you.
-
- This value is automatically assumed when the stash-specific
- flags (``--stash-user`` and ``--stash-repo``) are specified on
- the command line. When this environment is active, the username
- and repo come from these two command line flags, which must be
- specified.
-
- gerrit
- Environment to use when ``git-multimail`` is ran as a
- ``ref-updated`` Gerrit hook.
-
- This value is used when the gerrit-specific command line flags
- (``--oldrev``, ``--newrev``, ``--refname``, ``--project``) for
- gerrit's ref-updated hook are present. When this environment is
- active, the username of the pusher is taken from the
- ``--submitter`` argument if that command line option is passed,
- otherwise 'Gerrit' is used. The repository name is taken from
- the ``--project`` option on the command line, which must be passed.
-
- For more information about gerrit and git-multimail, read
- `<doc/gerrit.rst>`__
-
- If none of these environments is suitable for your setup, then you
- can implement a Python class that inherits from Environment and
- instantiate it via a script that looks like the example
- post-receive script.
-
- The environment value can be specified on the command line using
- the ``--environment`` option. If it is not specified on the
- command line or by ``multimailhook.environment``, the value is
- guessed as follows:
-
- * If stash-specific (respectively gerrit-specific) command flags
- are present on the command-line, then ``stash`` (respectively
- ``gerrit``) is used.
-
- * If the environment variables $GL_USER and $GL_REPO are set, then
- ``gitolite`` is used.
-
- * If none of the above apply, then ``generic`` is used.
-
-multimailhook.repoName
- A short name of this Git repository, to be used in various places
- in the notification email text. The default is to use $GL_REPO
- for gitolite repositories, or otherwise to derive this value from
- the repository path name.
-
-multimailhook.mailingList
- The list of email addresses to which notification emails should be
- sent, as RFC 2822 email addresses separated by commas. This
- configuration option can be multivalued. Leave it unset or set it
- to the empty string to not send emails by default. The next few
- settings can be used to configure specific address lists for
- specific types of notification email.
-
-multimailhook.refchangeList
- The list of email addresses to which summary emails about
- reference changes should be sent, as RFC 2822 email addresses
- separated by commas. This configuration option can be
- multivalued. The default is the value in
- multimailhook.mailingList. Set this value to "none" (or the empty
- string) to prevent reference change emails from being sent even if
- multimailhook.mailingList is set.
-
-multimailhook.announceList
- The list of email addresses to which emails about new annotated
- tags should be sent, as RFC 2822 email addresses separated by
- commas. This configuration option can be multivalued. The
- default is the value in multimailhook.refchangeList or
- multimailhook.mailingList. Set this value to "none" (or the empty
- string) to prevent annotated tag announcement emails from being sent
- even if one of the other values is set.
-
-multimailhook.commitList
- The list of email addresses to which emails about individual new
- commits should be sent, as RFC 2822 email addresses separated by
- commas. This configuration option can be multivalued. The
- default is the value in multimailhook.mailingList. Set this value
- to "none" (or the empty string) to prevent notification emails about
- individual commits from being sent even if
- multimailhook.mailingList is set.
-
-multimailhook.announceShortlog
- If this option is set to true, then emails about changes to
- annotated tags include a shortlog of changes since the previous
- tag. This can be useful if the annotated tags represent releases;
- then the shortlog will be a kind of rough summary of what has
- happened since the last release. But if your tagging policy is
- not so straightforward, then the shortlog might be confusing
- rather than useful. Default is false.
-
-multimailhook.commitEmailFormat
- The format of email messages for the individual commits, can be "text" or
- "html". In the latter case, the emails will include diffs using colorized
- HTML instead of plain text used by default. Note that this currently the
- ref change emails are always sent in plain text.
-
- Note that when using "html", the formatting is done by parsing the
- output of ``git log`` with ``-p``. When using
- ``multimailhook.commitLogOpts`` to specify a ``--format`` for
- ``git log``, one may get false positive (e.g. lines in the body of
- the message starting with ``+++`` or ``---`` colored in red or
- green).
-
- By default, all the message is HTML-escaped. See
- ``multimailhook.htmlInIntro`` to change this behavior.
-
-multimailhook.commitBrowseURL
- Used to generate a link to an online repository browser in commit
- emails. This variable must be a string. Format directives like
- ``%(<variable>)s`` will be expanded the same way as template
- strings. In particular, ``%(id)s`` will be replaced by the full
- Git commit identifier (40-chars hexadecimal).
-
- If the string does not contain any format directive, then
- ``%(id)s`` will be automatically added to the string. If you don't
- want ``%(id)s`` to be automatically added, use the empty format
- directive ``%()s`` anywhere in the string.
-
- For example, a suitable value for the git-multimail project itself
- would be
- ``https://github.com/git-multimail/git-multimail/commit/%(id)s``.
-
-multimailhook.htmlInIntro, multimailhook.htmlInFooter
- When generating an HTML message, git-multimail escapes any HTML
- sequence by default. This means that if a template contains HTML
- like ``<a href="foo">link</a>``, the reader will see the HTML
- source code and not a proper link.
-
- Set ``multimailhook.htmlInIntro`` to true to allow writing HTML
- formatting in introduction templates. Similarly, set
- ``multimailhook.htmlInFooter`` for HTML in the footer.
-
- Variables expanded in the template are still escaped. For example,
- if a repository's path contains a ``<``, it will be rendered as
- such in the message.
-
- Read `<doc/customizing-emails.rst>`__ for more details and
- examples.
-
-multimailhook.refchangeShowGraph
- If this option is set to true, then summary emails about reference
- changes will additionally include:
-
- * a graph of the added commits (if any)
-
- * a graph of the discarded commits (if any)
-
- The log is generated by running ``git log --graph`` with the options
- specified in graphOpts. The default is false.
-
-multimailhook.refchangeShowLog
- If this option is set to true, then summary emails about reference
- changes will include a detailed log of the added commits in
- addition to the one line summary. The log is generated by running
- ``git log`` with the options specified in multimailhook.logOpts.
- Default is false.
-
-multimailhook.mailer
- This option changes the way emails are sent. Accepted values are:
-
- * **sendmail (the default)**: use the command ``/usr/sbin/sendmail`` or
- ``/usr/lib/sendmail`` (or sendmailCommand, if configured). This
- mode can be further customized via the following options:
-
- multimailhook.sendmailCommand
- The command used by mailer ``sendmail`` to send emails. Shell
- quoting is allowed in the value of this setting, but remember that
- Git requires double-quotes to be escaped; e.g.::
-
- git config multimailhook.sendmailcommand '/usr/sbin/sendmail -oi -t -F \"Git Repo\"'
-
- Default is '/usr/sbin/sendmail -oi -t' or
- '/usr/lib/sendmail -oi -t' (depending on which file is
- present and executable).
-
- multimailhook.envelopeSender
- If set then pass this value to sendmail via the -f option to set
- the envelope sender address.
-
- * **smtp**: use Python's smtplib. This is useful when the sendmail
- command is not available on the system. This mode can be
- further customized via the following options:
-
- multimailhook.smtpServer
- The name of the SMTP server to connect to. The value can
- also include a colon and a port number; e.g.,
- ``mail.example.com:25``. Default is 'localhost' using port 25.
-
- multimailhook.smtpUser, multimailhook.smtpPass
- Server username and password. Required if smtpEncryption is 'ssl'.
- Note that the username and password currently need to be
- set cleartext in the configuration file, which is not
- recommended. If you need to use this option, be sure your
- configuration file is read-only.
-
- multimailhook.envelopeSender
- The sender address to be passed to the SMTP server. If
- unset, then the value of multimailhook.from is used.
-
- multimailhook.smtpServerTimeout
- Timeout in seconds.
-
- multimailhook.smtpEncryption
- Set the security type. Allowed values: ``none``, ``ssl``, ``tls`` (starttls).
- Default is ``none``.
-
- multimailhook.smtpCACerts
- Set the path to a list of trusted CA certificate to verify the
- server certificate, only supported when ``smtpEncryption`` is
- ``tls``. If unset or empty, the server certificate is not
- verified. If it targets a file containing a list of trusted CA
- certificates (PEM format) these CAs will be used to verify the
- server certificate. For debian, you can set
- ``/etc/ssl/certs/ca-certificates.crt`` for using the system
- trusted CAs. For self-signed server, you can add your server
- certificate to the system store::
-
- cd /usr/local/share/ca-certificates/
- openssl s_client -starttls smtp \
- -connect mail.example.net:587 -showcerts \
- </dev/null 2>/dev/null \
- | openssl x509 -outform PEM >mail.example.net.crt
- update-ca-certificates
-
- and used the updated ``/etc/ssl/certs/ca-certificates.crt``. Or
- directly use your ``/path/to/mail.example.net.crt``. Default is
- unset.
-
- multimailhook.smtpServerDebugLevel
- Integer number. Set to greater than 0 to activate debugging.
-
-multimailhook.from, multimailhook.fromCommit, multimailhook.fromRefchange
- If set, use this value in the From: field of generated emails.
- ``fromCommit`` is used for commit emails, ``fromRefchange`` is
- used for refchange emails, and ``from`` is used as fall-back in
- all cases.
-
- The value for these variables can be either:
-
- - An email address, which will be used directly.
-
- - The value ``pusher``, in which case the pusher's address (if
- available) will be used.
-
- - The value ``author`` (meaningful only for ``fromCommit``), in which
- case the commit author's address will be used.
-
- If config values are unset, the value of the From: header is
- determined as follows:
-
- 1. (gitolite environment only) Parse gitolite.conf, looking for a
- block of comments that looks like this::
-
- # BEGIN USER EMAILS
- # username Firstname Lastname <email@example.com>
- # END USER EMAILS
-
- If that block exists, and there is a line between the BEGIN
- USER EMAILS and END USER EMAILS lines where the first field
- matches the gitolite username ($GL_USER), use the rest of the
- line for the From: header.
-
- 2. If the user.email configuration setting is set, use its value
- (and the value of user.name, if set).
-
- 3. Use the value of multimailhook.envelopeSender.
-
-multimailhook.administrator
- The name and/or email address of the administrator of the Git
- repository; used in FOOTER_TEMPLATE. Default is
- multimailhook.envelopesender if it is set; otherwise a generic
- string is used.
-
-multimailhook.emailPrefix
- All emails have this string prepended to their subjects, to aid
- email filtering (though filtering based on the X-Git-* email
- headers is probably more robust). Default is the short name of
- the repository in square brackets; e.g., ``[myrepo]``. Set this
- value to the empty string to suppress the email prefix. You may
- use the placeholder ``%(repo_shortname)s`` for the short name of
- the repository.
-
-multimailhook.emailMaxLines
- The maximum number of lines that should be included in the body of
- a generated email. If not specified, there is no limit. Lines
- beyond the limit are suppressed and counted, and a final line is
- added indicating the number of suppressed lines.
-
-multimailhook.emailMaxLineLength
- The maximum length of a line in the email body. Lines longer than
- this limit are truncated to this length with a trailing ``[...]``
- added to indicate the missing text. The default is 500, because
- (a) diffs with longer lines are probably from binary files, for
- which a diff is useless, and (b) even if a text file has such long
- lines, the diffs are probably unreadable anyway. To disable line
- truncation, set this option to 0.
-
-multimailhook.subjectMaxLength
- The maximum length of the subject line (i.e. the ``oneline`` field
- in templates, not including the prefix). Lines longer than this
- limit are truncated to this length with a trailing ``[...]`` added
- to indicate the missing text. This option The default is to use
- ``multimailhook.emailMaxLineLength``. This option avoids sending
- emails with overly long subject lines, but should not be needed if
- the commit messages follow the Git convention (one short subject
- line, then a blank line, then the message body). To disable line
- truncation, set this option to 0.
-
-multimailhook.maxCommitEmails
- The maximum number of commit emails to send for a given change.
- When the number of patches is larger that this value, only the
- summary refchange email is sent. This can avoid accidental
- mailbombing, for example on an initial push. To disable commit
- emails limit, set this option to 0. The default is 500.
-
-multimailhook.emailStrictUTF8
- If this boolean option is set to `true`, then the main part of the
- email body is forced to be valid UTF-8. Any characters that are
- not valid UTF-8 are converted to the Unicode replacement
- character, U+FFFD. The default is `true`.
-
- This option is ineffective with Python 3, where non-UTF-8
- characters are unconditionally replaced.
-
-multimailhook.diffOpts
- Options passed to ``git diff-tree`` when generating the summary
- information for ReferenceChange emails. Default is ``--stat
- --summary --find-copies-harder``. Add -p to those options to
- include a unified diff of changes in addition to the usual summary
- output. Shell quoting is allowed; see ``multimailhook.logOpts`` for
- details.
-
-multimailhook.graphOpts
- Options passed to ``git log --graph`` when generating graphs for the
- reference change summary emails (used only if refchangeShowGraph
- is true). The default is '--oneline --decorate'.
-
- Shell quoting is allowed; see logOpts for details.
-
-multimailhook.logOpts
- Options passed to ``git log`` to generate additional info for
- reference change emails (used only if refchangeShowLog is set).
- For example, adding -p will show each commit's complete diff. The
- default is empty.
-
- Shell quoting is allowed; for example, a log format that contains
- spaces can be specified using something like::
-
- git config multimailhook.logopts '--pretty=format:"%h %aN <%aE>%n%s%n%n%b%n"'
-
- If you want to set this by editing your configuration file
- directly, remember that Git requires double-quotes to be escaped
- (see git-config(1) for more information)::
-
- [multimailhook]
- logopts = --pretty=format:\"%h %aN <%aE>%n%s%n%n%b%n\"
-
-multimailhook.commitLogOpts
- Options passed to ``git log`` to generate additional info for
- revision change emails. For example, adding --ignore-all-spaces
- will suppress whitespace changes. The default options are ``-C
- --stat -p --cc``. Shell quoting is allowed; see
- multimailhook.logOpts for details.
-
-multimailhook.dateSubstitute
- String to use as a substitute for ``Date:`` in the output of ``git
- log`` while formatting commit messages. This is useful to avoid
- emitting a line that can be interpreted by mailers as the start of
- a cited message (Zimbra webmail in particular). Defaults to
- ``CommitDate:``. Set to an empty string or ``none`` to deactivate
- the behavior.
-
-multimailhook.emailDomain
- Domain name appended to the username of the person doing the push
- to convert it into an email address
- (via ``"%s@%s" % (username, emaildomain)``). More complicated
- schemes can be implemented by overriding Environment and
- overriding its get_pusher_email() method.
-
-multimailhook.replyTo, multimailhook.replyToCommit, multimailhook.replyToRefchange
- Addresses to use in the Reply-To: field for commit emails
- (replyToCommit) and refchange emails (replyToRefchange).
- multimailhook.replyTo is used as default when replyToCommit or
- replyToRefchange is not set. The shortcuts ``pusher`` and
- ``author`` are allowed with the same semantics as for
- ``multimailhook.from``. In addition, the value ``none`` can be
- used to omit the ``Reply-To:`` field.
-
- The default is ``pusher`` for refchange emails, and ``author`` for
- commit emails.
-
-multimailhook.quiet
- Do not output the list of email recipients from the hook
-
-multimailhook.stdout
- For debugging, send emails to stdout rather than to the
- mailer. Equivalent to the --stdout command line option
-
-multimailhook.scanCommitForCc
- If this option is set to true, than recipients from lines in commit body
- that starts with ``CC:`` will be added to CC list.
- Default: false
-
-multimailhook.combineWhenSingleCommit
- If this option is set to true and a single new commit is pushed to
- a branch, combine the summary and commit email messages into a
- single email.
- Default: true
-
-multimailhook.refFilterInclusionRegex, multimailhook.refFilterExclusionRegex, multimailhook.refFilterDoSendRegex, multimailhook.refFilterDontSendRegex
- **Warning:** these options are experimental. They should work, but
- the user-interface is not stable yet (in particular, the option
- names may change). If you want to participate in stabilizing the
- feature, please contact the maintainers and/or send pull-requests.
- If you are happy with the current shape of the feature, please
- report it too.
-
- Regular expressions that can be used to limit refs for which email
- updates will be sent. It is an error to specify both an inclusion
- and an exclusion regex. If a ``refFilterInclusionRegex`` is
- specified, emails will only be sent for refs which match this
- regex. If a ``refFilterExclusionRegex`` regex is specified,
- emails will be sent for all refs except those that match this
- regex (or that match a predefined regex specific to the
- environment, such as "^refs/notes" for most environments and
- "^refs/notes|^refs/changes" for the gerrit environment).
-
- The expressions are matched against the complete refname, and is
- considered to match if any substring matches. For example, to
- filter-out all tags, set ``refFilterExclusionRegex`` to
- ``^refs/tags/`` (note the leading ``^`` but no trailing ``$``). If
- you set ``refFilterExclusionRegex`` to ``master``, then any ref
- containing ``master`` will be excluded (the ``master`` branch, but
- also ``refs/tags/master`` or ``refs/heads/foo-master-bar``).
-
- ``refFilterDoSendRegex`` and ``refFilterDontSendRegex`` are
- analogous to ``refFilterInclusionRegex`` and
- ``refFilterExclusionRegex`` with one difference: with
- ``refFilterDoSendRegex`` and ``refFilterDontSendRegex``, commits
- introduced by one excluded ref will not be considered as new when
- they reach an included ref. Typically, if you add a branch ``foo``
- to ``refFilterDontSendRegex``, push commits to this branch, and
- later merge branch ``foo`` into ``master``, then the notification
- email for ``master`` will contain a commit email only for the
- merge commit. If you include ``foo`` in
- ``refFilterExclusionRegex``, then at the time of merge, you will
- receive one commit email per commit in the branch.
-
- These variables can be multi-valued, like::
-
- [multimailhook]
- refFilterExclusionRegex = ^refs/tags/
- refFilterExclusionRegex = ^refs/heads/master$
-
- You can also provide a whitespace-separated list like::
-
- [multimailhook]
- refFilterExclusionRegex = ^refs/tags/ ^refs/heads/master$
-
- Both examples exclude tags and the master branch, and are
- equivalent to::
-
- [multimailhook]
- refFilterExclusionRegex = ^refs/tags/|^refs/heads/master$
-
- ``refFilterInclusionRegex`` and ``refFilterExclusionRegex`` are
- strictly stronger than ``refFilterDoSendRegex`` and
- ``refFilterDontSendRegex``. In other words, adding a ref to a
- DoSend/DontSend regex has no effect if it is already excluded by a
- Exclusion/Inclusion regex.
-
-multimailhook.logFile, multimailhook.errorLogFile, multimailhook.debugLogFile
-
- When set, these variable designate path to files where
- git-multimail will log some messages. Normal messages and error
- messages are sent to ``logFile``, and error messages are also sent
- to ``errorLogFile``. Debug messages and all other messages are
- sent to ``debugLogFile``. The recommended way is to set only one
- of these variables, but it is also possible to set several of them
- (part of the information is then duplicated in several log files,
- for example errors are duplicated to all log files).
-
- Relative path are relative to the Git repository where the push is
- done.
-
-multimailhook.verbose
-
- Verbosity level of git-multimail on its standard output. By
- default, show only error and info messages. If set to true, show
- also debug messages.
-
-Email filtering aids
---------------------
-
-All emails include extra headers to enable fine tuned filtering and
-give information for debugging. All emails include the headers
-``X-Git-Host``, ``X-Git-Repo``, ``X-Git-Refname``, and ``X-Git-Reftype``.
-ReferenceChange emails also include headers ``X-Git-Oldrev`` and ``X-Git-Newrev``;
-Revision emails also include header ``X-Git-Rev``.
-
-
-Customizing email contents
---------------------------
-
-git-multimail mostly generates emails by expanding templates. The
-templates can be customized. To avoid the need to edit
-``git_multimail.py`` directly, the preferred way to change the templates
-is to write a separate Python script that imports ``git_multimail.py`` as
-a module, then replaces the templates in place. See the provided
-post-receive script for an example of how this is done.
-
-
-Customizing git-multimail for your environment
-----------------------------------------------
-
-git-multimail is mostly customized via an "environment" that describes
-the local environment in which Git is running. Two types of
-environment are built in:
-
-GenericEnvironment
- a stand-alone Git repository.
-
-GitoliteEnvironment
- a Git repository that is managed by gitolite_. For such
- repositories, the identity of the pusher is read from
- environment variable $GL_USER, the name of the repository is read
- from $GL_REPO (if it is not overridden by multimailhook.reponame),
- and the From: header value is optionally read from gitolite.conf
- (see multimailhook.from).
-
-By default, git-multimail assumes GitoliteEnvironment if $GL_USER and
-$GL_REPO are set, and otherwise assumes GenericEnvironment.
-Alternatively, you can choose one of these two environments explicitly
-by setting a ``multimailhook.environment`` config setting (which can
-have the value `generic` or `gitolite`) or by passing an --environment
-option to the script.
-
-If you need to customize the script in ways that are not supported by
-the existing environments, you can define your own environment class
-class using arbitrary Python code. To do so, you need to import
-``git_multimail.py`` as a Python module, as demonstrated by the example
-post-receive script. Then implement your environment class; it should
-usually inherit from one of the existing Environment classes and
-possibly one or more of the EnvironmentMixin classes. Then set the
-``environment`` variable to an instance of your own environment class
-and pass it to ``run_as_post_receive_hook()``.
-
-The standard environment classes, GenericEnvironment and
-GitoliteEnvironment, are in fact themselves put together out of a
-number of mixin classes, each of which handles one aspect of the
-customization. For the finest control over your configuration, you
-can specify exactly which mixin classes your own environment class
-should inherit from, and override individual methods (or even add your
-own mixin classes) to implement entirely new behaviors. If you
-implement any mixins that might be useful to other people, please
-consider sharing them with the community!
-
-
-Getting involved
-----------------
-
-Please, read `<CONTRIBUTING.rst>`__ for instructions on how to
-contribute to git-multimail.
-
-
-Footnotes
----------
-
-.. [1] Because of the way information is passed to update hooks, the
- script's method of determining whether a commit has already
- been seen does not work when it is used as an ``update`` script.
- In particular, no notification email will be generated for a
- new commit that is added to multiple references in the same
- push. A workaround is to use --force-send to force sending the
- emails.
-
-.. _gitolite: https://github.com/sitaramc/gitolite
https://github.com/git-multimail/git-multimail
The version in this directory was obtained from the upstream project
-on August 17 2016 and consists of the "git-multimail" subdirectory from
+on January 07 2019 and consists of the "git-multimail" subdirectory from
revision
- 07b1cb6bfd7be156c62e1afa17cae13b850a869f refs/tags/1.4.0
+ 04e80e6c40be465cc62b6c246f0fcb8fd2cfd454 refs/tags/1.5.0
Please see the README file in this directory for information about how
to report bugs or contribute to git-multimail.
--- /dev/null
+git-multimail version 1.5.0
+===========================
+
+.. image:: https://travis-ci.org/git-multimail/git-multimail.svg?branch=master
+ :target: https://travis-ci.org/git-multimail/git-multimail
+
+git-multimail is a tool for sending notification emails on pushes to a
+Git repository. It includes a Python module called ``git_multimail.py``,
+which can either be used as a hook script directly or can be imported
+as a Python module into another script.
+
+git-multimail is derived from the Git project's old
+contrib/hooks/post-receive-email, and is mostly compatible with that
+script. See README.migrate-from-post-receive-email for details about
+the differences and for how to migrate from post-receive-email to
+git-multimail.
+
+git-multimail, like the rest of the Git project, is licensed under
+GPLv2 (see the COPYING file for details).
+
+Please note: although, as a convenience, git-multimail may be
+distributed along with the main Git project, development of
+git-multimail takes place in its own, separate project. Please read
+`<CONTRIBUTING.rst>`__ for more information.
+
+
+By default, for each push received by the repository, git-multimail:
+
+1. Outputs one email summarizing each reference that was changed.
+ These "reference change" (called "refchange" below) emails describe
+ the nature of the change (e.g., was the reference created, deleted,
+ fast-forwarded, etc.) and include a one-line summary of each commit
+ that was added to the reference.
+
+2. Outputs one email for each new commit that was introduced by the
+ reference change. These "commit" emails include a list of the
+ files changed by the commit, followed by the diffs of files
+ modified by the commit. The commit emails are threaded to the
+ corresponding reference change email via "In-Reply-To". This style
+ (similar to the "git format-patch" style used on the Git mailing
+ list) makes it easy to scan through the emails, jump to patches
+ that need further attention, and write comments about specific
+ commits. Commits are handled in reverse topological order (i.e.,
+ parents shown before children). For example::
+
+ [git] branch master updated
+ + [git] 01/08: doc: fix xref link from api docs to manual pages
+ + [git] 02/08: api-credentials.txt: show the big picture first
+ + [git] 03/08: api-credentials.txt: mention credential.helper explicitly
+ + [git] 04/08: api-credentials.txt: add "see also" section
+ + [git] 05/08: t3510 (cherry-pick-sequence): add missing '&&'
+ + [git] 06/08: Merge branch 'rr/maint-t3510-cascade-fix'
+ + [git] 07/08: Merge branch 'mm/api-credentials-doc'
+ + [git] 08/08: Git 1.7.11-rc2
+
+ By default, each commit appears in exactly one commit email, the
+ first time that it is pushed to the repository. If a commit is later
+ merged into another branch, then a one-line summary of the commit
+ is included in the reference change email (as usual), but no
+ additional commit email is generated. See
+ `multimailhook.refFilter(Inclusion|Exclusion|DoSend|DontSend)Regex`
+ below to configure which branches and tags are watched by the hook.
+
+ By default, reference change emails have their "Reply-To" field set
+ to the person who pushed the change, and commit emails have their
+ "Reply-To" field set to the author of the commit.
+
+3. Output one "announce" mail for each new annotated tag, including
+ information about the tag and optionally a shortlog describing the
+ changes since the previous tag. Such emails might be useful if you
+ use annotated tags to mark releases of your project.
+
+
+Requirements
+------------
+
+* Python 2.x, version 2.4 or later. No non-standard Python modules
+ are required. git-multimail has preliminary support for Python 3
+ (but it has been better tested with Python 2).
+
+* The ``git`` command must be in your PATH. git-multimail is known to
+ work with Git versions back to 1.7.1. (Earlier versions have not
+ been tested; if you do so, please report your results.)
+
+* To send emails using the default configuration, a standard sendmail
+ program must be located at '/usr/sbin/sendmail' or
+ '/usr/lib/sendmail' and must be configured correctly to send emails.
+ If this is not the case, set multimailhook.sendmailCommand, or see
+ the multimailhook.mailer configuration variable below for how to
+ configure git-multimail to send emails via an SMTP server.
+
+* git-multimail is currently tested only on Linux. It may or may not
+ work on other platforms such as Windows and Mac OS. See
+ `<CONTRIBUTING.rst>`__ to improve the situation.
+
+
+Invocation
+----------
+
+``git_multimail.py`` is designed to be used as a ``post-receive`` hook in a
+Git repository (see githooks(5)). Link or copy it to
+$GIT_DIR/hooks/post-receive within the repository for which email
+notifications are desired. Usually it should be installed on the
+central repository for a project, to which all commits are eventually
+pushed.
+
+For use on pre-v1.5.1 Git servers, ``git_multimail.py`` can also work as
+an ``update`` hook, taking its arguments on the command line. To use
+this script in this manner, link or copy it to $GIT_DIR/hooks/update.
+Please note that the script is not completely reliable in this mode
+[1]_.
+
+Alternatively, ``git_multimail.py`` can be imported as a Python module
+into your own Python post-receive script. This method is a bit more
+work, but allows the behavior of the hook to be customized using
+arbitrary Python code. For example, you can use a custom environment
+(perhaps inheriting from GenericEnvironment or GitoliteEnvironment) to
+
+* change how the user who did the push is determined
+
+* read users' email addresses from an LDAP server or from a database
+
+* decide which users should be notified about which commits based on
+ the contents of the commits (e.g., for users who want to be notified
+ only about changes affecting particular files or subdirectories)
+
+Or you can change how emails are sent by writing your own Mailer
+class. The ``post-receive`` script in this directory demonstrates how
+to use ``git_multimail.py`` as a Python module. (If you make interesting
+changes of this type, please consider sharing them with the
+community.)
+
+
+Troubleshooting/FAQ
+-------------------
+
+Please read `<doc/troubleshooting.rst>`__ for frequently asked
+questions and common issues with git-multimail.
+
+
+Configuration
+-------------
+
+By default, git-multimail mostly takes its configuration from the
+following ``git config`` settings:
+
+multimailhook.environment
+ This describes the general environment of the repository. In most
+ cases, you do not need to specify a value for this variable:
+ `git-multimail` will autodetect which environment to use.
+ Currently supported values:
+
+ generic
+ the username of the pusher is read from $USER or $USERNAME and
+ the repository name is derived from the repository's path.
+
+ gitolite
+ Environment to use when ``git-multimail`` is run as a gitolite_
+ hook.
+
+ The username of the pusher is read from $GL_USER, the repository
+ name is read from $GL_REPO, and the From: header value is
+ optionally read from gitolite.conf (see multimailhook.from).
+
+ For more information about gitolite and git-multimail, read
+ `<doc/gitolite.rst>`__
+
+ stash
+    Environment to use when ``git-multimail`` is run as an Atlassian
+    Bitbucket Server (formerly known as Atlassian Stash) hook.
+
+ **Warning:** this mode was provided by a third-party contributor
+ and never tested by the git-multimail maintainers. It is
+ provided as-is and may or may not work for you.
+
+ This value is automatically assumed when the stash-specific
+ flags (``--stash-user`` and ``--stash-repo``) are specified on
+ the command line. When this environment is active, the username
+ and repo come from these two command line flags, which must be
+ specified.
+
+ gerrit
+    Environment to use when ``git-multimail`` is run as a
+ ``ref-updated`` Gerrit hook.
+
+ This value is used when the gerrit-specific command line flags
+ (``--oldrev``, ``--newrev``, ``--refname``, ``--project``) for
+ gerrit's ref-updated hook are present. When this environment is
+ active, the username of the pusher is taken from the
+ ``--submitter`` argument if that command line option is passed,
+ otherwise 'Gerrit' is used. The repository name is taken from
+ the ``--project`` option on the command line, which must be passed.
+
+ For more information about gerrit and git-multimail, read
+ `<doc/gerrit.rst>`__
+
+ If none of these environments is suitable for your setup, then you
+ can implement a Python class that inherits from Environment and
+ instantiate it via a script that looks like the example
+ post-receive script.
+
+ The environment value can be specified on the command line using
+ the ``--environment`` option. If it is not specified on the
+ command line or by ``multimailhook.environment``, the value is
+ guessed as follows:
+
+ * If stash-specific (respectively gerrit-specific) command flags
+ are present on the command-line, then ``stash`` (respectively
+ ``gerrit``) is used.
+
+ * If the environment variables $GL_USER and $GL_REPO are set, then
+ ``gitolite`` is used.
+
+ * If none of the above apply, then ``generic`` is used.
+
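+  For example, to bypass autodetection and force one of the values
+  listed above (the choice of ``gitolite`` here is only an
+  illustration)::
+
+      git config multimailhook.environment gitolite
+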
+multimailhook.repoName
+ A short name of this Git repository, to be used in various places
+ in the notification email text. The default is to use $GL_REPO
+ for gitolite repositories, or otherwise to derive this value from
+ the repository path name.
+
+multimailhook.mailingList
+ The list of email addresses to which notification emails should be
+ sent, as RFC 2822 email addresses separated by commas. This
+ configuration option can be multivalued. Leave it unset or set it
+ to the empty string to not send emails by default. The next few
+ settings can be used to configure specific address lists for
+ specific types of notification email.
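+
+  For example, to send every notification to a single list (the
+  address is illustrative)::
+
+      git config multimailhook.mailingList commits@example.com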
+
+multimailhook.refchangeList
+ The list of email addresses to which summary emails about
+ reference changes should be sent, as RFC 2822 email addresses
+ separated by commas. This configuration option can be
+ multivalued. The default is the value in
+ multimailhook.mailingList. Set this value to "none" (or the empty
+ string) to prevent reference change emails from being sent even if
+ multimailhook.mailingList is set.
+
+multimailhook.announceList
+ The list of email addresses to which emails about new annotated
+ tags should be sent, as RFC 2822 email addresses separated by
+ commas. This configuration option can be multivalued. The
+ default is the value in multimailhook.refchangeList or
+ multimailhook.mailingList. Set this value to "none" (or the empty
+ string) to prevent annotated tag announcement emails from being sent
+ even if one of the other values is set.
+
+multimailhook.commitList
+ The list of email addresses to which emails about individual new
+ commits should be sent, as RFC 2822 email addresses separated by
+ commas. This configuration option can be multivalued. The
+ default is the value in multimailhook.mailingList. Set this value
+ to "none" (or the empty string) to prevent notification emails about
+ individual commits from being sent even if
+ multimailhook.mailingList is set.
+
+multimailhook.announceShortlog
+ If this option is set to true, then emails about changes to
+ annotated tags include a shortlog of changes since the previous
+ tag. This can be useful if the annotated tags represent releases;
+ then the shortlog will be a kind of rough summary of what has
+ happened since the last release. But if your tagging policy is
+ not so straightforward, then the shortlog might be confusing
+ rather than useful. Default is false.
+
+multimailhook.commitEmailFormat
+  The format of email messages for the individual commits; it can be "text" or
+  "html". In the latter case, the emails will include diffs using colorized
+  HTML instead of the plain text used by default. Note that currently the
+  ref change emails are always sent in plain text.
+
+ Note that when using "html", the formatting is done by parsing the
+ output of ``git log`` with ``-p``. When using
+ ``multimailhook.commitLogOpts`` to specify a ``--format`` for
+  ``git log``, one may get false positives (e.g. lines in the body of
+ the message starting with ``+++`` or ``---`` colored in red or
+ green).
+
+  By default, the entire message is HTML-escaped. See
+ ``multimailhook.htmlInIntro`` to change this behavior.
+
+multimailhook.commitBrowseURL
+ Used to generate a link to an online repository browser in commit
+ emails. This variable must be a string. Format directives like
+ ``%(<variable>)s`` will be expanded the same way as template
+ strings. In particular, ``%(id)s`` will be replaced by the full
+  Git commit identifier (40-character hexadecimal).
+
+ If the string does not contain any format directive, then
+ ``%(id)s`` will be automatically added to the string. If you don't
+ want ``%(id)s`` to be automatically added, use the empty format
+ directive ``%()s`` anywhere in the string.
+
+ For example, a suitable value for the git-multimail project itself
+ would be
+ ``https://github.com/git-multimail/git-multimail/commit/%(id)s``.
+
+multimailhook.htmlInIntro, multimailhook.htmlInFooter
+ When generating an HTML message, git-multimail escapes any HTML
+ sequence by default. This means that if a template contains HTML
+ like ``<a href="foo">link</a>``, the reader will see the HTML
+ source code and not a proper link.
+
+ Set ``multimailhook.htmlInIntro`` to true to allow writing HTML
+ formatting in introduction templates. Similarly, set
+ ``multimailhook.htmlInFooter`` for HTML in the footer.
+
+ Variables expanded in the template are still escaped. For example,
+ if a repository's path contains a ``<``, it will be rendered as
+ such in the message.
+
+ Read `<doc/customizing-emails.rst>`__ for more details and
+ examples.
+
+multimailhook.refchangeShowGraph
+ If this option is set to true, then summary emails about reference
+ changes will additionally include:
+
+ * a graph of the added commits (if any)
+
+ * a graph of the discarded commits (if any)
+
+ The log is generated by running ``git log --graph`` with the options
+ specified in graphOpts. The default is false.
+
+multimailhook.refchangeShowLog
+ If this option is set to true, then summary emails about reference
+ changes will include a detailed log of the added commits in
+ addition to the one line summary. The log is generated by running
+ ``git log`` with the options specified in multimailhook.logOpts.
+ Default is false.
+
+multimailhook.mailer
+ This option changes the way emails are sent. Accepted values are:
+
+ * **sendmail (the default)**: use the command ``/usr/sbin/sendmail`` or
+ ``/usr/lib/sendmail`` (or sendmailCommand, if configured). This
+ mode can be further customized via the following options:
+
+ multimailhook.sendmailCommand
+ The command used by mailer ``sendmail`` to send emails. Shell
+ quoting is allowed in the value of this setting, but remember that
+ Git requires double-quotes to be escaped; e.g.::
+
+ git config multimailhook.sendmailcommand '/usr/sbin/sendmail -oi -t -F \"Git Repo\"'
+
+ Default is '/usr/sbin/sendmail -oi -t' or
+ '/usr/lib/sendmail -oi -t' (depending on which file is
+ present and executable).
+
+ multimailhook.envelopeSender
+      If set, then pass this value to sendmail via the -f option to set
+ the envelope sender address.
+
+  * **smtp**: use Python's smtplib. This is useful when the sendmail
+    command is not available on the system. This mode can be
+    further customized via the following options (a combined example
+    follows this list):
+
+ multimailhook.smtpServer
+ The name of the SMTP server to connect to. The value can
+ also include a colon and a port number; e.g.,
+ ``mail.example.com:25``. Default is 'localhost' using port 25.
+
+ multimailhook.smtpUser, multimailhook.smtpPass
+ Server username and password. Required if smtpEncryption is 'ssl'.
+ Note that the username and password currently need to be
+      stored in cleartext in the configuration file, which is not
+ recommended. If you need to use this option, be sure your
+ configuration file is read-only.
+
+ multimailhook.envelopeSender
+ The sender address to be passed to the SMTP server. If
+ unset, then the value of multimailhook.from is used.
+
+ multimailhook.smtpServerTimeout
+ Timeout in seconds. Default is 10.
+
+ multimailhook.smtpEncryption
+ Set the security type. Allowed values: ``none``, ``ssl``, ``tls`` (starttls).
+ Default is ``none``.
+
+ multimailhook.smtpCACerts
+      Set the path to a file containing a list of trusted CA
+      certificates (PEM format) used to verify the server certificate;
+      this is only supported when ``smtpEncryption`` is ``tls``. If
+      unset or empty, the server certificate is not verified. On
+      Debian, you can set ``/etc/ssl/certs/ca-certificates.crt`` to use
+      the system trusted CAs. For a self-signed server certificate, you
+      can add the server certificate to the system store::
+
+ cd /usr/local/share/ca-certificates/
+ openssl s_client -starttls smtp \
+ -connect mail.example.net:587 -showcerts \
+ </dev/null 2>/dev/null \
+ | openssl x509 -outform PEM >mail.example.net.crt
+ update-ca-certificates
+
+      and use the updated ``/etc/ssl/certs/ca-certificates.crt``. Or
+ directly use your ``/path/to/mail.example.net.crt``. Default is
+ unset.
+
+ multimailhook.smtpServerDebugLevel
+ Integer number. Set to greater than 0 to activate debugging.
+
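+  A combined sketch of an ``smtp`` setup (the server, credentials and
+  encryption values shown are placeholders to adapt to your site)::
+
+      [multimailhook]
+              mailer = smtp
+              smtpServer = mail.example.com:587
+              smtpEncryption = tls
+              smtpUser = git-notifier
+              smtpPass = s3cret
+              envelopeSender = git-repo@example.com
+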
+multimailhook.from, multimailhook.fromCommit, multimailhook.fromRefchange
+ If set, use this value in the From: field of generated emails.
+ ``fromCommit`` is used for commit emails, ``fromRefchange`` is
+ used for refchange emails, and ``from`` is used as fall-back in
+ all cases.
+
+ The value for these variables can be either:
+
+ - An email address, which will be used directly.
+
+ - The value ``pusher``, in which case the pusher's address (if
+ available) will be used.
+
+ - The value ``author`` (meaningful only for ``fromCommit``), in which
+ case the commit author's address will be used.
+
+ If config values are unset, the value of the From: header is
+ determined as follows:
+
+ 1. (gitolite environment only)
+ 1.a) If ``multimailhook.MailaddressMap`` is set, and is a path
+ to an existing file (if relative, it is considered relative to
+ the place where ``gitolite.conf`` is located), then this file
+ should contain lines like::
+
+ username Firstname Lastname <email@example.com>
+
+ git-multimail will then look for a line where ``$GL_USER``
+ matches the ``username`` part, and use the rest of the line for
+ the ``From:`` header.
+
+ 1.b) Parse gitolite.conf, looking for a block of comments that
+ looks like this::
+
+ # BEGIN USER EMAILS
+ # username Firstname Lastname <email@example.com>
+ # END USER EMAILS
+
+ If that block exists, and there is a line between the BEGIN
+ USER EMAILS and END USER EMAILS lines where the first field
+ matches the gitolite username ($GL_USER), use the rest of the
+ line for the From: header.
+
+ 2. If the user.email configuration setting is set, use its value
+ (and the value of user.name, if set).
+
+ 3. Use the value of multimailhook.envelopeSender.
+
+multimailhook.MailaddressMap
+ (gitolite environment only)
+ File to look for a ``From:`` address based on the user doing the
+ push. Defaults to unset. See ``multimailhook.from`` for details.
+
+multimailhook.administrator
+ The name and/or email address of the administrator of the Git
+ repository; used in FOOTER_TEMPLATE. Default is
+  multimailhook.envelopeSender if it is set; otherwise a generic
+ string is used.
+
+multimailhook.emailPrefix
+ All emails have this string prepended to their subjects, to aid
+ email filtering (though filtering based on the X-Git-* email
+ headers is probably more robust). Default is the short name of
+ the repository in square brackets; e.g., ``[myrepo]``. Set this
+ value to the empty string to suppress the email prefix. You may
+ use the placeholder ``%(repo_shortname)s`` for the short name of
+ the repository.
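+
+  For example, a sketch of a custom prefix using the placeholder (the
+  exact text is illustrative)::
+
+      git config multimailhook.emailPrefix "[git/%(repo_shortname)s] "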
+
+multimailhook.emailMaxLines
+ The maximum number of lines that should be included in the body of
+ a generated email. If not specified, there is no limit. Lines
+ beyond the limit are suppressed and counted, and a final line is
+ added indicating the number of suppressed lines.
+
+multimailhook.emailMaxLineLength
+ The maximum length of a line in the email body. Lines longer than
+ this limit are truncated to this length with a trailing ``[...]``
+ added to indicate the missing text. The default is 500, because
+ (a) diffs with longer lines are probably from binary files, for
+ which a diff is useless, and (b) even if a text file has such long
+ lines, the diffs are probably unreadable anyway. To disable line
+ truncation, set this option to 0.
+
+multimailhook.subjectMaxLength
+ The maximum length of the subject line (i.e. the ``oneline`` field
+ in templates, not including the prefix). Lines longer than this
+ limit are truncated to this length with a trailing ``[...]`` added
+  to indicate the missing text. The default is to use
+  ``multimailhook.emailMaxLineLength``. This option avoids sending
+ emails with overly long subject lines, but should not be needed if
+ the commit messages follow the Git convention (one short subject
+ line, then a blank line, then the message body). To disable line
+ truncation, set this option to 0.
+
+multimailhook.maxCommitEmails
+ The maximum number of commit emails to send for a given change.
+  When the number of patches is larger than this value, only the
+  summary refchange email is sent. This can avoid accidental
+  mailbombing, for example on an initial push. To disable the
+  limit on commit emails, set this option to 0. The default is 500.
+
+multimailhook.excludeMergeRevisions
+ When sending out revision emails, do not consider merge commits (the
+ functional equivalent of `rev-list --no-merges`).
+ The default is `false` (send merge commit emails).
+
+multimailhook.emailStrictUTF8
+ If this boolean option is set to `true`, then the main part of the
+ email body is forced to be valid UTF-8. Any characters that are
+ not valid UTF-8 are converted to the Unicode replacement
+ character, U+FFFD. The default is `true`.
+
+ This option is ineffective with Python 3, where non-UTF-8
+ characters are unconditionally replaced.
+
+multimailhook.diffOpts
+ Options passed to ``git diff-tree`` when generating the summary
+ information for ReferenceChange emails. Default is ``--stat
+ --summary --find-copies-harder``. Add -p to those options to
+ include a unified diff of changes in addition to the usual summary
+ output. Shell quoting is allowed; see ``multimailhook.logOpts`` for
+ details.
+
+multimailhook.graphOpts
+ Options passed to ``git log --graph`` when generating graphs for the
+ reference change summary emails (used only if refchangeShowGraph
+ is true). The default is '--oneline --decorate'.
+
+ Shell quoting is allowed; see logOpts for details.
+
+multimailhook.logOpts
+ Options passed to ``git log`` to generate additional info for
+ reference change emails (used only if refchangeShowLog is set).
+ For example, adding -p will show each commit's complete diff. The
+ default is empty.
+
+ Shell quoting is allowed; for example, a log format that contains
+ spaces can be specified using something like::
+
+ git config multimailhook.logopts '--pretty=format:"%h %aN <%aE>%n%s%n%n%b%n"'
+
+ If you want to set this by editing your configuration file
+ directly, remember that Git requires double-quotes to be escaped
+ (see git-config(1) for more information)::
+
+ [multimailhook]
+ logopts = --pretty=format:\"%h %aN <%aE>%n%s%n%n%b%n\"
+
+multimailhook.commitLogOpts
+ Options passed to ``git log`` to generate additional info for
+  revision change emails. For example, adding --ignore-all-space
+ will suppress whitespace changes. The default options are ``-C
+ --stat -p --cc``. Shell quoting is allowed; see
+ multimailhook.logOpts for details.
+
+multimailhook.dateSubstitute
+ String to use as a substitute for ``Date:`` in the output of ``git
+ log`` while formatting commit messages. This is useful to avoid
+ emitting a line that can be interpreted by mailers as the start of
+ a cited message (Zimbra webmail in particular). Defaults to
+ ``CommitDate:``. Set to an empty string or ``none`` to deactivate
+ the behavior.
+
+multimailhook.emailDomain
+ Domain name appended to the username of the person doing the push
+ to convert it into an email address
+ (via ``"%s@%s" % (username, emaildomain)``). More complicated
+  schemes can be implemented by subclassing Environment and
+ overriding its get_pusher_email() method.
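+
+  For example, with the setting below (the domain and the username
+  ``alice`` are illustrative), a push by the user ``alice`` would
+  produce emails attributed to ``alice@example.com``::
+
+      git config multimailhook.emailDomain example.com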
+
+multimailhook.replyTo, multimailhook.replyToCommit, multimailhook.replyToRefchange
+ Addresses to use in the Reply-To: field for commit emails
+ (replyToCommit) and refchange emails (replyToRefchange).
+ multimailhook.replyTo is used as default when replyToCommit or
+ replyToRefchange is not set. The shortcuts ``pusher`` and
+ ``author`` are allowed with the same semantics as for
+ ``multimailhook.from``. In addition, the value ``none`` can be
+ used to omit the ``Reply-To:`` field.
+
+ The default is ``pusher`` for refchange emails, and ``author`` for
+ commit emails.
+
+multimailhook.quiet
+  Do not output the list of email recipients from the hook.
+
+multimailhook.stdout
+  For debugging, send emails to stdout rather than to the
+  mailer. Equivalent to the ``--stdout`` command line option.
+
+multimailhook.scanCommitForCc
+  If this option is set to true, then recipients from lines in the commit
+  body that start with ``CC:`` will be added to the CC list.
+ Default: false
+
+multimailhook.combineWhenSingleCommit
+ If this option is set to true and a single new commit is pushed to
+ a branch, combine the summary and commit email messages into a
+ single email.
+ Default: true
+
+multimailhook.refFilterInclusionRegex, multimailhook.refFilterExclusionRegex, multimailhook.refFilterDoSendRegex, multimailhook.refFilterDontSendRegex
+ **Warning:** these options are experimental. They should work, but
+ the user-interface is not stable yet (in particular, the option
+ names may change). If you want to participate in stabilizing the
+ feature, please contact the maintainers and/or send pull-requests.
+ If you are happy with the current shape of the feature, please
+ report it too.
+
+ Regular expressions that can be used to limit refs for which email
+ updates will be sent. It is an error to specify both an inclusion
+ and an exclusion regex. If a ``refFilterInclusionRegex`` is
+ specified, emails will only be sent for refs which match this
+ regex. If a ``refFilterExclusionRegex`` regex is specified,
+ emails will be sent for all refs except those that match this
+ regex (or that match a predefined regex specific to the
+ environment, such as "^refs/notes" for most environments and
+ "^refs/notes|^refs/changes" for the gerrit environment).
+
+  The expressions are matched against the complete refname, and are
+  considered to match if any substring matches. For example, to
+ filter-out all tags, set ``refFilterExclusionRegex`` to
+ ``^refs/tags/`` (note the leading ``^`` but no trailing ``$``). If
+ you set ``refFilterExclusionRegex`` to ``master``, then any ref
+ containing ``master`` will be excluded (the ``master`` branch, but
+ also ``refs/tags/master`` or ``refs/heads/foo-master-bar``).
+
+ ``refFilterDoSendRegex`` and ``refFilterDontSendRegex`` are
+ analogous to ``refFilterInclusionRegex`` and
+ ``refFilterExclusionRegex`` with one difference: with
+ ``refFilterDoSendRegex`` and ``refFilterDontSendRegex``, commits
+ introduced by one excluded ref will not be considered as new when
+ they reach an included ref. Typically, if you add a branch ``foo``
+ to ``refFilterDontSendRegex``, push commits to this branch, and
+ later merge branch ``foo`` into ``master``, then the notification
+ email for ``master`` will contain a commit email only for the
+ merge commit. If you include ``foo`` in
+ ``refFilterExclusionRegex``, then at the time of merge, you will
+ receive one commit email per commit in the branch.
+
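+  As a sketch of the scenario described above (the branch name ``foo``
+  is illustrative)::
+
+      [multimailhook]
+              refFilterDontSendRegex = ^refs/heads/foo$
+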
+ These variables can be multi-valued, like::
+
+ [multimailhook]
+ refFilterExclusionRegex = ^refs/tags/
+ refFilterExclusionRegex = ^refs/heads/master$
+
+ You can also provide a whitespace-separated list like::
+
+ [multimailhook]
+ refFilterExclusionRegex = ^refs/tags/ ^refs/heads/master$
+
+ Both examples exclude tags and the master branch, and are
+ equivalent to::
+
+ [multimailhook]
+ refFilterExclusionRegex = ^refs/tags/|^refs/heads/master$
+
+ ``refFilterInclusionRegex`` and ``refFilterExclusionRegex`` are
+ strictly stronger than ``refFilterDoSendRegex`` and
+ ``refFilterDontSendRegex``. In other words, adding a ref to a
+  DoSend/DontSend regex has no effect if it is already excluded by an
+ Exclusion/Inclusion regex.
+
+multimailhook.logFile, multimailhook.errorLogFile, multimailhook.debugLogFile
+
+  When set, these variables designate paths to files where
+ git-multimail will log some messages. Normal messages and error
+ messages are sent to ``logFile``, and error messages are also sent
+ to ``errorLogFile``. Debug messages and all other messages are
+ sent to ``debugLogFile``. The recommended way is to set only one
+ of these variables, but it is also possible to set several of them
+ (part of the information is then duplicated in several log files,
+ for example errors are duplicated to all log files).
+
+  Relative paths are relative to the Git repository where the push is
+ done.
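+
+  For example, to collect normal and error messages in one place (the
+  path is illustrative)::
+
+      git config multimailhook.logFile /var/log/git-multimail.log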
+
+multimailhook.verbose
+
+ Verbosity level of git-multimail on its standard output. By
+  default, show only error and info messages. If set to true, also
+  show debug messages.
+
+Email filtering aids
+--------------------
+
+All emails include extra headers to enable fine-tuned filtering and
+give information for debugging. All emails include the headers
+``X-Git-Host``, ``X-Git-Repo``, ``X-Git-Refname``, and ``X-Git-Reftype``.
+ReferenceChange emails also include headers ``X-Git-Oldrev`` and ``X-Git-Newrev``;
+Revision emails also include header ``X-Git-Rev``.
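+
+For example, a notification for a push to ``master`` in a repository
+called ``myrepo`` might carry headers along these lines (the values are
+illustrative)::
+
+    X-Git-Repo: myrepo
+    X-Git-Refname: refs/heads/master
+    X-Git-Reftype: branch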
+
+
+Customizing email contents
+--------------------------
+
+git-multimail mostly generates emails by expanding templates. The
+templates can be customized. To avoid the need to edit
+``git_multimail.py`` directly, the preferred way to change the templates
+is to write a separate Python script that imports ``git_multimail.py`` as
+a module, then replaces the templates in place. See the provided
+post-receive script for an example of how this is done.
+
+
+Customizing git-multimail for your environment
+----------------------------------------------
+
+git-multimail is mostly customized via an "environment" that describes
+the local environment in which Git is running. Two types of
+environment are built in:
+
+GenericEnvironment
+ a stand-alone Git repository.
+
+GitoliteEnvironment
+ a Git repository that is managed by gitolite_. For such
+ repositories, the identity of the pusher is read from
+ environment variable $GL_USER, the name of the repository is read
+ from $GL_REPO (if it is not overridden by multimailhook.reponame),
+ and the From: header value is optionally read from gitolite.conf
+ (see multimailhook.from).
+
+By default, git-multimail assumes GitoliteEnvironment if $GL_USER and
+$GL_REPO are set, and otherwise assumes GenericEnvironment.
+Alternatively, you can choose one of these two environments explicitly
+by setting a ``multimailhook.environment`` config setting (which can
+have the value `generic` or `gitolite`) or by passing an --environment
+option to the script.
+
+If you need to customize the script in ways that are not supported by
+the existing environments, you can define your own environment
+class using arbitrary Python code. To do so, you need to import
+``git_multimail.py`` as a Python module, as demonstrated by the example
+post-receive script. Then implement your environment class; it should
+usually inherit from one of the existing Environment classes and
+possibly one or more of the EnvironmentMixin classes. Then set the
+``environment`` variable to an instance of your own environment class
+and pass it to ``run_as_post_receive_hook()``.
+
+The standard environment classes, GenericEnvironment and
+GitoliteEnvironment, are in fact themselves put together out of a
+number of mixin classes, each of which handles one aspect of the
+customization. For the finest control over your configuration, you
+can specify exactly which mixin classes your own environment class
+should inherit from, and override individual methods (or even add your
+own mixin classes) to implement entirely new behaviors. If you
+implement any mixins that might be useful to other people, please
+consider sharing them with the community!
+
+
+Getting involved
+----------------
+
+Please read `<CONTRIBUTING.rst>`__ for instructions on how to
+contribute to git-multimail.
+
+
+Footnotes
+---------
+
+.. [1] Because of the way information is passed to update hooks, the
+ script's method of determining whether a commit has already
+ been seen does not work when it is used as an ``update`` script.
+ In particular, no notification email will be generated for a
+ new commit that is added to multiple references in the same
+ push. A workaround is to use --force-send to force sending the
+ emails.
+
+.. _gitolite: https://github.com/sitaramc/gitolite
config multimailhook.mailingList = # Where emails should be sent
config multimailhook.from = # From address to use
+Note that by default, gitolite forbids ``<`` and ``>`` in variable
+values (for security/paranoia reasons, see
+`compensating for UNSAFE_PATT
+<http://gitolite.com/gitolite/git-config/index.html#compensating-for-unsafe95patt>`__
+in gitolite's documentation for explanations and a way to disable
+this). As a consequence, you will not be able to use ``First Last
+<First.Last@example.com>`` as a recipient email, but specifying
+``First.Last@example.com`` alone works.
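+
+For example, a ``gitolite.conf`` line like the following (the address
+is illustrative) is accepted with the default settings::
+
+    config multimailhook.from = First.Last@example.com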
+
Obviously, you can customize all parameters on a per-repository basis by
adding these ``config multimailhook.*`` lines in the section
corresponding to a repository or set of repositories.
#! /usr/bin/env python
-__version__ = '1.4.0'
+__version__ = '1.5.0'
# Copyright (c) 2015-2016 Matthieu Moy and others
# Copyright (c) 2012-2014 Michael Haggerty and others
# Python < 2.6 do not have ssl, but that's OK if we don't use it.
pass
import time
-import cgi
+
+import uuid
+import base64
PYTHON3 = sys.version_info >= (3, 0)
for element in iterable:
if not element:
return False
- return True
+ return True
def is_ascii(s):
return out.decode(sys.getdefaultencoding())
except UnicodeEncodeError:
return out.decode(ENCODING)
+
+ import html
+
+ def html_escape(s):
+ return html.escape(s)
+
else:
def is_string(s):
try:
def next(it):
return it.next()
+ import cgi
+
+ def html_escape(s):
+ return cgi.escape(s, True)
try:
from email.charset import Charset
Message-ID: %(msgid)s
From: %(fromaddr)s
Reply-To: %(reply_to)s
+Thread-Index: %(thread_index)s
X-Git-Host: %(fqdn)s
X-Git-Repo: %(repo_shortname)s
X-Git-Refname: %(refname)s
Reply-To: %(reply_to)s
In-Reply-To: %(reply_to_msgid)s
References: %(reply_to_msgid)s
+Thread-Index: %(thread_index)s
X-Git-Host: %(fqdn)s
X-Git-Repo: %(repo_shortname)s
X-Git-Refname: %(refname)s
def __eq__(self, other):
return isinstance(other, GitObject) and self.sha1 == other.sha1
+ def __ne__(self, other):
+ return not self == other
+
def __hash__(self):
return hash(self.sha1)
if html_escape_val:
for k in values:
if is_string(values[k]):
- values[k] = cgi.escape(values[k], True)
+ values[k] = html_escape(values[k])
for line in template.splitlines(True):
yield line % values
raise NotImplementedError()
- def generate_email_body(self):
+ def generate_email_body(self, push):
"""Generate the main part of the email body, a line at a time.
The text in the body might be truncated after a specified
yield "<pre style='margin:0'>\n"
for line in lines:
- yield cgi.escape(line)
+ yield html_escape(line)
yield '</pre>\n'
else:
fgcolor = '404040'
# Chop the trailing LF, we don't want it inside <pre>.
- line = cgi.escape(line[:-1])
+ line = html_escape(line[:-1])
if bgcolor or fgcolor:
style = 'display:block; white-space:pre;'
self.author = read_git_output(['log', '--no-walk', '--format=%aN <%aE>', self.rev.sha1])
self.recipients = self.environment.get_revision_recipients(self)
+ # -s is short for --no-patch, but -s works on older git's (e.g. 1.7)
+ self.parents = read_git_lines(['show', '-s', '--format=%P',
+ self.rev.sha1])[0].split()
+
self.cc_recipients = ''
if self.environment.get_scancommitforcc():
self.cc_recipients = ', '.join(to.strip() for to in self._cc_recipients())
oneline = oneline[:max_subject_length - 6] + ' [...]'
values['rev'] = self.rev.sha1
+ values['parents'] = ' '.join(self.parents)
values['rev_short'] = self.rev.short
values['change_type'] = self.change_type
values['refname'] = self.refname
values['short_refname'] = self.reference_change.short_refname
values['refname_type'] = self.reference_change.refname_type
values['reply_to_msgid'] = self.reference_change.msgid
+ values['thread_index'] = self.reference_change.thread_index
values['num'] = self.num
values['tot'] = self.tot
values['recipients'] = self.recipients
old=old, new=new, rev=rev,
)
+ @staticmethod
+ def make_thread_index():
+ """Return a string appropriate for the Thread-Index header,
+ needed by MS Outlook to get threading right.
+
+ The format is (base64-encoded):
+ - 1 byte must be 1
+ - 5 bytes encode a date (hardcoded here)
+ - 16 bytes for a globally unique identifier
+
+ FIXME: Unfortunately, even with the Thread-Index field, MS
+ Outlook doesn't seem to do the threading reliably (see
+ https://github.com/git-multimail/git-multimail/pull/194).
+ """
+ thread_index = b'\x01\x00\x00\x12\x34\x56' + uuid.uuid4().bytes
+ return base64.standard_b64encode(thread_index).decode('ascii')
+
def __init__(self, environment, refname, short_refname, old, new, rev):
Change.__init__(self, environment)
self.change_type = {
self.new = new
self.rev = rev
self.msgid = make_msgid()
+ self.thread_index = self.make_thread_index()
self.diffopts = environment.diffopts
self.graphopts = environment.graphopts
self.logopts = environment.logopts
values['refname'] = self.refname
values['short_refname'] = self.short_refname
values['msgid'] = self.msgid
+ values['thread_index'] = self.thread_index
values['recipients'] = self.recipients
values['oldrev'] = str(self.old)
values['oldrev_short'] = self.old.short
def __init__(self, environment):
self.environment = environment
+ def close(self):
+ pass
+
def send(self, lines, to_addrs):
"""Send an email consisting of lines.
self.username = smtpuser
self.password = smtppass
self.smtpcacerts = smtpcacerts
+ self.loggedin = False
try:
def call(klass, server, timeout):
try:
% (self.smtpserver, sys.exc_info()[1]))
sys.exit(1)
- def __del__(self):
+ def close(self):
if hasattr(self, 'smtp'):
self.smtp.quit()
del self.smtp
+ def __del__(self):
+ self.close()
+
def send(self, lines, to_addrs):
try:
if self.username or self.password:
- self.smtp.login(self.username, self.password)
+ if not self.loggedin:
+ self.smtp.login(self.username, self.password)
+ self.loggedin = True
msg = ''.join(lines)
# turn comma-separated list into Python list if needed.
if is_string(to_addrs):
to_addrs = [email for (name, email) in getaddresses([to_addrs])]
self.smtp.sendmail(self.envelopesender, to_addrs, msg)
+ except socket.timeout:
+ self.environment.get_logger().error(
+ '*** Error sending email ***\n'
+ '*** SMTP server timed out (timeout is %s)\n'
+ % self.smtpservertimeout)
except smtplib.SMTPResponseException:
err = sys.exc_info()[1]
self.environment.get_logger().error(
SEPARATOR = '=' * 75 + '\n'
- def __init__(self, f):
+ def __init__(self, f, environment=None):
+ super(OutputMailer, self).__init__(environment=environment)
self.f = f
def send(self, lines, to_addrs):
self.html_in_footer = False
self.commitBrowseURL = None
self.maxcommitemails = 500
+ self.excludemergerevisions = False
self.diffopts = ['--stat', '--summary', '--find-copies-harder']
self.graphopts = ['--oneline', '--decorate']
self.logopts = []
self.commitBrowseURL = config.get('commitBrowseURL')
+ self.excludemergerevisions = config.get('excludeMergeRevisions')
+
maxcommitemails = config.get('maxcommitemails')
if maxcommitemails is not None:
try:
return self.osenv.get('GL_USER', 'unknown user')
-class GitoliteEnvironmentLowPrecMixin(Environment):
+class GitoliteEnvironmentLowPrecMixin(
+ ConfigEnvironmentMixin,
+ Environment):
+
def get_repo_shortname(self):
# The gitolite environment variable $GL_REPO is a pretty good
# repo_shortname (though it's probably not as good as a value
super(GitoliteEnvironmentLowPrecMixin, self).get_repo_shortname()
)
+ @staticmethod
+ def _compile_regex(re_template):
+ return (
+ re.compile(re_template % x)
+ for x in (
+ r'BEGIN\s+USER\s+EMAILS',
+ r'([^\s]+)\s+(.*)',
+ r'END\s+USER\s+EMAILS',
+ ))
+
def get_fromaddr(self, change=None):
GL_USER = self.osenv.get('GL_USER')
if GL_USER is not None:
GL_CONF = self.osenv.get(
'GL_CONF',
os.path.join(GL_ADMINDIR, 'conf', 'gitolite.conf'))
+
+ mailaddress_map = self.config.get('MailaddressMap')
+ # If relative, consider relative to GL_CONF:
+ if mailaddress_map:
+ mailaddress_map = os.path.join(os.path.dirname(GL_CONF),
+ mailaddress_map)
+ if os.path.isfile(mailaddress_map):
+ f = open(mailaddress_map, 'rU')
+ try:
+ # Leading '#' is optional
+ re_begin, re_user, re_end = self._compile_regex(
+ r'^(?:\s*#)?\s*%s\s*$')
+ for l in f:
+ l = l.rstrip('\n')
+ if re_begin.match(l) or re_end.match(l):
+ continue # Ignore these lines
+ m = re_user.match(l)
+ if m:
+ if m.group(1) == GL_USER:
+ return m.group(2)
+ else:
+ continue # Not this user, but not an error
+ raise ConfigurationException(
+ "Syntax error in mail address map.\n"
+ "Check file {}.\n"
+ "Line: {}".format(mailaddress_map, l))
+
+ finally:
+ f.close()
+
if os.path.isfile(GL_CONF):
f = open(GL_CONF, 'rU')
try:
in_user_emails_section = False
- re_template = r'^\s*#\s*%s\s*$'
- re_begin, re_user, re_end = (
- re.compile(re_template % x)
- for x in (
- r'BEGIN\s+USER\s+EMAILS',
- re.escape(GL_USER) + r'\s+(.*)',
- r'END\s+USER\s+EMAILS',
- ))
+ re_begin, re_user, re_end = self._compile_regex(
+ r'^\s*#\s*%s\s*$')
for l in f:
l = l.rstrip('\n')
if not in_user_emails_section:
if re_end.match(l):
break
m = re_user.match(l)
- if m:
- return m.group(1)
+ if m and m.group(1) == GL_USER:
+ return m.group(2)
finally:
f.close()
return super(GitoliteEnvironmentLowPrecMixin, self).get_fromaddr(change)
self.__repo = repo
def get_pusher(self):
- return re.match('(.*?)\s*<', self.__user).group(1)
+ return re.match(r'(.*?)\s*<', self.__user).group(1)
def get_pusher_email(self):
return self.__user
if self.__submitter.find('<') != -1:
# Submitter has a configured email, we transformed
# __submitter into an RFC 2822 string already.
- return re.match('(.*?)\s*<', self.__submitter).group(1)
+ return re.match(r'(.*?)\s*<', self.__submitter).group(1)
else:
# Submitter has no configured email, it's just his name.
return self.__submitter
for (num, sha1) in enumerate(sha1s):
rev = Revision(change, GitObject(sha1), num=num + 1, tot=len(sha1s))
+ if len(rev.parents) > 1 and change.environment.excludemergerevisions:
+ # skipping a merge commit
+ continue
if not rev.recipients and rev.cc_recipients:
change.environment.log_msg('*** Replacing Cc: with To:')
rev.recipients = rev.cc_recipients
changes.append(
ReferenceChange.create(environment, oldrev, newrev, refname)
)
- if changes:
- push = Push(environment, changes)
+ if not changes:
+ mailer.close()
+ return
+ push = Push(environment, changes)
+ try:
push.send_emails(mailer, body_filter=environment.filter_body)
- if hasattr(mailer, '__del__'):
- mailer.__del__()
+ finally:
+ mailer.close()
def run_as_update_hook(environment, mailer, refname, oldrev, newrev, force_send=False):
refname,
),
]
+ if not changes:
+ mailer.close()
+ return
push = Push(environment, changes, force_send)
- push.send_emails(mailer, body_filter=environment.filter_body)
- if hasattr(mailer, '__del__'):
- mailer.__del__()
+ try:
+ push.send_emails(mailer, body_filter=environment.filter_body)
+ finally:
+ mailer.close()
def check_ref_filter(environment):
low_prec_mixin = known_env['lowprec']
environment_mixins.append(low_prec_mixin)
environment_mixins.append(Environment)
- klass_name = env_name.capitalize() + 'Environement'
+ klass_name = env_name.capitalize() + 'Environment'
environment_klass = type(
klass_name,
tuple(environment_mixins),
environment, 'git_multimail.error', environment.error_log_file, logging.ERROR)
self.loggers.append(error_log_file)
- def info(self, msg):
+ def info(self, msg, *args, **kwargs):
for l in self.loggers:
- l.info(msg)
+ l.info(msg, *args, **kwargs)
- def debug(self, msg):
+ def debug(self, msg, *args, **kwargs):
for l in self.loggers:
- l.debug(msg)
+ l.debug(msg, *args, **kwargs)
- def warning(self, msg):
+ def warning(self, msg, *args, **kwargs):
for l in self.loggers:
- l.warning(msg)
+ l.warning(msg, *args, **kwargs)
- def error(self, msg):
+ def error(self, msg, *args, **kwargs):
for l in self.loggers:
- l.error(msg)
+ l.error(msg, *args, **kwargs)
def main(args):
show_env(environment, sys.stderr)
if options.stdout or environment.stdout:
- mailer = OutputMailer(sys.stdout)
+ mailer = OutputMailer(sys.stdout, environment)
else:
mailer = choose_mailer(config, environment)
sys.stderr.write(msg)
sys.exit(1)
+
if __name__ == '__main__':
main(sys.argv[1:])
try:
read_output(
- ['git', 'config']
- + local_option
- + ['--get-regexp', '^%s\.' % (section,)]
+ ['git', 'config'] +
+ local_option +
+ ['--get-regexp', '^%s\.' % (section,)]
)
- except CommandError, e:
+ except CommandError:
+ t, e, traceback = sys.exc_info()
if e.retcode == 1:
# This means that no settings were found.
return True
sys.stderr.write(
'...copying "%s.%s" to "%s.%s"\n' % (old.section, name, new.section, name)
)
- new.set_recipients(name, old.get_recipients(name))
+ old_recipients = old.get_all(name, default=None)
+ old_recipients = ', '.join(o.strip() for o in old_recipients)
+ new.set_recipients(name, old_recipients)
if strict:
sys.stderr.write(
"""
import sys
-import os
# If necessary, add the path to the directory containing
# git_multimail.py to the Python path as follows. (This is not
# Use Python's smtplib to send emails. Both arguments are required.
#mailer = git_multimail.SMTPMailer(
+# environment=environment,
# envelopesender='git-repo@example.com',
# # The smtpserver argument can also include a port number; e.g.,
# # smtpserver='mail.example.com:25'
}
static int apply_single_file_filter(const char *path, const char *src, size_t len, int fd,
- struct strbuf *dst, const char *cmd)
+ struct strbuf *dst, const char *cmd)
{
/*
* Create a pipeline to have the command filter the buffer's
static void handle_filter_error(const struct strbuf *filter_status,
struct cmd2process *entry,
- const unsigned int wanted_capability) {
+ const unsigned int wanted_capability)
+{
if (!strcmp(filter_status->buf, "error"))
; /* The filter signaled a problem with the file. */
else if (!strcmp(filter_status->buf, "abort") && wanted_capability) {
}
static int ident_to_git(const char *path, const char *src, size_t len,
- struct strbuf *buf, int ident)
+ struct strbuf *buf, int ident)
{
char *dst, *dollar;
}
static int ident_to_worktree(const char *path, const char *src, size_t len,
- struct strbuf *buf, int ident)
+ struct strbuf *buf, int ident)
{
struct object_id oid;
char *to_free = NULL, *dollar, *spc;
}
static int read_request(FILE *fh, struct credential *c,
- struct strbuf *action, int *timeout) {
+ struct strbuf *action, int *timeout)
+{
static struct strbuf item = STRBUF_INIT;
const char *p;
return error(_("color moved setting must be one of 'no', 'default', 'blocks', 'zebra', 'dimmed-zebra', 'plain'"));
}
-static int parse_color_moved_ws(const char *arg)
+static unsigned parse_color_moved_ws(const char *arg)
{
int ret = 0;
struct string_list l = STRING_LIST_INIT_DUP;
strbuf_addstr(&sb, i->string);
strbuf_trim(&sb);
- if (!strcmp(sb.buf, "ignore-space-change"))
+ if (!strcmp(sb.buf, "no"))
+ ret = 0;
+ else if (!strcmp(sb.buf, "ignore-space-change"))
ret |= XDF_IGNORE_WHITESPACE_CHANGE;
else if (!strcmp(sb.buf, "ignore-space-at-eol"))
ret |= XDF_IGNORE_WHITESPACE_AT_EOL;
ret |= XDF_IGNORE_WHITESPACE;
else if (!strcmp(sb.buf, "allow-indentation-change"))
ret |= COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE;
- else
- error(_("ignoring unknown color-moved-ws mode '%s'"), sb.buf);
+ else {
+ ret |= COLOR_MOVED_WS_ERROR;
+ error(_("unknown color-moved-ws mode '%s', possible values are 'ignore-space-change', 'ignore-space-at-eol', 'ignore-all-space', 'allow-indentation-change'"), sb.buf);
+ }
strbuf_release(&sb);
}
if ((ret & COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE) &&
- (ret & XDF_WHITESPACE_FLAGS))
- die(_("color-moved-ws: allow-indentation-change cannot be combined with other white space modes"));
+ (ret & XDF_WHITESPACE_FLAGS)) {
+ error(_("color-moved-ws: allow-indentation-change cannot be combined with other whitespace modes"));
+ ret |= COLOR_MOVED_WS_ERROR;
+ }
string_list_clear(&l, 0);
return 0;
}
if (!strcmp(var, "diff.colormovedws")) {
- int cm = parse_color_moved_ws(value);
- if (cm < 0)
+ unsigned cm = parse_color_moved_ws(value);
+ if (cm & COLOR_MOVED_WS_ERROR)
return -1;
diff_color_moved_ws_default = cm;
return 0;
if (done_preparing)
return external_diff_cmd;
- external_diff_cmd = getenv("GIT_EXTERNAL_DIFF");
+ external_diff_cmd = xstrdup_or_null(getenv("GIT_EXTERNAL_DIFF"));
if (!external_diff_cmd)
external_diff_cmd = external_diff_cmd_cfg;
done_preparing = 1;
const char *line;
int len;
int flags;
+ int indent_off; /* Offset to first non-whitespace character */
+ int indent_width; /* The visual width of the indentation */
enum diff_symbol s;
};
#define EMITTED_DIFF_SYMBOL_INIT {NULL}
struct moved_entry *next_line;
};
-/**
- * The struct ws_delta holds white space differences between moved lines, i.e.
- * between '+' and '-' lines that have been detected to be a move.
- * The string contains the difference in leading white spaces, before the
- * rest of the line is compared using the white space config for move
- * coloring. The current_longer indicates if the first string in the
- * comparision is longer than the second.
- */
-struct ws_delta {
- char *string;
- unsigned int current_longer : 1;
-};
-#define WS_DELTA_INIT { NULL, 0 }
-
struct moved_block {
struct moved_entry *match;
- struct ws_delta wsd;
+ int wsd; /* The whitespace delta of this block */
};
static void moved_block_clear(struct moved_block *b)
{
- FREE_AND_NULL(b->wsd.string);
- b->match = NULL;
+ memset(b, 0, sizeof(*b));
}
-static int compute_ws_delta(const struct emitted_diff_symbol *a,
- const struct emitted_diff_symbol *b,
- struct ws_delta *out)
+#define INDENT_BLANKLINE INT_MIN
+
+static void fill_es_indent_data(struct emitted_diff_symbol *es)
{
- const struct emitted_diff_symbol *longer = a->len > b->len ? a : b;
- const struct emitted_diff_symbol *shorter = a->len > b->len ? b : a;
- int d = longer->len - shorter->len;
+ unsigned int off = 0, i;
+ int width = 0, tab_width = es->flags & WS_TAB_WIDTH_MASK;
+ const char *s = es->line;
+ const int len = es->len;
- if (strncmp(longer->line + d, shorter->line, shorter->len))
+ /* skip any \v \f \r at start of indentation */
+ while (s[off] == '\f' || s[off] == '\v' ||
+ (s[off] == '\r' && off < len - 1))
+ off++;
+
+ /* calculate the visual width of indentation */
+ while(1) {
+ if (s[off] == ' ') {
+ width++;
+ off++;
+ } else if (s[off] == '\t') {
+ width += tab_width - (width % tab_width);
+ while (s[++off] == '\t')
+ width += tab_width;
+ } else {
+ break;
+ }
+ }
+
+ /* check if this line is blank */
+ for (i = off; i < len; i++)
+ if (!isspace(s[i]))
+ break;
+
+ if (i == len) {
+ es->indent_width = INDENT_BLANKLINE;
+ es->indent_off = len;
+ } else {
+ es->indent_off = off;
+ es->indent_width = width;
+ }
+}
+
+static int compute_ws_delta(const struct emitted_diff_symbol *a,
+ const struct emitted_diff_symbol *b,
+ int *out)
+{
+ int a_len = a->len,
+ b_len = b->len,
+ a_off = a->indent_off,
+ a_width = a->indent_width,
+ b_off = b->indent_off,
+ b_width = b->indent_width;
+ int delta;
+
+ if (a_width == INDENT_BLANKLINE && b_width == INDENT_BLANKLINE) {
+ *out = INDENT_BLANKLINE;
+ return 1;
+ }
+
+ if (a->s == DIFF_SYMBOL_PLUS)
+ delta = a_width - b_width;
+ else
+ delta = b_width - a_width;
+
+ if (a_len - a_off != b_len - b_off ||
+ memcmp(a->line + a_off, b->line + b_off, a_len - a_off))
return 0;
- out->string = xmemdupz(longer->line, d);
- out->current_longer = (a == longer);
+ *out = delta;
return 1;
}
int n)
{
struct emitted_diff_symbol *l = &o->emitted_symbols->buf[n];
- int al = cur->es->len, cl = l->len;
+ int al = cur->es->len, bl = match->es->len, cl = l->len;
const char *a = cur->es->line,
*b = match->es->line,
*c = l->line;
-
- int wslen;
+ int a_off = cur->es->indent_off,
+ a_width = cur->es->indent_width,
+ c_off = l->indent_off,
+ c_width = l->indent_width;
+ int delta;
/*
- * We need to check if 'cur' is equal to 'match'.
- * As those are from the same (+/-) side, we do not need to adjust for
- * indent changes. However these were found using fuzzy matching
- * so we do have to check if they are equal.
+ * We need to check if 'cur' is equal to 'match'. As those
+ * are from the same (+/-) side, we do not need to adjust for
+ * indent changes. However these were found using fuzzy
+ * matching so we do have to check if they are equal. Here we
+ * just check the lengths. We delay calling memcmp() to check
+ * the contents until later as if the length comparison for a
+ * and c fails we can avoid the call all together.
*/
- if (strcmp(a, b))
+ if (al != bl)
return 1;
- if (!pmb->wsd.string)
- /*
- * The white space delta is not active? This can happen
- * when we exit early in this function.
- */
- return 1;
+ /* If 'l' and 'cur' are both blank then they match. */
+ if (a_width == INDENT_BLANKLINE && c_width == INDENT_BLANKLINE)
+ return 0;
/*
- * The indent changes of the block are known and stored in
- * pmb->wsd; however we need to check if the indent changes of the
- * current line are still the same as before.
- *
- * To do so we need to compare 'l' to 'cur', adjusting the
- * one of them for the white spaces, depending which was longer.
+ * The indent changes of the block are known and stored in pmb->wsd;
+ * however we need to check if the indent changes of the current line
+ * match those of the current block and that the text of 'l' and 'cur'
+ * after the indentation match.
*/
+ if (cur->es->s == DIFF_SYMBOL_PLUS)
+ delta = a_width - c_width;
+ else
+ delta = c_width - a_width;
- wslen = strlen(pmb->wsd.string);
- if (pmb->wsd.current_longer) {
- c += wslen;
- cl -= wslen;
- } else {
- a += wslen;
- al -= wslen;
- }
-
- if (al != cl || memcmp(a, c, al))
- return 1;
+ /*
+ * If the previous lines of this block were all blank then set its
+ * whitespace delta.
+ */
+ if (pmb->wsd == INDENT_BLANKLINE)
+ pmb->wsd = delta;
- return 0;
+ return !(delta == pmb->wsd && al - a_off == cl - c_off &&
+ !memcmp(a, b, al) && !
+ memcmp(a + a_off, c + c_off, al - a_off));
}
static int moved_entry_cmp(const void *hashmap_cmp_fn_data,
continue;
}
+ if (o->color_moved_ws_handling &
+ COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE)
+ fill_es_indent_data(&o->emitted_symbols->buf[n]);
key = prepare_entry(o, n);
if (prev_line && prev_line->es->s == o->emitted_symbols->buf[n].s)
prev_line->next_line = key;
if (lp < pmb_nr && rp > -1 && lp < rp) {
pmb[lp] = pmb[rp];
- pmb[rp].match = NULL;
- pmb[rp].wsd.string = NULL;
+ memset(&pmb[rp], 0, sizeof(pmb[rp]));
rp--;
lp++;
}
* The last block consists of the (n - block_length)'th line up to but not
* including the nth line.
*
+ * Returns 0 if the last block is empty or is unset by this function, non zero
+ * otherwise.
+ *
* NEEDSWORK: This uses the same heuristic as blame_entry_score() in blame.c.
* Think of a way to unify them.
*/
-static void adjust_last_block(struct diff_options *o, int n, int block_length)
+static int adjust_last_block(struct diff_options *o, int n, int block_length)
{
int i, alnum_count = 0;
if (o->color_moved == COLOR_MOVED_PLAIN)
- return;
+ return block_length;
for (i = 1; i < block_length + 1; i++) {
const char *c = o->emitted_symbols->buf[n - i].line;
for (; *c; c++) {
continue;
alnum_count++;
if (alnum_count >= COLOR_MOVED_MIN_ALNUM_COUNT)
- return;
+ return 1;
}
}
for (i = 1; i < block_length + 1; i++)
o->emitted_symbols->buf[n - i].flags &= ~DIFF_SYMBOL_MOVED_LINE;
+ return 0;
}
/* Find blocks of moved code, delegate actual coloring decision to helper */
{
struct moved_block *pmb = NULL; /* potentially moved blocks */
int pmb_nr = 0, pmb_alloc = 0;
- int n, flipped_block = 1, block_length = 0;
+ int n, flipped_block = 0, block_length = 0;
for (n = 0; n < o->emitted_symbols->nr; n++) {
struct moved_entry *key;
struct moved_entry *match = NULL;
struct emitted_diff_symbol *l = &o->emitted_symbols->buf[n];
+ enum diff_symbol last_symbol = 0;
switch (l->s) {
case DIFF_SYMBOL_PLUS:
free(key);
break;
default:
- flipped_block = 1;
+ flipped_block = 0;
}
if (!match) {
moved_block_clear(&pmb[i]);
pmb_nr = 0;
block_length = 0;
+ flipped_block = 0;
+ last_symbol = l->s;
continue;
}
- l->flags |= DIFF_SYMBOL_MOVED_LINE;
-
- if (o->color_moved == COLOR_MOVED_PLAIN)
+ if (o->color_moved == COLOR_MOVED_PLAIN) {
+ last_symbol = l->s;
+ l->flags |= DIFF_SYMBOL_MOVED_LINE;
continue;
+ }
if (o->color_moved_ws_handling &
COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE)
&pmb[pmb_nr].wsd))
pmb[pmb_nr++].match = match;
} else {
- pmb[pmb_nr].wsd.string = NULL;
+ pmb[pmb_nr].wsd = 0;
pmb[pmb_nr++].match = match;
}
}
- flipped_block = (flipped_block + 1) % 2;
+ if (adjust_last_block(o, n, block_length) &&
+ pmb_nr && last_symbol != l->s)
+ flipped_block = (flipped_block + 1) % 2;
+ else
+ flipped_block = 0;
- adjust_last_block(o, n, block_length);
block_length = 0;
}
- block_length++;
-
- if (flipped_block && o->color_moved != COLOR_MOVED_BLOCKS)
- l->flags |= DIFF_SYMBOL_MOVED_LINE_ALT;
+ if (pmb_nr) {
+ block_length++;
+ l->flags |= DIFF_SYMBOL_MOVED_LINE;
+ if (flipped_block && o->color_moved != COLOR_MOVED_BLOCKS)
+ l->flags |= DIFF_SYMBOL_MOVED_LINE_ALT;
+ }
+ last_symbol = l->s;
}
adjust_last_block(o, n, block_length);
static void emit_diff_symbol(struct diff_options *o, enum diff_symbol s,
const char *line, int len, unsigned flags)
{
- struct emitted_diff_symbol e = {line, len, flags, s};
+ struct emitted_diff_symbol e = {line, len, flags, 0, 0, s};
if (o->emitted_symbols)
append_emitted_diff_symbol(o, &e);
strbuf_release(&msgbuf);
}
-static struct diff_tempfile *claim_diff_tempfile(void) {
+static struct diff_tempfile *claim_diff_tempfile(void)
+{
int i;
for (i = 0; i < ARRAY_SIZE(diff_temp); i++)
if (!diff_temp[i].name)
o->found_changes = 1;
} else {
/* Crazy xdl interfaces.. */
- const char *diffopts = getenv("GIT_DIFF_OPTS");
+ const char *diffopts;
const char *v;
xpparam_t xpp;
xdemitconf_t xecfg;
xecfg.flags |= XDL_EMIT_FUNCCONTEXT;
if (pe)
xdiff_set_find_func(&xecfg, pe->pattern, pe->cflags);
+
+ diffopts = getenv("GIT_DIFF_OPTS");
if (!diffopts)
;
else if (skip_prefix(diffopts, "--unified=", &v))
xecfg.ctxlen = strtoul(v, NULL, 10);
else if (skip_prefix(diffopts, "-u", &v))
xecfg.ctxlen = strtoul(v, NULL, 10);
+
if (o->word_diff)
init_diff_words_data(&ecbdata, o, one, two);
if (xdi_diff_outf(&mf1, &mf2, NULL, fn_out_consume,
return 0;
}
-static void enable_patch_output(int *fmt) {
+static void enable_patch_output(int *fmt)
+{
*fmt &= ~DIFF_FORMAT_NO_OUTPUT;
*fmt |= DIFF_FORMAT_PATCH;
}
else if (skip_prefix(arg, "--color-moved=", &arg)) {
int cm = parse_color_moved(arg);
if (cm < 0)
- die("bad --color-moved argument: %s", arg);
+ return error("bad --color-moved argument: %s", arg);
options->color_moved = cm;
+ } else if (!strcmp(arg, "--no-color-moved-ws")) {
+ options->color_moved_ws_handling = 0;
} else if (skip_prefix(arg, "--color-moved-ws=", &arg)) {
- options->color_moved_ws_handling = parse_color_moved_ws(arg);
+ unsigned cm = parse_color_moved_ws(arg);
+ if (cm & COLOR_MOVED_WS_ERROR)
+ return -1;
+ options->color_moved_ws_handling = cm;
} else if (skip_to_optional_arg_default(arg, "--color-words", &options->word_regex, NULL)) {
options->use_color = 1;
options->word_diff = DIFF_WORDS_COLOR;
/* XDF_WHITESPACE_FLAGS regarding block detection are set at 2, 3, 4 */
#define COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE (1<<5)
- int color_moved_ws_handling;
+ #define COLOR_MOVED_WS_ERROR (1<<0)
+ unsigned color_moved_ws_handling;
struct repository *repo;
};
if (textconv_one == textconv_two && diff_unmodified_pair(p))
return 0;
+ if ((o->pickaxe_opts & DIFF_PICKAXE_KIND_G) &&
+ !o->flags.text &&
+ ((!textconv_one && diff_filespec_is_binary(o->repo, p->one)) ||
+ (!textconv_two && diff_filespec_is_binary(o->repo, p->two))))
+ return 0;
+
mf1.size = fill_textconv(o->repo, textconv_one, p->one, &mf1.ptr);
mf2.size = fill_textconv(o->repo, textconv_two, p->two, &mf2.ptr);
#define DO_MATCH_DIRECTORY (1<<1)
#define DO_MATCH_SUBMODULE (1<<2)
-static int match_attrs(const struct index_state *istate,
- const char *name, int namelen,
- const struct pathspec_item *item)
-{
- int i;
- char *to_free = NULL;
-
- if (name[namelen])
- name = to_free = xmemdupz(name, namelen);
-
- git_check_attr(istate, name, item->attr_check);
-
- free(to_free);
-
- for (i = 0; i < item->attr_match_nr; i++) {
- const char *value;
- int matched;
- enum attr_match_mode match_mode;
-
- value = item->attr_check->items[i].value;
- match_mode = item->attr_match[i].match_mode;
-
- if (ATTR_TRUE(value))
- matched = (match_mode == MATCH_SET);
- else if (ATTR_FALSE(value))
- matched = (match_mode == MATCH_UNSET);
- else if (ATTR_UNSET(value))
- matched = (match_mode == MATCH_UNSPECIFIED);
- else
- matched = (match_mode == MATCH_VALUE &&
- !strcmp(item->attr_match[i].value, value));
- if (!matched)
- return 0;
- }
-
- return 1;
-}
-
/*
* Does 'match' match the given name?
* A match is found if
strncmp(item->match, name - prefix, item->prefix))
return 0;
- if (item->attr_match_nr && !match_attrs(istate, name, namelen, item))
+ if (item->attr_match_nr &&
+ !match_pathspec_attrs(istate, name, namelen, item))
return 0;
/* If the match was just the prefix, we matched */
return !available;
}
-int finish_delayed_checkout(struct checkout *state)
+int finish_delayed_checkout(struct checkout *state, int *nr_checkouts)
{
int errs = 0;
unsigned delayed_object_count;
ce = index_file_exists(state->istate, path->string,
strlen(path->string), 0);
if (ce) {
- errs |= checkout_entry(ce, state, NULL);
+ errs |= checkout_entry(ce, state, NULL, nr_checkouts);
filtered_bytes += ce->ce_stat_data.sd_size;
display_throughput(progress, filtered_bytes);
} else
* its name is returned in topath[], which must be able to hold at
* least TEMPORARY_FILENAME_LENGTH bytes long.
*/
-int checkout_entry(struct cache_entry *ce,
- const struct checkout *state, char *topath)
+int checkout_entry(struct cache_entry *ce, const struct checkout *state,
+ char *topath, int *nr_checkouts)
{
static struct strbuf path = STRBUF_INIT;
struct stat st;
return 0;
create_directories(path.buf, path.len, state);
+ if (nr_checkouts)
+ (*nr_checkouts)++;
return write_entry(ce, path.buf, state, 0);
}
static char *git_namespace;
-static const char *super_prefix;
+static char *super_prefix;
/*
* Repository-local GIT_* environment variables; see cache.h for details.
{
static int initialized;
if (!initialized) {
- super_prefix = getenv(GIT_SUPER_PREFIX_ENVIRONMENT);
+ super_prefix = xstrdup_or_null(getenv(GIT_SUPER_PREFIX_ENVIRONMENT));
initialized = 1;
}
return super_prefix;
packet_buf_write(req_buf, "deepen-not %s", s->string);
}
}
+ if (args->deepen_relative)
+ packet_buf_write(req_buf, "deepen-relative\n");
}
static void add_wants(int no_dependents, const struct ref *wants, struct strbuf *req_buf)
static void receive_shallow_info(struct fetch_pack_args *args,
struct packet_reader *reader)
{
+ int line_received = 0;
+
process_section_header(reader, "shallow-info", 0);
while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
const char *arg;
if (get_oid_hex(arg, &oid))
die(_("invalid shallow line: %s"), reader->line);
register_shallow(the_repository, &oid);
+ line_received = 1;
continue;
}
if (skip_prefix(reader->line, "unshallow ", &arg)) {
die(_("error in object: %s"), reader->line);
if (unregister_shallow(&oid))
die(_("no shallow found: %s"), reader->line);
+ line_received = 1;
continue;
}
die(_("expected shallow/unshallow, got %s"), reader->line);
reader->status != PACKET_READ_DELIM)
die(_("error processing shallow info: %d"), reader->status);
- setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
- args->deepen = 1;
+ if (line_received) {
+ setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
+ NULL);
+ args->deepen = 1;
+ }
}
static void receive_wanted_refs(struct packet_reader *reader,
#endif
#if defined(__CYGWIN__)
-#include "compat/cygwin.h"
+#include "compat/win32/path-utils.h"
#endif
#if defined(__MINGW32__)
/* pull in Windows compatibility stuff */
+#include "compat/win32/path-utils.h"
#include "compat/mingw.h"
#elif defined(_MSC_VER)
#include "compat/msvc.h"
#define query_user_email() NULL
#endif
+#ifdef __TANDEM
+#include <floss.h(floss_execl,floss_execlp,floss_execv,floss_execvp)>
+#include <floss.h(floss_getpwuid)>
+#ifndef NSIG
+/*
+ * NonStop NSE and NSX do not provide NSIG. SIGGUARDIAN(99) is the highest
+ * signal number known; it was found by inspecting the output of "kill -l",
+ * since signal.h does not list all signals as it should.
+ */
+# define NSIG 100
+#endif
+#endif
+
#if defined(__HP_cc) && (__HP_cc >= 61000)
#define NORETURN __attribute__((noreturn))
#define NORETURN_PTR
#ifdef NO_MEMMEM
#define memmem gitmemmem
void *gitmemmem(const void *haystack, size_t haystacklen,
- const void *needle, size_t needlelen);
+ const void *needle, size_t needlelen);
#endif
#ifdef OVERRIDE_STRDUP
m,merge! use merging strategies to rebase
i,interactive! let the user edit the list of commits to rebase
x,exec=! add exec lines after each commit of the editable list
+y=! same as --reschedule-failed-exec -x
k,keep-empty preserve empty commits during rebase
allow-empty-message allow rebasing commits with empty messages
stat! display a diffstat of what changed upstream
edit-todo! edit the todo list during an interactive rebase
quit! abort but keep HEAD where it is
show-current-patch! show the patch file being applied or merged
+reschedule-failed-exec automatically reschedule failed exec commands
"
. git-sh-setup
set_reflog_action rebase
keep_empty=
allow_empty_message=--allow-empty-message
signoff=
+reschedule_failed_exec=
test "$(git config --bool rebase.autosquash)" = "true" && autosquash=t
case "$(git config --bool commit.gpgsign)" in
true) gpg_sign_opt=-S ;;
*) gpg_sign_opt= ;;
esac
+test "$(git config --bool rebase.reschedulefailedexec)" = "true" &&
+reschedule_failed_exec=--reschedule-failed-exec
. git-rebase--common
read_basic_state () {
signoff="$(cat "$state_dir"/signoff)"
force_rebase=t
}
+ test -f "$state_dir"/reschedule-failed-exec &&
+ reschedule_failed_exec=t
}
finish_rebase () {
"$allow_empty_message" "$autosquash" "$verbose" \
"$force_rebase" "$onto_name" "$head_name" "$strategy" \
"$strategy_opts" "$cmd" "$switch_to" \
- "$allow_rerere_autoupdate" "$gpg_sign_opt" "$signoff"
+ "$allow_rerere_autoupdate" "$gpg_sign_opt" "$signoff" \
+ "$reschedule_failed_exec"
}
run_specific_rebase () {
cmd="${cmd}exec ${1#--exec=}${LF}"
test -z "$interactive_rebase" && interactive_rebase=implied
;;
+ -y*)
+ reschedule_failed_exec=--reschedule-failed-exec
+ cmd="${cmd}exec ${1#-y}${LF}"
+ test -z "$interactive_rebase" && interactive_rebase=implied
+ ;;
--interactive)
interactive_rebase=explicit
;;
--gpg-sign=*)
gpg_sign_opt="-S${1#--gpg-sign=}"
;;
+ --reschedule-failed-exec)
+ reschedule_failed_exec=--reschedule-failed-exec
+ ;;
+ --no-reschedule-failed-exec)
+ reschedule_failed_exec=
+ ;;
--)
shift
break
# git-rebase.txt caveats with "unless you know what you are doing"
test -n "$rebase_merges" &&
die "$(gettext "error: cannot combine '--preserve-merges' with '--rebase-merges'")"
+
+ test -n "$reschedule_failed_exec" &&
+ die "$(gettext "error: cannot combine '--preserve-merges' with '--reschedule-failed-exec'")"
fi
if test -n "$rebase_merges"
die_bad_access("p4 error: {0}".format(data))
else:
die_bad_access("unknown error")
+ elif code == "info":
+ return
else:
die_bad_access("unknown error code {0}".format(code))
author= author name and email address for patches without any
patches= path to the quilt patches
series= path to the quilt series file
+keep-non-patch Pass -b to git mailinfo
"
SUBDIRECTORY_ON=Yes
. git-sh-setup
shift
QUILT_SERIES="$1"
;;
+ --keep-non-patch)
+ MAILINFO_OPT="-b"
+ ;;
--)
shift
break;;
continue
fi
echo $patch_name
- git mailinfo "$tmp_msg" "$tmp_patch" \
+ git mailinfo $MAILINFO_OPT "$tmp_msg" "$tmp_patch" \
<"$QUILT_PATCHES/$patch_name" >"$tmp_info" || exit 3
test -s "$tmp_patch" || {
echo "Patch is empty. Was it split wrong?"
"$state_dir"/allow_rerere_autoupdate
test -n "$gpg_sign_opt" && echo "$gpg_sign_opt" > "$state_dir"/gpg_sign_opt
test -n "$signoff" && echo "$signoff" >"$state_dir"/signoff
+ test -n "$reschedule_failed_exec" && : > "$state_dir"/reschedule-failed-exec
}
apply_autostash () {
return 0;
}
-static void commit_pager_choice(void) {
+static void commit_pager_choice(void)
+{
switch (use_pager) {
case 0:
setenv("GIT_PAGER", "cat", 1);
#include "block-sha1/sha1.h"
#endif
+#if defined(SHA256_GCRYPT)
+#include "sha256/gcrypt.h"
+#elif defined(SHA256_OPENSSL)
+#include <openssl/sha.h>
+#else
+#include "sha256/block/sha256.h"
+#endif
+
#ifndef platform_SHA_CTX
/*
* platform's underlying implementation of SHA-1; could be OpenSSL,
#define git_SHA1_Update platform_SHA1_Update
#define git_SHA1_Final platform_SHA1_Final
+#ifndef platform_SHA256_CTX
+#define platform_SHA256_CTX SHA256_CTX
+#define platform_SHA256_Init SHA256_Init
+#define platform_SHA256_Update SHA256_Update
+#define platform_SHA256_Final SHA256_Final
+#endif
+
+#define git_SHA256_CTX platform_SHA256_CTX
+#define git_SHA256_Init platform_SHA256_Init
+#define git_SHA256_Update platform_SHA256_Update
+#define git_SHA256_Final platform_SHA256_Final
+
#ifdef SHA1_MAX_BLOCK_SIZE
#include "compat/sha1-chunked.h"
#undef git_SHA1_Update
#define GIT_HASH_UNKNOWN 0
/* SHA-1 */
#define GIT_HASH_SHA1 1
+/* SHA-256 */
+#define GIT_HASH_SHA256 2
/* Number of algorithms supported (including unknown). */
-#define GIT_HASH_NALGOS (GIT_HASH_SHA1 + 1)
+#define GIT_HASH_NALGOS (GIT_HASH_SHA256 + 1)
/* A suitably aligned type for stack allocations of hash contexts. */
union git_hash_ctx {
git_SHA_CTX sha1;
+ git_SHA256_CTX sha256;
};
typedef union git_hash_ctx git_hash_ctx;
/* The length of the hash in hex characters. */
size_t hexsz;
+ /* The block size of the hash. */
+ size_t blksz;
+
/* The hash initialization function. */
git_hash_init_fn init_fn;
};
extern const struct git_hash_algo hash_algos[GIT_HASH_NALGOS];
+/*
+ * Return a GIT_HASH_* constant based on the name. Returns GIT_HASH_UNKNOWN if
+ * the name doesn't match a known algorithm.
+ */
+int hash_algo_by_name(const char *name);
+/* Identical, except based on the format ID. */
+int hash_algo_by_id(uint32_t format_id);
+/* Identical, except for a pointer to struct git_hash_algo. */
+static inline int hash_algo_by_ptr(const struct git_hash_algo *p)
+{
+ return p - hash_algos;
+}
+
#endif
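
As a usage sketch (not part of the patch), a hypothetical caller could map a configured name to an algorithm and drive it through the function pointers, assuming the usual update_fn/final_fn counterparts to init_fn on struct git_hash_algo:

static int hash_buffer(const char *algo_name, const void *buf, size_t len,
		       unsigned char *out)
{
	int algo = hash_algo_by_name(algo_name);
	const struct git_hash_algo *algop;
	git_hash_ctx ctx;

	if (algo == GIT_HASH_UNKNOWN)
		return -1;
	algop = &hash_algos[algo];
	algop->init_fn(&ctx);
	algop->update_fn(&ctx, buf, len);
	algop->final_fn(out, &ctx);	/* "out" must hold algop->rawsz bytes */
	return 0;
}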
return ret;
}
-char *sha1_to_hex_r(char *buffer, const unsigned char *sha1)
+char *hash_to_hex_algop_r(char *buffer, const unsigned char *hash,
+ const struct git_hash_algo *algop)
{
static const char hex[] = "0123456789abcdef";
char *buf = buffer;
int i;
- for (i = 0; i < the_hash_algo->rawsz; i++) {
- unsigned int val = *sha1++;
+ for (i = 0; i < algop->rawsz; i++) {
+ unsigned int val = *hash++;
*buf++ = hex[val >> 4];
*buf++ = hex[val & 0xf];
}
return buffer;
}
+char *sha1_to_hex_r(char *buffer, const unsigned char *sha1)
+{
+ return hash_to_hex_algop_r(buffer, sha1, &hash_algos[GIT_HASH_SHA1]);
+}
+
char *oid_to_hex_r(char *buffer, const struct object_id *oid)
{
- return sha1_to_hex_r(buffer, oid->hash);
+ return hash_to_hex_algop_r(buffer, oid->hash, the_hash_algo);
}
-char *sha1_to_hex(const unsigned char *sha1)
+char *hash_to_hex_algop(const unsigned char *hash, const struct git_hash_algo *algop)
{
static int bufno;
static char hexbuffer[4][GIT_MAX_HEXSZ + 1];
bufno = (bufno + 1) % ARRAY_SIZE(hexbuffer);
- return sha1_to_hex_r(hexbuffer[bufno], sha1);
+ return hash_to_hex_algop_r(hexbuffer[bufno], hash, algop);
+}
+
+char *sha1_to_hex(const unsigned char *sha1)
+{
+ return hash_to_hex_algop(sha1, &hash_algos[GIT_HASH_SHA1]);
+}
+
+char *hash_to_hex(const unsigned char *hash)
+{
+ return hash_to_hex_algop(hash, the_hash_algo);
}
char *oid_to_hex(const struct object_id *oid)
{
- return sha1_to_hex(oid->hash);
+ return hash_to_hex_algop(oid->hash, the_hash_algo);
}
strbuf_addstr(&buf, "Pragma:");
if (options && options->no_cache)
strbuf_addstr(&buf, " no-cache");
- if (options && options->keep_error)
- curl_easy_setopt(slot->curl, CURLOPT_FAILONERROR, 0);
if (options && options->initial_request &&
http_follow_config == HTTP_FOLLOW_INITIAL)
curl_easy_setopt(slot->curl, CURLOPT_FOLLOWLOCATION, 1);
curl_easy_setopt(slot->curl, CURLOPT_URL, url);
curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, headers);
curl_easy_setopt(slot->curl, CURLOPT_ENCODING, "");
+ curl_easy_setopt(slot->curl, CURLOPT_FAILONERROR, 0);
ret = run_one_slot(slot, &results);
return ret;
/*
- * If we are using KEEP_ERROR, the previous request may have
- * put cruft into our output stream; we should clear it out before
- * making our next request. We only know how to do this for
- * the strbuf case, but that is enough to satisfy current callers.
+ * The previous request may have put cruft into our output stream; we
+ * should clear it out before making our next request.
*/
- if (options && options->keep_error) {
- switch (target) {
- case HTTP_REQUEST_STRBUF:
- strbuf_reset(result);
- break;
- default:
- BUG("HTTP_KEEP_ERROR is only supported with strbufs");
+ switch (target) {
+ case HTTP_REQUEST_STRBUF:
+ strbuf_reset(result);
+ break;
+ case HTTP_REQUEST_FILE:
+ if (fflush(result)) {
+ error_errno("unable to flush a file");
+ return HTTP_START_FAILED;
}
+ rewind(result);
+ if (ftruncate(fileno(result), 0) < 0) {
+ error_errno("unable to truncate a file");
+ return HTTP_START_FAILED;
+ }
+ break;
+ default:
+ BUG("Unknown http_request target");
}
credential_fill(&http_auth);
/* Options for http_get_*() */
struct http_get_options {
unsigned no_cache:1,
- keep_error:1,
initial_request:1;
/* If non-NULL, returns the content-type of the response. */
}
static int curl_append_msgs_to_imap(struct imap_server_conf *server,
- struct strbuf* all_msgs, int total) {
+ struct strbuf* all_msgs, int total)
+{
int ofs = 0;
int n = 0;
struct buffer msgbuf = { STRBUF_INIT, 0 };
while (tree_entry(&desc, &entry)) {
if (match != all_entries_interesting) {
- match = tree_entry_interesting(&entry, base, 0,
+ match = tree_entry_interesting(ctx->revs->repo->index,
+ &entry, base, 0,
&ctx->revs->diffopt.pathspec);
if (match == all_entries_not_interesting)
break;
#include "argv-array.h"
#include "ls-refs.h"
#include "pkt-line.h"
+#include "config.h"
/*
* Check if one of the prefixes is a prefix of the ref.
const char *refname_nons = strip_namespace(refname);
struct strbuf refline = STRBUF_INIT;
+ if (ref_is_hidden(refname_nons, refname))
+ return 0;
+
if (!ref_match(&data->prefixes, refname))
return 0;
return 0;
}
+static int ls_refs_config(const char *var, const char *value, void *data)
+{
+ /*
+ * We only serve fetches over v2 for now, so respect only "uploadpack"
+ * config. This may need to eventually be expanded to "receive", but we
+ * don't yet know how that information will be passed to ls-refs.
+ */
+ return parse_hide_refs_config(var, value, "uploadpack");
+}
+
int ls_refs(struct repository *r, struct argv_array *keys,
struct packet_reader *request)
{
memset(&data, 0, sizeof(data));
+ git_config(ls_refs_config, NULL);
+
while (packet_reader_read(request) != PACKET_READ_FLUSH) {
const char *arg = request->line;
const char *out;
{
struct pathspec match_all;
memset(&match_all, 0, sizeof(match_all));
- read_tree_recursive(tree, "", 0, 0, &match_all, save_files_dirs, o);
+ read_tree_recursive(the_repository, tree, "", 0, 0,
+ &match_all, save_files_dirs, o);
}
static int get_tree_entry_if_blob(const struct object_id *tree,
* Be sure to call odb_load_loose_cache() before using.
*/
char loose_objects_subdir_seen[256];
- struct oid_array loose_objects_cache;
+ struct oid_array loose_objects_cache[256];
/*
* Path to the alternative object store. If this is a relative path,
void add_to_alternates_memory(const char *dir);
/*
- * Populate an odb's loose object cache for one particular subdirectory (i.e.,
- * the one that corresponds to the first byte of objects you're interested in,
- * from 0 to 255 inclusive).
+ * Populate and return the loose object cache array corresponding to the
+ * given object ID.
*/
-void odb_load_loose_cache(struct object_directory *odb, int subdir_nr);
+struct oid_array *odb_loose_cache(struct object_directory *odb,
+ const struct object_id *oid);
+
+/* Empty the loose object cache for the specified object directory. */
+void odb_clear_loose_cache(struct object_directory *odb);
struct packed_git {
struct packed_git *next;
static void free_object_directory(struct object_directory *odb)
{
free(odb->path);
- oid_array_clear(&odb->loose_objects_cache);
+ odb_clear_loose_cache(odb);
free(odb);
}
{
struct object_directory *odb;
- for (odb = r->objects->odb; odb; odb = odb->next) {
- oid_array_clear(&odb->loose_objects_cache);
- memset(&odb->loose_objects_subdir_seen, 0,
- sizeof(odb->loose_objects_subdir_seen));
- }
+ for (odb = r->objects->odb; odb; odb = odb->next)
+ odb_clear_loose_cache(odb);
r->objects->approximate_object_count_valid = 0;
r->objects->packed_git_initialized = 0;
}
static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg,
- const struct option *options)
+ const struct option *options)
{
const struct option *all_opts = options;
const char *arg_end = strchrnul(arg, '=');
* Returns the number of arguments left in argv[].
*/
extern int parse_options(int argc, const char **argv, const char *prefix,
- const struct option *options,
- const char * const usagestr[], int flags);
+ const struct option *options,
+ const char * const usagestr[], int flags);
extern NORETURN void usage_with_options(const char * const *usagestr,
- const struct option *options);
+ const struct option *options);
extern NORETURN void usage_msg_opt(const char *msg,
const char * const *usagestr,
FREE_AND_NULL(pathspec->items);
pathspec->nr = 0;
}
+
+int match_pathspec_attrs(const struct index_state *istate,
+ const char *name, int namelen,
+ const struct pathspec_item *item)
+{
+ int i;
+ char *to_free = NULL;
+
+ if (name[namelen])
+ name = to_free = xmemdupz(name, namelen);
+
+ git_check_attr(istate, name, item->attr_check);
+
+ free(to_free);
+
+ for (i = 0; i < item->attr_match_nr; i++) {
+ const char *value;
+ int matched;
+ enum attr_match_mode match_mode;
+
+ value = item->attr_check->items[i].value;
+ match_mode = item->attr_match[i].match_mode;
+
+ if (ATTR_TRUE(value))
+ matched = (match_mode == MATCH_SET);
+ else if (ATTR_FALSE(value))
+ matched = (match_mode == MATCH_UNSET);
+ else if (ATTR_UNSET(value))
+ matched = (match_mode == MATCH_UNSPECIFIED);
+ else
+ matched = (match_mode == MATCH_VALUE &&
+ !strcmp(item->attr_match[i].value, value));
+ if (!matched)
+ return 0;
+ }
+
+ return 1;
+}
* Any arguments used are copied. It is safe for the caller to modify
* or free 'prefix' and 'args' after calling this function.
*/
-extern void parse_pathspec(struct pathspec *pathspec,
- unsigned magic_mask,
- unsigned flags,
- const char *prefix,
- const char **args);
-extern void copy_pathspec(struct pathspec *dst, const struct pathspec *src);
-extern void clear_pathspec(struct pathspec *);
+void parse_pathspec(struct pathspec *pathspec,
+ unsigned magic_mask,
+ unsigned flags,
+ const char *prefix,
+ const char **args);
+void copy_pathspec(struct pathspec *dst, const struct pathspec *src);
+void clear_pathspec(struct pathspec *);
static inline int ps_strncmp(const struct pathspec_item *item,
const char *s1, const char *s2, size_t n)
return strcmp(s1, s2);
}
-extern void add_pathspec_matches_against_index(const struct pathspec *pathspec,
- const struct index_state *istate,
- char *seen);
-extern char *find_pathspecs_matching_against_index(const struct pathspec *pathspec,
- const struct index_state *istate);
+void add_pathspec_matches_against_index(const struct pathspec *pathspec,
+ const struct index_state *istate,
+ char *seen);
+char *find_pathspecs_matching_against_index(const struct pathspec *pathspec,
+ const struct index_state *istate);
+int match_pathspec_attrs(const struct index_state *istate,
+ const char *name, int namelen,
+ const struct pathspec_item *item);
#endif /* PATHSPEC_H */
* Return value is the same as in (1).
*/
static size_t quote_c_style_counted(const char *name, ssize_t maxlen,
- struct strbuf *sb, FILE *fp, int no_dq)
+ struct strbuf *sb, FILE *fp, int no_dq)
{
#undef EMIT
#define EMIT(c) \
static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)
{
- const char *index = NULL;
- uint32_t extsize, ext_version;
- struct index_entry_offset_table *ieot;
- int i, nr;
-
- /* find the IEOT extension */
- if (!offset)
- return NULL;
- while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
- extsize = get_be32(mmap + offset + 4);
- if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
- index = mmap + offset + 4 + 4;
- break;
- }
- offset += 8;
- offset += extsize;
- }
- if (!index)
- return NULL;
-
- /* validate the version is IEOT_VERSION */
- ext_version = get_be32(index);
- if (ext_version != IEOT_VERSION) {
- error("invalid IEOT version %d", ext_version);
- return NULL;
- }
- index += sizeof(uint32_t);
-
- /* extension size - version bytes / bytes per entry */
- nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
- if (!nr) {
- error("invalid number of IEOT entries %d", nr);
- return NULL;
- }
- ieot = xmalloc(sizeof(struct index_entry_offset_table)
- + (nr * sizeof(struct index_entry_offset)));
- ieot->nr = nr;
- for (i = 0; i < nr; i++) {
- ieot->entries[i].offset = get_be32(index);
- index += sizeof(uint32_t);
- ieot->entries[i].nr = get_be32(index);
- index += sizeof(uint32_t);
- }
-
- return ieot;
+ const char *index = NULL;
+ uint32_t extsize, ext_version;
+ struct index_entry_offset_table *ieot;
+ int i, nr;
+
+ /* find the IEOT extension */
+ if (!offset)
+ return NULL;
+ while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
+ extsize = get_be32(mmap + offset + 4);
+ if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
+ index = mmap + offset + 4 + 4;
+ break;
+ }
+ offset += 8;
+ offset += extsize;
+ }
+ if (!index)
+ return NULL;
+
+ /* validate the version is IEOT_VERSION */
+ ext_version = get_be32(index);
+ if (ext_version != IEOT_VERSION) {
+ error("invalid IEOT version %d", ext_version);
+ return NULL;
+ }
+ index += sizeof(uint32_t);
+
+	/* (extension size - version bytes) / bytes per entry */
+ nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
+ if (!nr) {
+ error("invalid number of IEOT entries %d", nr);
+ return NULL;
+ }
+ ieot = xmalloc(sizeof(struct index_entry_offset_table)
+ + (nr * sizeof(struct index_entry_offset)));
+ ieot->nr = nr;
+ for (i = 0; i < nr; i++) {
+ ieot->entries[i].offset = get_be32(index);
+ index += sizeof(uint32_t);
+ ieot->entries[i].nr = get_be32(index);
+ index += sizeof(uint32_t);
+ }
+
+ return ieot;
}
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot)
{
- uint32_t buffer;
- int i;
+ uint32_t buffer;
+ int i;
- /* version */
- put_be32(&buffer, IEOT_VERSION);
- strbuf_add(sb, &buffer, sizeof(uint32_t));
+ /* version */
+ put_be32(&buffer, IEOT_VERSION);
+ strbuf_add(sb, &buffer, sizeof(uint32_t));
- /* ieot */
- for (i = 0; i < ieot->nr; i++) {
+ /* ieot */
+ for (i = 0; i < ieot->nr; i++) {
- /* offset */
- put_be32(&buffer, ieot->entries[i].offset);
- strbuf_add(sb, &buffer, sizeof(uint32_t));
+ /* offset */
+ put_be32(&buffer, ieot->entries[i].offset);
+ strbuf_add(sb, &buffer, sizeof(uint32_t));
- /* count */
- put_be32(&buffer, ieot->entries[i].nr);
- strbuf_add(sb, &buffer, sizeof(uint32_t));
- }
+ /* count */
+ put_be32(&buffer, ieot->entries[i].nr);
+ strbuf_add(sb, &buffer, sizeof(uint32_t));
+ }
}
static int objectsize_atom_parser(const struct ref_format *format, struct used_atom *atom,
const char *arg, struct strbuf *err)
+{
+ if (!arg) {
+ if (*atom->name == '*')
+ oi_deref.info.sizep = &oi_deref.size;
+ else
+ oi.info.sizep = &oi.size;
+ } else if (!strcmp(arg, "disk")) {
+ if (*atom->name == '*')
+ oi_deref.info.disk_sizep = &oi_deref.disk_size;
+ else
+ oi.info.disk_sizep = &oi.disk_size;
+ } else
+ return strbuf_addf_ret(err, -1, _("unrecognized %%(objectsize) argument: %s"), arg);
+ return 0;
+}
+
+static int deltabase_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
if (arg)
- return strbuf_addf_ret(err, -1, _("%%(objectsize) does not take arguments"));
+ return strbuf_addf_ret(err, -1, _("%%(deltabase) does not take arguments"));
if (*atom->name == '*')
- oi_deref.info.sizep = &oi_deref.size;
+ oi_deref.info.delta_base_sha1 = oi_deref.delta_base_oid.hash;
else
- oi.info.sizep = &oi.size;
+ oi.info.delta_base_sha1 = oi.delta_base_oid.hash;
return 0;
}
{ "objecttype", SOURCE_OTHER, FIELD_STR, objecttype_atom_parser },
{ "objectsize", SOURCE_OTHER, FIELD_ULONG, objectsize_atom_parser },
{ "objectname", SOURCE_OTHER, FIELD_STR, objectname_atom_parser },
+ { "deltabase", SOURCE_OTHER, FIELD_STR, deltabase_atom_parser },
{ "tree", SOURCE_OBJ },
{ "parent", SOURCE_OBJ },
{ "numparent", SOURCE_OBJ, FIELD_ULONG },
name++;
if (!strcmp(name, "objecttype"))
v->s = xstrdup(type_name(oi->type));
- else if (!strcmp(name, "objectsize")) {
+ else if (!strcmp(name, "objectsize:disk")) {
+ v->value = oi->disk_size;
+ v->s = xstrfmt("%"PRIuMAX, (uintmax_t)oi->disk_size);
+ } else if (!strcmp(name, "objectsize")) {
v->value = oi->size;
v->s = xstrfmt("%"PRIuMAX , (uintmax_t)oi->size);
- }
+ } else if (!strcmp(name, "deltabase"))
+ v->s = xstrdup(oid_to_hex(&oi->delta_base_oid));
else if (deref)
grab_objectname(name, &oi->oid, v, &used_atom[i]);
}
OBJECT_INFO_LOOKUP_REPLACE))
return strbuf_addf_ret(err, -1, _("missing object %s for %s"),
oid_to_hex(&oi->oid), ref->refname);
+ if (oi->info.disk_sizep && oi->disk_size < 0)
+ BUG("Object size is less than zero.");
if (oi->info.contentp) {
*obj = parse_object_buffer(the_repository, &oi->oid, oi->type, oi->size, oi->content, &eaten);
http_options.extra_headers = &extra_headers;
http_options.initial_request = 1;
http_options.no_cache = 1;
- http_options.keep_error = 1;
http_ret = http_get_strbuf(refs_url.buf, &buffer, &http_options);
switch (http_ret) {
}
#endif
+struct rpc_in_data {
+ struct rpc_state *rpc;
+ struct active_request_slot *slot;
+};
+
+/*
+ * A callback for CURLOPT_WRITEFUNCTION. The return value is the number of
+ * bytes consumed from ptr.
+ */
static size_t rpc_in(char *ptr, size_t eltsize,
size_t nmemb, void *buffer_)
{
size_t size = eltsize * nmemb;
- struct rpc_state *rpc = buffer_;
+ struct rpc_in_data *data = buffer_;
+ long response_code;
+
+ if (curl_easy_getinfo(data->slot->curl, CURLINFO_RESPONSE_CODE,
+ &response_code) != CURLE_OK)
+ return size;
+ if (response_code >= 300)
+ return size;
if (size)
- rpc->any_written = 1;
- write_or_die(rpc->in, ptr, size);
+ data->rpc->any_written = 1;
+ write_or_die(data->rpc->in, ptr, size);
return size;
}
return err;
}
-static curl_off_t xcurl_off_t(size_t len) {
+static curl_off_t xcurl_off_t(size_t len)
+{
uintmax_t size = len;
if (size > maximum_signed_value_of_type(curl_off_t))
die("cannot handle pushes this big");
size_t gzip_size = 0;
int err, large_request = 0;
int needs_100_continue = 0;
+ struct rpc_in_data rpc_in_data;
/* Try to load the entire request, if we can fit it into the
* allocated buffer space we can use HTTP/1.0 and avoid the
curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, headers);
curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, rpc_in);
- curl_easy_setopt(slot->curl, CURLOPT_FILE, rpc);
+ rpc_in_data.rpc = rpc;
+ rpc_in_data.slot = slot;
+ curl_easy_setopt(slot->curl, CURLOPT_FILE, &rpc_in_data);
+ curl_easy_setopt(slot->curl, CURLOPT_FAILONERROR, 0);
rpc->any_written = 0;
if (!name)
return 0;
if (!strcmp(subkey, "insteadof")) {
- rewrite = make_rewrite(&rewrites, name, namelen);
if (!value)
return config_error_nonbool(key);
+ rewrite = make_rewrite(&rewrites, name, namelen);
add_instead_of(rewrite, xstrdup(value));
} else if (!strcmp(subkey, "pushinsteadof")) {
- rewrite = make_rewrite(&rewrites_push, name, namelen);
if (!value)
return config_error_nonbool(key);
+ rewrite = make_rewrite(&rewrites_push, name, namelen);
add_instead_of(rewrite, xstrdup(value));
}
}
return -1;
}
-/*
- * Initialize 'submodule' as the submodule given by 'path' in parent repository
- * 'superproject'.
- * Return 0 upon success and a non-zero value upon failure.
- */
-int repo_submodule_init(struct repository *submodule,
+int repo_submodule_init(struct repository *subrepo,
struct repository *superproject,
- const char *path)
+ const struct submodule *sub)
{
- const struct submodule *sub;
struct strbuf gitdir = STRBUF_INIT;
struct strbuf worktree = STRBUF_INIT;
int ret = 0;
- sub = submodule_from_path(superproject, &null_oid, path);
if (!sub) {
ret = -1;
goto out;
}
- strbuf_repo_worktree_path(&gitdir, superproject, "%s/.git", path);
- strbuf_repo_worktree_path(&worktree, superproject, "%s", path);
+ strbuf_repo_worktree_path(&gitdir, superproject, "%s/.git", sub->path);
+ strbuf_repo_worktree_path(&worktree, superproject, "%s", sub->path);
- if (repo_init(submodule, gitdir.buf, worktree.buf)) {
+ if (repo_init(subrepo, gitdir.buf, worktree.buf)) {
/*
		 * If initialization fails then it may be due to the submodule
* not being populated in the superproject's worktree. Instead
strbuf_repo_git_path(&gitdir, superproject,
"modules/%s", sub->name);
- if (repo_init(submodule, gitdir.buf, NULL)) {
+ if (repo_init(subrepo, gitdir.buf, NULL)) {
ret = -1;
goto out;
}
}
- submodule->submodule_prefix = xstrfmt("%s%s/",
- superproject->submodule_prefix ?
- superproject->submodule_prefix :
- "", path);
+ subrepo->submodule_prefix = xstrfmt("%s%s/",
+ superproject->submodule_prefix ?
+ superproject->submodule_prefix :
+ "", sub->path);
out:
strbuf_release(&gitdir);
void repo_set_hash_algo(struct repository *repo, int algo);
void initialize_the_repository(void);
int repo_init(struct repository *r, const char *gitdir, const char *worktree);
-int repo_submodule_init(struct repository *submodule,
+
+/*
+ * Initialize the repository 'subrepo' as the submodule given by the
+ * struct submodule 'sub' in parent repository 'superproject'.
+ * Return 0 upon success and a non-zero value upon failure, which may happen
+ * if the submodule is not found, or 'sub' is NULL.
+ */
+struct submodule;
+int repo_submodule_init(struct repository *subrepo,
struct repository *superproject,
- const char *path);
+ const struct submodule *sub);
void repo_clear(struct repository *repo);
/*
revs->abbrev = DEFAULT_ABBREV;
revs->ignore_merges = 1;
revs->simplify_history = 1;
+ revs->pruning.repo = r;
revs->pruning.flags.recursive = 1;
revs->pruning.flags.quick = 1;
revs->pruning.add_remove = file_add_remove;
}
static void add_pending_commit_list(struct rev_info *revs,
- struct commit_list *commit_list,
- unsigned int flags)
+ struct commit_list *commit_list,
+ unsigned int flags)
{
while (commit_list) {
struct object *object = &commit_list->item->object;
if (!cant_be_filename)
verify_non_filename(revs->prefix, arg);
object = get_reference(revs, arg, &oid, flags ^ local_flags);
+ if (!object)
+ return revs->ignore_missing ? 0 : -1;
add_rev_cmdline(revs, object, arg_, REV_CMD_REV, flags ^ local_flags);
add_pending_object_with_path(revs, object, arg, oc.mode, oc.path);
free(oc.path);
}
static int handle_revision_opt(struct rev_info *revs, int argc, const char **argv,
- int *unkc, const char **unkv)
+ int *unkc, const char **unkv,
+			       const struct setup_revision_opt *opt)
{
const char *arg = argv[0];
const char *optarg;
revs->limited = 1;
} else if (!strcmp(arg, "--ignore-missing")) {
revs->ignore_missing = 1;
- } else if (revs->allow_exclude_promisor_objects_opt &&
+ } else if (opt && opt->allow_exclude_promisor_objects &&
!strcmp(arg, "--exclude-promisor-objects")) {
if (fetch_if_missing)
BUG("exclude_promisor_objects can only be used when fetch_if_missing is 0");
const char * const usagestr[])
{
int n = handle_revision_opt(revs, ctx->argc, ctx->argv,
- &ctx->cpidx, ctx->out);
+ &ctx->cpidx, ctx->out, NULL);
if (n <= 0) {
error("unknown option `%s'", ctx->argv[0]);
usage_with_options(usagestr, options);
continue;
}
- opts = handle_revision_opt(revs, argc - i, argv + i, &left, argv);
+ opts = handle_revision_opt(revs, argc - i, argv + i,
+ &left, argv, opt);
if (opts > 0) {
i += opts - 1;
continue;
do_not_die_on_missing_tree:1,
/* for internal use only */
- allow_exclude_promisor_objects_opt:1,
exclude_promisor_objects:1;
/* Diff flags */
const char *def;
void (*tweak)(struct rev_info *, struct setup_revision_opt *);
const char *submodule; /* TODO: drop this and use rev_info->repo */
- int assume_dashdash;
+ unsigned int assume_dashdash:1,
+ allow_exclude_promisor_objects:1;
unsigned revarg_opt;
};
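
With the flag moved from rev_info into the setup options, a caller that wants to honor --exclude-promisor-objects now opts in before parsing. A rough sketch of the calling pattern follows; the surrounding variables and the repo_init_revisions() initializer are assumed, not taken from this patch:

	struct rev_info revs;
	struct setup_revision_opt opt;

	repo_init_revisions(the_repository, &revs, prefix);
	memset(&opt, 0, sizeof(opt));
	opt.allow_exclude_promisor_objects = 1;
	setup_revisions(argc, argv, &revs, &opt);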
static GIT_PATH_FUNC(rebase_path_strategy_opts, "rebase-merge/strategy_opts")
static GIT_PATH_FUNC(rebase_path_allow_rerere_autoupdate, "rebase-merge/allow_rerere_autoupdate")
static GIT_PATH_FUNC(rebase_path_quiet, "rebase-merge/quiet")
+static GIT_PATH_FUNC(rebase_path_reschedule_failed_exec, "rebase-merge/reschedule-failed-exec")
static int git_sequencer_config(const char *k, const char *v, void *cb)
{
return res;
}
-static void flush_rewritten_pending(void) {
+static void flush_rewritten_pending(void)
+{
struct strbuf buf = STRBUF_INIT;
struct object_id newoid;
FILE *out;
}
static void record_in_rewritten(struct object_id *oid,
- enum todo_command next_command) {
+ enum todo_command next_command)
+{
FILE *out = fopen_or_warn(rebase_path_rewritten_pending(), "a");
if (!out)
return error(_("commit %s does not have parent %d"),
oid_to_hex(&commit->object.oid), opts->mainline);
parent = p->item;
- } else if (0 < opts->mainline)
- return error(_("mainline was specified but commit %s is not a merge."),
- oid_to_hex(&commit->object.oid));
+ } else if (1 < opts->mainline)
+ /*
+ * Non-first parent explicitly specified as mainline for
+ * non-merge commit
+ */
+ return error(_("commit %s does not have parent %d"),
+ oid_to_hex(&commit->object.oid), opts->mainline);
else
parent = commit->parents->item;
opts->signoff = 1;
}
+ if (file_exists(rebase_path_reschedule_failed_exec()))
+ opts->reschedule_failed_exec = 1;
+
read_strategy_opts(opts, &buf);
strbuf_release(&buf);
write_file(rebase_path_gpg_sign_opt(), "-S%s\n", opts->gpg_sign);
if (opts->signoff)
write_file(rebase_path_signoff(), "--signoff\n");
+ if (opts->reschedule_failed_exec)
+ write_file(rebase_path_reschedule_failed_exec(), "%s", "");
return 0;
}
*end_of_arg = saved;
/* Reread the todo file if it has changed. */
- if (res)
- ; /* fall through */
- else if (stat(get_todo_path(opts), &st))
+ if (res) {
+ if (opts->reschedule_failed_exec)
+ reschedule = 1;
+ } else if (stat(get_todo_path(opts), &st))
res = error_errno(_("could not stat '%s'"),
get_todo_path(opts));
else if (match_stat_data(&todo_list->stat, &st)) {
int allow_empty_message;
int keep_redundant_commits;
int verbose;
+ int reschedule_failed_exec;
int mainline;
return NULL;
}
-static const char *setup_nongit(const char *cwd, int *nongit_ok)
-{
- if (!nongit_ok)
- die(_("not a git repository (or any of the parent directories): %s"), DEFAULT_GIT_DIR_ENVIRONMENT);
- if (chdir(cwd))
- die_errno(_("cannot come back to cwd"));
- *nongit_ok = 1;
- return NULL;
-}
-
static dev_t get_device_or_die(const char *path, const char *prefix, int prefix_len)
{
struct stat buf;
{
static struct strbuf cwd = STRBUF_INIT;
struct strbuf dir = STRBUF_INIT, gitdir = STRBUF_INIT;
- const char *prefix;
+ const char *prefix = NULL;
struct repository_format repo_fmt;
/*
strbuf_addbuf(&dir, &cwd);
switch (setup_git_directory_gently_1(&dir, &gitdir, 1)) {
- case GIT_DIR_NONE:
- prefix = NULL;
- break;
case GIT_DIR_EXPLICIT:
prefix = setup_explicit_git_dir(gitdir.buf, &cwd, &repo_fmt, nongit_ok);
break;
prefix = setup_bare_git_dir(&cwd, dir.len, &repo_fmt, nongit_ok);
break;
case GIT_DIR_HIT_CEILING:
- prefix = setup_nongit(cwd.buf, nongit_ok);
+ if (!nongit_ok)
+ die(_("not a git repository (or any of the parent directories): %s"),
+ DEFAULT_GIT_DIR_ENVIRONMENT);
+ *nongit_ok = 1;
break;
case GIT_DIR_HIT_MOUNT_POINT:
- if (nongit_ok) {
- *nongit_ok = 1;
- strbuf_release(&cwd);
- strbuf_release(&dir);
- return NULL;
- }
- die(_("not a git repository (or any parent up to mount point %s)\n"
- "Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set)."),
- dir.buf);
+ if (!nongit_ok)
+ die(_("not a git repository (or any parent up to mount point %s)\n"
+ "Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set)."),
+ dir.buf);
+ *nongit_ok = 1;
+ break;
+ case GIT_DIR_NONE:
+ /*
+ * As a safeguard against setup_git_directory_gently_1 returning
+	 * this value, fall through to BUG(). Otherwise it is possible to
+ * set startup_info->have_repository to 1 when we did nothing to
+ * find a repository.
+ */
default:
BUG("unhandled setup_git_directory_1() result");
}
- if (prefix)
- setenv(GIT_PREFIX_ENVIRONMENT, prefix, 1);
- else
+ /*
+ * At this point, nongit_ok is stable. If it is non-NULL and points
+ * to a non-zero value, then this means that we haven't found a
+ * repository and that the caller expects startup_info to reflect
+ * this.
+ *
+ * Regardless of the state of nongit_ok, startup_info->prefix and
+ * the GIT_PREFIX environment variable must always match. For details
+ * see Documentation/config/alias.txt.
+ */
+ if (nongit_ok && *nongit_ok) {
+ startup_info->have_repository = 0;
+ startup_info->prefix = NULL;
setenv(GIT_PREFIX_ENVIRONMENT, "", 1);
-
- startup_info->have_repository = !nongit_ok || !*nongit_ok;
- startup_info->prefix = prefix;
+ } else {
+ startup_info->have_repository = 1;
+ startup_info->prefix = prefix;
+ if (prefix)
+ setenv(GIT_PREFIX_ENVIRONMENT, prefix, 1);
+ else
+ setenv(GIT_PREFIX_ENVIRONMENT, "", 1);
+ }
/*
* Not all paths through the setup code will call 'set_git_dir()' (which
* the user has set GIT_DIR. It may be beneficial to disallow bogus
* GIT_DIR values at some point in the future.
*/
- if (startup_info->have_repository || getenv(GIT_DIR_ENVIRONMENT)) {
+ if (/* GIT_DIR_EXPLICIT, GIT_DIR_DISCOVERED, GIT_DIR_BARE */
+ startup_info->have_repository ||
+ /* GIT_DIR_EXPLICIT */
+ getenv(GIT_DIR_ENVIRONMENT)) {
if (!the_repository->gitdir) {
const char *gitdir = getenv(GIT_DIR_ENVIRONMENT);
if (!gitdir)
}
return 0;
}
+
+void oid_array_filter(struct oid_array *array,
+ for_each_oid_fn want,
+ void *cb_data)
+{
+ unsigned nr = array->nr, src, dst;
+ struct object_id *oids = array->oid;
+
+ for (src = dst = 0; src < nr; src++) {
+ if (want(&oids[src], cb_data)) {
+ if (src != dst)
+ oidcpy(&oids[dst], &oids[src]);
+ dst++;
+ }
+ }
+ array->nr = dst;
+}
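
A usage sketch for oid_array_filter() (the predicate and array here are hypothetical): entries for which the callback returns non-zero are kept, the rest are dropped and the array is compacted in place:

static int is_commit_we_have(const struct object_id *oid, void *data)
{
	struct repository *r = data;

	return oid_object_info(r, oid, NULL) == OBJ_COMMIT;
}

	/* in some caller: keep only the commits the repository already has */
	oid_array_filter(&oids, is_commit_we_have, the_repository);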
int oid_array_for_each_unique(struct oid_array *array,
for_each_oid_fn fn,
void *data);
+void oid_array_filter(struct oid_array *array,
+ for_each_oid_fn want,
+ void *cbdata);
#endif /* SHA1_ARRAY_H */
#define EMPTY_TREE_SHA1_BIN_LITERAL \
"\x4b\x82\x5d\xc6\x42\xcb\x6e\xb9\xa0\x60" \
"\xe5\x4b\xf8\xd6\x92\x88\xfb\xee\x49\x04"
+#define EMPTY_TREE_SHA256_BIN_LITERAL \
+ "\x6e\xf1\x9b\x41\x22\x5c\x53\x69\xf1\xc1" \
+ "\x04\xd4\x5d\x8d\x85\xef\xa9\xb0\x57\xb5" \
+ "\x3b\x14\xb4\xb9\xb9\x39\xdd\x74\xde\xcc" \
+ "\x53\x21"
#define EMPTY_BLOB_SHA1_BIN_LITERAL \
"\xe6\x9d\xe2\x9b\xb2\xd1\xd6\x43\x4b\x8b" \
"\x29\xae\x77\x5a\xd8\xc2\xe4\x8c\x53\x91"
+#define EMPTY_BLOB_SHA256_BIN_LITERAL \
+ "\x47\x3a\x0f\x4c\x3b\xe8\xa9\x36\x81\xa2" \
+ "\x67\xe3\xb1\xe9\xa7\xdc\xda\x11\x85\x43" \
+ "\x6f\xe1\x41\xf7\x74\x91\x20\xa3\x03\x72" \
+ "\x18\x13"
const unsigned char null_sha1[GIT_MAX_RAWSZ];
const struct object_id null_oid;
static const struct object_id empty_blob_oid = {
EMPTY_BLOB_SHA1_BIN_LITERAL
};
+static const struct object_id empty_tree_oid_sha256 = {
+ EMPTY_TREE_SHA256_BIN_LITERAL
+};
+static const struct object_id empty_blob_oid_sha256 = {
+ EMPTY_BLOB_SHA256_BIN_LITERAL
+};
static void git_hash_sha1_init(git_hash_ctx *ctx)
{
git_SHA1_Final(hash, &ctx->sha1);
}
+
+static void git_hash_sha256_init(git_hash_ctx *ctx)
+{
+ git_SHA256_Init(&ctx->sha256);
+}
+
+static void git_hash_sha256_update(git_hash_ctx *ctx, const void *data, size_t len)
+{
+ git_SHA256_Update(&ctx->sha256, data, len);
+}
+
+static void git_hash_sha256_final(unsigned char *hash, git_hash_ctx *ctx)
+{
+ git_SHA256_Final(hash, &ctx->sha256);
+}
+
static void git_hash_unknown_init(git_hash_ctx *ctx)
{
BUG("trying to init unknown hash");
0x00000000,
0,
0,
+ 0,
git_hash_unknown_init,
git_hash_unknown_update,
git_hash_unknown_final,
NULL,
},
{
- "sha-1",
+ "sha1",
/* "sha1", big-endian */
0x73686131,
GIT_SHA1_RAWSZ,
GIT_SHA1_HEXSZ,
+ GIT_SHA1_BLKSZ,
git_hash_sha1_init,
git_hash_sha1_update,
git_hash_sha1_final,
&empty_tree_oid,
&empty_blob_oid,
},
+ {
+ "sha256",
+ /* "s256", big-endian */
+ 0x73323536,
+ GIT_SHA256_RAWSZ,
+ GIT_SHA256_HEXSZ,
+ GIT_SHA256_BLKSZ,
+ git_hash_sha256_init,
+ git_hash_sha256_update,
+ git_hash_sha256_final,
+ &empty_tree_oid_sha256,
+ &empty_blob_oid_sha256,
+ }
};
const char *empty_tree_oid_hex(void)
return oid_to_hex_r(buf, the_hash_algo->empty_blob);
}
+int hash_algo_by_name(const char *name)
+{
+ int i;
+ if (!name)
+ return GIT_HASH_UNKNOWN;
+ for (i = 1; i < GIT_HASH_NALGOS; i++)
+ if (!strcmp(name, hash_algos[i].name))
+ return i;
+ return GIT_HASH_UNKNOWN;
+}
+
+int hash_algo_by_id(uint32_t format_id)
+{
+ int i;
+ for (i = 1; i < GIT_HASH_NALGOS; i++)
+ if (format_id == hash_algos[i].format_id)
+ return i;
+ return GIT_HASH_UNKNOWN;
+}
+
/*
* This is meant to hold a *small* number of objects that you would
* want read_sha1_file() to be able to return, but yet you do not want
static int quick_has_loose(struct repository *r,
const unsigned char *sha1)
{
- int subdir_nr = sha1[0];
struct object_id oid;
struct object_directory *odb;
prepare_alt_odb(r);
for (odb = r->objects->odb; odb; odb = odb->next) {
- odb_load_loose_cache(odb, subdir_nr);
- if (oid_array_lookup(&odb->loose_objects_cache, &oid) >= 0)
+ if (oid_array_lookup(odb_loose_cache(odb, &oid), &oid) >= 0)
return 1;
}
return 0;
if (!*size) {
/* mmap() is forbidden on empty files */
error(_("object file %s is empty"), path);
+ close(fd);
return NULL;
}
map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
return 0;
}
-void odb_load_loose_cache(struct object_directory *odb, int subdir_nr)
+struct oid_array *odb_loose_cache(struct object_directory *odb,
+ const struct object_id *oid)
{
+ int subdir_nr = oid->hash[0];
struct strbuf buf = STRBUF_INIT;
if (subdir_nr < 0 ||
BUG("subdir_nr out of range");
if (odb->loose_objects_subdir_seen[subdir_nr])
- return;
+ return &odb->loose_objects_cache[subdir_nr];
strbuf_addstr(&buf, odb->path);
for_each_file_in_obj_subdir(subdir_nr, &buf,
append_loose_object,
NULL, NULL,
- &odb->loose_objects_cache);
+ &odb->loose_objects_cache[subdir_nr]);
odb->loose_objects_subdir_seen[subdir_nr] = 1;
strbuf_release(&buf);
+ return &odb->loose_objects_cache[subdir_nr];
+}
+
+void odb_clear_loose_cache(struct object_directory *odb)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(odb->loose_objects_cache); i++)
+ oid_array_clear(&odb->loose_objects_cache[i]);
+ memset(&odb->loose_objects_subdir_seen, 0,
+ sizeof(odb->loose_objects_subdir_seen));
}
static int check_stream_sha1(git_zstream *stream,
static void find_short_object_filename(struct disambiguate_state *ds)
{
- int subdir_nr = ds->bin_pfx.hash[0];
struct object_directory *odb;
for (odb = the_repository->objects->odb;
odb && !ds->ambiguous;
odb = odb->next) {
int pos;
+ struct oid_array *loose_objects;
- odb_load_loose_cache(odb, subdir_nr);
- pos = oid_array_lookup(&odb->loose_objects_cache, &ds->bin_pfx);
+ loose_objects = odb_loose_cache(odb, &ds->bin_pfx);
+ pos = oid_array_lookup(loose_objects, &ds->bin_pfx);
if (pos < 0)
pos = -1 - pos;
- while (!ds->ambiguous && pos < odb->loose_objects_cache.nr) {
+ while (!ds->ambiguous && pos < loose_objects->nr) {
const struct object_id *oid;
- oid = odb->loose_objects_cache.oid + pos;
+ oid = loose_objects->oid + pos;
if (!match_sha(ds->len, ds->bin_pfx.hash, oid->hash))
break;
update_candidates(ds, oid);
--- /dev/null
+#include "git-compat-util.h"
+#include "./sha256.h"
+
+#undef RND
+#undef BLKSIZE
+
+#define BLKSIZE blk_SHA256_BLKSIZE
+
+void blk_SHA256_Init(blk_SHA256_CTX *ctx)
+{
+ ctx->offset = 0;
+ ctx->size = 0;
+ ctx->state[0] = 0x6a09e667ul;
+ ctx->state[1] = 0xbb67ae85ul;
+ ctx->state[2] = 0x3c6ef372ul;
+ ctx->state[3] = 0xa54ff53aul;
+ ctx->state[4] = 0x510e527ful;
+ ctx->state[5] = 0x9b05688cul;
+ ctx->state[6] = 0x1f83d9abul;
+ ctx->state[7] = 0x5be0cd19ul;
+}
+
+static inline uint32_t ror(uint32_t x, unsigned n)
+{
+ return (x >> n) | (x << (32 - n));
+}
+
+static inline uint32_t ch(uint32_t x, uint32_t y, uint32_t z)
+{
+ return z ^ (x & (y ^ z));
+}
+
+static inline uint32_t maj(uint32_t x, uint32_t y, uint32_t z)
+{
+ return ((x | y) & z) | (x & y);
+}
+
+static inline uint32_t sigma0(uint32_t x)
+{
+ return ror(x, 2) ^ ror(x, 13) ^ ror(x, 22);
+}
+
+static inline uint32_t sigma1(uint32_t x)
+{
+ return ror(x, 6) ^ ror(x, 11) ^ ror(x, 25);
+}
+
+static inline uint32_t gamma0(uint32_t x)
+{
+ return ror(x, 7) ^ ror(x, 18) ^ (x >> 3);
+}
+
+static inline uint32_t gamma1(uint32_t x)
+{
+ return ror(x, 17) ^ ror(x, 19) ^ (x >> 10);
+}
+
+static void blk_SHA256_Transform(blk_SHA256_CTX *ctx, const unsigned char *buf)
+{
+
+ uint32_t S[8], W[64], t0, t1;
+ int i;
+
+ /* copy state into S */
+ for (i = 0; i < 8; i++)
+ S[i] = ctx->state[i];
+
+	/* copy the 512-bit input block into W[0..15] */
+ for (i = 0; i < 16; i++, buf += sizeof(uint32_t))
+ W[i] = get_be32(buf);
+
+ /* fill W[16..63] */
+ for (i = 16; i < 64; i++)
+ W[i] = gamma1(W[i - 2]) + W[i - 7] + gamma0(W[i - 15]) + W[i - 16];
+
+#define RND(a,b,c,d,e,f,g,h,i,ki) \
+ t0 = h + sigma1(e) + ch(e, f, g) + ki + W[i]; \
+ t1 = sigma0(a) + maj(a, b, c); \
+ d += t0; \
+ h = t0 + t1;
+
+ RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],0,0x428a2f98);
+ RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],1,0x71374491);
+ RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],2,0xb5c0fbcf);
+ RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],3,0xe9b5dba5);
+ RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],4,0x3956c25b);
+ RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],5,0x59f111f1);
+ RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],6,0x923f82a4);
+ RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],7,0xab1c5ed5);
+ RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],8,0xd807aa98);
+ RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],9,0x12835b01);
+ RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],10,0x243185be);
+ RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],11,0x550c7dc3);
+ RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],12,0x72be5d74);
+ RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],13,0x80deb1fe);
+ RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],14,0x9bdc06a7);
+ RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],15,0xc19bf174);
+ RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],16,0xe49b69c1);
+ RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],17,0xefbe4786);
+ RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],18,0x0fc19dc6);
+ RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],19,0x240ca1cc);
+ RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],20,0x2de92c6f);
+ RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],21,0x4a7484aa);
+ RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],22,0x5cb0a9dc);
+ RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],23,0x76f988da);
+ RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],24,0x983e5152);
+ RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],25,0xa831c66d);
+ RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],26,0xb00327c8);
+ RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],27,0xbf597fc7);
+ RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],28,0xc6e00bf3);
+ RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],29,0xd5a79147);
+ RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],30,0x06ca6351);
+ RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],31,0x14292967);
+ RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],32,0x27b70a85);
+ RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],33,0x2e1b2138);
+ RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],34,0x4d2c6dfc);
+ RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],35,0x53380d13);
+ RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],36,0x650a7354);
+ RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],37,0x766a0abb);
+ RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],38,0x81c2c92e);
+ RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],39,0x92722c85);
+ RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],40,0xa2bfe8a1);
+ RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],41,0xa81a664b);
+ RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],42,0xc24b8b70);
+ RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],43,0xc76c51a3);
+ RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],44,0xd192e819);
+ RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],45,0xd6990624);
+ RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],46,0xf40e3585);
+ RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],47,0x106aa070);
+ RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],48,0x19a4c116);
+ RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],49,0x1e376c08);
+ RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],50,0x2748774c);
+ RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],51,0x34b0bcb5);
+ RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],52,0x391c0cb3);
+ RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],53,0x4ed8aa4a);
+ RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],54,0x5b9cca4f);
+ RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],55,0x682e6ff3);
+ RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],56,0x748f82ee);
+ RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],57,0x78a5636f);
+ RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],58,0x84c87814);
+ RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],59,0x8cc70208);
+ RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],60,0x90befffa);
+ RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],61,0xa4506ceb);
+ RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],62,0xbef9a3f7);
+ RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],63,0xc67178f2);
+
+ for (i = 0; i < 8; i++)
+ ctx->state[i] += S[i];
+}
+
+void blk_SHA256_Update(blk_SHA256_CTX *ctx, const void *data, size_t len)
+{
+ unsigned int len_buf = ctx->size & 63;
+
+ ctx->size += len;
+
+ /* Read the data into buf and process blocks as they get full */
+ if (len_buf) {
+ unsigned int left = 64 - len_buf;
+ if (len < left)
+ left = len;
+ memcpy(len_buf + ctx->buf, data, left);
+ len_buf = (len_buf + left) & 63;
+ len -= left;
+ data = ((const char *)data + left);
+ if (len_buf)
+ return;
+ blk_SHA256_Transform(ctx, ctx->buf);
+ }
+ while (len >= 64) {
+ blk_SHA256_Transform(ctx, data);
+ data = ((const char *)data + 64);
+ len -= 64;
+ }
+ if (len)
+ memcpy(ctx->buf, data, len);
+}
+
+void blk_SHA256_Final(unsigned char *digest, blk_SHA256_CTX *ctx)
+{
+ static const unsigned char pad[64] = { 0x80 };
+ unsigned int padlen[2];
+ int i;
+
+	/* Pad with a binary 1 (i.e. 0x80), then zeroes, then length */
+ padlen[0] = htonl((uint32_t)(ctx->size >> 29));
+ padlen[1] = htonl((uint32_t)(ctx->size << 3));
+
+ i = ctx->size & 63;
+ blk_SHA256_Update(ctx, pad, 1 + (63 & (55 - i)));
+ blk_SHA256_Update(ctx, padlen, 8);
+
+ /* copy output */
+ for (i = 0; i < 8; i++, digest += sizeof(uint32_t))
+ put_be32(digest, ctx->state[i]);
+}
--- /dev/null
+#ifndef SHA256_BLOCK_SHA256_H
+#define SHA256_BLOCK_SHA256_H
+
+#define blk_SHA256_BLKSIZE 64
+
+struct blk_SHA256_CTX {
+ uint32_t state[8];
+ uint64_t size;
+ uint32_t offset;
+ uint8_t buf[blk_SHA256_BLKSIZE];
+};
+
+typedef struct blk_SHA256_CTX blk_SHA256_CTX;
+
+void blk_SHA256_Init(blk_SHA256_CTX *ctx);
+void blk_SHA256_Update(blk_SHA256_CTX *ctx, const void *data, size_t len);
+void blk_SHA256_Final(unsigned char *digest, blk_SHA256_CTX *ctx);
+
+#define platform_SHA256_CTX blk_SHA256_CTX
+#define platform_SHA256_Init blk_SHA256_Init
+#define platform_SHA256_Update blk_SHA256_Update
+#define platform_SHA256_Final blk_SHA256_Final
+
+#endif
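
A minimal one-shot helper built on the block implementation above (illustrative only; the helper itself is not part of the patch):

#include "git-compat-util.h"
#include "sha256/block/sha256.h"

/* Compute the SHA-256 of a buffer into a 32-byte (GIT_SHA256_RAWSZ) digest. */
static void sha256_buffer(const void *buf, size_t len, unsigned char digest[32])
{
	blk_SHA256_CTX ctx;

	blk_SHA256_Init(&ctx);
	blk_SHA256_Update(&ctx, buf, len);
	blk_SHA256_Final(digest, &ctx);
}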
--- /dev/null
+#ifndef SHA256_GCRYPT_H
+#define SHA256_GCRYPT_H
+
+#include <gcrypt.h>
+
+#define SHA256_DIGEST_SIZE 32
+
+typedef gcry_md_hd_t gcrypt_SHA256_CTX;
+
+inline void gcrypt_SHA256_Init(gcrypt_SHA256_CTX *ctx)
+{
+ gcry_md_open(ctx, GCRY_MD_SHA256, 0);
+}
+
+inline void gcrypt_SHA256_Update(gcrypt_SHA256_CTX *ctx, const void *data, size_t len)
+{
+ gcry_md_write(*ctx, data, len);
+}
+
+inline void gcrypt_SHA256_Final(unsigned char *digest, gcrypt_SHA256_CTX *ctx)
+{
+ memcpy(digest, gcry_md_read(*ctx, GCRY_MD_SHA256), SHA256_DIGEST_SIZE);
+}
+
+#define platform_SHA256_CTX gcrypt_SHA256_CTX
+#define platform_SHA256_Init gcrypt_SHA256_Init
+#define platform_SHA256_Update gcrypt_SHA256_Update
+#define platform_SHA256_Final gcrypt_SHA256_Final
+
+#endif
int is_repository_shallow(struct repository *r)
{
+ /*
+ * NEEDSWORK: This function updates
+ * r->parsed_objects->{is_shallow,shallow_stat} as a side effect but
+ * there is no corresponding function to clear them when the shallow
+ * file is updated.
+ */
+
FILE *fp;
char buf[1024];
const char *path = r->parsed_objects->alternate_shallow_file;
struct keyword_entry *p = keywords + i;
int len = strlen(p->keyword);
- if (n <= len)
+ if (n < len)
continue;
/*
* Match case insensitively, so we colorize output from existing
* messages. We only highlight the word precisely, so
* "successful" stays uncolored.
*/
- if (!strncasecmp(p->keyword, src, len) && !isalnum(src[len])) {
+ if (!strncasecmp(p->keyword, src, len) &&
+ (len == n || !isalnum(src[len]))) {
strbuf_addstr(dest, p->color);
strbuf_add(dest, src, len);
strbuf_addstr(dest, GIT_COLOR_RESET);
return *item->string != '\0';
}
-void string_list_remove_empty_items(struct string_list *list, int free_util) {
+void string_list_remove_empty_items(struct string_list *list, int free_util)
+{
filter_string_list(list, free_util, item_is_not_empty, NULL);
}
#include "commit-reach.h"
static int config_update_recurse_submodules = RECURSE_SUBMODULES_OFF;
-static struct string_list changed_submodule_names = STRING_LIST_INIT_DUP;
static int initialized_fetch_ref_tips;
static struct oid_array ref_tips_before_fetch;
static struct oid_array ref_tips_after_fetch;
DEFAULT_GIT_DIR_ENVIRONMENT);
}
+static void prepare_submodule_repo_env_in_gitdir(struct argv_array *out)
+{
+ prepare_submodule_repo_env_no_git_dir(out);
+ argv_array_pushf(out, "%s=.", GIT_DIR_ENVIRONMENT);
+}
+
/* Helper function to display the submodule header line prior to the full
* summary output. If it can locate the submodule objects directory it will
* attempt to lookup both the left and right commits and put them into the
oid_array_append(&ref_tips_after_fetch, oid);
}
-static void calculate_changed_submodule_paths(struct repository *r)
+static void calculate_changed_submodule_paths(struct repository *r,
+ struct string_list *changed_submodule_names)
{
struct argv_array argv = ARGV_ARRAY_INIT;
- struct string_list changed_submodules = STRING_LIST_INIT_DUP;
- const struct string_list_item *name;
+ struct string_list_item *name;
/* No need to check if there are no submodules configured */
if (!submodule_from_path(r, NULL, NULL))
* Collect all submodules (whether checked out or not) for which new
* commits have been recorded upstream in "changed_submodule_names".
*/
- collect_changed_submodules(r, &changed_submodules, &argv);
+ collect_changed_submodules(r, changed_submodule_names, &argv);
- for_each_string_list_item(name, &changed_submodules) {
+ for_each_string_list_item(name, changed_submodule_names) {
struct oid_array *commits = name->util;
const struct submodule *submodule;
const char *path = NULL;
if (!path)
continue;
- if (!submodule_has_commits(r, path, commits))
- string_list_append(&changed_submodule_names, name->string);
+ if (submodule_has_commits(r, path, commits)) {
+ oid_array_clear(commits);
+ *name->string = '\0';
+ }
}
- free_submodules_oids(&changed_submodules);
+ string_list_remove_empty_items(changed_submodule_names, 1);
+
argv_array_clear(&argv);
oid_array_clear(&ref_tips_before_fetch);
oid_array_clear(&ref_tips_after_fetch);
int default_option;
int quiet;
int result;
+
+ struct string_list changed_submodule_names;
+
+ /* Pending fetches by OIDs */
+ struct fetch_task **oid_fetch_tasks;
+ int oid_fetch_tasks_nr, oid_fetch_tasks_alloc;
};
-#define SPF_INIT {0, ARGV_ARRAY_INIT, NULL, NULL, 0, 0, 0, 0}
+#define SPF_INIT {0, ARGV_ARRAY_INIT, NULL, NULL, 0, 0, 0, 0, \
+ STRING_LIST_INIT_DUP, \
+ NULL, 0, 0}
static int get_fetch_recurse_config(const struct submodule *submodule,
struct submodule_parallel_fetch *spf)
return spf->default_option;
}
+/*
+ * A fetch that is either in progress (when passed as callback data) or
+ * pending (when queued in oid_fetch_tasks of struct submodule_parallel_fetch).
+ */
+struct fetch_task {
+ struct repository *repo;
+ const struct submodule *sub;
+ unsigned free_sub : 1; /* Do we need to free the submodule? */
+
+ struct oid_array *commits; /* Ensure these commits are fetched */
+};
+
+/**
+ * When a submodule is not defined in .gitmodules, we cannot access it
+ * via the regular submodule-config. Create a fake submodule, which we can
+ * work on.
+ */
+static const struct submodule *get_non_gitmodules_submodule(const char *path)
+{
+ struct submodule *ret = NULL;
+ const char *name = default_name_or_path(path);
+
+ if (!name)
+ return NULL;
+
+ ret = xmalloc(sizeof(*ret));
+ memset(ret, 0, sizeof(*ret));
+ ret->path = name;
+ ret->name = name;
+
+ return (const struct submodule *) ret;
+}
+
+static struct fetch_task *fetch_task_create(struct repository *r,
+ const char *path)
+{
+ struct fetch_task *task = xmalloc(sizeof(*task));
+ memset(task, 0, sizeof(*task));
+
+ task->sub = submodule_from_path(r, &null_oid, path);
+ if (!task->sub) {
+ /*
+ * No entry in .gitmodules? Technically not a submodule,
+ * but historically we supported repositories that happen to be
+ * in place where a gitlink is. Keep supporting them.
+ */
+ task->sub = get_non_gitmodules_submodule(path);
+ if (!task->sub) {
+ free(task);
+ return NULL;
+ }
+
+ task->free_sub = 1;
+ }
+
+ return task;
+}
+
+static void fetch_task_release(struct fetch_task *p)
+{
+ if (p->free_sub)
+ free((void*)p->sub);
+ p->free_sub = 0;
+ p->sub = NULL;
+
+ if (p->repo)
+ repo_clear(p->repo);
+ FREE_AND_NULL(p->repo);
+}
+
+static struct repository *get_submodule_repo_for(struct repository *r,
+ const struct submodule *sub)
+{
+ struct repository *ret = xmalloc(sizeof(*ret));
+
+ if (repo_submodule_init(ret, r, sub)) {
+ /*
+ * No entry in .gitmodules? Technically not a submodule,
+ * but historically we supported repositories that happen to be
+		 * in place where a gitlink is. Keep supporting them.
+ */
+ struct strbuf gitdir = STRBUF_INIT;
+ strbuf_repo_worktree_path(&gitdir, r, "%s/.git", sub->path);
+ if (repo_init(ret, gitdir.buf, NULL)) {
+ strbuf_release(&gitdir);
+ free(ret);
+ return NULL;
+ }
+ strbuf_release(&gitdir);
+ }
+
+ return ret;
+}
+
static int get_next_submodule(struct child_process *cp,
struct strbuf *err, void *data, void **task_cb)
{
- int ret = 0;
struct submodule_parallel_fetch *spf = data;
for (; spf->count < spf->r->index->cache_nr; spf->count++) {
- struct strbuf submodule_path = STRBUF_INIT;
- struct strbuf submodule_git_dir = STRBUF_INIT;
- struct strbuf submodule_prefix = STRBUF_INIT;
const struct cache_entry *ce = spf->r->index->cache[spf->count];
- const char *git_dir, *default_argv;
- const struct submodule *submodule;
- struct submodule default_submodule = SUBMODULE_INIT;
+ const char *default_argv;
+ struct fetch_task *task;
if (!S_ISGITLINK(ce->ce_mode))
continue;
- submodule = submodule_from_path(spf->r, &null_oid, ce->name);
- if (!submodule) {
- const char *name = default_name_or_path(ce->name);
- if (name) {
- default_submodule.path = default_submodule.name = name;
- submodule = &default_submodule;
- }
- }
+ task = fetch_task_create(spf->r, ce->name);
+ if (!task)
+ continue;
- switch (get_fetch_recurse_config(submodule, spf))
+ switch (get_fetch_recurse_config(task->sub, spf))
{
default:
case RECURSE_SUBMODULES_DEFAULT:
case RECURSE_SUBMODULES_ON_DEMAND:
- if (!submodule || !unsorted_string_list_lookup(&changed_submodule_names,
- submodule->name))
+ if (!task->sub ||
+ !string_list_lookup(
+ &spf->changed_submodule_names,
+ task->sub->name))
continue;
default_argv = "on-demand";
break;
continue;
}
- strbuf_repo_worktree_path(&submodule_path, spf->r, "%s", ce->name);
- strbuf_addf(&submodule_git_dir, "%s/.git", submodule_path.buf);
- strbuf_addf(&submodule_prefix, "%s%s/", spf->prefix, ce->name);
- git_dir = read_gitfile(submodule_git_dir.buf);
- if (!git_dir)
- git_dir = submodule_git_dir.buf;
- if (is_directory(git_dir)) {
+ task->repo = get_submodule_repo_for(spf->r, task->sub);
+ if (task->repo) {
+ struct strbuf submodule_prefix = STRBUF_INIT;
child_process_init(cp);
- cp->dir = strbuf_detach(&submodule_path, NULL);
- prepare_submodule_repo_env(&cp->env_array);
+ cp->dir = task->repo->gitdir;
+ prepare_submodule_repo_env_in_gitdir(&cp->env_array);
cp->git_cmd = 1;
if (!spf->quiet)
strbuf_addf(err, "Fetching submodule %s%s\n",
argv_array_pushv(&cp->args, spf->args.argv);
argv_array_push(&cp->args, default_argv);
argv_array_push(&cp->args, "--submodule-prefix");
+
+ strbuf_addf(&submodule_prefix, "%s%s/",
+ spf->prefix,
+ task->sub->path);
argv_array_push(&cp->args, submodule_prefix.buf);
- ret = 1;
- }
- strbuf_release(&submodule_path);
- strbuf_release(&submodule_git_dir);
- strbuf_release(&submodule_prefix);
- if (ret) {
+
spf->count++;
+ *task_cb = task;
+
+ strbuf_release(&submodule_prefix);
return 1;
+ } else {
+
+ fetch_task_release(task);
+ free(task);
+
+ /*
+ * An empty directory is normal;
+ * it means the submodule is not initialized.
+ */
+ if (S_ISGITLINK(ce->ce_mode) &&
+ !is_empty_dir(ce->name)) {
+ spf->result = 1;
+ strbuf_addf(err,
+ _("Could not access submodule '%s'"),
+ ce->name);
+ }
}
}
+
+ if (spf->oid_fetch_tasks_nr) {
+ struct fetch_task *task =
+ spf->oid_fetch_tasks[spf->oid_fetch_tasks_nr - 1];
+ struct strbuf submodule_prefix = STRBUF_INIT;
+ spf->oid_fetch_tasks_nr--;
+
+ strbuf_addf(&submodule_prefix, "%s%s/",
+ spf->prefix, task->sub->path);
+
+ child_process_init(cp);
+ prepare_submodule_repo_env_in_gitdir(&cp->env_array);
+ cp->git_cmd = 1;
+ cp->dir = task->repo->gitdir;
+
+ argv_array_init(&cp->args);
+ argv_array_pushv(&cp->args, spf->args.argv);
+ argv_array_push(&cp->args, "on-demand");
+ argv_array_push(&cp->args, "--submodule-prefix");
+ argv_array_push(&cp->args, submodule_prefix.buf);
+
+ /* NEEDSWORK: have get_default_remote from submodule--helper */
+ argv_array_push(&cp->args, "origin");
+ oid_array_for_each_unique(task->commits,
+ append_oid_to_argv, &cp->args);
+
+ *task_cb = task;
+ strbuf_release(&submodule_prefix);
+ return 1;
+ }
+
return 0;
}
void *cb, void *task_cb)
{
struct submodule_parallel_fetch *spf = cb;
+ struct fetch_task *task = task_cb;
spf->result = 1;
+ fetch_task_release(task);
return 0;
}
+static int commit_missing_in_sub(const struct object_id *oid, void *data)
+{
+ struct repository *subrepo = data;
+
+ enum object_type type = oid_object_info(subrepo, oid, NULL);
+
+ return type != OBJ_COMMIT;
+}
+
static int fetch_finish(int retvalue, struct strbuf *err,
void *cb, void *task_cb)
{
struct submodule_parallel_fetch *spf = cb;
+ struct fetch_task *task = task_cb;
+
+ struct string_list_item *it;
+ struct oid_array *commits;
if (retvalue)
spf->result = 1;
+ if (!task || !task->sub)
+ BUG("callback cookie bogus");
+
+ /* Is this the second time we process this submodule? */
+ if (task->commits)
+ goto out;
+
+ it = string_list_lookup(&spf->changed_submodule_names, task->sub->name);
+ if (!it)
+ /* Could be an unchanged submodule, not contained in the list */
+ goto out;
+
+ commits = it->util;
+ oid_array_filter(commits,
+ commit_missing_in_sub,
+ task->repo);
+
+ /* Are there commits we want that are missing from the submodule? */
+ if (commits->nr) {
+ task->commits = commits;
+ ALLOC_GROW(spf->oid_fetch_tasks,
+ spf->oid_fetch_tasks_nr + 1,
+ spf->oid_fetch_tasks_alloc);
+ spf->oid_fetch_tasks[spf->oid_fetch_tasks_nr] = task;
+ spf->oid_fetch_tasks_nr++;
+ return 0;
+ }
+
+out:
+ fetch_task_release(task);
+
return 0;
}
argv_array_push(&spf.args, "--recurse-submodules-default");
/* default value, "--submodule-prefix" and its value are added later */
- calculate_changed_submodule_paths(r);
+ calculate_changed_submodule_paths(r, &spf.changed_submodule_names);
+ string_list_sort(&spf.changed_submodule_names);
run_processes_parallel(max_parallel_jobs,
get_next_submodule,
fetch_start_failure,
argv_array_clear(&spf.args);
out:
- string_list_clear(&changed_submodule_names, 1);
+ free_submodules_oids(&spf.changed_submodule_names);
return spf.result;
}
return ret;
}
+void submodule_unset_core_worktree(const struct submodule *sub)
+{
+ char *config_path = xstrfmt("%s/modules/%s/config",
+ get_git_common_dir(), sub->name);
+
+ if (git_config_set_in_file_gently(config_path, "core.worktree", NULL))
+ warning(_("Could not unset core.worktree setting in submodule '%s'"),
+ sub->path);
+
+ free(config_path);
+}
+
static const char *get_super_prefix_or_empty(void)
{
const char *s = get_super_prefix();
if (is_empty_dir(path))
rmdir_or_warn(path);
+
+ submodule_unset_core_worktree(sub);
}
}
out:
const char *new_head,
unsigned flags);
+void submodule_unset_core_worktree(const struct submodule *sub);
+
/*
* Prepare the "env_array" parameter of a "struct child_process" for executing
* a submodule by clearing any repo-specific environment variables, but
*/
int check_leading_path(const char *name, int len)
{
- return threaded_check_leading_path(&default_cache, name, len);
+ return threaded_check_leading_path(&default_cache, name, len);
}
/*
this feature by setting the GIT_TEST_CHAIN_LINT environment
variable to "1" or "0", respectively.
+--stress::
+--stress=<N>::
+ Run the test script repeatedly in multiple parallel jobs until
+ one of them fails. Useful for reproducing rare failures in
+ flaky tests. The number of parallel jobs is, in order of
+ precedence: <N>, or the value of the GIT_TEST_STRESS_LOAD
+ environment variable, or twice the number of available
+ processors (as shown by the 'getconf' utility), or 8.
+ Implies `--verbose -x --immediate` to get the most information
+ about the failure. Note that the verbose output of each test
+ job is saved to 't/test-results/$TEST_NAME.stress-<nr>.out',
+ and only the output of the failed test job is shown on the
+ terminal. The names of the trash directories get a
+ '.stress-<nr>' suffix, and the trash directory of the failed
+ test job is renamed to end with a '.stress-failed' suffix.
+
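For illustration, based only on the option description above (the test script name is just an example), a flaky test can be hammered with four parallel jobs like this:

    cd t
    ./t5500-fetch-pack.sh --stress=4
    # or let the job count fall back to GIT_TEST_STRESS_LOAD:
    GIT_TEST_STRESS_LOAD=16 ./t5500-fetch-pack.sh --stress
    # each job logs to test-results/t5500-fetch-pack.stress-<nr>.out,
    # and the failed job's trash directory ends in '.stress-failed'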
You can also set the GIT_TEST_INSTALLED environment variable to
the bindir of an existing git installation to test that installation.
You still need to have built this git sandbox, from which various
- Creates an empty test directory with an empty .git/objects database
and chdir(2) into it. This directory is 't/trash
directory.$test_name_without_dotsh', with t/ subject to change by
- the --root option documented above.
+ the --root option documented above, and a '.stress-<N>' suffix
+ appended by the --stress option.
- Defines standard test helper functions for your scripts to
use. These functions are designed to make all scripts behave
chomp;
}
+ /\bcp\s+-a/ and err 'cp -a is not portable';
/\bsed\s+-i/ and err 'sed -i is not portable';
/\becho\s+-[neE]/ and err 'echo with option is not portable (use printf)';
/^\s*declare\s+/ and err 'arrays/declare not portable';
--- /dev/null
+#include "test-tool.h"
+#include "cache.h"
+
+#define NUM_SECONDS 3
+
+static inline void compute_hash(const struct git_hash_algo *algo, git_hash_ctx *ctx, uint8_t *final, const void *p, size_t len)
+{
+ algo->init_fn(ctx);
+ algo->update_fn(ctx, p, len);
+ algo->final_fn(final, ctx);
+}
+
+int cmd__hash_speed(int ac, const char **av)
+{
+ git_hash_ctx ctx;
+ unsigned char hash[GIT_MAX_RAWSZ];
+ clock_t initial, start, end;
+ unsigned bufsizes[] = { 64, 256, 1024, 8192, 16384 };
+ int i;
+ void *p;
+ const struct git_hash_algo *algo = NULL;
+
+ if (ac == 2) {
+ for (i = 1; i < GIT_HASH_NALGOS; i++) {
+ if (!strcmp(av[1], hash_algos[i].name)) {
+ algo = &hash_algos[i];
+ break;
+ }
+ }
+ }
+ if (!algo)
+ die("usage: test-tool hash-speed algo_name");
+
+ /* Use this as an offset to make overflow less likely. */
+ initial = clock();
+
+ printf("algo: %s\n", algo->name);
+
+ for (i = 0; i < ARRAY_SIZE(bufsizes); i++) {
+ unsigned long j, kb;
+ double kb_per_sec;
+ p = xcalloc(1, bufsizes[i]);
+ start = end = clock() - initial;
+ for (j = 0; ((end - start) / CLOCKS_PER_SEC) < NUM_SECONDS; j++) {
+ compute_hash(algo, &ctx, hash, p, bufsizes[i]);
+
+ /*
+ * Only check elapsed time every 128 iterations to avoid
+ * dominating the runtime with system calls.
+ */
+ if (!(j & 127))
+ end = clock() - initial;
+ }
+ kb = j * bufsizes[i] / 1024;
+ kb_per_sec = kb / (((double)end - start) / CLOCKS_PER_SEC);
+ printf("size %u: %lu iters; %lu KiB; %0.2f KiB/s\n", bufsizes[i], j, kb, kb_per_sec);
+ free(p);
+ }
+
+ exit(0);
+}
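A brief usage sketch for the helper above (assuming the test helpers have been built; "sha1" and "sha256" are the algorithm names registered in hash_algos):

    # benchmark each supported algorithm for a few seconds per buffer size
    t/helper/test-tool hash-speed sha1
    t/helper/test-tool hash-speed sha256
    # an unrecognized name falls through to:
    #   "usage: test-tool hash-speed algo_name"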
--- /dev/null
+#include "test-tool.h"
+#include "cache.h"
+
+int cmd_hash_impl(int ac, const char **av, int algo)
+{
+ git_hash_ctx ctx;
+ unsigned char hash[GIT_MAX_HEXSZ];
+ unsigned bufsz = 8192;
+ int binary = 0;
+ char *buffer;
+ const struct git_hash_algo *algop = &hash_algos[algo];
+
+ if (ac == 2) {
+ if (!strcmp(av[1], "-b"))
+ binary = 1;
+ else
+ bufsz = strtoul(av[1], NULL, 10) * 1024 * 1024;
+ }
+
+ if (!bufsz)
+ bufsz = 8192;
+
+ while ((buffer = malloc(bufsz)) == NULL) {
+ fprintf(stderr, "bufsz %u is too big, halving...\n", bufsz);
+ bufsz /= 2;
+ if (bufsz < 1024)
+ die("OOPS");
+ }
+
+ algop->init_fn(&ctx);
+
+ while (1) {
+ ssize_t sz, this_sz;
+ char *cp = buffer;
+ unsigned room = bufsz;
+ this_sz = 0;
+ while (room) {
+ sz = xread(0, cp, room);
+ if (sz == 0)
+ break;
+ if (sz < 0)
+ die_errno("test-hash");
+ this_sz += sz;
+ cp += sz;
+ room -= sz;
+ }
+ if (this_sz == 0)
+ break;
+ algop->update_fn(&ctx, buffer, this_sz);
+ }
+ algop->final_fn(hash, &ctx);
+
+ if (binary)
+ fwrite(hash, 1, algop->rawsz, stdout);
+ else
+ puts(hash_to_hex_algop(hash, algop));
+ exit(0);
+}
int cmd__sha1(int ac, const char **av)
{
- git_SHA_CTX ctx;
- unsigned char sha1[20];
- unsigned bufsz = 8192;
- int binary = 0;
- char *buffer;
-
- if (ac == 2) {
- if (!strcmp(av[1], "-b"))
- binary = 1;
- else
- bufsz = strtoul(av[1], NULL, 10) * 1024 * 1024;
- }
-
- if (!bufsz)
- bufsz = 8192;
-
- while ((buffer = malloc(bufsz)) == NULL) {
- fprintf(stderr, "bufsz %u is too big, halving...\n", bufsz);
- bufsz /= 2;
- if (bufsz < 1024)
- die("OOPS");
- }
-
- git_SHA1_Init(&ctx);
-
- while (1) {
- ssize_t sz, this_sz;
- char *cp = buffer;
- unsigned room = bufsz;
- this_sz = 0;
- while (room) {
- sz = xread(0, cp, room);
- if (sz == 0)
- break;
- if (sz < 0)
- die_errno("test-sha1");
- this_sz += sz;
- cp += sz;
- room -= sz;
- }
- if (this_sz == 0)
- break;
- git_SHA1_Update(&ctx, buffer, this_sz);
- }
- git_SHA1_Final(sha1, &ctx);
-
- if (binary)
- fwrite(sha1, 1, 20, stdout);
- else
- puts(sha1_to_hex(sha1));
- exit(0);
+ return cmd_hash_impl(ac, av, GIT_HASH_SHA1);
}
--- /dev/null
+#include "test-tool.h"
+#include "cache.h"
+
+int cmd__sha256(int ac, const char **av)
+{
+ return cmd_hash_impl(ac, av, GIT_HASH_SHA256);
+}
X(three)
#undef X
-int cmd__sigchain(int argc, const char **argv) {
+int cmd__sigchain(int argc, const char **argv)
+{
sigchain_push(SIGTERM, one);
sigchain_push(SIGTERM, two);
sigchain_push(SIGTERM, three);
int cmd__submodule_nested_repo_config(int argc, const char **argv)
{
- struct repository submodule;
+ struct repository subrepo;
+ const struct submodule *sub;
if (argc < 3)
die_usage(argc, argv, "Wrong number of arguments.");
setup_git_directory();
- if (repo_submodule_init(&submodule, the_repository, argv[1])) {
+ sub = submodule_from_path(the_repository, &null_oid, argv[1]);
+ if (repo_submodule_init(&subrepo, the_repository, sub)) {
die_usage(argc, argv, "Submodule not found.");
}
/* Read the config of _child_ submodules. */
- print_config_from_gitmodules(&submodule, argv[2]);
+ print_config_from_gitmodules(&subrepo, argv[2]);
submodule_free(the_repository);
{ "example-decorate", cmd__example_decorate },
{ "genrandom", cmd__genrandom },
{ "hashmap", cmd__hashmap },
+ { "hash-speed", cmd__hash_speed },
{ "index-version", cmd__index_version },
{ "json-writer", cmd__json_writer },
{ "lazy-init-name-hash", cmd__lazy_init_name_hash },
{ "scrap-cache-tree", cmd__scrap_cache_tree },
{ "sha1", cmd__sha1 },
{ "sha1-array", cmd__sha1_array },
+ { "sha256", cmd__sha256 },
{ "sigchain", cmd__sigchain },
{ "strcmp-offset", cmd__strcmp_offset },
{ "string-list", cmd__string_list },
int cmd__example_decorate(int argc, const char **argv);
int cmd__genrandom(int argc, const char **argv);
int cmd__hashmap(int argc, const char **argv);
+int cmd__hash_speed(int argc, const char **argv);
int cmd__index_version(int argc, const char **argv);
int cmd__json_writer(int argc, const char **argv);
int cmd__lazy_init_name_hash(int argc, const char **argv);
int cmd__scrap_cache_tree(int argc, const char **argv);
int cmd__sha1(int argc, const char **argv);
int cmd__sha1_array(int argc, const char **argv);
+int cmd__sha256(int argc, const char **argv);
int cmd__sigchain(int argc, const char **argv);
int cmd__strcmp_offset(int argc, const char **argv);
int cmd__string_list(int argc, const char **argv);
#endif
int cmd__write_cache(int argc, const char **argv);
+int cmd_hash_impl(int ac, const char **av, int algo);
+
#endif
test_skip_or_die $GIT_TEST_GIT_DAEMON "file system does not support FIFOs"
fi
-LIB_GIT_DAEMON_PORT=${LIB_GIT_DAEMON_PORT-${this_test#t}}
+test_set_port LIB_GIT_DAEMON_PORT
GIT_DAEMON_PID=
GIT_DAEMON_DOCUMENT_ROOT_PATH="$PWD"/repo
"$@" "$GIT_DAEMON_DOCUMENT_ROOT_PATH" \
>&3 2>git_daemon_output &
GIT_DAEMON_PID=$!
- >daemon.log
{
read -r line <&7
- printf "%s\n" "$line"
- printf >&4 "%s\n" "$line"
- (
- while read -r line <&7
- do
- printf "%s\n" "$line"
- printf >&4 "%s\n" "$line"
- done
- ) &
- } 7<git_daemon_output >>"$TRASH_DIRECTORY/daemon.log" &&
+ printf "%s\n" "$line" >&4
+ cat <&7 >&4 &
+ } 7<git_daemon_output &&
# Check expected output
if test x"$(expr "$line" : "\[[0-9]*\] \(.*\)")" != x"Ready to rumble"
(cd / && "$PYTHON_PATH" -c 'import time; print(int(time.time()))')
}
-# Try to pick a unique port: guess a large number, then hope
-# no more than one of each test is running.
-#
-# This does not handle the case where somebody else is running the
-# same tests and has chosen the same ports.
-testid=${this_test#t}
-git_p4_test_start=9800
-P4DPORT=$((10669 + ($testid - $git_p4_test_start)))
+test_set_port P4DPORT
P4PORT=localhost:$P4DPORT
P4CLIENT=client
GIT_DIR=$PWD/.git
GIT_SVN_DIR=$GIT_DIR/svn/refs/remotes/git-svn
SVN_TREE=$GIT_SVN_DIR/svn-tree
+test_set_port SVNSERVE_PORT
svn >/dev/null 2>&1
if test $? -ne 1
}
start_svnserve () {
- SVNSERVE_PORT=${SVNSERVE_PORT-${this_test#t}}
svnserve --listen-port $SVNSERVE_PORT \
--root "$rawsvnrepo" \
--listen-once \
esac
LIB_HTTPD_PATH=${LIB_HTTPD_PATH-"$DEFAULT_HTTPD_PATH"}
-LIB_HTTPD_PORT=${LIB_HTTPD_PORT-${this_test#t}}
+test_set_port LIB_HTTPD_PORT
TEST_PATH="$TEST_DIRECTORY"/lib-httpd
HTTPD_ROOT_PATH="$PWD"/httpd
SetEnv GIT_EXEC_PATH ${GIT_EXEC_PATH}
SetEnv GIT_HTTP_EXPORT_ALL
</LocationMatch>
+ScriptAliasMatch /error_git_upload_pack/(.*)/git-upload-pack error.sh/
ScriptAliasMatch /smart_*[^/]*/(.*) ${GIT_EXEC_PATH}/git-http-backend/$1
ScriptAlias /broken_smart/ broken-smart-http.sh/
ScriptAlias /error/ error.sh/
then
mkdir -p submodule_update/.git/modules/sub1/modules &&
cp -r submodule_update_repo/.git/modules/sub1/modules/sub2 submodule_update/.git/modules/sub1/modules/sub2
- GIT_WORK_TREE=. git -C submodule_update/.git/modules/sub1/modules/sub2 config --unset core.worktree
+ # core.worktree is unset for sub2 as it is not checked out
fi &&
# indicate we are interested in the submodule:
git -C submodule_update config submodule.sub1.url "bogus" &&
git branch -t remove_sub1 origin/remove_sub1 &&
$command remove_sub1 &&
test_superproject_content origin/remove_sub1 &&
- ! test -e sub1
+ ! test -e sub1 &&
+ test_must_fail git config -f .git/modules/sub1/config core.worktree
)
'
# ... absorbing a .git directory along the way.
--- /dev/null
+#!/bin/sh
+
+test_description='test basic hash implementation'
+. ./test-lib.sh
+
+
+test_expect_success 'test basic SHA-1 hash values' '
+ test-tool sha1 </dev/null >actual &&
+ grep da39a3ee5e6b4b0d3255bfef95601890afd80709 actual &&
+ printf "a" | test-tool sha1 >actual &&
+ grep 86f7e437faa5a7fce15d1ddcb9eaeaea377667b8 actual &&
+ printf "abc" | test-tool sha1 >actual &&
+ grep a9993e364706816aba3e25717850c26c9cd0d89d actual &&
+ printf "message digest" | test-tool sha1 >actual &&
+ grep c12252ceda8be8994d5fa0290a47231c1d16aae3 actual &&
+ printf "abcdefghijklmnopqrstuvwxyz" | test-tool sha1 >actual &&
+ grep 32d10c7b8cf96570ca04ce37f2a19d84240d3a89 actual &&
+ perl -e "$| = 1; print q{aaaaaaaaaa} for 1..100000;" | \
+ test-tool sha1 >actual &&
+ grep 34aa973cd4c4daa4f61eeb2bdbad27316534016f actual &&
+ printf "blob 0\0" | test-tool sha1 >actual &&
+ grep e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 actual &&
+ printf "blob 3\0abc" | test-tool sha1 >actual &&
+ grep f2ba8f84ab5c1bce84a7b441cb1959cfc7093b7f actual &&
+ printf "tree 0\0" | test-tool sha1 >actual &&
+ grep 4b825dc642cb6eb9a060e54bf8d69288fbee4904 actual
+'
+
+test_expect_success 'test basic SHA-256 hash values' '
+ test-tool sha256 </dev/null >actual &&
+ grep e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 actual &&
+ printf "a" | test-tool sha256 >actual &&
+ grep ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb actual &&
+ printf "abc" | test-tool sha256 >actual &&
+ grep ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad actual &&
+ printf "message digest" | test-tool sha256 >actual &&
+ grep f7846f55cf23e14eebeab5b4e1550cad5b509e3348fbc4efa3a1413d393cb650 actual &&
+ printf "abcdefghijklmnopqrstuvwxyz" | test-tool sha256 >actual &&
+ grep 71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 actual &&
+ # Try to exercise the chunking code by turning autoflush on.
+ perl -e "$| = 1; print q{aaaaaaaaaa} for 1..100000;" | \
+ test-tool sha256 >actual &&
+ grep cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0 actual &&
+ perl -e "$| = 1; print q{abcdefghijklmnopqrstuvwxyz} for 1..100000;" | \
+ test-tool sha256 >actual &&
+ grep e406ba321ca712ad35a698bf0af8d61fc4dc40eca6bdcea4697962724ccbde35 actual &&
+ printf "blob 0\0" | test-tool sha256 >actual &&
+ grep 473a0f4c3be8a93681a267e3b1e9a7dcda1185436fe141f7749120a303721813 actual &&
+ printf "blob 3\0abc" | test-tool sha256 >actual &&
+ grep c1cf6e465077930e88dc5136641d402f72a229ddd996f627d60e9639eaba35a6 actual &&
+ printf "tree 0\0" | test-tool sha256 >actual &&
+ grep 6ef19b41225c5369f1c104d45d8d85efa9b057b53b14b4b9b939dd74decc5321 actual
+'
+
+test_done
do
rm crlf_false_attr__$f.txt &&
if test -z "$ceol"; then
- git checkout crlf_false_attr__$f.txt
+ git checkout -- crlf_false_attr__$f.txt
else
- git -c core.eol=$ceol checkout crlf_false_attr__$f.txt
+ git -c core.eol=$ceol checkout -- crlf_false_attr__$f.txt
fi
done
test_expect_success '-c with comment char defined in .git/config' '
test_config core.commentchar = &&
printf "= foo\n" >expect &&
- printf "foo" | (
- mkdir sub && cd sub && git stripspace -c
- ) >actual &&
+ rm -fr sub &&
+ mkdir sub &&
+ printf "foo" | git -C sub stripspace -c >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '-c outside git repository' '
+ printf "# foo\n" >expect &&
+ printf "foo" | nongit git stripspace -c >actual &&
test_cmp expect actual
'
test_must_be_empty err
'
-test_expect_success 'run_command is restricted to PATH' '
+
+test_lazy_prereq RUNS_COMMANDS_FROM_PWD '
+ write_script runs-commands-from-pwd <<-\EOF &&
+ true
+ EOF
+ runs-commands-from-pwd >/dev/null 2>&1
+'
+
+test_expect_success !RUNS_COMMANDS_FROM_PWD 'run_command is restricted to PATH' '
write_script should-not-run <<-\EOF &&
echo yikes
EOF
grep $(git -C repo rev-parse bar) out # sanity check that some walking was done
'
-test_expect_success 'rev-list accepts missing and promised objects on command line' '
+test_expect_success 'rev-list dies for missing objects on cmd line' '
rm -rf repo &&
test_create_repo repo &&
test_commit -C repo foo &&
git -C repo config core.repositoryformatversion 1 &&
git -C repo config extensions.partialclone "arbitrary string" &&
- git -C repo rev-list --exclude-promisor-objects --objects "$COMMIT" "$TREE" "$BLOB"
+
+ for OBJ in "$COMMIT" "$TREE" "$BLOB"; do
+ test_must_fail git -C repo rev-list --objects \
+ --exclude-promisor-objects "$OBJ" &&
+ test_must_fail git -C repo rev-list --objects-edge-aggressive \
+ --exclude-promisor-objects "$OBJ" &&
+
+ # Do not die or crash when --ignore-missing is passed.
+ git -C repo rev-list --ignore-missing --objects \
+ --exclude-promisor-objects "$OBJ" &&
+ git -C repo rev-list --ignore-missing --objects-edge-aggressive \
+ --exclude-promisor-objects "$OBJ"
+ done
'
test_expect_success 'gc repacks promisor objects separately from non-promisor objects' '
! grep "$TREE_HASH" out
'
-LIB_HTTPD_PORT=12345 # default port, 410, cannot be used as non-root
. "$TEST_DIRECTORY"/lib-httpd.sh
start_httpd
git worktree move --force --force flump ploof
'
+test_expect_success 'move a repo with uninitialized submodule' '
+ git init withsub &&
+ (
+ cd withsub &&
+ test_commit initial &&
+ git submodule add "$PWD"/.git sub &&
+ git commit -m withsub &&
+ git worktree add second HEAD &&
+ git worktree move second third
+ )
+'
+
+test_expect_success 'not move a repo with initialized submodule' '
+ (
+ cd withsub &&
+ git -C third submodule update &&
+ test_must_fail git worktree move third forth
+ )
+'
+
test_expect_success 'remove main worktree' '
test_must_fail git worktree remove .
'
)
'
+test_expect_success 'remove a repo with uninitialized submodule' '
+ (
+ cd withsub &&
+ git worktree add to-remove HEAD &&
+ git worktree remove to-remove
+ )
+'
+
+test_expect_success 'not remove a repo with initialized submodule' '
+ (
+ cd withsub &&
+ git worktree add to-remove HEAD &&
+ git -C to-remove submodule update &&
+ test_must_fail git worktree remove to-remove
+ )
+'
+
test_done
test_path_is_file execed
'
+test_expect_success '--reschedule-failed-exec' '
+ test_when_finished "git rebase --abort" &&
+ test_must_fail git rebase -x false --reschedule-failed-exec HEAD^ &&
+ grep "^exec false" .git/rebase-merge/git-rebase-todo &&
+ git rebase --abort &&
+ test_must_fail git -c rebase.rescheduleFailedExec=true \
+ rebase -x false HEAD^ 2>err &&
+ grep "^exec false" .git/rebase-merge/git-rebase-todo &&
+ test_i18ngrep "has been rescheduled" err &&
+ git rebase --abort &&
+ test_must_fail git rebase -y false HEAD^ 2>err &&
+ test_i18ngrep "has been rescheduled" err
+'
+
test_done
test_expect_code 129 git cherry-pick -m 0 b
'
-test_expect_success 'cherry-pick a non-merge with -m should fail' '
+test_expect_success 'cherry-pick explicit first parent of a non-merge' '
git reset --hard &&
git checkout a^0 &&
- test_expect_code 128 git cherry-pick -m 1 b &&
- git diff --exit-code a --
+ git cherry-pick -m 1 b &&
+ git diff --exit-code c --
'
'
-test_expect_success 'revert a non-merge with -m should fail' '
+test_expect_success 'revert explicit first parent of a non-merge' '
git reset --hard &&
git checkout c^0 &&
- test_must_fail git revert -m 1 b &&
- git diff --exit-code c
+ git revert -m 1 b &&
+ git diff --exit-code a --
'
git checkout -b new A
'
-test_expect_success 'cherry-pick a non-merge with --ff and -m should fail' '
+test_expect_success 'cherry-pick explicit first parent of a non-merge with --ff' '
git reset --hard A -- &&
- test_must_fail git cherry-pick --ff -m 1 B &&
- git diff --exit-code A --
+ git cherry-pick --ff -m 1 B &&
+ git diff --exit-code C --
'
test_expect_success 'cherry pick a merge with --ff but without -m should fail' '
test_expect_success 'cherry-pick persists opts correctly' '
pristine_detach initial &&
- test_expect_code 128 git cherry-pick -s -m 1 --strategy=recursive -X patience -X ours initial..anotherpick &&
+ # to make sure that the session to cherry-pick a sequence
+ # gets interrupted, use a high-enough number that is larger
+ # than the number of parents of any commit we have created
+ mainline=4 &&
+ test_expect_code 128 git cherry-pick -s -m $mainline --strategy=recursive -X patience -X ours initial..anotherpick &&
test_path_is_dir .git/sequencer &&
test_path_is_file .git/sequencer/head &&
test_path_is_file .git/sequencer/todo &&
echo "true" >expect &&
git config --file=.git/sequencer/opts --get-all options.signoff >actual &&
test_cmp expect actual &&
- echo "1" >expect &&
+ echo "$mainline" >expect &&
git config --file=.git/sequencer/opts --get-all options.mainline >actual &&
test_cmp expect actual &&
echo "recursive" >expect &&
<BOLD;MAGENTA>-a long line to exceed per-line minimum<RESET>
<BOLD;MAGENTA>-another long line to exceed per-line minimum<RESET>
<RED>-original file<RESET>
- <BOLD;YELLOW>+<RESET>Q<BOLD;YELLOW>a long line to exceed per-line minimum<RESET>
- <BOLD;YELLOW>+<RESET>Q<BOLD;YELLOW>another long line to exceed per-line minimum<RESET>
+ <BOLD;CYAN>+<RESET>Q<BOLD;CYAN>a long line to exceed per-line minimum<RESET>
+ <BOLD;CYAN>+<RESET>Q<BOLD;CYAN>another long line to exceed per-line minimum<RESET>
<GREEN>+<RESET><GREEN>new file<RESET>
EOF
test_cmp expected actual
QQQthat has similar lines
QQQto previous blocks, but with different indent
QQQYetQAnotherQoutlierQ
+ QLine with internal w h i t e s p a c e change
EOF
git add text.txt &&
QQthat has similar lines
QQto previous blocks, but with different indent
QQYetQAnotherQoutlier
+ QLine with internal whitespace change
EOF
git diff --color --color-moved --color-moved-ws=allow-indentation-change >actual.raw &&
<BOLD>diff --git a/text.txt b/text.txt<RESET>
<BOLD>--- a/text.txt<RESET>
<BOLD>+++ b/text.txt<RESET>
- <CYAN>@@ -1,14 +1,14 @@<RESET>
+ <CYAN>@@ -1,15 +1,15 @@<RESET>
<BOLD;MAGENTA>-QIndented<RESET>
<BOLD;MAGENTA>-QText across<RESET>
<BOLD;MAGENTA>-Qsome lines<RESET>
<BOLD;MAGENTA>-QQQthat has similar lines<RESET>
<BOLD;MAGENTA>-QQQto previous blocks, but with different indent<RESET>
<RED>-QQQYetQAnotherQoutlierQ<RESET>
+ <RED>-QLine with internal w h i t e s p a c e change<RESET>
<BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>Indented<RESET>
<BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>Text across<RESET>
<BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>some lines<RESET>
<BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>that has similar lines<RESET>
<BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>to previous blocks, but with different indent<RESET>
<GREEN>+<RESET>QQ<GREEN>YetQAnotherQoutlier<RESET>
+ <GREEN>+<RESET>Q<GREEN>Line with internal whitespace change<RESET>
EOF
test_cmp expected actual
'
+test_expect_success 'bogus settings in move detection erroring out' '
+ test_must_fail git diff --color-moved=bogus 2>err &&
+ test_i18ngrep "must be one of" err &&
+ test_i18ngrep bogus err &&
+
+ test_must_fail git -c diff.colormoved=bogus diff 2>err &&
+ test_i18ngrep "must be one of" err &&
+ test_i18ngrep "from command-line config" err &&
+
+ test_must_fail git diff --color-moved-ws=bogus 2>err &&
+ test_i18ngrep "possible values" err &&
+ test_i18ngrep bogus err &&
+
+ test_must_fail git -c diff.colormovedws=bogus diff 2>err &&
+ test_i18ngrep "possible values" err &&
+ test_i18ngrep "from command-line config" err
+'
+
test_expect_success 'compare whitespace delta incompatible with other space options' '
test_must_fail git diff \
--color-moved-ws=allow-indentation-change,ignore-all-space \
test_i18ngrep allow-indentation-change err
'
+EMPTY=''
+test_expect_success 'compare mixed whitespace delta across moved blocks' '
+
+ git reset --hard &&
+ tr Q_ "\t " <<-EOF >text.txt &&
+ ${EMPTY}
+ ____too short without
+ ${EMPTY}
+ ___being grouped across blank line
+ ${EMPTY}
+ context
+ lines
+ to
+ anchor
+ ____Indented text to
+ _Q____be further indented by four spaces across
+ ____Qseveral lines
+ QQ____These two lines have had their
+ ____indentation reduced by four spaces
+ Qdifferent indentation change
+ ____too short
+ EOF
+
+ git add text.txt &&
+ git commit -m "add text.txt" &&
+
+ tr Q_ "\t " <<-EOF >text.txt &&
+ context
+ lines
+ to
+ anchor
+ QIndented text to
+ QQbe further indented by four spaces across
+ Q____several lines
+ ${EMPTY}
+ QQtoo short without
+ ${EMPTY}
+ Q_______being grouped across blank line
+ ${EMPTY}
+ Q_QThese two lines have had their
+ indentation reduced by four spaces
+ QQdifferent indentation change
+ __Qtoo short
+ EOF
+
+ git -c color.diff.whitespace="normal red" \
+ -c core.whitespace=space-before-tab \
+ diff --color --color-moved --ws-error-highlight=all \
+ --color-moved-ws=allow-indentation-change >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
+
+ cat <<-\EOF >expected &&
+ <BOLD>diff --git a/text.txt b/text.txt<RESET>
+ <BOLD>--- a/text.txt<RESET>
+ <BOLD>+++ b/text.txt<RESET>
+ <CYAN>@@ -1,16 +1,16 @@<RESET>
+ <BOLD;MAGENTA>-<RESET>
+ <BOLD;MAGENTA>-<RESET><BOLD;MAGENTA> too short without<RESET>
+ <BOLD;MAGENTA>-<RESET>
+ <BOLD;MAGENTA>-<RESET><BOLD;MAGENTA> being grouped across blank line<RESET>
+ <BOLD;MAGENTA>-<RESET>
+ <RESET>context<RESET>
+ <RESET>lines<RESET>
+ <RESET>to<RESET>
+ <RESET>anchor<RESET>
+ <BOLD;MAGENTA>-<RESET><BOLD;MAGENTA> Indented text to<RESET>
+ <BOLD;MAGENTA>-<RESET><BRED> <RESET> <BOLD;MAGENTA> be further indented by four spaces across<RESET>
+ <BOLD;MAGENTA>-<RESET><BRED> <RESET> <BOLD;MAGENTA>several lines<RESET>
+ <BOLD;BLUE>-<RESET> <BOLD;BLUE> These two lines have had their<RESET>
+ <BOLD;BLUE>-<RESET><BOLD;BLUE> indentation reduced by four spaces<RESET>
+ <BOLD;MAGENTA>-<RESET> <BOLD;MAGENTA>different indentation change<RESET>
+ <RED>-<RESET><RED> too short<RESET>
+ <BOLD;CYAN>+<RESET> <BOLD;CYAN>Indented text to<RESET>
+ <BOLD;CYAN>+<RESET> <BOLD;CYAN>be further indented by four spaces across<RESET>
+ <BOLD;CYAN>+<RESET> <BOLD;CYAN> several lines<RESET>
+ <BOLD;YELLOW>+<RESET>
+ <BOLD;YELLOW>+<RESET> <BOLD;YELLOW>too short without<RESET>
+ <BOLD;YELLOW>+<RESET>
+ <BOLD;YELLOW>+<RESET> <BOLD;YELLOW> being grouped across blank line<RESET>
+ <BOLD;YELLOW>+<RESET>
+ <BOLD;CYAN>+<RESET> <BRED> <RESET> <BOLD;CYAN>These two lines have had their<RESET>
+ <BOLD;CYAN>+<RESET><BOLD;CYAN>indentation reduced by four spaces<RESET>
+ <BOLD;YELLOW>+<RESET> <BOLD;YELLOW>different indentation change<RESET>
+ <GREEN>+<RESET><BRED> <RESET> <GREEN>too short<RESET>
+ EOF
+
+ test_cmp expected actual
+'
+
test_done
rm .gitattributes
'
+test_expect_success 'setup log -[GS] binary & --text' '
+ git checkout --orphan GS-binary-and-text &&
+ git read-tree --empty &&
+ printf "a\na\0a\n" >data.bin &&
+ git add data.bin &&
+ git commit -m "create binary file" data.bin &&
+ printf "a\na\0a\n" >>data.bin &&
+ git commit -m "modify binary file" data.bin &&
+ git rm data.bin &&
+ git commit -m "delete binary file" data.bin &&
+ git log >full-log
+'
+
+test_expect_success 'log -G ignores binary files' '
+ git log -Ga >log &&
+ test_must_be_empty log
+'
+
+test_expect_success 'log -G looks into binary files with -a' '
+ git log -a -Ga >log &&
+ test_cmp log full-log
+'
+
+test_expect_success 'log -G looks into binary files with textconv filter' '
+ test_when_finished "rm .gitattributes" &&
+ echo "* diff=bin" >.gitattributes &&
+ git -c diff.bin.textconv=cat log -Ga >log &&
+ test_cmp log full-log
+'
+
+test_expect_success 'log -S looks into binary files' '
+ git log -Sa >log &&
+ test_cmp log full-log
+'
+
test_done
test_description='test corner cases of git-archive'
. ./test-lib.sh
-test_expect_success 'create commit with empty tree' '
- git commit --allow-empty -m foo
+# The 10knuls.tar file is used to check for an empty git-generated tar
+# without invoking tar, because {Open,Net}BSD tar considers an otherwise
+# valid empty GNU tar to be broken.
+test_expect_success 'create commit with empty tree and fake empty tar' '
+ git commit --allow-empty -m foo &&
+ perl -e "print \"\\0\" x 10240" >10knuls.tar
'
# Make a dir and clean it up afterwards
test_expect_success 'tar archive of empty tree is empty' '
git archive --format=tar HEAD: >empty.tar &&
- perl -e "print \"\\0\" x 10240" >10knuls.tar &&
test_cmp_bin 10knuls.tar empty.tar
'
test_expect_success 'archive empty subtree with no pathspec' '
git archive --format=tar $root_tree >subtree-all.tar &&
- make_dir extract &&
- "$TAR" xf subtree-all.tar -C extract &&
- check_dir extract
+ test_cmp_bin 10knuls.tar subtree-all.tar
'
test_expect_success 'archive empty subtree by direct pathspec' '
git archive --format=tar $root_tree -- sub >subtree-path.tar &&
- make_dir extract &&
- "$TAR" xf subtree-path.tar -C extract &&
- check_dir extract
+ test_cmp_bin 10knuls.tar subtree-path.tar
'
ZIPINFO=zipinfo
. ./test-lib.sh
test_expect_success setup '
- echo Data for commit0. >a &&
- echo Data for commit0. >b &&
- git update-index --add a &&
- git update-index --add b &&
- tree0=$(git write-tree) &&
- commit0=$(echo setup | git commit-tree $tree0) &&
- git update-ref refs/heads/master $commit0 &&
- git clone ./. clone1 &&
- git clone ./. clone2 &&
- GIT_DIR=clone2/.git git branch new2 &&
- echo Data for commit1. >clone2/b &&
- GIT_DIR=clone2/.git git add clone2/b &&
- GIT_DIR=clone2/.git git commit -m new2
-'
-
-for clone in 1 2; do
- cat >clone${clone}/.git/hooks/post-checkout <<'EOF'
-#!/bin/sh
-echo $@ > $GIT_DIR/post-checkout.args
-EOF
- chmod u+x clone${clone}/.git/hooks/post-checkout
-done
-
-test_expect_success 'post-checkout runs as expected ' '
- GIT_DIR=clone1/.git git checkout master &&
- test -e clone1/.git/post-checkout.args
+ mkdir -p .git/hooks &&
+ write_script .git/hooks/post-checkout <<-\EOF &&
+ echo "$@" >.git/post-checkout.args
+ EOF
+ test_commit one &&
+ test_commit two &&
+ test_commit rebase-on-me &&
+ git reset --hard HEAD^ &&
+ test_commit three
'
test_expect_success 'post-checkout receives the right arguments with HEAD unchanged ' '
- old=$(awk "{print \$1}" clone1/.git/post-checkout.args) &&
- new=$(awk "{print \$2}" clone1/.git/post-checkout.args) &&
- flag=$(awk "{print \$3}" clone1/.git/post-checkout.args) &&
+ test_when_finished "rm -f .git/post-checkout.args" &&
+ git checkout master &&
+ read old new flag <.git/post-checkout.args &&
test $old = $new && test $flag = 1
'
-test_expect_success 'post-checkout runs as expected ' '
- GIT_DIR=clone1/.git git checkout master &&
- test -e clone1/.git/post-checkout.args
-'
-
test_expect_success 'post-checkout args are correct with git checkout -b ' '
- GIT_DIR=clone1/.git git checkout -b new1 &&
- old=$(awk "{print \$1}" clone1/.git/post-checkout.args) &&
- new=$(awk "{print \$2}" clone1/.git/post-checkout.args) &&
- flag=$(awk "{print \$3}" clone1/.git/post-checkout.args) &&
+ test_when_finished "rm -f .git/post-checkout.args" &&
+ git checkout -b new1 &&
+ read old new flag <.git/post-checkout.args &&
test $old = $new && test $flag = 1
'
test_expect_success 'post-checkout receives the right args with HEAD changed ' '
- GIT_DIR=clone2/.git git checkout new2 &&
- old=$(awk "{print \$1}" clone2/.git/post-checkout.args) &&
- new=$(awk "{print \$2}" clone2/.git/post-checkout.args) &&
- flag=$(awk "{print \$3}" clone2/.git/post-checkout.args) &&
+ test_when_finished "rm -f .git/post-checkout.args" &&
+ git checkout two &&
+ read old new flag <.git/post-checkout.args &&
test $old != $new && test $flag = 1
'
test_expect_success 'post-checkout receives the right args when not switching branches ' '
- GIT_DIR=clone2/.git git checkout master b &&
- old=$(awk "{print \$1}" clone2/.git/post-checkout.args) &&
- new=$(awk "{print \$2}" clone2/.git/post-checkout.args) &&
- flag=$(awk "{print \$3}" clone2/.git/post-checkout.args) &&
+ test_when_finished "rm -f .git/post-checkout.args" &&
+ git checkout master -- three.t &&
+ read old new flag <.git/post-checkout.args &&
test $old = $new && test $flag = 0
'
-if test "$(git config --bool core.filemode)" = true; then
-mkdir -p templates/hooks
-cat >templates/hooks/post-checkout <<'EOF'
-#!/bin/sh
-echo $@ > $GIT_DIR/post-checkout.args
-EOF
-chmod +x templates/hooks/post-checkout
+test_expect_success 'post-checkout is triggered on rebase' '
+ test_when_finished "rm -f .git/post-checkout.args" &&
+ git checkout -b rebase-test master &&
+ rm -f .git/post-checkout.args &&
+ git rebase rebase-on-me &&
+ read old new flag <.git/post-checkout.args &&
+ test $old != $new && test $flag = 1
+'
+
+test_expect_success 'post-checkout is triggered on rebase with fast-forward' '
+ test_when_finished "rm -f .git/post-checkout.args" &&
+ git checkout -b ff-rebase-test rebase-on-me^ &&
+ rm -f .git/post-checkout.args &&
+ git rebase rebase-on-me &&
+ read old new flag <.git/post-checkout.args &&
+ test $old != $new && test $flag = 1
+'
test_expect_success 'post-checkout hook is triggered by clone' '
+ mkdir -p templates/hooks &&
+ write_script templates/hooks/post-checkout <<-\EOF &&
+ echo "$@" >$GIT_DIR/post-checkout.args
+ EOF
git clone --template=templates . clone3 &&
test -f clone3/.git/post-checkout.args
'
-fi
test_done
echo " " "error: leading space"
echo " "
echo Err
+ echo SUCCESS
exit 0
EOF
echo 1 >file &&
grep "<BOLD;RED>error<RESET>: error" decoded &&
grep "<YELLOW>hint<RESET>:" decoded &&
grep "<BOLD;GREEN>success<RESET>:" decoded &&
+ grep "<BOLD;GREEN>SUCCESS<RESET>" decoded &&
grep "<BOLD;YELLOW>warning<RESET>:" decoded
'
) >input.dup
'
-test_expect_success 'fetch refs from cmdline' '
- (
- cd client &&
- git fetch-pack --no-progress .. $(cat ../input)
- ) >output &&
- cut -d " " -f 2 <output | sort >actual &&
- test_cmp expect actual
+test_expect_success 'setup fetch refs from cmdline v[12]' '
+ cp -r client client1 &&
+ cp -r client client2
'
+for version in '' 1 2
+do
+ test_expect_success "protocol.version=$version fetch refs from cmdline" "
+ (
+ cd client$version &&
+ GIT_TEST_PROTOCOL_VERSION=$version git fetch-pack --no-progress .. \$(cat ../input)
+ ) >output &&
+ cut -d ' ' -f 2 <output | sort >actual &&
+ test_cmp expect actual
+ "
+done
+
test_expect_success 'fetch refs from stdin' '
(
cd client &&
grep refs/tags/magic actual
'
+test_expect_success 'protocol v2 supports hiderefs' '
+ test_config uploadpack.hiderefs refs/tags &&
+ git -c protocol.version=2 ls-remote . >actual &&
+ ! grep refs/tags actual
+'
+
test_expect_success 'ls-remote --symref' '
git fetch origin &&
cat >expect <<-EOF &&
# This test spawns a daemon, so run it only if the user would be OK with
# testing with git-daemon.
test_expect_success PIPE,JGIT,GIT_DAEMON 'indicate no refs in standards-compliant empty remote' '
- JGIT_DAEMON_PORT=${JGIT_DAEMON_PORT-${this_test#t}} &&
+ test_set_port JGIT_DAEMON_PORT &&
JGIT_DAEMON_PID= &&
git init --bare empty.git &&
>empty.git/git-daemon-export-ok &&
git config fetch.recurseSubmodules true &&
(
cd downstream &&
+ GIT_TRACE=$(pwd)/trace.out git fetch &&
+ grep "1 tasks" trace.out &&
GIT_TRACE=$(pwd)/trace.out git fetch --jobs 7 &&
grep "7 tasks" trace.out &&
git config submodule.fetchJobs 8 &&
test_cmp expect actual
'
+test_expect_success "fetch new submodule commits on-demand outside standard refspec" '
+ # add a second submodule and ensure it is around in downstream first
+ git clone submodule sub1 &&
+ git submodule add ./sub1 &&
+ git commit -m "adding a second submodule" &&
+ git -C downstream pull &&
+ git -C downstream submodule update --init --recursive &&
+
+ git checkout --detach &&
+
+ C=$(git -C submodule commit-tree -m "new change outside refs/heads" HEAD^{tree}) &&
+ git -C submodule update-ref refs/changes/1 $C &&
+ git update-index --cacheinfo 160000 $C submodule &&
+ test_tick &&
+
+ D=$(git -C sub1 commit-tree -m "new change outside refs/heads" HEAD^{tree}) &&
+ git -C sub1 update-ref refs/changes/2 $D &&
+ git update-index --cacheinfo 160000 $D sub1 &&
+
+ git commit -m "updated submodules outside of refs/heads" &&
+ E=$(git rev-parse HEAD) &&
+ git update-ref refs/changes/3 $E &&
+ (
+ cd downstream &&
+ git fetch --recurse-submodules origin refs/changes/3:refs/heads/my_branch &&
+ git -C submodule cat-file -t $C &&
+ git -C sub1 cat-file -t $D &&
+ git checkout --recurse-submodules FETCH_HEAD
+ )
+'
+
+test_expect_success 'fetch new submodule commit on-demand in FETCH_HEAD' '
+ # depends on the previous test for setup
+
+ C=$(git -C submodule commit-tree -m "another change outside refs/heads" HEAD^{tree}) &&
+ git -C submodule update-ref refs/changes/4 $C &&
+ git update-index --cacheinfo 160000 $C submodule &&
+ test_tick &&
+
+ D=$(git -C sub1 commit-tree -m "another change outside refs/heads" HEAD^{tree}) &&
+ git -C sub1 update-ref refs/changes/5 $D &&
+ git update-index --cacheinfo 160000 $D sub1 &&
+
+ git commit -m "updated submodules outside of refs/heads" &&
+ E=$(git rev-parse HEAD) &&
+ git update-ref refs/changes/6 $E &&
+ (
+ cd downstream &&
+ git fetch --recurse-submodules origin refs/changes/6 &&
+ git -C submodule cat-file -t $C &&
+ git -C sub1 cat-file -t $D &&
+ git checkout --recurse-submodules FETCH_HEAD
+ )
+'
+
+test_expect_success 'fetch new submodule commits on-demand without .gitmodules entry' '
+ # depends on the previous test for setup
+
+ git config -f .gitmodules --remove-section submodule.sub1 &&
+ git add .gitmodules &&
+ git commit -m "delete gitmodules file" &&
+ git checkout -B master &&
+ git -C downstream fetch &&
+ git -C downstream checkout origin/master &&
+
+ C=$(git -C submodule commit-tree -m "yet another change outside refs/heads" HEAD^{tree}) &&
+ git -C submodule update-ref refs/changes/7 $C &&
+ git update-index --cacheinfo 160000 $C submodule &&
+ test_tick &&
+
+ D=$(git -C sub1 commit-tree -m "yet another change outside refs/heads" HEAD^{tree}) &&
+ git -C sub1 update-ref refs/changes/8 $D &&
+ git update-index --cacheinfo 160000 $D sub1 &&
+
+ git commit -m "updated submodules outside of refs/heads" &&
+ E=$(git rev-parse HEAD) &&
+ git update-ref refs/changes/9 $E &&
+ (
+ cd downstream &&
+ git fetch --recurse-submodules origin refs/changes/9 &&
+ git -C submodule cat-file -t $C &&
+ git -C sub1 cat-file -t $D &&
+ git checkout --recurse-submodules FETCH_HEAD
+ )
+'
+
+test_expect_success 'fetch new submodule commit intermittently referenced by superproject' '
+ # depends on the previous test for setup
+
+ D=$(git -C sub1 commit-tree -m "change 10 outside refs/heads" HEAD^{tree}) &&
+ E=$(git -C sub1 commit-tree -m "change 11 outside refs/heads" HEAD^{tree}) &&
+ F=$(git -C sub1 commit-tree -m "change 12 outside refs/heads" HEAD^{tree}) &&
+
+ git -C sub1 update-ref refs/changes/10 $D &&
+ git update-index --cacheinfo 160000 $D sub1 &&
+ git commit -m "updated submodules outside of refs/heads" &&
+
+ git -C sub1 update-ref refs/changes/11 $E &&
+ git update-index --cacheinfo 160000 $E sub1 &&
+ git commit -m "updated submodules outside of refs/heads" &&
+
+ git -C sub1 update-ref refs/changes/12 $F &&
+ git update-index --cacheinfo 160000 $F sub1 &&
+ git commit -m "updated submodules outside of refs/heads" &&
+
+ G=$(git rev-parse HEAD) &&
+ git update-ref refs/changes/13 $G &&
+ (
+ cd downstream &&
+ git fetch --recurse-submodules origin refs/changes/13 &&
+
+ git -C sub1 cat-file -t $D &&
+ git -C sub1 cat-file -t $E &&
+ git -C sub1 cat-file -t $F
+ )
+'
+
test_done
git ls-remote "$GIT_DAEMON_URL/escape.git"
'
-test_expect_success 'daemon log records all attributes' '
- cat >expect <<-\EOF &&
- Extended attribute "host": localhost
- Extended attribute "protocol": version=1
- EOF
- >daemon.log &&
- GIT_OVERRIDE_VIRTUAL_HOST=localhost \
- git -c protocol.version=1 \
- ls-remote "$GIT_DAEMON_URL/interp.git" &&
- grep -i extended.attribute daemon.log | cut -d" " -f2- >actual &&
- test_cmp expect actual
-'
-
test_expect_success FAKENC 'hostname interpolation works after LF-stripping' '
{
printf "git-upload-pack /interp.git\n\0host=localhost" | packetize
--- /dev/null
+#!/bin/sh
+
+test_description='test GIT_CURL_VERBOSE'
+. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'setup repository' '
+ mkdir "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
+ git -C "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" --bare init &&
+ git config push.default matching &&
+ echo content >file &&
+ git add file &&
+ git commit -m one &&
+ git remote add public "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
+ git push public master:master
+'
+
+test_expect_success 'failure in git-upload-pack is shown' '
+ test_might_fail env GIT_CURL_VERBOSE=1 \
+ git clone "$HTTPD_URL/error_git_upload_pack/smart/repo.git" \
+ 2>curl_log &&
+ grep "< HTTP/1.1 500 Intentional Breakage" curl_log
+'
+
+stop_httpd
+
+test_done
expect_ssh "$@"
}
-test_expect_success !MINGW 'clone c:temp is ssl' '
+test_expect_success !MINGW,!CYGWIN 'clone c:temp is ssl' '
test_clone_url c:temp c temp
'
grep "fetch< version 2" trace
'
+test_expect_success 'ensure that multiple fetches in same process from a shallow repo works' '
+ rm -rf server client trace &&
+
+ test_create_repo server &&
+ test_commit -C server one &&
+ test_commit -C server two &&
+ test_commit -C server three &&
+ git clone --shallow-exclude two "file://$(pwd)/server" client &&
+
+ git -C server tag -a -m "an annotated tag" twotag two &&
+
+ # Triggers tag following (thus, 2 fetches in one process)
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client -c protocol.version=2 \
+ fetch --shallow-exclude one origin &&
+ # Ensure that protocol v2 is used
+ grep "fetch< version 2" trace
+'
+
+test_expect_success 'deepen-relative' '
+ rm -rf server client trace &&
+
+ test_create_repo server &&
+ test_commit -C server one &&
+ test_commit -C server two &&
+ test_commit -C server three &&
+ git clone --depth 1 "file://$(pwd)/server" client &&
+ test_commit -C server four &&
+
+ # Sanity check that only "three" is downloaded
+ git -C client log --pretty=tformat:%s master >actual &&
+ echo three >expected &&
+ test_cmp expected actual &&
+
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client -c protocol.version=2 \
+ fetch --deepen=1 origin &&
+ # Ensure that protocol v2 is used
+ grep "fetch< version 2" trace &&
+
+ git -C client log --pretty=tformat:%s origin/master >actual &&
+ cat >expected <<-\EOF &&
+ four
+ three
+ two
+ EOF
+ test_cmp expected actual
+'
+
# Test protocol v2 with 'http://' transport
#
. "$TEST_DIRECTORY"/lib-httpd.sh
mkdir sub &&
while read path
do
- : >$path &&
+ echo content >$path &&
git add $path || return 1
done <expect &&
git commit -m "initial commit" &&
test_must_be_empty actual
'
+test_expect_success 'pathspec with labels and non existent .gitattributes (2)' '
+ test_must_fail git grep content HEAD -- ":(attr:label)"
+'
+
test_expect_success 'setup .gitattributes' '
cat <<-\EOF >.gitattributes &&
fileA labelA
test_cmp expect actual
'
+test_expect_success 'check specific set attr (2)' '
+ cat <<-\EOF >expect &&
+ HEAD:fileSetLabel
+ HEAD:sub/fileSetLabel
+ EOF
+ git grep -l content HEAD ":(attr:label)" >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'check specific unset attr' '
cat <<-\EOF >expect &&
fileUnsetLabel
test_cmp expect actual
'
+test_expect_success 'check specific unset attr (2)' '
+ cat <<-\EOF >expect &&
+ HEAD:fileUnsetLabel
+ HEAD:sub/fileUnsetLabel
+ EOF
+ git grep -l content HEAD ":(attr:-label)" >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'check specific value attr' '
cat <<-\EOF >expect &&
fileValue
test_must_be_empty actual
'
+test_expect_success 'check specific value attr (2)' '
+ cat <<-\EOF >expect &&
+ HEAD:fileValue
+ HEAD:sub/fileValue
+ EOF
+ git grep -l content HEAD ":(attr:label=foo)" >actual &&
+ test_cmp expect actual &&
+ test_must_fail git grep -l content HEAD ":(attr:label=bar)"
+'
+
test_expect_success 'check unspecified attr' '
cat <<-\EOF >expect &&
.gitattributes
test_cmp expect actual
'
+test_expect_success 'check unspecified attr (2)' '
+ cat <<-\EOF >expect &&
+ HEAD:.gitattributes
+ HEAD:fileA
+ HEAD:fileAB
+ HEAD:fileAC
+ HEAD:fileB
+ HEAD:fileBC
+ HEAD:fileC
+ HEAD:fileNoLabel
+ HEAD:fileWrongLabel
+ HEAD:sub/fileA
+ HEAD:sub/fileAB
+ HEAD:sub/fileAC
+ HEAD:sub/fileB
+ HEAD:sub/fileBC
+ HEAD:sub/fileC
+ HEAD:sub/fileNoLabel
+ HEAD:sub/fileWrongLabel
+ EOF
+ git grep -l ^ HEAD ":(attr:!label)" >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'check multiple unspecified attr' '
cat <<-\EOF >expect &&
.gitattributes
test_atom head push:strip=-1 master
test_atom head objecttype commit
test_atom head objectsize 171
+test_atom head objectsize:disk 138
+test_atom head deltabase 0000000000000000000000000000000000000000
test_atom head objectname $(git rev-parse refs/heads/master)
test_atom head objectname:short $(git rev-parse --short refs/heads/master)
test_atom head objectname:short=1 $(git rev-parse --short=1 refs/heads/master)
test_atom tag push ''
test_atom tag objecttype tag
test_atom tag objectsize 154
+test_atom tag objectsize:disk 138
+test_atom tag '*objectsize:disk' 138
+test_atom tag deltabase 0000000000000000000000000000000000000000
+test_atom tag '*deltabase' 0000000000000000000000000000000000000000
test_atom tag objectname $(git rev-parse refs/tags/testtag)
test_atom tag objectname:short $(git rev-parse --short refs/tags/testtag)
test_atom head objectname:short=1 $(git rev-parse --short=1 refs/heads/master)
rmdir init
'
+test_expect_success 'submodule deinit should unset core.worktree' '
+ test_path_is_file .git/modules/example/config &&
+ test_must_fail git config -f .git/modules/example/config core.worktree
+'
+
test_expect_success 'submodule deinit from subdirectory' '
git submodule update --init &&
git config submodule.example.foo bar &&
GIT_WORK_TREE=../../../nested git -C sub1/.git/modules/nested config \
core.worktree "../../../nested" &&
# make sure this re-setup is correct
- git status --ignore-submodules=none
+ git status --ignore-submodules=none &&
+
+ # also make sure this old setup does not regress
+ git submodule update --init --recursive >out 2>err &&
+ test_must_be_empty out &&
+ test_must_be_empty err
'
test_expect_success 'absorb the git dir in a nested submodule' '
test_expect_success '<ref>: completes paths' '
test_completion "git show mytag:f" <<-\EOF
- file1 Z
- file2 Z
+ file1Z
+ file2Z
EOF
'
git add "name with spaces" &&
git commit -m spaces &&
test_completion "git show HEAD:nam" <<-\EOF
- name with spaces Z
+ name with spacesZ
EOF
'
git add "name with \${meta}" &&
git commit -m meta &&
test_completion "git show HEAD:nam" <<-\EOF
- name with ${meta} Z
- name with spaces Z
+ name with ${meta}Z
+ name with spacesZ
EOF
'
fi &&
eval "printf '%s' \"\${$var}\""
}
+
+# Choose a port number based on the test script's number and store it in
+# the given variable name, unless that variable already contains a number.
+test_set_port () {
+ local var=$1 port
+
+ if test $# -ne 1 || test -z "$var"
+ then
+ BUG "test_set_port requires a variable name"
+ fi
+
+ eval port=\$$var
+ case "$port" in
+ "")
+ # No port is set in the given env var, use the test
+ # number as port number instead.
+ # Remove not only the leading 't', but all leading zeros
+ # as well, so the arithmetic below won't (mis)interpret
+ # a test number like '0123' as an octal value.
+ port=${this_test#${this_test%%[1-9]*}}
+ if test "${port:-0}" -lt 1024
+ then
+ # root-only port, use a larger one instead.
+ port=$(($port + 10000))
+ fi
+ ;;
+ *[^0-9]*|0*)
+ error >&7 "invalid port number: $port"
+ ;;
+ *)
+ # The user has specified the port.
+ ;;
+ esac
+
+ # Make sure that parallel '--stress' test jobs get different
+ # ports.
+ port=$(($port + ${GIT_TEST_STRESS_JOB_NR:-0}))
+ eval $var=$port
+}
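A minimal usage sketch, mirroring how the daemon/httpd test libraries in this series call the helper:

    # as done by lib-httpd.sh and lib-git-daemon.sh above:
    test_set_port LIB_HTTPD_PORT
    test_set_port LIB_GIT_DAEMON_PORT
    # a numeric port already present in the environment is left untouched;
    # otherwise the port is derived from the test script's number, bumped
    # above 1024 if needed, and offset by GIT_TEST_STRESS_JOB_NR under --stress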
exit 1
fi
+# Parse options while taking care to leave $@ intact, so we will still
+# have all the original command line options when executing the test
+# script again for '--tee' and '--verbose-log' below.
+store_arg_to=
+prev_opt=
+for opt
+do
+ if test -n "$store_arg_to"
+ then
+ eval $store_arg_to=\$opt
+ store_arg_to=
+ prev_opt=
+ continue
+ fi
+
+ case "$opt" in
+ -d|--d|--de|--deb|--debu|--debug)
+ debug=t ;;
+ -i|--i|--im|--imm|--imme|--immed|--immedi|--immedia|--immediat|--immediate)
+ immediate=t ;;
+ -l|--l|--lo|--lon|--long|--long-|--long-t|--long-te|--long-tes|--long-test|--long-tests)
+ GIT_TEST_LONG=t; export GIT_TEST_LONG ;;
+ -r)
+ store_arg_to=run_list
+ ;;
+ --run=*)
+ run_list=${opt#--*=} ;;
+ -h|--h|--he|--hel|--help)
+ help=t ;;
+ -v|--v|--ve|--ver|--verb|--verbo|--verbos|--verbose)
+ verbose=t ;;
+ --verbose-only=*)
+ verbose_only=${opt#--*=}
+ ;;
+ -q|--q|--qu|--qui|--quie|--quiet)
+ # Ignore --quiet under a TAP::Harness. Saying how many tests
+ # passed without the ok/not ok details is always an error.
+ test -z "$HARNESS_ACTIVE" && quiet=t ;;
+ --with-dashes)
+ with_dashes=t ;;
+ --no-color)
+ color= ;;
+ --va|--val|--valg|--valgr|--valgri|--valgrin|--valgrind)
+ valgrind=memcheck
+ tee=t
+ ;;
+ --valgrind=*)
+ valgrind=${opt#--*=}
+ tee=t
+ ;;
+ --valgrind-only=*)
+ valgrind_only=${opt#--*=}
+ tee=t
+ ;;
+ --tee)
+ tee=t ;;
+ --root=*)
+ root=${opt#--*=} ;;
+ --chain-lint)
+ GIT_TEST_CHAIN_LINT=1 ;;
+ --no-chain-lint)
+ GIT_TEST_CHAIN_LINT=0 ;;
+ -x)
+ trace=t ;;
+ -V|--verbose-log)
+ verbose_log=t
+ tee=t
+ ;;
+ --stress)
+ stress=t ;;
+ --stress=*)
+ stress=${opt#--*=}
+ case "$stress" in
+ *[^0-9]*|0*|"")
+ echo "error: --stress=<N> requires the number of jobs to run" >&2
+ exit 1
+ ;;
+ *) # Good.
+ ;;
+ esac
+ ;;
+ *)
+ echo "error: unknown test option '$opt'" >&2; exit 1 ;;
+ esac
+
+ prev_opt=$opt
+done
+if test -n "$store_arg_to"
+then
+ echo "error: $prev_opt requires an argument" >&2
+ exit 1
+fi
+
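The `for opt` loop matters here because, unlike the old `while ... shift` loop removed further below, it reads the positional parameters without consuming them, so "$@" is still complete when the script re-executes itself for '--tee' and '--verbose-log'. A minimal illustration of the difference:

    set -- -v --root=/tmp/trash
    for opt in "$@"; do :; done
    echo $#    # still 2: "$@" was only read, not consumed
    while test $# -ne 0; do shift; done
    echo $#    # 0: the options are gone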
+if test -n "$valgrind_only"
+then
+ test -z "$valgrind" && valgrind=memcheck
+ test -z "$verbose" && verbose_only="$valgrind_only"
+elif test -n "$valgrind"
+then
+ test -z "$verbose_log" && verbose=t
+fi
+
+if test -n "$stress"
+then
+ verbose=t
+ trace=t
+ immediate=t
+fi
+
+TEST_STRESS_JOB_SFX="${GIT_TEST_STRESS_JOB_NR:+.stress-$GIT_TEST_STRESS_JOB_NR}"
+TEST_NAME="$(basename "$0" .sh)"
+TEST_RESULTS_DIR="$TEST_OUTPUT_DIRECTORY/test-results"
+TEST_RESULTS_BASE="$TEST_RESULTS_DIR/$TEST_NAME$TEST_STRESS_JOB_SFX"
+TRASH_DIRECTORY="trash directory.$TEST_NAME$TEST_STRESS_JOB_SFX"
+test -n "$root" && TRASH_DIRECTORY="$root/$TRASH_DIRECTORY"
+case "$TRASH_DIRECTORY" in
+/*) ;; # absolute path is good
+ *) TRASH_DIRECTORY="$TEST_OUTPUT_DIRECTORY/$TRASH_DIRECTORY" ;;
+esac
+
+# If --stress was passed, run this test repeatedly in several parallel loops.
+if test "$GIT_TEST_STRESS_STARTED" = "done"
+then
+ : # Don't stress test again.
+elif test -n "$stress"
+then
+ if test "$stress" != t
+ then
+ job_count=$stress
+ elif test -n "$GIT_TEST_STRESS_LOAD"
+ then
+ job_count="$GIT_TEST_STRESS_LOAD"
+ elif job_count=$(getconf _NPROCESSORS_ONLN 2>/dev/null) &&
+ test -n "$job_count"
+ then
+ job_count=$((2 * $job_count))
+ else
+ job_count=8
+ fi
+
+ mkdir -p "$TEST_RESULTS_DIR"
+ stressfail="$TEST_RESULTS_BASE.stress-failed"
+ rm -f "$stressfail"
+
+ stress_exit=0
+ trap '
+ kill $job_pids 2>/dev/null
+ wait
+ stress_exit=1
+ ' TERM INT HUP
+
+ job_pids=
+ job_nr=0
+ while test $job_nr -lt "$job_count"
+ do
+ (
+ GIT_TEST_STRESS_STARTED=done
+ GIT_TEST_STRESS_JOB_NR=$job_nr
+ export GIT_TEST_STRESS_STARTED GIT_TEST_STRESS_JOB_NR
+
+ trap '
+ kill $test_pid 2>/dev/null
+ wait
+ exit 1
+ ' TERM INT
+
+ cnt=0
+ while ! test -e "$stressfail"
+ do
+ $TEST_SHELL_PATH "$0" "$@" >"$TEST_RESULTS_BASE.stress-$job_nr.out" 2>&1 &
+ test_pid=$!
+
+ if wait $test_pid
+ then
+ printf "OK %2d.%d\n" $GIT_TEST_STRESS_JOB_NR $cnt
+ else
+ echo $GIT_TEST_STRESS_JOB_NR >>"$stressfail"
+ printf "FAIL %2d.%d\n" $GIT_TEST_STRESS_JOB_NR $cnt
+ fi
+ cnt=$(($cnt + 1))
+ done
+ ) &
+ job_pids="$job_pids $!"
+ job_nr=$(($job_nr + 1))
+ done
+
+ wait
+
+ if test -f "$stressfail"
+ then
+ echo "Log(s) of failed test run(s):"
+ for failed_job_nr in $(sort -n "$stressfail")
+ do
+ echo "Contents of '$TEST_RESULTS_BASE.stress-$failed_job_nr.out':"
+ cat "$TEST_RESULTS_BASE.stress-$failed_job_nr.out"
+ done
+ rm -rf "$TRASH_DIRECTORY.stress-failed"
+ # Move the last one.
+ mv "$TRASH_DIRECTORY.stress-$failed_job_nr" "$TRASH_DIRECTORY.stress-failed"
+ fi
+
+ exit $stress_exit
+fi
+
# if --tee was passed, write the output not only to the terminal, but
# additionally to the file test-results/$BASENAME.out, too.
-case "$GIT_TEST_TEE_STARTED, $* " in
-done,*)
- # do not redirect again
- ;;
-*' --tee '*|*' --va'*|*' -V '*|*' --verbose-log '*)
- mkdir -p "$TEST_OUTPUT_DIRECTORY/test-results"
- BASE="$TEST_OUTPUT_DIRECTORY/test-results/$(basename "$0" .sh)"
+if test "$GIT_TEST_TEE_STARTED" = "done"
+then
+ : # do not redirect again
+elif test -n "$tee"
+then
+ mkdir -p "$TEST_RESULTS_DIR"
# Make this filename available to the sub-process in case it is using
# --verbose-log.
- GIT_TEST_TEE_OUTPUT_FILE=$BASE.out
+ GIT_TEST_TEE_OUTPUT_FILE=$TEST_RESULTS_BASE.out
export GIT_TEST_TEE_OUTPUT_FILE
# Truncate before calling "tee -a" to get rid of the results
>"$GIT_TEST_TEE_OUTPUT_FILE"
(GIT_TEST_TEE_STARTED=done ${TEST_SHELL_PATH} "$0" "$@" 2>&1;
- echo $? >"$BASE.exit") | tee -a "$GIT_TEST_TEE_OUTPUT_FILE"
- test "$(cat "$BASE.exit")" = 0
+ echo $? >"$TEST_RESULTS_BASE.exit") | tee -a "$GIT_TEST_TEE_OUTPUT_FILE"
+ test "$(cat "$TEST_RESULTS_BASE.exit")" = 0
exit
- ;;
-esac
+fi
+
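The redirection above is an instance of a common shell pattern; a minimal standalone sketch (the file names are illustrative, not the variables used above):

    # Save the command's exit status in a side file, because the exit status
    # of a pipeline is that of its last command (tee), not of the script.
    (./t0000-basic.sh 2>&1; echo $? >basic.exit) | tee -a basic.out
    test "$(cat basic.exit)" = 0    # propagate the script's real exit status
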
+if test -n "$trace" && test -n "$test_untraceable"
+then
+ # '-x' tracing requested, but this test script can't be reliably
+ # traced, unless it is run with a Bash version supporting
+ # BASH_XTRACEFD (introduced in Bash v4.1).
+ #
+ # Perform this version check _after_ the test script was
+ # potentially re-executed with $TEST_SHELL_PATH for '--tee' or
+ # '--verbose-log', so the right shell is checked and the
+ # warning is issued only once.
+ if test -n "$BASH_VERSION" && eval '
+ test ${BASH_VERSINFO[0]} -gt 4 || {
+ test ${BASH_VERSINFO[0]} -eq 4 &&
+ test ${BASH_VERSINFO[1]} -ge 1
+ }
+ '
+ then
+ : Executed by a Bash version supporting BASH_XTRACEFD. Good.
+ else
+ echo >&2 "warning: ignoring -x; '$0' is untraceable without BASH_XTRACEFD"
+ trace=
+ fi
+fi
+if test -n "$trace" && test -z "$verbose_log"
+then
+ verbose=t
+fi
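For reference, this is what the BASH_XTRACEFD capability probed for above provides; a minimal sketch assuming Bash 4.1 or newer (the log file name is made up):

    exec 9>xtrace.log     # open a dedicated descriptor for the trace
    BASH_XTRACEFD=9       # Bash >= 4.1 sends 'set -x' output there, not to stderr
    set -x
    echo hello            # stdout/stderr stay clean for the test's own comparisons
    set +x
    exec 9>&-
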
# For repeatability, reset the environment to known value.
# TERM is sanitized below, after saving color control sequences.
# Add the libc MALLOC_CHECK_ and MALLOC_PERTURB_ checks
# only if we are not executing the tests with valgrind
-if expr " $GIT_TEST_OPTS " : ".* --valgrind " >/dev/null ||
+if test -n "$valgrind" ||
test -n "$TEST_NO_MALLOC_CHECK"
then
setup_malloc_check () {
) &&
color=t
-while test "$#" -ne 0
-do
- case "$1" in
- -d|--d|--de|--deb|--debu|--debug)
- debug=t; shift ;;
- -i|--i|--im|--imm|--imme|--immed|--immedi|--immedia|--immediat|--immediate)
- immediate=t; shift ;;
- -l|--l|--lo|--lon|--long|--long-|--long-t|--long-te|--long-tes|--long-test|--long-tests)
- GIT_TEST_LONG=t; export GIT_TEST_LONG; shift ;;
- -r)
- shift; test "$#" -ne 0 || {
- echo 'error: -r requires an argument' >&2;
- exit 1;
- }
- run_list=$1; shift ;;
- --run=*)
- run_list=${1#--*=}; shift ;;
- -h|--h|--he|--hel|--help)
- help=t; shift ;;
- -v|--v|--ve|--ver|--verb|--verbo|--verbos|--verbose)
- verbose=t; shift ;;
- --verbose-only=*)
- verbose_only=${1#--*=}
- shift ;;
- -q|--q|--qu|--qui|--quie|--quiet)
- # Ignore --quiet under a TAP::Harness. Saying how many tests
- # passed without the ok/not ok details is always an error.
- test -z "$HARNESS_ACTIVE" && quiet=t; shift ;;
- --with-dashes)
- with_dashes=t; shift ;;
- --no-color)
- color=; shift ;;
- --va|--val|--valg|--valgr|--valgri|--valgrin|--valgrind)
- valgrind=memcheck
- shift ;;
- --valgrind=*)
- valgrind=${1#--*=}
- shift ;;
- --valgrind-only=*)
- valgrind_only=${1#--*=}
- shift ;;
- --tee)
- shift ;; # was handled already
- --root=*)
- root=${1#--*=}
- shift ;;
- --chain-lint)
- GIT_TEST_CHAIN_LINT=1
- shift ;;
- --no-chain-lint)
- GIT_TEST_CHAIN_LINT=0
- shift ;;
- -x)
- # Some test scripts can't be reliably traced with '-x',
- # unless the test is run with a Bash version supporting
- # BASH_XTRACEFD (introduced in Bash v4.1). Check whether
- # this test is marked as such, and ignore '-x' if it
- # isn't executed with a suitable Bash version.
- if test -z "$test_untraceable" || {
- test -n "$BASH_VERSION" && {
- test ${BASH_VERSINFO[0]} -gt 4 || {
- test ${BASH_VERSINFO[0]} -eq 4 &&
- test ${BASH_VERSINFO[1]} -ge 1
- }
- }
- }
- then
- trace=t
- else
- echo >&2 "warning: ignoring -x; '$0' is untraceable without BASH_XTRACEFD"
- fi
- shift ;;
- -V|--verbose-log)
- verbose_log=t
- shift ;;
- *)
- echo "error: unknown test option '$1'" >&2; exit 1 ;;
- esac
-done
-
-if test -n "$valgrind_only"
-then
- test -z "$valgrind" && valgrind=memcheck
- test -z "$verbose" && verbose_only="$valgrind_only"
-elif test -n "$valgrind"
-then
- test -z "$verbose_log" && verbose=t
-fi
-
-if test -n "$trace" && test -z "$verbose_log"
-then
- verbose=t
-fi
-
if test -n "$color"
then
# Save the color control sequences now rather than run tput
GIT_EXIT_OK=
trap 'die' EXIT
-trap 'exit $?' INT
+trap 'exit $?' INT TERM HUP
# The user-facing functions are loaded from a separate file so that
# test_perf subshells can have them too
if test -z "$HARNESS_ACTIVE"
then
- test_results_dir="$TEST_OUTPUT_DIRECTORY/test-results"
- mkdir -p "$test_results_dir"
- base=${0##*/}
- test_results_path="$test_results_dir/${base%.sh}.counts"
+ mkdir -p "$TEST_RESULTS_DIR"
- cat >"$test_results_path" <<-EOF
+ cat >"$TEST_RESULTS_BASE.counts" <<-EOF
total $test_count
success $test_success
fixed $test_fixed
fi
# Test repository
-TRASH_DIRECTORY="trash directory.$(basename "$0" .sh)"
-test -n "$root" && TRASH_DIRECTORY="$root/$TRASH_DIRECTORY"
-case "$TRASH_DIRECTORY" in
-/*) ;; # absolute path is good
- *) TRASH_DIRECTORY="$TEST_OUTPUT_DIRECTORY/$TRASH_DIRECTORY" ;;
-esac
rm -fr "$TRASH_DIRECTORY" || {
GIT_EXIT_OK=t
echo >&5 "FATAL: Cannot prepare test area"
}
-static int has_attribute(const char *attrs, const char *attr) {
+static int has_attribute(const char *attrs, const char *attr)
+{
int len;
if (!attrs)
return 0;
return 0; /* No space for more. */
transfer_debug("%s is readable", t->src_name);
- bytes = read(t->src, t->buf + t->bufuse, BUFFERSIZE - t->bufuse);
- if (bytes < 0 && errno != EWOULDBLOCK && errno != EAGAIN &&
- errno != EINTR) {
+ bytes = xread(t->src, t->buf + t->bufuse, BUFFERSIZE - t->bufuse);
+ if (bytes < 0) {
error_errno(_("read(%s) failed"), t->src_name);
return -1;
} else if (bytes == 0) {
transfer_debug("%s is writable", t->dest_name);
bytes = xwrite(t->dest, t->buf, t->bufuse);
- if (bytes < 0 && errno != EWOULDBLOCK) {
+ if (bytes < 0) {
error_errno(_("write(%s) failed"), t->dest_name);
return -1;
} else if (bytes > 0) {
enum interesting match;
while (t->size) {
- match = tree_entry_interesting(&t->entry, base, 0, &opt->pathspec);
+ match = tree_entry_interesting(opt->repo->index, &t->entry,
+ base, 0, &opt->pathspec);
if (match) {
if (match == all_entries_not_interesting)
t->size = 0;
}
}
-static inline int prune_traversal(struct name_entry *e,
+static inline int prune_traversal(struct index_state *istate,
+ struct name_entry *e,
struct traverse_info *info,
struct strbuf *base,
int still_interesting)
return 2;
if (still_interesting < 0)
return still_interesting;
- return tree_entry_interesting(e, base, 0, info->pathspec);
+ return tree_entry_interesting(istate, e, base,
+ 0, info->pathspec);
}
-int traverse_trees(int n, struct tree_desc *t, struct traverse_info *info)
+int traverse_trees(struct index_state *istate,
+ int n, struct tree_desc *t,
+ struct traverse_info *info)
{
int error = 0;
struct name_entry *entry = xmalloc(n*sizeof(*entry));
}
if (!mask)
break;
- interesting = prune_traversal(e, info, &base, interesting);
+ interesting = prune_traversal(istate, e, info, &base, interesting);
if (interesting < 0)
break;
if (interesting) {
* Pre-condition: either baselen == base_offset (i.e. empty path)
* or base[baselen-1] == '/' (i.e. with trailing slash).
*/
-static enum interesting do_match(const struct name_entry *entry,
+static enum interesting do_match(struct index_state *istate,
+ const struct name_entry *entry,
struct strbuf *base, int base_offset,
const struct pathspec *ps,
int exclude)
PATHSPEC_LITERAL |
PATHSPEC_GLOB |
PATHSPEC_ICASE |
- PATHSPEC_EXCLUDE);
+ PATHSPEC_EXCLUDE |
+ PATHSPEC_ATTR);
if (!ps->nr) {
if (!ps->recursive ||
if (!ps->recursive ||
!(ps->magic & PATHSPEC_MAXDEPTH) ||
- ps->max_depth == -1)
- return all_entries_interesting;
-
- return within_depth(base_str + matchlen + 1,
- baselen - matchlen - 1,
- !!S_ISDIR(entry->mode),
- ps->max_depth) ?
- entry_interesting : entry_not_interesting;
+ ps->max_depth == -1) {
+ if (!item->attr_match_nr)
+ return all_entries_interesting;
+ else
+ goto interesting;
+ }
+
+ if (within_depth(base_str + matchlen + 1,
+ baselen - matchlen - 1,
+ !!S_ISDIR(entry->mode),
+ ps->max_depth))
+ goto interesting;
+ else
+ return entry_not_interesting;
}
/* Either there must be no base, or the base must match. */
if (match_entry(item, entry, pathlen,
match + baselen, matchlen - baselen,
&never_interesting))
- return entry_interesting;
+ goto interesting;
if (item->nowildcard_len < item->len) {
if (!git_fnmatch(item, match + baselen, entry->path,
item->nowildcard_len - baselen))
- return entry_interesting;
+ goto interesting;
/*
* Match all directories. We'll try to
!ps_strncmp(item, match + baselen,
entry->path,
item->nowildcard_len - baselen))
- return entry_interesting;
+ goto interesting;
}
continue;
if (!git_fnmatch(item, match, base->buf + base_offset,
item->nowildcard_len)) {
strbuf_setlen(base, base_offset + baselen);
- return entry_interesting;
+ goto interesting;
}
/*
!ps_strncmp(item, match, base->buf + base_offset,
item->nowildcard_len)) {
strbuf_setlen(base, base_offset + baselen);
- return entry_interesting;
+ goto interesting;
}
strbuf_setlen(base, base_offset + baselen);
*/
if (ps->recursive && S_ISDIR(entry->mode))
return entry_interesting;
+ continue;
+interesting:
+ if (item->attr_match_nr) {
+ int ret;
+
+ /*
+	 * Must not return all_entries_not_interesting
+	 * prematurely: with the current attr API we cannot
+	 * tell whether the remaining entries would all fail
+	 * the attribute match.
+ */
+ never_interesting = entry_not_interesting;
+
+ /*
+	 * Consider all directories interesting (because some
+	 * of the files inside may match the attributes even
+	 * though the parent directory does not)
+ *
+ * FIXME: attributes _can_ match directories and we
+ * can probably return all_entries_interesting or
+ * all_entries_not_interesting here if matched.
+ */
+ if (S_ISDIR(entry->mode))
+ return entry_interesting;
+
+ strbuf_add(base, entry->path, pathlen);
+ ret = match_pathspec_attrs(istate, base->buf + base_offset,
+ base->len - base_offset, item);
+ strbuf_setlen(base, base_offset + baselen);
+ if (!ret)
+ continue;
+ }
+ return entry_interesting;
}
return never_interesting; /* No matches */
}
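The attribute handling added above serves the ':(attr:...)' pathspec magic; illustrative invocations (the attribute names and paths are made up), with tree walks such as 'git log' honoring the magic once they go through this code:

    git ls-files ':(attr:text)'       # index entries whose 'text' attribute is set
    git log -- ':(attr:-delta)src'    # commits touching paths under src with 'delta' unset
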
* Pre-condition: either baselen == base_offset (i.e. empty path)
* or base[baselen-1] == '/' (i.e. with trailing slash).
*/
-enum interesting tree_entry_interesting(const struct name_entry *entry,
+enum interesting tree_entry_interesting(struct index_state *istate,
+ const struct name_entry *entry,
struct strbuf *base, int base_offset,
const struct pathspec *ps)
{
enum interesting positive, negative;
- positive = do_match(entry, base, base_offset, ps, 0);
+ positive = do_match(istate, entry, base, base_offset, ps, 0);
/*
* case | entry | positive | negative | result
positive <= entry_not_interesting) /* #1, #2, #11, #12 */
return positive;
- negative = do_match(entry, base, base_offset, ps, 1);
+ negative = do_match(istate, entry, base, base_offset, ps, 1);
/* #8, #18 */
if (positive == all_entries_interesting &&
#include "cache.h"
-struct strbuf;
-
struct name_entry {
struct object_id oid;
const char *path;
struct traverse_info;
typedef int (*traverse_callback_t)(int n, unsigned long mask, unsigned long dirmask, struct name_entry *entry, struct traverse_info *);
-int traverse_trees(int n, struct tree_desc *t, struct traverse_info *info);
+int traverse_trees(struct index_state *istate, int n, struct tree_desc *t, struct traverse_info *info);
enum follow_symlinks_result {
FOUND = 0, /* This includes out-of-tree links */
all_entries_interesting = 2 /* yes, and all subsequent entries will be */
};
-extern enum interesting tree_entry_interesting(const struct name_entry *,
- struct strbuf *, int,
- const struct pathspec *ps);
+enum interesting tree_entry_interesting(struct index_state *istate,
+ const struct name_entry *,
+ struct strbuf *, int,
+ const struct pathspec *ps);
#endif
ADD_CACHE_JUST_APPEND);
}
-static int read_tree_1(struct tree *tree, struct strbuf *base,
+static int read_tree_1(struct repository *r,
+ struct tree *tree, struct strbuf *base,
int stage, const struct pathspec *pathspec,
read_tree_fn_t fn, void *context)
{
while (tree_entry(&desc, &entry)) {
if (retval != all_entries_interesting) {
- retval = tree_entry_interesting(&entry, base, 0, pathspec);
+ retval = tree_entry_interesting(r->index, &entry,
+ base, 0, pathspec);
if (retval == all_entries_not_interesting)
break;
if (retval == entry_not_interesting)
else if (S_ISGITLINK(entry.mode)) {
struct commit *commit;
- commit = lookup_commit(the_repository, &entry.oid);
+ commit = lookup_commit(r, &entry.oid);
if (!commit)
die("Commit %s in submodule path %s%s not found",
oid_to_hex(&entry.oid),
len = tree_entry_len(&entry);
strbuf_add(base, entry.path, len);
strbuf_addch(base, '/');
- retval = read_tree_1(lookup_tree(the_repository, &oid),
+ retval = read_tree_1(r, lookup_tree(r, &oid),
base, stage, pathspec,
fn, context);
strbuf_setlen(base, oldlen);
return 0;
}
-int read_tree_recursive(struct tree *tree,
+int read_tree_recursive(struct repository *r,
+ struct tree *tree,
const char *base, int baselen,
int stage, const struct pathspec *pathspec,
read_tree_fn_t fn, void *context)
int ret;
strbuf_add(&sb, base, baselen);
- ret = read_tree_1(tree, &sb, stage, pathspec, fn, context);
+ ret = read_tree_1(r, tree, &sb, stage, pathspec, fn, context);
strbuf_release(&sb);
return ret;
}
ce2->name, ce2->ce_namelen, ce_stage(ce2));
}
-int read_tree(struct tree *tree, int stage, struct pathspec *match,
- struct index_state *istate)
+int read_tree(struct repository *r, struct tree *tree, int stage,
+ struct pathspec *match, struct index_state *istate)
{
read_tree_fn_t fn = NULL;
int i, err;
if (!fn)
fn = read_one_entry_quick;
- err = read_tree_recursive(tree, "", 0, stage, match, fn, istate);
+ err = read_tree_recursive(r, tree, "", 0, stage, match, fn, istate);
if (fn == read_one_entry || err)
return err;
#include "object.h"
-extern const char *tree_type;
+struct repository;
struct strbuf;
struct tree {
unsigned long size;
};
+extern const char *tree_type;
+
struct tree *lookup_tree(struct repository *r, const struct object_id *oid);
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size);
#define READ_TREE_RECURSIVE 1
typedef int (*read_tree_fn_t)(const struct object_id *, struct strbuf *, const char *, unsigned int, int, void *);
-extern int read_tree_recursive(struct tree *tree,
- const char *base, int baselen,
- int stage, const struct pathspec *pathspec,
- read_tree_fn_t fn, void *context);
+int read_tree_recursive(struct repository *r,
+ struct tree *tree,
+ const char *base, int baselen,
+ int stage, const struct pathspec *pathspec,
+ read_tree_fn_t fn, void *context);
-extern int read_tree(struct tree *tree, int stage, struct pathspec *pathspec,
- struct index_state *istate);
+int read_tree(struct repository *r, struct tree *tree,
+ int stage, struct pathspec *pathspec,
+ struct index_state *istate);
#endif /* TREE_H */
repo_read_gitmodules(the_repository);
} else if (state && (ce->ce_flags & CE_UPDATE)) {
submodule_free(the_repository);
- checkout_entry(ce, state, NULL);
+ checkout_entry(ce, state, NULL, NULL);
repo_read_gitmodules(the_repository);
}
}
display_progress(progress, ++cnt);
ce->ce_flags &= ~CE_UPDATE;
if (o->update && !o->dry_run) {
- errs |= checkout_entry(ce, &state, NULL);
+ errs |= checkout_entry(ce, &state, NULL, NULL);
}
}
}
stop_progress(&progress);
- errs |= finish_delayed_checkout(&state);
+ errs |= finish_delayed_checkout(&state, NULL);
if (o->update)
git_attr_set_direction(GIT_ATTR_CHECKIN);
struct name_entry *names,
struct traverse_info *info)
{
+ struct unpack_trees_options *o = info->data;
int i, ret, bottom;
int nr_buf = 0;
struct tree_desc t[MAX_UNPACK_TREES];
nr_entries = all_trees_same_as_cache_tree(n, dirmask, names, info);
if (nr_entries > 0) {
- struct unpack_trees_options *o = info->data;
int pos = index_pos_by_traverse_info(names, info);
if (!o->merge || df_conflicts)
}
bottom = switch_cache_bottom(&newinfo);
- ret = traverse_trees(n, t, &newinfo);
+ ret = traverse_trees(o->src_index, n, t, &newinfo);
restore_cache_bottom(&newinfo, bottom);
for (i = 0; i < nr_buf; i++)
}
trace_performance_enter();
- ret = traverse_trees(len, t, &info);
+ ret = traverse_trees(o->src_index, len, t, &info);
trace_performance_leave("traverse_trees");
if (ret < 0)
goto return_failed;
static timestamp_t oldest_have;
-static int deepen_relative;
static int multi_ack;
static int no_done;
static int use_thin_pack, use_ofs_delta, use_include_tag;
}
}
+static int check_ref(const char *refname_full, const struct object_id *oid,
+ int flag, void *cb_data);
+
static void deepen(int depth, int deepen_relative,
struct object_array *shallows, struct object_array *want_obj)
{
struct object_array reachable_shallows = OBJECT_ARRAY_INIT;
struct commit_list *result;
+ /*
+ * Checking for reachable shallows requires that our refs be
+ * marked with OUR_REF.
+ */
+ head_ref_namespaced(check_ref, NULL);
+ for_each_namespaced_ref(check_ref, NULL);
+
get_reachable_list(shallows, &reachable_shallows);
result = get_shallow_commits(&reachable_shallows,
depth + 1,
static int send_shallow_list(int depth, int deepen_rev_list,
timestamp_t deepen_since,
struct string_list *deepen_not,
+ int deepen_relative,
struct object_array *shallows,
struct object_array *want_obj)
{
int has_non_tip = 0;
timestamp_t deepen_since = 0;
int deepen_rev_list = 0;
+ int deepen_relative = 0;
shallow_nr = 0;
for (;;) {
return;
if (send_shallow_list(depth, deepen_rev_list, deepen_since,
- &deepen_not, &shallows, want_obj))
+ &deepen_not, deepen_relative, &shallows,
+ want_obj))
packet_flush(1);
object_array_clear(&shallows);
}
if (!send_shallow_list(data->depth, data->deepen_rev_list,
data->deepen_since, &data->deepen_not,
+ data->deepen_relative,
&data->shallows, want_obj) &&
is_repository_shallow(the_repository))
deepen(INFINITE_DEPTH, data->deepen_relative, &data->shallows,
strbuf_complete(buf, '/');
}
-void str_end_url_with_slash(const char *url, char **dest) {
+void str_end_url_with_slash(const char *url, char **dest)
+{
struct strbuf buf = STRBUF_INIT;
end_url_with_slash(&buf, url);
free(*dest);
return 0;
}
-struct userdiff_driver *userdiff_find_by_name(const char *name) {
+struct userdiff_driver *userdiff_find_by_name(const char *name)
+{
int len = strlen(name);
return userdiff_find_by_namelen(name, len);
}