Docfix.
* po/describe-not-necessarily-7:
describe doc: remove '7-char' abbreviation reference
# A list of macros that should be interpreted as foreach loops instead of as
# function calls.
-ForEachMacros: ['for_each_string_list_item']
+ForEachMacros: ['for_each_string_list_item', 'for_each_wanted_builtin', 'for_each_builtin', 'for_each_ut']
# The maximum number of consecutive empty lines to keep.
MaxEmptyLinesToKeep: 1
a mailing list (git@vger.kernel.org) for code submissions, code
reviews, and bug reports.
-Nevertheless, you can use [submitGit](http://submitgit.herokuapp.com/) to
+Nevertheless, you can use [GitGitGadget](https://gitgitgadget.github.io/) to
conveniently send your Pull Request commits to our mailing list.
Please read ["A note from the maintainer"](https://git.kernel.org/pub/scm/git/git.git/plain/MaintNotes?h=todo)
Thanks for taking the time to contribute to Git! Please be advised that the
Git community does not use github.com for their contributions. Instead, we use
a mailing list (git@vger.kernel.org) for code submissions, code reviews, and
-bug reports. Nevertheless, you can use submitGit to conveniently send your Pull
-Requests commits to our mailing list.
+bug reports. Nevertheless, you can use GitGitGadget (https://gitgitgadget.github.io/)
+to conveniently send your Pull Request commits to our mailing list.
Please read the "guidelines for contributing" linked above!
/git-init-db
/git-interpret-trailers
/git-instaweb
-/git-legacy-rebase
+/git-legacy-stash
/git-log
/git-ls-files
/git-ls-remote
compiler:
addons:
before_install:
- - env: jobname=Windows
- os: linux
- compiler:
- addons:
- before_install:
- script:
- - >
- test "$TRAVIS_REPO_SLUG" != "git/git" ||
- ci/run-windows-build.sh $TRAVIS_BRANCH $(git rev-parse HEAD)
- after_failure:
- env: jobname=Linux32
os: linux
compiler:
manpage-base-url.xsl
SubmittingPatches.txt
tmp-doc-diff/
+GIT-ASCIIDOCFLAGS
or commands:
Literal examples (e.g. use of command-line options, command names,
- branch names, configuration and environment variables) must be
- typeset in monospace (i.e. wrapped with backticks):
+ branch names, URLs, pathnames (files and directories), configuration and
+ environment variables) must be typeset in monospace (i.e. wrapped with
+ backticks):
`--pretty=oneline`
`git rev-list`
`remote.pushDefault`
+ `http://git.example.com`
+ `.git/config`
`GIT_DIR`
`HEAD`
$(wildcard git-*.txt))
MAN1_TXT += git.txt
MAN1_TXT += gitk.txt
-MAN1_TXT += gitremote-helpers.txt
MAN1_TXT += gitweb.txt
MAN5_TXT += gitattributes.txt
MAN7_TXT += giteveryday.txt
MAN7_TXT += gitglossary.txt
MAN7_TXT += gitnamespaces.txt
+MAN7_TXT += gitremote-helpers.txt
MAN7_TXT += gitrevisions.txt
MAN7_TXT += gitsubmodules.txt
MAN7_TXT += gittutorial-2.txt
show_tool_names can_merge "* " || :' >mergetools-merge.txt && \
date >$@
+TRACK_ASCIIDOCFLAGS = $(subst ','\'',$(ASCIIDOC_COMMON):$(ASCIIDOC_HTML):$(ASCIIDOC_DOCBOOK))
+
+GIT-ASCIIDOCFLAGS: FORCE
+ @FLAGS='$(TRACK_ASCIIDOCFLAGS)'; \
+ if test x"$$FLAGS" != x"`cat GIT-ASCIIDOCFLAGS 2>/dev/null`" ; then \
+ echo >&2 " * new asciidoc flags"; \
+ echo "$$FLAGS" >GIT-ASCIIDOCFLAGS; \
+ fi
+
clean:
$(RM) *.xml *.xml+ *.html *.html+ *.1 *.5 *.7
$(RM) *.texi *.texi+ *.texi++ git.info gitman.info
$(RM) SubmittingPatches.txt
$(RM) $(cmds_txt) $(mergetools_txt) *.made
$(RM) manpage-base-url.xsl
+ $(RM) GIT-ASCIIDOCFLAGS
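A quick way to exercise the new flag tracking, assuming a configured git.git
checkout with both asciidoc and asciidoctor available:

------------
# The first build records the flags in GIT-ASCIIDOCFLAGS; switching the
# toolchain changes them, so the second run reports " * new asciidoc
# flags" and rebuilds the affected targets.
$ make -C Documentation man
$ make -C Documentation USE_ASCIIDOCTOR=YesPlease man
------------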
-$(MAN_HTML): %.html : %.txt asciidoc.conf
+$(MAN_HTML): %.html : %.txt asciidoc.conf asciidoctor-extensions.rb GIT-ASCIIDOCFLAGS
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
$(TXT_TO_HTML) -d manpage -o $@+ $< && \
mv $@+ $@
-$(OBSOLETE_HTML): %.html : %.txto asciidoc.conf
+$(OBSOLETE_HTML): %.html : %.txto asciidoc.conf asciidoctor-extensions.rb GIT-ASCIIDOCFLAGS
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
$(TXT_TO_HTML) -o $@+ $< && \
mv $@+ $@
manpage-base-url.xsl: manpage-base-url.xsl.in
$(QUIET_GEN)sed "s|@@MAN_BASE_URL@@|$(MAN_BASE_URL)|" $< > $@
-%.1 %.5 %.7 : %.xml manpage-base-url.xsl
+%.1 %.5 %.7 : %.xml manpage-base-url.xsl $(wildcard manpage*.xsl)
$(QUIET_XMLTO)$(RM) $@ && \
$(XMLTO) -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
-%.xml : %.txt asciidoc.conf
+%.xml : %.txt asciidoc.conf asciidoctor-extensions.rb GIT-ASCIIDOCFLAGS
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
$(TXT_TO_XML) -d manpage -o $@+ $< && \
mv $@+ $@
-user-manual.xml: user-manual.txt user-manual.conf
+user-manual.xml: user-manual.txt user-manual.conf asciidoctor-extensions.rb GIT-ASCIIDOCFLAGS
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
$(TXT_TO_XML) -d book -o $@+ $< && \
mv $@+ $@
$(QUIET_GEN)cd technical && '$(SHELL_PATH_SQ)' ./api-index.sh
technical/%.html: ASCIIDOC_EXTRA += -a git-relative-html-prefix=../
-$(patsubst %,%.html,$(API_DOCS) technical/api-index $(TECH_DOCS)): %.html : %.txt asciidoc.conf
+$(patsubst %,%.html,$(API_DOCS) technical/api-index $(TECH_DOCS)): %.html : %.txt \
+ asciidoc.conf GIT-ASCIIDOCFLAGS
$(QUIET_ASCIIDOC)$(TXT_TO_HTML) $*.txt
SubmittingPatches.txt: SubmittingPatches
WEBDOC_DEST = /pub/software/scm/git/docs
howto/%.html: ASCIIDOC_EXTRA += -a git-relative-html-prefix=../
-$(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt
+$(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt GIT-ASCIIDOCFLAGS
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
sed -e '1,/^$$/d' $< | \
$(TXT_TO_HTML) - >$@+ && \
--- /dev/null
+Git 2.22 Release Notes
+======================
+
+Updates since v2.21
+-------------------
+
+UI, Workflows & Features
+
+ * "git checkout --no-overlay" can be used to trigger a new mode of
+ checking out paths out of the tree-ish, that allows paths that
+ match the pathspec that are in the current index and working tree
+ and are not in the tree-ish.
+
+ * The %(trailers) formatter in "git log --format=..." now allows you
+   to pick trailers selectively by keyword, show only their values,
+   etc. (see the brief sketch after this list).
+
+ * Four new configuration variables {author,committer}.{name,email}
+ have been introduced to override user.{name,email} in more specific
+ cases.
+
+ * Command-line completion (in contrib/) learned to tab-complete the
+ "git submodule absorbgitdirs" subcommand.
+
+ * "git branch" learned a new subcommand "--show-current".
+
+ * Output from "diff --cc" did not show the original paths when the
+ merge involved renames. A new option adds the paths in the
+ original trees to the output.
+
+ * The command line completion (in contrib/) has been taught to
+ complete more subcommand parameters.
+
+ * The final report from "git bisect" used to show the suspected
+ culprit using a raw "diff-tree", with which there is no output for
+   a merge commit. This has been updated to use a more modern and
+   human-readable output that is still concise enough.
+
+ * "git rebase --rebase-merges" replaces its old "--preserve-merges"
+ option; the latter is now marked as deprecated.
+
+ * Error message given while cloning with --recurse-submodules has
+ been updated.
+
+ * The completion helper code now pays attention to repository-local
+ configuration (when available), which allows --list-cmds to honour
+ a repository specific setting of completion.commands, for example.
+
+ * "git mergetool" learned to offer Sublime Merge (smerge) as one of
+ its backends.
+
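For the `%(trailers)` item above, a brief sketch of the new selective output
(option spellings as documented for pretty formats in git-log(1)):

------------
# Show only the values of Signed-off-by: trailers of the last commit.
$ git log -1 --pretty='format:%(trailers:key=Signed-off-by,valueonly)'
------------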
+
+Performance, Internal Implementation, Development Support etc.
+
+ * The diff machinery, one of the oldest parts of the system, which
+   long predates the parse-options API, uses a fairly long and complex
+   handcrafted option parser. This is being rewritten to use the
+   parse-options API.
+
+ * The implementation of pack-redundant has been updated for
+ performance in a repository with many packfiles.
+
+ * A more structured way to obtain execution trace has been added.
+
+ * "git prune" has been taught to take advantage of reachability
+ bitmap when able.
+
+ * The command line parser of "git commit-tree" has been rewritten to
+ use the parse-options API.
+
+ * Suggest GitGitGadget instead of submitGit as a way to submit
+ patches based on GitHub PR to us.
+
+ * The test framework has been updated to help developers by making it
+ easier to run most of the tests under different versions of
+ over-the-wire protocols.
+
+ * Dev support update to make it easier to compare two formatted
+ results from our documentation.
+
+ * The scripted "git rebase" implementation has been retired.
+
+ * "git multi-pack-index verify" did not scale well with the number of
+ packfiles, which is being improved.
+
+ * "git stash" has been rewritten in C.
+
+ * The "check-docs" Makefile target to support developers has been
+ updated.
+
+ * The tests have been updated not to rely on the abbreviated option
+   names the parse-options API offers, to protect us from an
+   abbreviated form of an option that used to be unique within the
+   command becoming non-unique when a new option that shares the same
+   prefix is added.
+
+
+Fixes since v2.21
+-----------------
+
+ * "git prune-packed" did not notice and complain against excess
+ arguments given from the command line, which now it does.
+ (merge 9b0bd87ed2 rj/prune-packed-excess-args later to maint).
+
+ * Split-index fix.
+ (merge 6e37c8ed3c nd/split-index-null-base-fix later to maint).
+
+ * "git diff --no-index" may still want to access Git goodies like
+ --ext-diff and --textconv, but so far these have been ignored,
+ which has been corrected.
+ (merge 287ab28bfa jk/diff-no-index-initialize later to maint).
+
+ * Unify RPC code for smart http in protocol v0/v1 and v2, which fixes
+ a bug in the latter (lack of authentication retry) and generally
+ improves the code base.
+ (merge a97d00799a jt/http-auth-proto-v2-fix later to maint).
+
+ * The include file compat/bswap.h has been updated so that it is safe
+ to (accidentally) include it more than once.
+ (merge 33aa579a55 jk/guard-bswap-header later to maint).
+
+ * The set of header files used by "make hdr-check" unconditionally
+ included sha256/gcrypt.h, even when it is not used, causing the
+ make target to fail. We now skip it when GCRYPT_SHA256 is not in
+ use.
+ (merge f23aa18e7f rj/hdr-check-gcrypt-fix later to maint).
+
+ * The Makefile uses 'find' utility to enumerate all the *.h header
+ files, which is expensive on platforms with slow filesystems; it
+ now optionally uses "ls-files" if working within a repository,
+ which is a trick similar to how all sources are enumerated to run
+ ETAGS on.
+ (merge 92b88eba9f js/find-lib-h-with-ls-files-when-possible later to maint).
+
+ * "git rebase" that was reimplemented in C did not set ORIG_HEAD
+ correctly, which has been corrected.
+ (merge cbd29ead92 js/rebase-orig-head-fix later to maint).
+
+ * Dev support.
+ (merge f545737144 js/stress-test-ui-tweak later to maint).
+
+ * CFLAGS now can be tweaked when invoking Make while using
+ DEVELOPER=YesPlease; this did not work well before.
+ (merge 6d5d4b4e93 ab/makefile-help-devs-more later to maint).
+
+ * "git fsck --connectivity-only" omits computation necessary to sift
+ the objects that are not reachable from any of the refs into
+ unreachable and dangling. This is now enabled when dangling
+ objects are requested (which is done by default, but can be
+ overridden with the "--no-dangling" option).
+ (merge 8d8c2a5aef jk/fsck-doc later to maint).
+
+ * On platforms where "git fetch" is killed with SIGPIPE (e.g. OSX),
+ the upload-pack that runs on the other end that hangs up after
+ detecting an error could cause "git fetch" to die with a signal,
+ which led to a flakey test. "git fetch" now ignores SIGPIPE during
+ the network portion of its operation (this is not a problem as we
+ check the return status from our write(2)s).
+ (merge 143588949c jk/no-sigpipe-during-network-transport later to maint).
+
+ * A recent update broke "is this object available to us?" check for
+ well-known objects like an empty tree (which should yield "yes",
+ even when there is no on-disk object for an empty tree), which has
+ been corrected.
+ (merge f06ab027ef jk/virtual-objects-do-exist later to maint).
+
+ * The setup code has been cleaned up to avoid leaks around the
+ repository_format structure.
+ (merge e8805af1c3 ma/clear-repository-format later to maint).
+
+ * "git config --type=color ..." is meant to replace "git config --get-color"
+ but there is a slight difference that wasn't documented, which is
+ now fixed.
+ (merge cd8e7593b9 jk/config-type-color-ends-with-lf later to maint).
+
+ * When the "clean" filter can reduce the size of a huge file in the
+ working tree down to a small "token" (a la Git LFS), there is no
+ point in allocating a huge scratch area upfront, but the buffer is
+ sized based on the original file size. The convert mechanism now
+ allocates very minimum and reallocates as it receives the output
+ from the clean filter process.
+ (merge 02156ab031 jh/resize-convert-scratch-buffer later to maint).
+
+ * "git rebase" uses the refs/rewritten/ hierarchy to store its
+ intermediate states, which inherently makes the hierarchy per
+ worktree, but it didn't quite work well.
+ (merge b9317d55a3 nd/rewritten-ref-is-per-worktree later to maint).
+
+ * "git log -L<from>,<to>:<path>" with "-s" did not suppress the patch
+ output as it should. This has been corrected.
+ (merge 05314efaea jk/line-log-with-patch later to maint).
+
+ * "git worktree add" used to do a "find an available name with stat
+ and then mkdir", which is race-prone. This has been fixed by using
+ mkdir and reacting to EEXIST in a loop.
+ (merge 7af01f2367 ms/worktree-add-atomic-mkdir later to maint).
+
+ * Build update for SHA-1 with collision detection.
+ (merge 07a20f569b jk/sha1dc later to maint).
+
+ * Build procedure has been fixed around use of asciidoctor instead of
+ asciidoc.
+ (merge 185f9a0ea0 ma/asciidoctor-fixes later to maint).
+
+ * remote-http transport did not anonymize URLs reported in its error
+   messages in some places.
+ (merge c1284b21f2 js/anonymize-remote-curl-diag later to maint).
+
+ * Error messages given from the http transport have been updated so
+ that they can be localized.
+ (merge ed8b4132c8 js/remote-curl-i18n later to maint).
+
+ * "git init" forgot to read platform-specific repository
+ configuration, which made Windows port to ignore settings of
+ core.hidedotfiles, for example.
+
+ * A corner-case object name ambiguity while the sequencer machinery
+ is working (e.g. "rebase -i -x") has been fixed.
+
+ * "git format-patch" did not diagnose an error while opening the
+ output file for the cover-letter, which has been corrected.
+ (merge 2fe95f494c jc/format-patch-error-check later to maint).
+
+ * "git checkout -f <branch>" while the index has an unmerged path
+ incorrectly left some paths in an unmerged state, which has been
+ corrected.
+
+ * A corner case bug in the refs API has been corrected.
+ (merge d3322eb28b jk/refs-double-abort later to maint).
+
+ * Unicode update.
+ (merge 584b62c37b bb/unicode-12 later to maint).
+
+ * dumb-http walker has been updated to share more error recovery
+ strategy with the normal codepath.
+
+ * A buglet in configuration parser has been fixed.
+ (merge 19e7fdaa58 nd/include-if-wildmatch later to maint).
+
+ * The documentation for "git read-tree --reset -u" has been updated.
+ (merge b5a0bd694c nd/read-tree-reset-doc later to maint).
+
+ * Code cleanup, docfix, build fix, etc.
+ (merge 11f470aee7 jc/test-yes-doc later to maint).
+ (merge 90503a240b js/doc-symref-in-proto-v1 later to maint).
+ (merge 5c326d1252 jk/unused-params later to maint).
+ (merge 68cabbfda3 dl/doc-submodule-wo-subcommand later to maint).
+ (merge 9903623761 ab/receive-pack-use-after-free-fix later to maint).
+ (merge 1ede45e44b en/merge-options-doc later to maint).
+ (merge 3e14dd2c8e rd/doc-hook-used-in-sample later to maint).
+ (merge c271dc28fd nd/no-more-check-racy later to maint).
+ (merge e6e15194a8 yb/utf-16le-bom-spellfix later to maint).
+ (merge bb101aaf0c rd/attr.c-comment-typofix later to maint).
+ (merge 716a5af812 rd/gc-prune-doc-fix later to maint).
+ (merge 50b206371d js/untravis-windows later to maint).
+ (merge dbf47215e3 js/rebase-recreate-merge later to maint).
+ (merge 56cb2d30f8 dl/reset-doc-no-wrt-abbrev later to maint).
+ (merge 64eca306a2 ja/dir-rename-doc-markup-fix later to maint).
+ (merge af91b0230c dl/ignore-docs later to maint).
+ (merge 59a06e947b ra/t3600-test-path-funcs later to maint).
+ (merge e041d0781b ar/t4150-remove-cruft later to maint).
+ (merge 8d75a1d183 ma/asciidoctor-fixes-more later to maint).
+ (merge 74cc547b0f mh/pack-protocol-doc-fix later to maint).
+ (merge ed31851fa6 ab/doc-misc-typofixes later to maint).
+ (merge a7256debd4 nd/checkout-m-doc-update later to maint).
+ (merge 3a9e1ad78d jt/t5551-protocol-v2-does-not-have-half-auth later to maint).
+ (merge 0b918b75af sg/t5318-cleanup later to maint).
+ (merge 68ed71b53c cb/doco-mono later to maint).
+ (merge a34dca2451 nd/interpret-trailers-docfix later to maint).
Some parts of the system have dedicated maintainers with their own
repositories.
-- 'git-gui/' comes from git-gui project, maintained by Pat Thoyts:
+- `git-gui/` comes from git-gui project, maintained by Pat Thoyts:
git://repo.or.cz/git-gui.git
-- 'gitk-git/' comes from Paul Mackerras's gitk project:
+- `gitk-git/` comes from Paul Mackerras's gitk project:
git://ozlabs.org/~paulus/gitk
-- 'po/' comes from the localization coordinator, Jiang Xin:
+- `po/` comes from the localization coordinator, Jiang Xin:
https://github.com/git-l10n/git-po/
def process(parent, target, attrs)
if parent.document.basebackend? 'html'
prefix = parent.document.attr('git-relative-html-prefix')
- %(<a href="#{prefix}#{target}.html">#{target}(#{attrs[1]})</a>\n)
+ %(<a href="#{prefix}#{target}.html">#{target}(#{attrs[1]})</a>)
elsif parent.document.basebackend? 'docbook'
"<citerefentry>\n" \
"<refentrytitle>#{target}</refentrytitle>" \
"<manvolnum>#{attrs[1]}</manvolnum>\n" \
- "</citerefentry>\n"
+ "</citerefentry>"
end
end
end
so that the local merge commits are included in the rebase (see
linkgit:git-rebase[1] for details).
+
-When preserve, also pass `--preserve-merges` along to 'git rebase'
-so that locally committed merge commits will not be flattened
-by running 'git pull'.
+When `preserve` (deprecated in favor of `merges`), also pass
+`--preserve-merges` along to 'git rebase' so that locally committed merge
+commits will not be flattened by running 'git pull'.
+
When the value is `interactive`, the rebase is run in interactive mode.
+
core.excludesFile::
Specifies the pathname to the file that contains patterns to
describe paths that are not meant to be tracked, in addition
- to '.gitignore' (per-directory) and '.git/info/exclude'.
+ to `.gitignore` (per-directory) and `.git/info/exclude`.
Defaults to `$XDG_CONFIG_HOME/git/ignore`.
If `$XDG_CONFIG_HOME` is either not set or empty, `$HOME/.config/git/ignore`
is used instead. See linkgit:gitignore[5].
command-line argument and write the password on its STDOUT.
core.attributesFile::
- In addition to '.gitattributes' (per-directory) and
- '.git/info/attributes', Git looks into this file for attributes
+ In addition to `.gitattributes` (per-directory) and
+ `.git/info/attributes`, Git looks into this file for attributes
(see linkgit:gitattributes[5]). Path expansions are made the same
way as for `core.excludesFile`. Its default value is
`$XDG_CONFIG_HOME/git/attributes`. If `$XDG_CONFIG_HOME` is either not
core.hooksPath::
By default Git will look for your hooks in the
- '$GIT_DIR/hooks' directory. Set this to different path,
- e.g. '/etc/git/hooks', and Git will try to find your hooks in
- that directory, e.g. '/etc/git/hooks/pre-receive' instead of
- in '$GIT_DIR/hooks/pre-receive'.
+	`$GIT_DIR/hooks` directory. Set this to a different path,
+ e.g. `/etc/git/hooks`, and Git will try to find your hooks in
+ that directory, e.g. `/etc/git/hooks/pre-receive` instead of
+ in `$GIT_DIR/hooks/pre-receive`.
+
The path can be either absolute or relative. A relative path is
taken as relative to the directory where the hooks are run (see
diff.dirstat::
A comma separated list of `--dirstat` parameters specifying the
- default behavior of the `--dirstat` option to linkgit:git-diff[1]`
+ default behavior of the `--dirstat` option to linkgit:git-diff[1]
and friends. The defaults can be overridden on the command line
(using `--dirstat=<param1,param2,...>`). The fallback defaults
(when not changed by `diff.dirstat`) are `changes,noncumulative,3`.
environment variable. The command is called with parameters
as described under "git Diffs" in linkgit:git[1]. Note: if
you want to use an external diff program only on a subset of
- your files, you might want to use linkgit:gitattributes[5] instead.
+ your files, you might want to use linkgit:gitattributes[5] instead.
diff.ignoreSubmodules::
Sets the default value of --ignore-submodules. Note that this
vice versa by configuring the `fsck.<msg-id>` setting where the
`<msg-id>` is the fsck message ID and the value is one of `error`,
`warn` or `ignore`. For convenience, fsck prefixes the error/warning
-with the message ID, e.g. "missingEmail: invalid author/committer line
-- missing email" means that setting `fsck.missingEmail = ignore` will
-hide that issue.
+with the message ID, e.g. "missingEmail: invalid author/committer
+line - missing email" means that setting `fsck.missingEmail = ignore`
+will hide that issue.
+
In general, it is better to enumerate existing objects with problems
with `fsck.skipList`, instead of listing the kind of breakages these
gc.aggressiveDepth::
The depth parameter used in the delta compression
algorithm used by 'git gc --aggressive'. This defaults
- to 50.
+ to 50, which is the default for the `--depth` option when
+ `--aggressive` isn't in use.
++
+See the documentation for the `--depth` option in
+linkgit:git-repack[1] for more details.
gc.aggressiveWindow::
The window size parameter used in the delta compression
algorithm used by 'git gc --aggressive'. This defaults
- to 250.
+ to 250, which is a much more aggressive window size than
+ the default `--window` of 10.
++
+See the documentation for the `--window` option in
+linkgit:git-repack[1] for more details.
gc.auto::
When there are approximately more than this many loose
objects in the repository, `git gc --auto` will pack them.
Some Porcelain commands use this command to perform a
light-weight garbage collection from time to time. The
- default value is 6700. Setting this to 0 disables it.
+ default value is 6700.
++
+Setting this to 0 disables not only automatic packing based on the
+number of loose objects, but any other heuristic `git gc --auto` will
+otherwise use to determine if there's work to do, such as
+`gc.autoPackLimit`.
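For example, turning off `git gc --auto` (and the heuristics mentioned
above) entirely:

------------
$ git config --global gc.auto 0
------------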
gc.autoPackLimit::
When there are more than this many packs that are not
marked with `*.keep` file in the repository, `git gc
--auto` consolidates them into one larger pack. The
- default value is 50. Setting this to 0 disables it.
+ default value is 50. Setting this to 0 disables it.
+ Setting `gc.auto` to 0 will also disable this.
++
+See the `gc.bigPackThreshold` configuration variable below. When in
+use, it'll affect how the auto pack limit works.
gc.autoDetach::
Make `git gc --auto` return immediately and run in background
this configuration variable is ignored, all packs except the base pack
will be repacked. After this the number of packs should go below
gc.autoPackLimit and gc.bigPackThreshold should be respected again.
++
+If the amount of memory estimated for `git repack` to run smoothly is
+not available and `gc.bigPackThreshold` is not set, the largest pack
+will also be excluded (this is the equivalent of running `git gc` with
+`--keep-base-pack`).
gc.writeCommitGraph::
If true, then gc will rewrite the commit-graph file when
- linkgit:git-gc[1] is run. When using linkgit:git-gc[1]
- '--auto' the commit-graph will be updated if housekeeping is
+ linkgit:git-gc[1] is run. When using `git gc --auto`
+ the commit-graph will be updated if housekeeping is
required. Default is false. See linkgit:git-commit-graph[1]
for details.
With "<pattern>" (e.g. "refs/stash")
in the middle, the setting applies only to the refs that
match the <pattern>.
++
+These types of entries are generally created as a result of using `git
+commit --amend` or `git rebase` and are the commits prior to the amend
+or rebase occurring. Since these changes are not part of the current
+project most users will want to expire them sooner, which is why the
+default is more aggressive than `gc.reflogExpire`.
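A sketch of such a pattern-scoped setting, giving remote-tracking branches
their own expiry (written here with `git config`; the quoting keeps the
shell from expanding the `*`):

------------
$ git config --global 'gc.refs/remotes/*.reflogExpire' never
$ git config --global 'gc.refs/remotes/*.reflogExpireUnreachable' '3 days'
------------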
gc.rerereResolved::
Records of conflicted merge you resolved earlier are
gpg.<format>.program::
Use this to customize the program used for the signing format you
chose. (see `gpg.program` and `gpg.format`) `gpg.program` can still
- be used as a legacy synonym for `gpg.openpgp.program`. The default
+ be used as a legacy synonym for `gpg.openpgp.program`. The default
value for `gpg.x509.program` is "gpgsm".
so that the local merge commits are included in the rebase (see
linkgit:git-rebase[1] for details).
+
-When preserve, also pass `--preserve-merges` along to 'git rebase'
-so that locally committed merge commits will not be flattened
-by running 'git pull'.
+When `preserve` (deprecated in favor of `merges`), also pass
+`--preserve-merges` along to 'git rebase' so that locally committed merge
+commits will not be flattened by running 'git pull'.
+
When the value is `interactive`, the rebase is run in interactive mode.
+
rebase.useBuiltin::
- Set to `false` to use the legacy shellscript implementation of
- linkgit:git-rebase[1]. Is `true` by default, which means use
- the built-in rewrite of it in C.
-+
-The C rewrite is first included with Git version 2.20. This option
-serves an an escape hatch to re-enable the legacy version in case any
-bugs are found in the rewrite. This option and the shellscript version
-of linkgit:git-rebase[1] will be removed in some future release.
-+
-If you find some reason to set this option to `false` other than
-one-off testing you should report the behavior difference as a bug in
-git.
+ Unused configuration variable. Used in Git versions 2.20 and
+ 2.21 as an escape hatch to enable the legacy shellscript
+ implementation of rebase. Now the built-in rewrite of it in C
+ is always used. Setting this will emit a warning, to alert any
+ remaining users that setting this now does nothing.
rebase.stat::
Whether to show a diffstat of what changed upstream since the last
-user.email::
- Your email address to be recorded in any newly created commits.
- Can be overridden by the `GIT_AUTHOR_EMAIL`, `GIT_COMMITTER_EMAIL`, and
- `EMAIL` environment variables. See linkgit:git-commit-tree[1].
-
user.name::
- Your full name to be recorded in any newly created commits.
- Can be overridden by the `GIT_AUTHOR_NAME` and `GIT_COMMITTER_NAME`
- environment variables. See linkgit:git-commit-tree[1].
+user.email::
+author.name::
+author.email::
+committer.name::
+committer.email::
+	The `user.name` and `user.email` variables determine what ends
+	up in the `author` and `committer` fields of commit
+	objects.
+ If you need the `author` or `committer` to be different, the
+ `author.name`, `author.email`, `committer.name` or
+ `committer.email` variables can be set.
+ Also, all of these can be overridden by the `GIT_AUTHOR_NAME`,
+ `GIT_AUTHOR_EMAIL`, `GIT_COMMITTER_NAME`,
+ `GIT_COMMITTER_EMAIL` and `EMAIL` environment variables.
+ See linkgit:git-commit-tree[1] for more information.
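A minimal sketch of layering the new variables on top of `user.*` (the
names and addresses are placeholders):

------------
$ git config user.name 'A U Thor'
$ git config user.email 'author@example.com'
# Record a different committer identity without touching user.*:
$ git config committer.name 'C O Mitter'
$ git config committer.email 'committer@example.com'
------------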
user.useConfigOnly::
Instruct Git to avoid trying to guess defaults for `user.email`
. there are more "src" modes and "src" sha1
. status is concatenated status characters for each parent
. no optional "score" number
-. single path, only for "dst"
+. tab-separated pathname(s) of the file
-Example:
+For `-c` and `--cc`, only the destination or final path is shown even
+if the file was renamed on any side of history. With
+`--combined-all-paths`, the name of the path in each parent is shown
+followed by the name of the path in the merge commit.
+
+Examples for `-c` and `--cc` without `--combined-all-paths`:
+------------------------------------------------
+::100644 100644 100644 fabadb8 cc95eb0 4866510 MM desc.c
+::100755 100755 100755 52b7a2d 6d1ac04 d2ac7d7 RM bar.sh
+::100644 100644 100644 e07d6c5 9042e82 ee91881 RR phooey.c
+------------------------------------------------
+
+Examples when `--combined-all-paths` added to either `-c` or `--cc`:
------------------------------------------------
-::100644 100644 100644 fabadb8 cc95eb0 4866510 MM describe.c
+::100644 100644 100644 fabadb8 cc95eb0 4866510 MM desc.c desc.c desc.c
+::100755 100755 100755 52b7a2d 6d1ac04 d2ac7d7 RM foo.sh bar.sh bar.sh
+::100644 100644 100644 e07d6c5 9042e82 ee91881 RR fooey.c fuey.c phooey.c
------------------------------------------------
Note that 'combined diff' lists only files which were modified from
Similar to two-line header for traditional 'unified' diff
format, `/dev/null` is used to signal created or deleted
files.
++
+However, if the `--combined-all-paths` option is provided, instead of a
+two-line from-file/to-file header you get an N+1 line from-file/to-file
+header, where N is the number of parents in the merge commit:
+
+ --- a/file
+ --- a/file
+ --- a/file
+ +++ b/file
++
+This extended format can be useful if rename or copy detection is
+active, to allow you to see the original name of the file in different
+parents.
4. Chunk header format is modified to prevent people from
accidentally feeding it to `patch -p1`. Combined diff format
-U<n>::
--unified=<n>::
Generate diffs with <n> lines of context instead of
- the usual three.
+ the usual three. Implies `--patch`.
ifndef::git-format-patch[]
Implies `-p`.
endif::git-format-patch[]
+--output=<file>::
+ Output to a specific file instead of stdout.
+
+--output-indicator-new=<char>::
+--output-indicator-old=<char>::
+--output-indicator-context=<char>::
+ Specify the character used to indicate new, old or context
+ lines in the generated patch. Normally they are '+', '-' and
+ ' ' respectively.
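A hedged sketch of these options together with `--output`, writing a patch
whose added and removed lines are marked with `>` and `<` instead of `+`
and `-`:

------------
$ git diff --output=my.patch \
	--output-indicator-new='>' --output-indicator-old='<'
------------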
+
ifndef::git-format-patch[]
--raw::
ifndef::git-log[]
number of modified files, as well as number of added and deleted
lines.
+-X[<param1,param2,...>]::
--dirstat[=<param1,param2,...>]::
Output the distribution of relative amount of changes for each
sub-directory. The behavior of `--dirstat` can be customized by
and accumulating child directory counts in the parent directories:
`--dirstat=files,10,cumulative`.
+--cumulative::
+ Synonym for --dirstat=cumulative
+
+--dirstat-by-file[=<param1,param2>...]::
+ Synonym for --dirstat=files,param1,param2...
+
--summary::
Output a condensed summary of extended header information
such as creations, renames and mode changes.
Turn off rename detection, even when the configuration
file gives the default to do so.
+--[no-]rename-empty::
+ Whether to use empty blobs as rename source.
+
ifndef::git-format-patch[]
--check::
Warn if changes introduce conflict markers or whitespace errors.
--binary::
In addition to `--full-index`, output a binary diff that
- can be applied with `git-apply`.
+ can be applied with `git-apply`. Implies `--patch`.
--abbrev[=<n>]::
Instead of showing the full 40-byte hexadecimal object
doc-diff [options] <from> <to> [-- <diff-options>]
doc-diff (-c|--clean)
--
-j=n parallel argument to pass to make
-f force rebuild; do not rely on cached results
-c,clean cleanup temporary working files
+j=n parallel argument to pass to make
+f force rebuild; do not rely on cached results
+c,clean cleanup temporary working files
+from-asciidoc use asciidoc with the 'from'-commit
+from-asciidoctor use asciidoctor with the 'from'-commit
+asciidoc use asciidoc with both commits
+to-asciidoc use asciidoc with the 'to'-commit
+to-asciidoctor use asciidoctor with the 'to'-commit
+asciidoctor use asciidoctor with both commits
+cut-header-footer cut away header and footer
"
SUBDIRECTORY_OK=1
. "$(git --exec-path)/git-sh-setup"
parallel=
force=
clean=
+from_program=
+to_program=
+cut_header_footer=
while test $# -gt 0
do
case "$1" in
clean=t ;;
-f)
force=t ;;
+ --from-asciidoctor)
+ from_program=-asciidoctor ;;
+ --to-asciidoctor)
+ to_program=-asciidoctor ;;
+ --asciidoctor)
+ from_program=-asciidoctor
+ to_program=-asciidoctor ;;
+ --from-asciidoc)
+ from_program=-asciidoc ;;
+ --to-asciidoc)
+ to_program=-asciidoc ;;
+ --asciidoc)
+ from_program=-asciidoc
+ to_program=-asciidoc ;;
+ --cut-header-footer)
+ cut_header_footer=-cut-header-footer ;;
--)
shift; break ;;
*)
ln -s "$dots/config.mak" "$tmp/worktree/config.mak"
fi
+construct_makemanflags () {
+ if test "$1" = "-asciidoc"
+ then
+ echo USE_ASCIIDOCTOR=
+ elif test "$1" = "-asciidoctor"
+ then
+ echo USE_ASCIIDOCTOR=YesPlease
+ fi
+}
+
+from_makemanflags=$(construct_makemanflags "$from_program") &&
+to_makemanflags=$(construct_makemanflags "$to_program") &&
+
+from_dir=$from_oid$from_program$cut_header_footer &&
+to_dir=$to_oid$to_program$cut_header_footer &&
+
# generate_render_makefile <srcdir> <dstdir>
generate_render_makefile () {
find "$1" -type f |
done
}
-# render_tree <committish_oid>
+# render_tree <committish_oid> <directory_name> <makemanflags>
render_tree () {
# Skip install-man entirely if we already have an installed directory.
# We can't rely on make here, since "install-man" unconditionally
# we then can't rely on during the render step). We use "mv" to make
# sure we don't get confused by a previous run that failed partway
# through.
- if ! test -d "$tmp/installed/$1"
+ oid=$1 &&
+ dname=$2 &&
+ makemanflags=$3 &&
+ if ! test -d "$tmp/installed/$dname"
then
- git -C "$tmp/worktree" checkout --detach "$1" &&
+ git -C "$tmp/worktree" checkout --detach "$oid" &&
make -j$parallel -C "$tmp/worktree" \
+ $makemanflags \
GIT_VERSION=omitted \
SOURCE_DATE_EPOCH=0 \
- DESTDIR="$tmp/installed/$1+" \
+ DESTDIR="$tmp/installed/$dname+" \
install-man &&
- mv "$tmp/installed/$1+" "$tmp/installed/$1"
+ mv "$tmp/installed/$dname+" "$tmp/installed/$dname"
fi &&
# As with "installed" above, we skip the render if it's already been
# done. So using make here is primarily just about running in
# parallel.
- if ! test -d "$tmp/rendered/$1"
+ if ! test -d "$tmp/rendered/$dname"
then
- generate_render_makefile "$tmp/installed/$1" "$tmp/rendered/$1+" |
+ generate_render_makefile "$tmp/installed/$dname" \
+ "$tmp/rendered/$dname+" |
make -j$parallel -f - &&
- mv "$tmp/rendered/$1+" "$tmp/rendered/$1"
+ mv "$tmp/rendered/$dname+" "$tmp/rendered/$dname"
+
+ if test "$cut_header_footer" = "-cut-header-footer"
+ then
+ for f in $(find "$tmp/rendered/$dname" -type f)
+ do
+ tail -n +3 "$f" | head -n -2 |
+ sed -e '1{/^$/d}' -e '${/^$/d}' >"$f+" &&
+ mv "$f+" "$f" ||
+ return 1
+ done
+ fi
fi
}
-render_tree $from_oid &&
-render_tree $to_oid &&
-git -C $tmp/rendered diff --no-index "$@" $from_oid $to_oid
+render_tree $from_oid $from_dir $from_makemanflags &&
+render_tree $to_oid $to_dir $to_makemanflags &&
+git -C $tmp/rendered diff --no-index "$@" $from_dir $to_dir
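With the new switches, comparing how the same commit renders under the two
toolchains might look like this (run from the Documentation/ directory, as
before):

------------
# Render HEAD's manpages once with asciidoc and once with asciidoctor,
# trim the header/footer lines, and diff the two renderings.
$ ./doc-diff --from-asciidoc --to-asciidoctor --cut-header-footer HEAD HEAD
------------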
for command-line options).
-CONFIGURATION
--------------
-
-The optional configuration variable `core.excludesFile` indicates a path to a
-file containing patterns of file names to exclude from git-add, similar to
-$GIT_DIR/info/exclude. Patterns in the exclude file are used in addition to
-those in info/exclude. See linkgit:gitignore[5].
-
-
EXAMPLES
--------
am.threeWay configuration variable. For more information,
see am.threeWay in linkgit:git-config[1].
+--rerere-autoupdate::
+--no-rerere-autoupdate::
+ Allow the rerere mechanism to update the index with the
+ result of auto-conflict resolution if possible.
+
--ignore-space-change::
--ignore-whitespace::
--whitespace=<option>::
--------
[verse]
'git branch' [--color[=<when>] | --no-color] [-r | -a]
- [--list] [-v [--abbrev=<length> | --no-abbrev]]
+ [--list] [--show-current] [-v [--abbrev=<length> | --no-abbrev]]
[--column[=<options>] | --no-column] [--sort=<key>]
[(--merged | --no-merged) [<commit>]]
[--contains [<commit]] [--no-contains [<commit>]]
branch --list 'maint-*'`, list only the branches that match
the pattern(s).
+--show-current::
+ Print the name of the current branch. In detached HEAD state,
+ nothing is printed.
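For example (assuming a branch named `maint` exists):

------------
$ git checkout maint
$ git branch --show-current
maint
------------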
+
-v::
-vv::
--verbose::
+
When checking out paths from the index, this option lets you recreate
the conflicted merge in the specified paths.
++
+When switching branches with `--merge`, staged changes may be lost.
--conflict=<style>::
The same as --merge option above, but changes the way the
This means that you can use `git checkout -p` to selectively discard
edits from your current working tree. See the ``Interactive Mode''
section of linkgit:git-add[1] to learn how to operate the `--patch` mode.
++
+Note that this option uses the no overlay mode by default (see also
+`--[no-]overlay`), and currently doesn't support overlay mode.
--ignore-other-worktrees::
`git checkout` refuses when the wanted ref is already checked
Do not attempt to create a branch if a remote tracking branch
of the same name exists.
+--[no-]overlay::
+ In the default overlay mode, `git checkout` never
+ removes files from the index or the working tree. When
+ specifying `--no-overlay`, files that appear in the index and
+ working tree, but not in <tree-ish> are removed, to make them
+ match <tree-ish> exactly.
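A short example: make everything under `Documentation/` in the index and
working tree match `HEAD` exactly, deleting files that `HEAD` does not
have:

------------
$ git checkout --no-overlay HEAD -- Documentation/
------------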
+
<branch>::
Branch to checkout; if it refers to a branch (i.e., a name that,
when prepended with "refs/heads/", is a valid ref), then that
Pass the merge strategy-specific option through to the
merge strategy. See linkgit:git-merge[1] for details.
+--rerere-autoupdate::
+--no-rerere-autoupdate::
+ Allow the rerere mechanism to update the index with the
+ result of auto-conflict resolution if possible.
+
SEQUENCER SUBCOMMANDS
---------------------
include::sequencer.txt[]
-e <pattern>::
--exclude=<pattern>::
- In addition to those found in .gitignore (per directory) and
- $GIT_DIR/info/exclude, also consider these patterns to be in the
- set of the ignore rules in effect.
+ Use the given exclude pattern in addition to the standard ignore rules
+ (see linkgit:gitignore[5]).
-x::
- Don't use the standard ignore rules read from .gitignore (per
- directory) and $GIT_DIR/info/exclude, but do still use the ignore
- rules given with `-e` options. This allows removing all untracked
+ Don't use the standard ignore rules (see linkgit:gitignore[5]), but
+ still use the ignore rules given with `-e` options from the command
+ line. This allows removing all untracked
files, including build products. This can be used (possibly in
conjunction with 'git reset') to create a pristine
working directory to test a clean build.
emits the new commit object id on stdout. The log message is read
from the standard input, unless `-m` or `-F` options are given.
+The `-m` and `-F` options can be given any number of times, in any
+order. The commit log message will be composed in the order in which
+the options are given.
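For instance, the following creates a commit whose log message has two
paragraphs, in the order the options were given (a sketch using the
current `HEAD` as the parent):

------------
$ tree=$(git write-tree)
$ git commit-tree -p HEAD -m 'Subject line' -m 'Body paragraph.' "$tree"
------------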
+
A commit object may have any number of parents. With exactly one
parent, it is an ordinary commit. Having more than one parent makes
the commit a merge between several lines of history. Initial (root)
OPTIONS
-------
<tree>::
- An existing tree object
+ An existing tree object.
-p <parent>::
Each `-p` indicates the id of a parent commit object.
-F <file>::
Read the commit log message from the given file. Use `-` to read
- from the standard input.
+ from the standard input. This can be given more than once and the
+ content of each file becomes its own paragraph.
-S[<keyid>]::
--gpg-sign[=<keyid>]::
--local::
For writing options: write to the repository `.git/config` file.
- This is the default behavior.
+ This is the default behavior.
+
For reading options: read only from the repository `.git/config` rather than
from all available files.
output. The optional `default` parameter is used instead, if
there is no color configured for `name`.
+
-`--type=color [--default=<default>]` is preferred over `--get-color`.
+`--type=color [--default=<default>]` is preferred over `--get-color`
+(but note that `--get-color` will omit the trailing newline printed by
+`--type=color`).
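The difference is easy to see by piping both through `od` (using
`color.diff.new` as an arbitrary example slot):

------------
$ git config --get-color color.diff.new green | od -c
$ git config --type=color --default=green color.diff.new | od -c
------------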
-e::
--edit::
This is sort of "Git root" - if you run 'git daemon' with
'--base-path=/srv/git' on example.com, then if you later try to pull
'git://example.com/hello.git', 'git daemon' will interpret the path
- as '/srv/git/hello.git'.
+ as `/srv/git/hello.git`.
--base-path-relaxed::
If --base-path is enabled and repo lookup fails, with this option
--------
[verse]
'git diff-tree' [--stdin] [-m] [-s] [-v] [--no-commit-id] [--pretty]
- [-t] [-r] [-c | --cc] [--root] [<common diff options>]
- <tree-ish> [<tree-ish>] [<path>...]
+ [-t] [-r] [-c | --cc] [--combined-all-paths] [--root]
+ [<common diff options>] <tree-ish> [<tree-ish>] [<path>...]
DESCRIPTION
-----------
itself and the commit log message is not shown, just like in any other
"empty diff" case.
+--combined-all-paths::
+ This flag causes combined diffs (used for merge commits) to
+ list the name of the file from all parents. It thus only has
+ effect when -c or --cc are specified, and is likely only
+ useful if filename changes are detected (i.e. when either
+ rename or copy detection have been requested).
+
--always::
Show the commit itself and the commit log message even
if the diff itself is empty.
include::pretty-formats.txt[]
+
include::diff-format.txt[]
GIT
all `filemodify`, `filecopy`, `filerename` and `notemodify` commands in
the same commit, as `filedeleteall` wipes the branch clean (see below).
-The `LF` after the command is optional (it used to be required).
+The `LF` after the command is optional (it used to be required). Note
+that for reasons of backward compatibility, if the commit ends with a
+`data` command (i.e. it has no `from`, `merge`, `filemodify`,
+`filedelete`, `filecopy`, `filerename`, `filedeleteall` or
+`notemodify` commands) then two `LF` commands may appear at the end of
+the command instead of just one.
`author`
^^^^^^^^
'get-mark' SP ':' <idnum> LF
....
-This command can be used anywhere in the stream that comments are
-accepted. In particular, the `get-mark` command can be used in the
-middle of a commit but not in the middle of a `data` command.
-
See ``Responses To Commands'' below for details about how to read
this output safely.
<contents> LF
====
-This command can be used anywhere in the stream that comments are
-accepted. In particular, the `cat-blob` command can be used in the
-middle of a commit but not in the middle of a `data` command.
+This command can be used where a `filemodify` directive can appear,
+allowing it to be used in the middle of a commit. For a `filemodify`
+using an inline directive, it can also appear right before the `data`
+directive.
See ``Responses To Commands'' below for details about how to read
this output safely.
blob or tree from a previous commit for use in the current one (with
`filemodify`).
-The `ls` command can be used anywhere in the stream that comments are
-accepted, including the middle of a commit.
+The `ls` command can also be used where a `filemodify` directive can
+appear, allowing it to be used in the middle of a commit.
Reading from the active commit::
This form can only be used in the middle of a `commit`.
to force recomputation of all deltas can significantly reduce the
final packfile size (30-50% smaller can be quite typical).
+Instead of running `git repack` you can also run `git gc
+--aggressive`, which will also optimize other things after an import
+(e.g. pack loose refs). As noted in the "AGGRESSIVE" section in
+linkgit:git-gc[1] the `--aggressive` option will find new deltas with
+the `-f` option to linkgit:git-repack[1]. For the reasons elaborated
+on above, using `--aggressive` after a fast-import is one of the few
+cases where it's known to be worthwhile.
MEMORY UTILIZATION
------------------
rewriting. When applying a tree filter, the command needs to
temporarily check out the tree to some directory, which may consume
considerable space in case of large projects. By default it
- does this in the '.git-rewrite/' directory but you can override
+ does this in the `.git-rewrite/` directory but you can override
that choice by this parameter.
-f::
with --no-full.
--connectivity-only::
- Check only the connectivity of tags, commits and tree objects. By
- avoiding to unpack blobs, this speeds up the operation, at the
- expense of missing corrupt objects or other problematic issues.
+ Check only the connectivity of reachable objects, making sure
+ that any objects referenced by a reachable tag, commit, or tree
+	are present. This speeds up the operation by avoiding reading
+ blobs entirely (though it does still check that referenced blobs
+ exist). This will detect corruption in commits and trees, but
+ not do any semantic checks (e.g., for format errors). Corruption
+ in blob objects will not be detected at all.
++
+Unreachable tags, commits, and trees will also be accessed to find the
+tips of dangling segments of history. Use `--no-dangling` if you don't
+care about this output and want to speed it up further.
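Per the note above, the fastest connectivity-only check skips the
dangling-object report entirely:

------------
$ git fsck --connectivity-only --no-dangling
------------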
--strict::
Enable more strict checking, namely to catch a file mode
reflog, rerere metadata or stale working trees. May also update ancillary
indexes such as the commit-graph.
-Users are encouraged to run this task on a regular basis within
-each repository to maintain good disk space utilization and good
-operating performance.
+When common porcelain operations that create objects are run, they
+will check whether the repository has grown substantially since the
+last maintenance, and if so run `git gc` automatically. See `gc.auto`
+below for how to disable this behavior.
-Some git commands may automatically run 'git gc'; see the `--auto` flag
-below for details. If you know what you're doing and all you want is to
-disable this behavior permanently without further considerations, just do:
-
-----------------------
-$ git config --global gc.auto 0
-----------------------
+Running `git gc` manually should only be needed when adding objects to
+a repository without regularly running such porcelain commands, to do
+a one-off repository optimization, or e.g. to clean up a suboptimal
+mass-import. See the "PACKFILE OPTIMIZATION" section in
+linkgit:git-fast-import[1] for more details on the import case.
OPTIONS
-------
space utilization and performance. This option will cause
'git gc' to more aggressively optimize the repository at the expense
of taking much more time. The effects of this optimization are
- persistent, so this option only needs to be used occasionally; every
- few hundred changesets or so.
+ mostly persistent. See the "AGGRESSIVE" section below for details.
--auto::
With this option, 'git gc' checks whether any housekeeping is
required; if not, it exits without performing any work.
- Some git commands run `git gc --auto` after performing
- operations that could create many loose objects. Housekeeping
- is required if there are too many loose objects or too many
- packs in the repository.
-+
-If the number of loose objects exceeds the value of the `gc.auto`
-configuration variable, then all loose objects are combined into a
-single pack using `git repack -d -l`. Setting the value of `gc.auto`
-to 0 disables automatic packing of loose objects.
+
-If the number of packs exceeds the value of `gc.autoPackLimit`,
-then existing packs (except those marked with a `.keep` file
-or over `gc.bigPackThreshold` limit)
-are consolidated into a single pack by using the `-A` option of
-'git repack'.
-If the amount of memory is estimated not enough for `git repack` to
-run smoothly and `gc.bigPackThreshold` is not set, the largest
-pack will also be excluded (this is the equivalent of running `git gc`
-with `--keep-base-pack`).
-Setting `gc.autoPackLimit` to 0 disables automatic consolidation of
-packs.
+See the `gc.auto` option in the "CONFIGURATION" section below for how
+this heuristic works.
+
-If houskeeping is required due to many loose objects or packs, all
+Once housekeeping is triggered by exceeding the limits of
+configuration options such as `gc.auto` and `gc.autoPackLimit`, all
other housekeeping tasks (e.g. rerere, working trees, reflog...) will
be performed as well.
--prune=<date>::
Prune loose objects older than date (default is 2 weeks ago,
overridable by the config variable `gc.pruneExpire`).
- --prune=all prunes loose objects regardless of their age and
+ --prune=now prunes loose objects regardless of their age and
increases the risk of corruption if another process is writing to
the repository concurrently; see "NOTES" below. --prune is on by
default.
`.keep` files are consolidated into a single pack. When this
option is used, `gc.bigPackThreshold` is ignored.
+AGGRESSIVE
+----------
+
+When the `--aggressive` option is supplied, linkgit:git-repack[1] will
+be invoked with the `-f` flag, which in turn will pass
+`--no-reuse-delta` to linkgit:git-pack-objects[1]. This will throw
+away any existing deltas and re-compute them, at the expense of
+spending much more time on the repacking.
+
+The effects of this are mostly persistent, e.g. when packs and loose
+objects are coalesced into another pack, the existing deltas in that
+pack might get re-used, but there are also various cases where we
+might pick a sub-optimal delta from a newer pack instead.
+
+Furthermore, supplying `--aggressive` will tweak the `--depth` and
+`--window` options passed to linkgit:git-repack[1]. See the
+`gc.aggressiveDepth` and `gc.aggressiveWindow` settings below. By
+using a larger window size we're more likely to find more optimal
+deltas.
+
+It's probably not worth it to use this option on a given repository
+without running tailored performance benchmarks on it. It takes a lot
+more time, and the resulting space/delta optimization may or may not
+be worth it. Not using this at all is the right trade-off for most
+users and their repositories.
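Ignoring the other housekeeping `git gc` performs, the repack step of an
aggressive gc roughly corresponds to the following, using the default
`gc.aggressiveDepth` and `gc.aggressiveWindow` values described in the
CONFIGURATION section below:

------------
$ git repack -a -d -f --depth=50 --window=250
------------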
+
CONFIGURATION
-------------
-The optional configuration variable `gc.reflogExpire` can be
-set to indicate how long historical entries within each branch's
-reflog should remain available in this repository. The setting is
-expressed as a length of time, for example '90 days' or '3 months'.
-It defaults to '90 days'.
-
-The optional configuration variable `gc.reflogExpireUnreachable`
-can be set to indicate how long historical reflog entries which
-are not part of the current branch should remain available in
-this repository. These types of entries are generally created as
-a result of using `git commit --amend` or `git rebase` and are the
-commits prior to the amend or rebase occurring. Since these changes
-are not part of the current project most users will want to expire
-them sooner. This option defaults to '30 days'.
-
-The above two configuration variables can be given to a pattern. For
-example, this sets non-default expiry values only to remote-tracking
-branches:
-
-------------
-[gc "refs/remotes/*"]
- reflogExpire = never
- reflogExpireUnreachable = 3 days
-------------
-
-The optional configuration variable `gc.rerereResolved` indicates
-how long records of conflicted merge you resolved earlier are
-kept. This defaults to 60 days.
-
-The optional configuration variable `gc.rerereUnresolved` indicates
-how long records of conflicted merge you have not resolved are
-kept. This defaults to 15 days.
-
-The optional configuration variable `gc.packRefs` determines if
-'git gc' runs 'git pack-refs'. This can be set to "notbare" to enable
-it within all non-bare repos or it can be set to a boolean value.
-This defaults to true.
-
-The optional configuration variable `gc.writeCommitGraph` determines if
-'git gc' should run 'git commit-graph write'. This can be set to a
-boolean value. This defaults to false.
-
-The optional configuration variable `gc.aggressiveWindow` controls how
-much time is spent optimizing the delta compression of the objects in
-the repository when the --aggressive option is specified. The larger
-the value, the more time is spent optimizing the delta compression. See
-the documentation for the --window option in linkgit:git-repack[1] for
-more details. This defaults to 250.
-
-Similarly, the optional configuration variable `gc.aggressiveDepth`
-controls --depth option in linkgit:git-repack[1]. This defaults to 50.
-
-The optional configuration variable `gc.pruneExpire` controls how old
-the unreferenced loose objects have to be before they are pruned. The
-default is "2 weeks ago".
-
-Optional configuration variable `gc.worktreePruneExpire` controls how
-old a stale working tree should be before `git worktree prune` deletes
-it. Default is "3 months ago".
+The below documentation is the same as what's found in
+linkgit:git-config[1]:
+include::config/gc.txt[]
NOTES
-----
particular, it will keep not only objects referenced by your current set
of branches and tags, but also objects referenced by the index,
remote-tracking branches, refs saved by 'git filter-branch' in
-refs/original/, or reflogs (which may reference commits in branches
-that were later amended or rewound).
+refs/original/, reflogs (which may reference commits in branches
+that were later amended or rewound), and anything else in the refs/* namespace.
If you are expecting some objects to be deleted and they aren't, check
all of those locations and decide whether it makes sense in your case to
remove those references.
However, these features fall short of a complete solution, so users who
run commands concurrently have to live with some risk of corruption (which
-seems to be low in practice) unless they turn off automatic garbage
-collection with 'git config gc.auto 0'.
+seems to be low in practice).
HOOKS
-----
mechanism. Only useful with `--untracked`.
--exclude-standard::
- Do not pay attention to ignored files specified via the `.gitignore`
+ Do not pay attention to ignored files specified via the `.gitignore`
mechanism. Only useful when searching files in the current directory
with `--no-index`.
already opened konqueror in a new tab if possible.
For consistency, we also try such a trick if 'man.konqueror.path' is
-set to something like 'A_PATH_TO/konqueror'. That means we will try to
-launch 'A_PATH_TO/kfmclient' instead.
+set to something like `A_PATH_TO/konqueror`. That means we will try to
+launch `A_PATH_TO/kfmclient` instead.
If you really want to use 'konqueror', then you can use something like
the following:
Accelerated static Apache 2.x::
Similar to the above, but Apache can be used to return static
- files that are stored on disk. On many systems this may
+ files that are stored on disk. On many systems this may
be more efficient as Apache can ask the kernel to copy the
file contents from the file system directly to the network:
+
NAME
----
-git-interpret-trailers - add or parse structured information in commit messages
+git-interpret-trailers - Add or parse structured information in commit messages
SYNOPSIS
--------
displayed.
--refs::
- Do not show peeled tags or pseudorefs like HEAD in the output.
+ Do not show peeled tags or pseudorefs like `HEAD` in the output.
-q::
--quiet::
taken as relative to the current working directory. E.g. when you are
in a directory 'sub' that has a directory 'dir', you can run 'git
ls-tree -r HEAD dir' to list the contents of the tree (that is
- 'sub/dir' in `HEAD`). You don't want to give a tree that is not at the
+ `sub/dir` in `HEAD`). You don't want to give a tree that is not at the
root level (e.g. `git ls-tree -r HEAD:sub dir`) in this case, as that
- would result in asking for 'sub/sub/dir' in the `HEAD` commit.
+ would result in asking for `sub/sub/dir` in the `HEAD` commit.
However, the current working directory can be ignored by passing
--full-tree option.
If `--log` is specified, a shortlog of the commits being merged
will be appended to the specified message.
---[no-]rerere-autoupdate::
+--rerere-autoupdate::
+--no-rerere-autoupdate::
Allow the rerere mechanism to update the index with the
result of auto-conflict resolution if possible.
-C <object>::
--reuse-message=<object>::
- Take the given blob object (for example, another note) as the
+ Take the given blob object (for example, another note) as the
note message. (Use `git notes copy <object>` instead to
copy notes between objects.)
started.
--reset::
- Same as -m, except that unmerged entries are discarded
- instead of failing.
+ Same as -m, except that unmerged entries are discarded instead
+ of failing. When used with `-u`, updates leading to loss of
+ working tree changes will not abort the operation.
-u::
After a successful merge, update the files in the work
Instead of reading tree object(s) into the index, just empty
it.
+-q::
+--quiet::
+ Quiet, suppress feedback messages.
+
<tree-ish#>::
The id of the tree object(s) to be read/merged.
+
See also INCOMPATIBLE OPTIONS below.
+--rerere-autoupdate::
+--no-rerere-autoupdate::
+ Allow the rerere mechanism to update the index with the
+ result of auto-conflict resolution if possible.
+
-S[<keyid>]::
--gpg-sign[=<keyid>]::
GPG-sign commits. The `keyid` argument is optional and
+
By default, or when `no-rebase-cousins` was specified, commits which do not
have `<upstream>` as direct ancestor will keep their original branch point,
-i.e. commits that would be excluded by gitlink:git-log[1]'s
+i.e. commits that would be excluded by linkgit:git-log[1]'s
`--ancestry-path` option will keep their original ancestry by default. If
the `rebase-cousins` mode is turned on, such commits are instead rebased
onto `<upstream>` (or `<onto>`, if specified).
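As an illustrative invocation (using the placeholders from the synopsis), cousins can be rebased along with everything else by running:

------------
$ git rebase --rebase-merges=rebase-cousins --onto <onto> <upstream>
------------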
+
-The `--rebase-merges` mode is similar in spirit to `--preserve-merges`, but
-in contrast to that option works well in interactive rebases: commits can be
-reordered, inserted and dropped at will.
+The `--rebase-merges` mode is similar in spirit to the deprecated
+`--preserve-merges`, but in contrast to that option works well in interactive
+rebases: commits can be reordered, inserted and dropped at will.
+
It is currently only possible to recreate the merge commits using the
`recursive` merge strategy; Different merge strategies can be used only via
-p::
--preserve-merges::
- Recreate merge commits instead of flattening the history by replaying
- commits a merge commit introduces. Merge conflict resolutions or manual
- amendments to merge commits are not preserved.
+ [DEPRECATED: use `--rebase-merges` instead] Recreate merge commits
+ instead of flattening the history by replaying commits a merge commit
+ introduces. Merge conflict resolutions or manual amendments to merge
+ commits are not preserved.
+
This uses the `--interactive` machinery internally, but combining it
with the `--interactive` option explicitly is generally not a good
BUGS
----
-The todo list presented by `--preserve-merges --interactive` does not
-represent the topology of the revision graph. Editing commits and
-rewording their commit messages should work fine, but attempts to
-reorder commits tend to produce counterintuitive results. Use
-`--rebase-merges` in such scenarios instead.
+The todo list presented by the deprecated `--preserve-merges --interactive`
+does not represent the topology of the revision graph. Editing commits and
+rewording their commit messages should work fine, but attempts to reorder
+commits tend to produce counterintuitive results. Use `--rebase-merges` in
+such scenarios instead.
For example, an attempt to rearrange
------------
link-level address).
"ext::git-server-alias foo %G/repo% with% spaces %Vfoo"::
- Represents a repository with path '/repo with spaces' accessed
+ Represents a repository with path `/repo with spaces` accessed
using the helper program "git-server-alias foo". The hostname for
the remote server passed in the protocol stream will be "foo"
(this allows multiple virtual Git servers to share a
SEE ALSO
--------
-linkgit:gitremote-helpers[1]
+linkgit:gitremote-helpers[7]
GIT
---
SEE ALSO
--------
-linkgit:gitremote-helpers[1]
+linkgit:gitremote-helpers[7]
GIT
---
git-remote-helpers
==================
-This document has been moved to linkgit:gitremote-helpers[1].
+This document has been moved to linkgit:gitremote-helpers[7].
Please let the owners of the referring site know so that they can update the
link you clicked to get here.
+++ /dev/null
-git-remote-testgit(1)
-=====================
-
-NAME
-----
-git-remote-testgit - Example remote-helper
-
-
-SYNOPSIS
---------
-[verse]
-git clone testgit::<source-repo> [<destination>]
-
-DESCRIPTION
------------
-
-This command is a simple remote-helper, that is used both as a
-testcase for the remote-helper functionality, and as an example to
-show remote-helper authors one possible implementation.
-
-The best way to learn more is to read the comments and source code in
-'git-remote-testgit'.
-
-SEE ALSO
---------
-linkgit:gitremote-helpers[1]
-
-GIT
----
-Part of the linkgit:git[1] suite
hand resolutions to their corresponding automerge results.
[NOTE]
-You need to set the configuration variable rerere.enabled in order to
+You need to set the configuration variable `rerere.enabled` in order to
enable this command.
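For example, it can be enabled for the current repository with a plain `git config` call:

------------
$ git config rerere.enabled true
------------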
`reset --merge` is meant to be used when resetting out of a conflicted
merge. Any mergy operation guarantees that the working tree file that is
-involved in the merge does not have local change wrt the index before
-it starts, and that it writes the result out to the working tree. So if
+involved in the merge does not have a local change with respect to the index
+before it starts, and that it writes the result out to the working tree. So if
we see some difference between the index and the target and also
between the index and the working tree, then it means that we are not
resetting out from a state that a mergy operation left after failing
Pass the merge strategy-specific option through to the
merge strategy. See linkgit:git-merge[1] for details.
+--rerere-autoupdate::
+--no-rerere-autoupdate::
+ Allow the rerere mechanism to update the index with the
+ result of auto-conflict resolution if possible.
+
SEQUENCER SUBCOMMANDS
---------------------
include::sequencer.txt[]
--------
[verse]
'git stash' list [<options>]
-'git stash' show [<stash>]
+'git stash' show [<options>] [<stash>]
'git stash' drop [-q|--quiet] [<stash>]
'git stash' ( pop | apply ) [--index] [-q|--quiet] [<stash>]
'git stash' branch <branchname> [<stash>]
The command takes options applicable to the 'git log'
command to control what is shown and how. See linkgit:git-log[1].
-show [<stash>]::
+show [<options>] [<stash>]::
Show the changes recorded in the stash entry as a diff between the
stashed contents and the commit back when the stash entry was first
command line arguments. Parsers should ignore headers they
don't recognize.
-### Branch Headers
+Branch Headers
+^^^^^^^^^^^^^^
If `--branch` is given, a series of header lines are printed with
information about the current branch.
------------------------------------------------------------
....
-### Changed Tracked Entries
+Changed Tracked Entries
+^^^^^^^^^^^^^^^^^^^^^^^
Following the headers, a series of lines are printed for tracked
entries. One of three different line formats may be used to describe
--------------------------------------------------------
....
-### Other Items
+Other Items
+^^^^^^^^^^^
Following the tracked entries (and if requested), a series of
lines will be printed for untracked and then ignored items
! <path>
-### Pathname Format Notes and -z
+Pathname Format Notes and -z
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When the `-z` option is given, pathnames are printed as is and
without any quoting and lines are terminated with a NUL (ASCII 0x00)
SYNOPSIS
--------
[verse]
+'git submodule' [--quiet] [--cached]
'git submodule' [--quiet] add [<options>] [--] <repository> [<path>]
'git submodule' [--quiet] status [--cached] [--recursive] [--] [<path>...]
'git submodule' [--quiet] init [--] [<path>...]
'git submodule' [--quiet] deinit [-f|--force] (--all|[--] <path>...)
'git submodule' [--quiet] update [<options>] [--] [<path>...]
+'git submodule' [--quiet] set-branch [<options>] [--] <path>
'git submodule' [--quiet] summary [<options>] [--] [<path>...]
'git submodule' [--quiet] foreach [--recursive] <command>
'git submodule' [--quiet] sync [--recursive] [--] [<path>...]
COMMANDS
--------
+With no arguments, shows the status of existing submodules. Several
+subcommands are available to perform operations on the submodules.
+
add [-b <branch>] [-f|--force] [--name <name>] [--reference <repository>] [--depth <depth>] [--] <repository> [<path>]::
Add the given repository as a submodule at the given path
to the changeset to be committed next to the current
or ../), the location relative to the superproject's default remote
repository (Please note that to specify a repository 'foo.git'
which is located right next to a superproject 'bar.git', you'll
-have to use '../foo.git' instead of './foo.git' - as one might expect
+have to use `../foo.git` instead of `./foo.git` - as one might expect
when following the rules for relative URLs - because the evaluation
of relative URLs in Git is identical to that of relative directories).
+
If `--recursive` is specified, this command will recurse into the
registered submodules, and update any nested submodules within.
--
+set-branch ((-d|--default)|(-b|--branch <branch>)) [--] <path>::
+ Sets the default remote tracking branch for the submodule. The
+ `--branch` option allows the remote branch to be specified. The
+ `--default` option removes the submodule.<name>.branch configuration
+ key, which causes the tracking branch to default to 'master'.
+
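As a usage sketch (the submodule path `lib/frotz` and branch `stable` are hypothetical):

------------
$ git submodule set-branch --branch stable -- lib/frotz
$ git submodule set-branch --default -- lib/frotz
------------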
summary [--cached|--files] [(-n|--summary-limit) <n>] [commit] [--] [<path>...]::
Show commit summary between the given commit (defaults to HEAD) and
working tree/index. For a submodule in question, a series of commits
This option is only valid for the deinit command. Unregister all
submodules in the working tree.
--b::
---branch::
+-b <branch>::
+--branch <branch>::
Branch of repository to add as submodule.
The name of the branch is recorded as `submodule.<name>.branch` in
`.gitmodules` for `update --remote`. A special value of `.` is used to
indicate that the name of the branch in the submodule should be the
- same name as the current branch in the current repository.
+ same name as the current branch in the current repository. If the
+ option is not specified, it defaults to 'master'.
-f::
--force::
command-line argument.
+
This automatically updates the rev_map if needed (see
-'$GIT_DIR/svn/\*\*/.rev_map.*' in the FILES section below for details).
+'$GIT_DIR/svn/\**/.rev_map.*' in the FILES section below for details).
--localtime;;
Store Git commit times in the local time zone instead of UTC. This
and have no uncommitted changes.
+
This automatically updates the rev_map if needed (see
-'$GIT_DIR/svn/\*\*/.rev_map.*' in the FILES section below for details).
+'$GIT_DIR/svn/\**/.rev_map.*' in the FILES section below for details).
-l;;
--local;;
way to repair the repo is to use 'reset'.
+
Only the rev_map and refs/remotes/git-svn are changed (see
-'$GIT_DIR/svn/\*\*/.rev_map.*' in the FILES section below for details).
+'$GIT_DIR/svn/\**/.rev_map.*' in the FILES section below for details).
Follow 'reset' with a 'fetch' and then 'git reset' or 'git rebase' to
move local branches onto the new tree.
+
This option can only be used for one-shot imports as 'git svn'
will not be able to fetch again without metadata. Additionally,
-if you lose your '$GIT_DIR/svn/\*\*/.rev_map.*' files, 'git svn' will not
+if you lose your '$GIT_DIR/svn/\**/.rev_map.*' files, 'git svn' will not
be able to rebuild them.
+
The 'git svn log' command will not work on repositories using
FILES
-----
-$GIT_DIR/svn/\*\*/.rev_map.*::
+$GIT_DIR/svn/\**/.rev_map.*::
Mapping between Subversion revision numbers and Git commit
names. In a repository where the noMetadata option is not set,
this can be rebuilt from the git-svn-id: lines that are at the
man page on an already opened konqueror in a new tab if possible.
For consistency, we also try such a trick if 'browser.konqueror.path' is
-set to something like 'A_PATH_TO/konqueror'. That means we will try to
-launch 'A_PATH_TO/kfmclient' instead.
+set to something like `A_PATH_TO/konqueror`. That means we will try to
+launch `A_PATH_TO/kfmclient` instead.
If you really want to use 'konqueror', then you can use something like
the following:
In general, all pseudo refs are per working tree and all refs starting
with "refs/" are shared. Pseudo refs are ones like HEAD which are
-directly under GIT_DIR instead of inside GIT_DIR/refs. There are one
+directly under GIT_DIR instead of inside GIT_DIR/refs. There is one
exception to this: refs inside refs/bisect and refs/worktree is not
shared.
The command-line parameters passed to the configured command are
determined by the ssh variant. See `ssh.variant` option in
linkgit:git-config[1] for details.
-
+
`$GIT_SSH_COMMAND` takes precedence over `$GIT_SSH`, and is interpreted
by the shell, which allows additional arguments to be included.
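For instance (the identity file and port below are only placeholders):

------------
$ GIT_SSH_COMMAND='ssh -i ~/.ssh/deploy_key -p 2222' git fetch origin
------------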
Each line in `gitattributes` file is of form:
- pattern attr1 attr2 ...
+ pattern attr1 attr2 ...
That is, a pattern followed by an attributes list,
separated by whitespaces. Leading and trailing whitespaces are
support will checkout `foo.ps1` as UTF-8 encoded file. This will
typically cause trouble for the users of this file.
+
-If a Git client, that does not support the `working-tree-encoding`
-attribute, adds a new file `bar.ps1`, then `bar.ps1` will be
+If a Git client that does not support the `working-tree-encoding`
+attribute adds a new file `bar.ps1`, then `bar.ps1` will be
stored "as-is" internally (in this example probably as UTF-16).
A client with `working-tree-encoding` support will interpret the
internal contents as UTF-8 and try to convert it to UTF-16 on checkout.
Use the following attributes if your '*.ps1' files are UTF-16 little
endian encoded without BOM and you want Git to use Windows line endings
-in the working directory (use `UTF-16-LE-BOM` instead of `UTF-16LE` if
+in the working directory (use `UTF-16LE-BOM` instead of `UTF-16LE` if
you want UTF-16 little endian with BOM).
Please note, it is highly recommended to
explicitly define the line endings with `eol` if the `working-tree-encoding`
variable `GIT_EDITOR=:` if the command will not bring up an editor
to modify the commit message.
+The default 'pre-commit' hook, when enabled--and with the
+`hooks.allownonascii` config option unset or set to false--prevents
+the use of non-ASCII filenames.
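If non-ASCII filenames are wanted, the sample hook can be told to allow them:

------------
$ git config hooks.allownonascii true
------------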
+
prepare-commit-msg
~~~~~~~~~~~~~~~~~~
from standard input. Exiting with non-zero status from this script prevent
`git-p4 submit` from launching. Run `git-p4 submit --help` for details.
+post-index-change
+~~~~~~~~~~~~~~~~~
+
+This hook is invoked when the index is written in read-cache.c
+do_write_locked_index.
+
+The first parameter passed to the hook indicates whether the working
+directory was updated: "1" means the working directory was updated,
+"0" means it was not.
+
+The second parameter indicates whether the index was updated and the
+skip-worktree bits could therefore have changed: "1" means the
+skip-worktree bits could have been updated, "0" means they were not.
+
+Only one of the two parameters should be "1" when the hook runs;
+being invoked with "1", "1" should not be possible.
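A minimal illustrative hook (not part of the change above) that merely reports the two flags could look like this:

------------
#!/bin/sh
# Example post-index-change hook: report the two flags on stderr.
# $1: "1" if the working directory was updated, "0" otherwise
# $2: "1" if the index was updated (skip-worktree bits may have changed)
echo "post-index-change: workdir-updated=$1 index-updated=$2" >&2
------------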
+
GIT
---
Part of the linkgit:git[1] suite
- Other consecutive asterisks are considered regular asterisks and
will match according to the previous rules.
+CONFIGURATION
+-------------
+
+The optional configuration variable `core.excludesFile` indicates a path to a
+file containing patterns of file names to exclude, similar to
+`$GIT_DIR/info/exclude`. Patterns in the exclude file are used in addition to
+those in `$GIT_DIR/info/exclude`.
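For example, a global excludes file can be configured like this (the filename is arbitrary):

------------
$ git config --global core.excludesFile ~/.gitignore_global
------------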
+
NOTES
-----
-----
User configuration and preferences are stored at:
-* '$XDG_CONFIG_HOME/git/gitk' if it exists, otherwise
-* '$HOME/.gitk' if it exists
+* `$XDG_CONFIG_HOME/git/gitk` if it exists, otherwise
+* `$HOME/.gitk` if it exists
-If neither of the above exist then '$XDG_CONFIG_HOME/git/gitk' is created and
+If neither of the above exist then `$XDG_CONFIG_HOME/git/gitk` is created and
used by default. If '$XDG_CONFIG_HOME' is not set it defaults to
-'$HOME/.config' in all cases.
+`$HOME/.config` in all cases.
History
-------
This defines two submodules, `libfoo` and `libbar`. These are expected to
-be checked out in the paths 'include/foo' and 'include/bar', and for both
+be checked out in the paths `include/foo` and `include/bar`, and for both
submodules a URL is specified which can be used for cloning the submodules.
SEE ALSO
-gitremote-helpers(1)
+gitremote-helpers(7)
====================
NAME
'option dry-run' {'true'|'false'}:
If true, pretend the operation completed successfully,
- but don't actually change any repository data. For most
+ but don't actually change any repository data. For most
helpers this only applies to the 'push', if supported.
'option servpath <c-style-quoted-path>'::
linkgit:git-remote-fd[1]
-linkgit:git-remote-testgit[1]
-
linkgit:git-fast-import[1]
GIT
to the object database, not to the repository!) in your
alternates file, but it will not work if you use absolute
paths unless the absolute path in filesystem and web URL
- is the same. See also 'objects/info/http-alternates'.
+ is the same. See also `objects/info/http-alternates`.
objects/info/http-alternates::
This file records URLs to alternate object stores that
* built-in values (some set during build stage),
* common system-wide configuration file (defaults to
- '/etc/gitweb-common.conf'),
+ `/etc/gitweb-common.conf`),
* either per-instance configuration file (defaults to 'gitweb_config.perl'
in the same directory as the installed gitweb), or if it does not exists
- then fallback system-wide configuration file (defaults to '/etc/gitweb.conf').
+ then fallback system-wide configuration file (defaults to `/etc/gitweb.conf`).
Values obtained in later configuration files override values obtained earlier
in the above sequence.
subroutine. For example, one might want to put gitweb configuration
related to access control for viewing repositories via Gitolite (one
of Git repository management tools) in a separate file, e.g. in
-'/etc/gitweb-gitolite.conf'. To include it, put
+`/etc/gitweb-gitolite.conf`. To include it, put
--------------------------------------------------
read_config_file("/etc/gitweb-gitolite.conf");
http://git.example.com/gitweb.cgi/foo/bar.git
------------------------------------------------
+
-will map to the path '/srv/git/foo/bar.git' on the filesystem.
+will map to the path `/srv/git/foo/bar.git` on the filesystem.
$projects_list::
Name of a plain text file listing projects, or a name of directory
$mimetypes_file::
File to use for (filename extension based) guessing of MIME types before
- trying '/etc/mime.types'. *NOTE* that this path, if relative, is taken
+ trying `/etc/mime.types`. *NOTE* that this path, if relative, is taken
as relative to the current Git repository, not to CGI script. If unset,
- only '/etc/mime.types' is used (if present on filesystem). If no mimetypes
+ only `/etc/mime.types` is used (if present on filesystem). If no mimetypes
file is found, mimetype guessing based on extension of file is disabled.
Unset by default.
+
This list should contain the URI of gitweb's standard stylesheet. The default
URI of gitweb stylesheet can be set at build time using the `GITWEB_CSS`
-makefile variable. Its default value is 'static/gitweb.css'
-(or 'static/gitweb.min.css' if the `CSSMIN` variable is defined,
+makefile variable. Its default value is `static/gitweb.css`
+(or `static/gitweb.min.css` if the `CSSMIN` variable is defined,
i.e. if CSS minifier is used during build).
+
*Note*: there is also a legacy `$stylesheet` configuration variable, which was
is displayed in the top right corner of each gitweb page and used as
a logo for the Atom feed. Relative to the base URI of gitweb (as a path).
Can be adjusted when building gitweb using `GITWEB_LOGO` variable
- By default set to 'static/git-logo.png'.
+ By default set to `static/git-logo.png`.
$favicon::
Points to the location where you put 'git-favicon.png' on your web
may display them in the browser's URL bar and next to the site name in
bookmarks. Relative to the base URI of gitweb. Can be adjusted at
build time using `GITWEB_FAVICON` variable.
- By default set to 'static/git-favicon.png'.
+ By default set to `static/git-favicon.png`.
$javascript::
Points to the location where you put 'gitweb.js' on your web server,
Relative to the base URI of gitweb. Can be set at build time using
the `GITWEB_JS` build-time configuration variable.
+
-The default value is either 'static/gitweb.js', or 'static/gitweb.min.js' if
+The default value is either `static/gitweb.js`, or `static/gitweb.min.js` if
the `JSMIN` build variable was defined, i.e. if JavaScript minifier was used
at build time. *Note* that this single file is generated from multiple
individual JavaScript "modules".
doesn't result in some other type; by default "text/plain".
Gitweb guesses mimetype of a file to display based on extension
of its filename, using `$mimetypes_file` (if set and file exists)
- and '/etc/mime.types' files (see *mime.types*(5) manpage; only
+ and `/etc/mime.types` files (see *mime.types*(5) manpage; only
filename extension rules are supported by gitweb).
$default_text_plain_charset::
(for example one for `git://` protocol, and one for `http://`
protocol).
+
-Note that per repository configuration can be set in '$GIT_DIR/cloneurl'
+Note that per repository configuration can be set in `$GIT_DIR/cloneurl`
file, or as values of multi-value `gitweb.url` configuration variable in
project config. Per-repository configuration takes precedence over value
composed from `@git_base_url_list` elements and project name.
If the server load exceeds this value then gitweb will return
"503 Service Unavailable" error. The server load is taken to be 0
if gitweb cannot determine its value. Currently it works only on Linux,
- where it uses '/proc/loadavg'; the load there is the number of active
+ where it uses `/proc/loadavg`; the load there is the number of active
tasks on the system -- processes that are actually running -- averaged
over the last minute.
+
$per_request_config::
If this is set to code reference, it will be run once for each request.
- You can set parts of configuration that change per session this way.
+ You can set parts of configuration that change per session this way.
For example, one might use the following code in a gitweb configuration
file
+
Only one provider at a time can be selected ('default' is one element list).
If an unknown provider is specified, the feature is disabled.
*Note* that some providers might require extra Perl packages to be
-installed; see 'gitweb/INSTALL' for more details.
+installed; see `gitweb/INSTALL` for more details.
+
This feature can be configured on a per-repository basis via
repository's `gitweb.avatar` configuration variable.
CONFIGURATION
-------------
Various aspects of gitweb's behavior can be controlled through the configuration
-file 'gitweb_config.perl' or '/etc/gitweb.conf'. See the linkgit:gitweb.conf[5]
+file `gitweb_config.perl` or `/etc/gitweb.conf`. See the linkgit:gitweb.conf[5]
for details.
Repositories
our $projectroot = '/path/to/parent/directory';
-----------------------------------------------------------------------
-The default value for `$projectroot` is '/pub/git'. You can change it during
+The default value for `$projectroot` is `/pub/git`. You can change it during
building gitweb via `GITWEB_PROJECTROOT` build configuration variable.
By default all Git repositories under `$projectroot` are visible and available
-------------------------------------------------------------------------------
+
from the template during repository creation, usually installed in
-'/usr/share/git-core/templates/'. You can use the `gitweb.description` repo
+`/usr/share/git-core/templates/`. You can use the `gitweb.description` repo
configuration variable, but the file takes precedence.
category (or `gitweb.category`)::
Apache as CGI
~~~~~~~~~~~~~
Apache must be configured to support CGI scripts in the directory in
-which gitweb is installed. Let's assume that it is '/var/www/cgi-bin'
+which gitweb is installed. Let's assume that it is `/var/www/cgi-bin`
directory.
-----------------------------------------------------------------------
(for mod_perl 1.x) or ModPerl::Registry (for mod_perl 2.x) to enable
this support.
-Assuming that gitweb is installed to '/var/www/perl', the following
+Assuming that gitweb is installed to `/var/www/perl`, the following
Apache configuration (for mod_perl 2.x) is suitable.
-----------------------------------------------------------------------
~~~~~~~~~~~~~~~~~~~
Gitweb works with Apache and FastCGI. First you need to rename, copy
or symlink gitweb.cgi to gitweb.fcgi. Let's assume that gitweb is
-installed in '/usr/share/gitweb' directory. The following Apache
+installed in `/usr/share/gitweb` directory. The following Apache
configuration is suitable (UNTESTED!)
-----------------------------------------------------------------------
-----------------------------------------------------------------------
The above configuration expects your public repositories to live under
-'/pub/git' and will serve them as `http://git.domain.org/dir-under-pub-git`,
+`/pub/git` and will serve them as `http://git.domain.org/dir-under-pub-git`,
both as clonable Git URL and as browseable gitweb interface. If you then
start your linkgit:git-daemon[1] with `--base-path=/pub/git --export-all`
then you can even use the `git://` URL with exactly the same path.
Setting the environment variable `GITWEB_CONFIG` will tell gitweb to use the
-named file (i.e. in this example '/etc/gitweb.conf') as a configuration for
+named file (i.e. in this example `/etc/gitweb.conf`) as a configuration for
gitweb. You don't really need it in above example; it is required only if
your configuration file is in different place than built-in (during
-compiling gitweb) 'gitweb_config.perl' or '/etc/gitweb.conf'. See
+compiling gitweb) 'gitweb_config.perl' or `/etc/gitweb.conf`. See
linkgit:gitweb.conf[5] for details, especially information about precedence
rules.
If you use the rewrite rules from the example you *might* also need
something like the following in your gitweb configuration file
-('/etc/gitweb.conf' following example):
+(`/etc/gitweb.conf` following example):
----------------------------------------------------------------------------
@stylesheets = ("/some/absolute/path/gitweb.css");
$my_uri = "/";
Here actual project root is passed to gitweb via `GITWEB_PROJECT_ROOT`
environment variable from a web server, so you need to put the following
-line in gitweb configuration file ('/etc/gitweb.conf' in above example):
+line in gitweb configuration file (`/etc/gitweb.conf` in above example):
--------------------------------------------------------------------------
$projectroot = $ENV{'GITWEB_PROJECTROOT'} || "/pub/git";
--------------------------------------------------------------------------
These configurations enable two things. First, each unix user (`<user>`) of
the server will be able to browse through gitweb Git repositories found in
-'~/public_git/' with the following url:
+`~/public_git/` with the following url:
http://git.example.org/~<user>/
use the \'~' as first character, just comment or remove the second rewrite
rule, and uncomment one of the following according to what you want.
-Second, repositories found in '/pub/scm/' and '/var/git/' will be accessible
+Second, repositories found in `/pub/scm/` and `/var/git/` will be accessible
through `http://git.example.org/scm/` and `http://git.example.org/var/`.
You can add as many project roots as you want by adding rewrite rules like
the third and the fourth.
http://git.example.com/project.git/shortlog/sometag
i.e. without 'gitweb.cgi' part, by using a configuration such as the
-following. This configuration assumes that '/var/www/gitweb' is the
+following. This configuration assumes that `/var/www/gitweb` is the
DocumentRoot of your webserver, contains the gitweb.cgi script and
complementary static files (stylesheet, favicon, JavaScript):
`@stylesheets`, `$my_uri` and `$home_link`, but you lose "dumb client"
access to your project .git dirs (described in "Single URL for gitweb and
for fetching" section). A possible workaround for the latter is the
-following: in your project root dir (e.g. '/pub/git') have the projects
-named *without* a .git extension (e.g. '/pub/git/project' instead of
-'/pub/git/project.git') and configure Apache as follows:
+following: in your project root dir (e.g. `/pub/git`) have the projects
+named *without* a .git extension (e.g. `/pub/git/project` instead of
+`/pub/git/project.git`) and configure Apache as follows:
----------------------------------------------------------------------------
<VirtualHost *:80>
ServerAlias git.example.com
will provide human-friendly gitweb access.
This solution is not 100% bulletproof, in the sense that if some project has
-a named ref (branch, tag) starting with 'git/', then paths such as
+a named ref (branch, tag) starting with `git/`, then paths such as
http://git.example.com/project/command/abranch..git/abranch
--------
linkgit:gitweb.conf[5], linkgit:git-instaweb[1]
-'gitweb/README', 'gitweb/INSTALL'
+`gitweb/README`, `gitweb/INSTALL`
GIT
---
origin/name-of-upstream-branch, which you can see using
`git branch -r`.
+[[def_overlay]]overlay::
+ Only update and add files to the working directory, but don't
+ delete them, similar to how 'cp -R' would update the contents
+ in the destination directory. This is the default mode in a
+ <<def_checkout,checkout>> when checking out files from the
+ <<def_index,index>> or a <<def_tree-ish,tree-ish>>. In
+ contrast, no-overlay mode also deletes tracked files not
+ present in the source, similar to 'rsync --delete'.
+
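For example (with a hypothetical path `dir/`), checking out in no-overlay mode also removes files under `dir/` that exist in the index but not in `HEAD`:

------------
$ git checkout --no-overlay HEAD -- dir/
------------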
[[def_pack]]pack::
A set of objects which have been compressed into one file (to save space
or to transmit them efficiently).
--------------
If you have to access the WebDAV server from behind an HTTP(S) proxy,
-set the variable 'all_proxy' to 'http://proxy-host.com:port', or
-'http://login-on-proxy:passwd-on-proxy@proxy-host.com:port'. See 'man
+set the variable 'all_proxy' to `http://proxy-host.com:port`, or
+`http://login-on-proxy:passwd-on-proxy@proxy-host.com:port`. See 'man
curl' for details.
Perform the merge and commit the result. This option can
be used to override --no-commit.
+
-With --no-commit perform the merge but pretend the merge
-failed and do not autocommit, to give the user a chance to
-inspect and further tweak the merge result before committing.
+With --no-commit perform the merge and stop just before creating
+a merge commit, to give the user a chance to inspect and further
+tweak the merge result before committing.
++
+Note that fast-forward updates do not create a merge commit and
+therefore there is no way to stop those merges with --no-commit.
+Thus, if you want to ensure your branch is not changed or updated
+by the merge command, use --no-ff with --no-commit.
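Concretely (with a hypothetical branch `topic`):

------------
$ git merge --no-ff --no-commit topic
$ # inspect and tweak the result, then
$ git commit
------------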
--edit::
-e::
+
The placeholders are:
-- '%H': commit hash
-- '%h': abbreviated commit hash
-- '%T': tree hash
-- '%t': abbreviated tree hash
-- '%P': parent hashes
-- '%p': abbreviated parent hashes
-- '%an': author name
-- '%aN': author name (respecting .mailmap, see linkgit:git-shortlog[1]
- or linkgit:git-blame[1])
-- '%ae': author email
-- '%aE': author email (respecting .mailmap, see
- linkgit:git-shortlog[1] or linkgit:git-blame[1])
-- '%ad': author date (format respects --date= option)
-- '%aD': author date, RFC2822 style
-- '%ar': author date, relative
-- '%at': author date, UNIX timestamp
-- '%ai': author date, ISO 8601-like format
-- '%aI': author date, strict ISO 8601 format
-- '%cn': committer name
-- '%cN': committer name (respecting .mailmap, see
- linkgit:git-shortlog[1] or linkgit:git-blame[1])
-- '%ce': committer email
-- '%cE': committer email (respecting .mailmap, see
- linkgit:git-shortlog[1] or linkgit:git-blame[1])
-- '%cd': committer date (format respects --date= option)
-- '%cD': committer date, RFC2822 style
-- '%cr': committer date, relative
-- '%ct': committer date, UNIX timestamp
-- '%ci': committer date, ISO 8601-like format
-- '%cI': committer date, strict ISO 8601 format
-- '%d': ref names, like the --decorate option of linkgit:git-log[1]
-- '%D': ref names without the " (", ")" wrapping.
-- '%S': ref name given on the command line by which the commit was reached
- (like `git log --source`), only works with `git log`
-- '%e': encoding
-- '%s': subject
-- '%f': sanitized subject line, suitable for a filename
-- '%b': body
-- '%B': raw body (unwrapped subject and body)
+- Placeholders that expand to a single literal character:
+'%n':: newline
+'%%':: a raw '%'
+'%x00':: print a byte from a hex code
+
+- Placeholders that affect formatting of later placeholders:
+'%Cred':: switch color to red
+'%Cgreen':: switch color to green
+'%Cblue':: switch color to blue
+'%Creset':: reset color
+'%C(...)':: color specification, as described under Values in the
+ "CONFIGURATION FILE" section of linkgit:git-config[1]. By
+ default, colors are shown only when enabled for log output
+ (by `color.diff`, `color.ui`, or `--color`, and respecting
+ the `auto` settings of the former if we are going to a
+ terminal). `%C(auto,...)` is accepted as a historical
+ synonym for the default (e.g., `%C(auto,red)`). Specifying
+ `%C(always,...)` will show the colors even when color is
+ not otherwise enabled (though consider just using
+ `--color=always` to enable color for the whole output,
+ including this format and anything else git might color).
+ `auto` alone (i.e. `%C(auto)`) will turn on auto coloring
+ on the next placeholders until the color is switched
+ again.
+'%m':: left (`<`), right (`>`) or boundary (`-`) mark
+'%w([<w>[,<i1>[,<i2>]]])':: switch line wrapping, like the -w option of
+ linkgit:git-shortlog[1].
+'%<(<N>[,trunc|ltrunc|mtrunc])':: make the next placeholder take at
+ least N columns, padding spaces on
+ the right if necessary. Optionally
+ truncate at the beginning (ltrunc),
+ the middle (mtrunc) or the end
+ (trunc) if the output is longer than
+ N columns. Note that truncating
+ only works correctly with N >= 2.
+'%<|(<N>)':: make the next placeholder take at least until Nth
+ columns, padding spaces on the right if necessary
+'%>(<N>)', '%>|(<N>)':: similar to '%<(<N>)', '%<|(<N>)' respectively,
+ but padding spaces on the left
+'%>>(<N>)', '%>>|(<N>)':: similar to '%>(<N>)', '%>|(<N>)'
+ respectively, except that if the next
+ placeholder takes more spaces than given and
+ there are spaces on its left, use those
+ spaces
+'%><(<N>)', '%><|(<N>)':: similar to '%<(<N>)', '%<|(<N>)'
+ respectively, but padding both sides
+ (i.e. the text is centered)
+
+- Placeholders that expand to information extracted from the commit:
+'%H':: commit hash
+'%h':: abbreviated commit hash
+'%T':: tree hash
+'%t':: abbreviated tree hash
+'%P':: parent hashes
+'%p':: abbreviated parent hashes
+'%an':: author name
+'%aN':: author name (respecting .mailmap, see linkgit:git-shortlog[1]
+ or linkgit:git-blame[1])
+'%ae':: author email
+'%aE':: author email (respecting .mailmap, see linkgit:git-shortlog[1]
+ or linkgit:git-blame[1])
+'%ad':: author date (format respects --date= option)
+'%aD':: author date, RFC2822 style
+'%ar':: author date, relative
+'%at':: author date, UNIX timestamp
+'%ai':: author date, ISO 8601-like format
+'%aI':: author date, strict ISO 8601 format
+'%cn':: committer name
+'%cN':: committer name (respecting .mailmap, see
+ linkgit:git-shortlog[1] or linkgit:git-blame[1])
+'%ce':: committer email
+'%cE':: committer email (respecting .mailmap, see
+ linkgit:git-shortlog[1] or linkgit:git-blame[1])
+'%cd':: committer date (format respects --date= option)
+'%cD':: committer date, RFC2822 style
+'%cr':: committer date, relative
+'%ct':: committer date, UNIX timestamp
+'%ci':: committer date, ISO 8601-like format
+'%cI':: committer date, strict ISO 8601 format
+'%d':: ref names, like the --decorate option of linkgit:git-log[1]
+'%D':: ref names without the " (", ")" wrapping.
+'%S':: ref name given on the command line by which the commit was reached
+ (like `git log --source`), only works with `git log`
+'%e':: encoding
+'%s':: subject
+'%f':: sanitized subject line, suitable for a filename
+'%b':: body
+'%B':: raw body (unwrapped subject and body)
ifndef::git-rev-list[]
-- '%N': commit notes
+'%N':: commit notes
endif::git-rev-list[]
-- '%GG': raw verification message from GPG for a signed commit
-- '%G?': show "G" for a good (valid) signature,
- "B" for a bad signature,
- "U" for a good signature with unknown validity,
- "X" for a good signature that has expired,
- "Y" for a good signature made by an expired key,
- "R" for a good signature made by a revoked key,
- "E" if the signature cannot be checked (e.g. missing key)
- and "N" for no signature
-- '%GS': show the name of the signer for a signed commit
-- '%GK': show the key used to sign a signed commit
-- '%GF': show the fingerprint of the key used to sign a signed commit
-- '%GP': show the fingerprint of the primary key whose subkey was used
- to sign a signed commit
-- '%gD': reflog selector, e.g., `refs/stash@{1}` or
- `refs/stash@{2 minutes ago`}; the format follows the rules described
- for the `-g` option. The portion before the `@` is the refname as
- given on the command line (so `git log -g refs/heads/master` would
- yield `refs/heads/master@{0}`).
-- '%gd': shortened reflog selector; same as `%gD`, but the refname
- portion is shortened for human readability (so `refs/heads/master`
- becomes just `master`).
-- '%gn': reflog identity name
-- '%gN': reflog identity name (respecting .mailmap, see
- linkgit:git-shortlog[1] or linkgit:git-blame[1])
-- '%ge': reflog identity email
-- '%gE': reflog identity email (respecting .mailmap, see
- linkgit:git-shortlog[1] or linkgit:git-blame[1])
-- '%gs': reflog subject
-- '%Cred': switch color to red
-- '%Cgreen': switch color to green
-- '%Cblue': switch color to blue
-- '%Creset': reset color
-- '%C(...)': color specification, as described under Values in the
- "CONFIGURATION FILE" section of linkgit:git-config[1].
- By default, colors are shown only when enabled for log output (by
- `color.diff`, `color.ui`, or `--color`, and respecting the `auto`
- settings of the former if we are going to a terminal). `%C(auto,...)`
- is accepted as a historical synonym for the default (e.g.,
- `%C(auto,red)`). Specifying `%C(always,...)` will show the colors
- even when color is not otherwise enabled (though consider
- just using `--color=always` to enable color for the whole output,
- including this format and anything else git might color). `auto`
- alone (i.e. `%C(auto)`) will turn on auto coloring on the next
- placeholders until the color is switched again.
-- '%m': left (`<`), right (`>`) or boundary (`-`) mark
-- '%n': newline
-- '%%': a raw '%'
-- '%x00': print a byte from a hex code
-- '%w([<w>[,<i1>[,<i2>]]])': switch line wrapping, like the -w option of
- linkgit:git-shortlog[1].
-- '%<(<N>[,trunc|ltrunc|mtrunc])': make the next placeholder take at
- least N columns, padding spaces on the right if necessary.
- Optionally truncate at the beginning (ltrunc), the middle (mtrunc)
- or the end (trunc) if the output is longer than N columns.
- Note that truncating only works correctly with N >= 2.
-- '%<|(<N>)': make the next placeholder take at least until Nth
- columns, padding spaces on the right if necessary
-- '%>(<N>)', '%>|(<N>)': similar to '%<(<N>)', '%<|(<N>)'
- respectively, but padding spaces on the left
-- '%>>(<N>)', '%>>|(<N>)': similar to '%>(<N>)', '%>|(<N>)'
- respectively, except that if the next placeholder takes more spaces
- than given and there are spaces on its left, use those spaces
-- '%><(<N>)', '%><|(<N>)': similar to '%<(<N>)', '%<|(<N>)'
- respectively, but padding both sides (i.e. the text is centered)
-- %(trailers[:options]): display the trailers of the body as interpreted
- by linkgit:git-interpret-trailers[1]. The `trailers` string may be
- followed by a colon and zero or more comma-separated options. If the
- `only` option is given, omit non-trailer lines from the trailer block.
- If the `unfold` option is given, behave as if interpret-trailer's
- `--unfold` option was given. E.g., `%(trailers:only,unfold)` to do
- both.
+'%GG':: raw verification message from GPG for a signed commit
+'%G?':: show "G" for a good (valid) signature,
+ "B" for a bad signature,
+ "U" for a good signature with unknown validity,
+ "X" for a good signature that has expired,
+ "Y" for a good signature made by an expired key,
+ "R" for a good signature made by a revoked key,
+ "E" if the signature cannot be checked (e.g. missing key)
+ and "N" for no signature
+'%GS':: show the name of the signer for a signed commit
+'%GK':: show the key used to sign a signed commit
+'%GF':: show the fingerprint of the key used to sign a signed commit
+'%GP':: show the fingerprint of the primary key whose subkey was used
+ to sign a signed commit
+'%gD':: reflog selector, e.g., `refs/stash@{1}` or `refs/stash@{2
+	  minutes ago}`; the format follows the rules described for the
+ `-g` option. The portion before the `@` is the refname as
+ given on the command line (so `git log -g refs/heads/master`
+ would yield `refs/heads/master@{0}`).
+'%gd':: shortened reflog selector; same as `%gD`, but the refname
+ portion is shortened for human readability (so
+ `refs/heads/master` becomes just `master`).
+'%gn':: reflog identity name
+'%gN':: reflog identity name (respecting .mailmap, see
+ linkgit:git-shortlog[1] or linkgit:git-blame[1])
+'%ge':: reflog identity email
+'%gE':: reflog identity email (respecting .mailmap, see
+ linkgit:git-shortlog[1] or linkgit:git-blame[1])
+'%gs':: reflog subject
+'%(trailers[:options])':: display the trailers of the body as
+ interpreted by
+ linkgit:git-interpret-trailers[1]. The
+ `trailers` string may be followed by a colon
+ and zero or more comma-separated options:
+** 'key=<K>': only show trailers with the specified key. Matching is done
+   case-insensitively and the trailing colon is optional. If the option is
+   given multiple times, trailer lines matching any of the keys are
+   shown. This option automatically enables the `only` option so that
+   non-trailer lines in the trailer block are hidden. If that is not
+   desired it can be disabled with `only=false`. E.g.,
+   `%(trailers:key=Reviewed-by)` shows trailer lines with key
+   `Reviewed-by`.
+** 'only[=val]': select whether non-trailer lines from the trailer
+   block should be included. The `only` keyword may optionally be
+   followed by an equal sign and one of `true`, `on`, `yes` to omit or
+   `false`, `off`, `no` to show the non-trailer lines. If the option is
+   given without a value, it is enabled. If given multiple times, the last
+   value is used.
+** 'separator=<SEP>': specify a separator inserted between trailer
+   lines. When this option is not given, each trailer line is
+   terminated with a line feed character. The string SEP may contain
+   the literal formatting codes described above. To use a comma as the
+   separator one must use `%x2C`, as it would otherwise be parsed as the
+   next option. If the separator option is given multiple times, only the
+   last one is used. E.g., `%(trailers:key=Ticket,separator=%x2C )`
+   shows all trailer lines whose key is "Ticket" separated by a comma
+   and a space.
+** 'unfold[=val]': make it behave as if interpret-trailer's `--unfold`
+   option was given. In the same way as for `only`, it can be followed
+   by an equal sign and an explicit value. E.g.,
+   `%(trailers:only,unfold=true)` unfolds and shows all trailer lines.
+** 'valueonly[=val]': skip over the key part of the trailer line and only
+   show the value part. This also optionally allows an explicit value.
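Putting a few of these placeholders together (the trailer key below is only an example):

------------
$ git log -2 --pretty=format:'%h %<(20,trunc)%aN %s'
$ git log -1 --pretty=format:'%(trailers:key=Signed-off-by,valueonly)'
------------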
NOTE: Some placeholders may depend on other options given to the
revision traversal engine. For example, the `%g*` reflog options will
--filter-print-omitted::
Only useful with `--filter=`; prints a list of the objects omitted
- by the filter. Object IDs are prefixed with a ``~'' character.
+ by the filter. Object IDs are prefixed with a ``~'' character.
--missing=<missing-action>::
A debug option to help with future "partial clone" development.
author's). If `-local` is appended to the format (e.g.,
`iso-local`), the user's local time zone is used instead.
+
+--
`--date=relative` shows dates relative to the current time,
e.g. ``2 hours ago''. The `-local` option has no effect for
`--date=relative`.
-+
+
`--date=local` is an alias for `--date=default-local`.
-+
+
`--date=iso` (or `--date=iso8601`) shows timestamps in a ISO 8601-like format.
The differences to the strict ISO 8601 format are:
- a space between time and time zone
- no colon between hours and minutes of the time zone
-+
`--date=iso-strict` (or `--date=iso8601-strict`) shows timestamps in strict
ISO 8601 format.
-+
+
`--date=rfc` (or `--date=rfc2822`) shows timestamps in RFC 2822
format, often found in email messages.
-+
+
`--date=short` shows only the date, but not the time, in `YYYY-MM-DD` format.
-+
+
`--date=raw` shows the date as seconds since the epoch (1970-01-01
00:00:00 UTC), followed by a space, and then the timezone as an offset
from UTC (a `+` or `-` with four digits; the first two are hours, and
Note that the `-local` option does not affect the seconds-since-epoch
value (which is always measured in UTC), but does switch the accompanying
timezone value.
-+
+
`--date=human` shows the timezone if the timezone does not match the
current time-zone, and doesn't print the whole date if that matches
(ie skip printing year for dates that are "this year", but also skip
the whole date itself if it's in the last few days and we can just say
what weekday it was). For older dates the hour and minute is also
omitted.
-+
+
`--date=unix` shows the date as a Unix epoch timestamp (seconds since
1970). As with `--raw`, this is always in UTC and therefore `-local`
has no effect.
-+
+
`--date=format:...` feeds the format `...` to your system `strftime`,
except for %z and %Z, which are handled internally.
Use `--date=format:%c` to show the date in your system locale's
preferred format. See the `strftime` manual for a complete list of
format placeholders. When using `-local`, the correct syntax is
`--date=format-local:...`.
-+
+
`--date=default` is the default format, and is similar to
`--date=rfc2822`, with a few exceptions:
-
+--
- there is no comma after the day-of-week
- the time zone is omitted when the local time zone is used
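A couple of example invocations (the `strftime` format string is arbitrary):

------------
$ git log -1 --date=human --pretty=format:'%ad %s'
$ git log -1 --date=format-local:'%Y-%m-%d %H:%M' --pretty=format:'%ad %s'
------------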
the parents have only two variants and the merge result picks
one of them without modification.
+--combined-all-paths::
+ This flag causes combined diffs (used for merge commits) to
+ list the name of the file from all parents. It thus only has
+ effect when -c or --cc are specified, and is likely only
+ useful if filename changes are detected (i.e. when either
+ rename or copy detection have been requested).
+
-m::
This flag makes the merge commits show the full diff like
regular commits; for each merge parent, a separate log entry
when you run `git cherry-pick`.
+
Note that any of the 'refs/*' cases above may come either from
-the '$GIT_DIR/refs' directory or from the '$GIT_DIR/packed-refs' file.
+the `$GIT_DIR/refs` directory or from the `$GIT_DIR/packed-refs` file.
While the ref name encoding is unspecified, UTF-8 is preferred as
some output processing may assume ref names in UTF-8.
`git push` were run while `branchname` was checked out (or the current
`HEAD` if no branchname is specified). Since our push destination is
in a remote repository, of course, we report the local tracking branch
- that corresponds to that branch (i.e., something in 'refs/remotes/').
+ that corresponds to that branch (i.e., something in `refs/remotes/`).
+
Here's an example to make it more clear:
+
--continue::
Continue the operation in progress using the information in
- '.git/sequencer'. Can be used to continue after resolving
+ `.git/sequencer`. Can be used to continue after resolving
conflicts in a failed cherry-pick or revert.
--quit::
config-like files that the caller specifies (i.e., files like `.gitmodules`,
`~/.gitconfig` etc.). For example,
----------------------------------------
+----------------------------------------
struct config_set gm_config;
git_configset_init(&gm_config);
int b;
The filename will be prefixed by passing the filename along with
the prefix argument of `parse_options()` to `prefix_filename()`.
-`OPT_ARGUMENT(long, description)`::
+`OPT_ARGUMENT(long, &int_var, description)`::
Introduce a long-option argument that will be kept in `argv[]`.
+ If this option was seen, `int_var` will be set to one (except
+ if a `NULL` pointer was passed).
`OPT_NUMBER_CALLBACK(&var, description, func_ptr)`::
Recognize numerical options like -123 and feed the integer as
--- /dev/null
+= Trace2 API
+
+The Trace2 API can be used to print debug, performance, and telemetry
+information to stderr or a file. The Trace2 feature is inactive unless
+explicitly enabled by enabling one or more Trace2 Targets.
+
+The Trace2 API is intended to replace the existing (Trace1)
+printf-style tracing provided by the existing `GIT_TRACE` and
+`GIT_TRACE_PERFORMANCE` facilities. During initial implementation,
+Trace2 and Trace1 may operate in parallel.
+
+The Trace2 API defines a set of high-level messages with known fields,
+such as (`start`: `argv`) and (`exit`: {`exit-code`, `elapsed-time`}).
+
+Trace2 instrumentation throughout the Git code base sends Trace2
+messages to the enabled Trace2 Targets. Targets transform the content
+of these messages into purpose-specific formats and write events to
+their data streams. In this manner, the Trace2 API can drive
+many different types of analysis.
+
+Targets are defined using a VTable allowing easy extension to other
+formats in the future. This might be used to define a binary format,
+for example.
+
+== Trace2 Targets
+
+Trace2 defines the following set of Trace2 Targets.
+Format details are given in a later section.
+
+`GIT_TR2` (NORMAL)::
+
+ a simple printf format like GIT_TRACE.
++
+------------
+$ export GIT_TR2=~/log.normal
+$ git version
+git version 2.20.1.155.g426c96fcdb
+------------
++
+------------
+$ cat ~/log.normal
+12:28:42.620009 common-main.c:38 version 2.20.1.155.g426c96fcdb
+12:28:42.620989 common-main.c:39 start git version
+12:28:42.621101 git.c:432 cmd_name version (version)
+12:28:42.621215 git.c:662 exit elapsed:0.001227 code:0
+12:28:42.621250 trace2/tr2_tgt_normal.c:124 atexit elapsed:0.001265 code:0
+------------
+
+`GIT_TR2_PERF` (PERF)::
+
+ a column-based format to replace GIT_TRACE_PERFORMANCE suitable for
+ development and testing, possibly to complement tools like gprof.
++
+------------
+$ export GIT_TR2_PERF=~/log.perf
+$ git version
+git version 2.20.1.155.g426c96fcdb
+------------
++
+------------
+$ cat ~/log.perf
+12:28:42.620675 common-main.c:38 | d0 | main | version | | | | | 2.20.1.155.g426c96fcdb
+12:28:42.621001 common-main.c:39 | d0 | main | start | | | | | git version
+12:28:42.621111 git.c:432 | d0 | main | cmd_name | | | | | version (version)
+12:28:42.621225 git.c:662 | d0 | main | exit | | 0.001227 | | | code:0
+12:28:42.621259 trace2/tr2_tgt_perf.c:211 | d0 | main | atexit | | 0.001265 | | | code:0
+------------
+
+`GIT_TR2_EVENT` (EVENT)::
+
+ a JSON-based format of event data suitable for telemetry analysis.
++
+------------
+$ export GIT_TR2_EVENT=~/log.event
+$ git version
+git version 2.20.1.155.g426c96fcdb
+------------
++
+------------
+$ cat ~/log.event
+{"event":"version","sid":"1547659722619736-11614","thread":"main","time":"2019-01-16 17:28:42.620713","file":"common-main.c","line":38,"evt":"1","exe":"2.20.1.155.g426c96fcdb"}
+{"event":"start","sid":"1547659722619736-11614","thread":"main","time":"2019-01-16 17:28:42.621027","file":"common-main.c","line":39,"argv":["git","version"]}
+{"event":"cmd_name","sid":"1547659722619736-11614","thread":"main","time":"2019-01-16 17:28:42.621122","file":"git.c","line":432,"name":"version","hierarchy":"version"}
+{"event":"exit","sid":"1547659722619736-11614","thread":"main","time":"2019-01-16 17:28:42.621236","file":"git.c","line":662,"t_abs":0.001227,"code":0}
+{"event":"atexit","sid":"1547659722619736-11614","thread":"main","time":"2019-01-16 17:28:42.621268","file":"trace2/tr2_tgt_event.c","line":163,"t_abs":0.001265,"code":0}
+------------
+
+== Enabling a Target
+
+A Trace2 Target is enabled when the corresponding environment variable
+(`GIT_TR2`, `GIT_TR2_PERF`, or `GIT_TR2_EVENT`) is set. The following
+values are recognized.
+
+`0`::
+`false`::
+
+ Disables the target.
+
+`1`::
+`true`::
+
+ Enables the target and writes stream to `STDERR`.
+
+`[2-9]`::
+
+ Enables the target and writes to the already opened file descriptor.
+
+`<absolute-pathname>`::
+
+ Enables the target, opens and writes to the file in append mode.
+
+`af_unix:[<socket_type>:]<absolute-pathname>`::
+
+ Enables the target, opens and writes to a Unix Domain Socket
+ (on platforms that support them).
++
+Socket type can be either `stream` or `dgram`. If the socket type is
+omitted, Git will try both.
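For example (the socket path is hypothetical and a listener must already exist there):

------------
$ export GIT_TR2_EVENT=af_unix:dgram:/tmp/git-trace2.sock
$ git version
------------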
+
+== Trace2 API
+
+All public Trace2 functions and macros are defined in `trace2.h` and
+`trace2.c`. All public symbols are prefixed with `trace2_`.
+
+There are no public Trace2 data structures.
+
+The Trace2 code also defines a set of private functions and data types
+in the `trace2/` directory. These symbols are prefixed with `tr2_`
+and should only be used by functions in `trace2.c`.
+
+== Conventions for Public Functions and Macros
+
+The functions defined by the Trace2 API are declared and documented
+in `trace2.h`. It defines the API functions and wrapper macros for
+Trace2.
+
+Some functions have a `_fl()` suffix to indicate that they take `file`
+and `line-number` arguments.
+
+Some functions have a `_va_fl()` suffix to indicate that they also
+take a `va_list` argument.
+
+Some functions have a `_printf_fl()` suffix to indicate that they also
+take a varargs argument.
+
+There are CPP wrapper macros and ifdefs to hide most of these details.
+See `trace2.h` for more details. The following discussion will only
+describe the simplified forms.
+
+== Public API
+
+All Trace2 API functions send a message to all of the active
+Trace2 Targets. This section describes the set of available
+messages.
+
+It helps to divide these functions into groups for discussion
+purposes.
+
+=== Basic Command Messages
+
+These are concerned with the lifetime of the overall git process.
+
+`void trace2_initialize()`::
+
+ Determines if any Trace2 Targets should be enabled and
+ initializes the Trace2 facility. This includes starting the
+ elapsed time clocks and thread local storage (TLS).
++
+This function emits a "version" message containing the version of git
+and the Trace2 protocol.
++
+This function should be called from `main()` as early as possible in
+the life of the process.
+
+`int trace2_is_enabled()`::
+
+ Returns 1 if Trace2 is enabled (at least one target is
+ active).
+
+`void trace2_cmd_start(int argc, const char **argv)`::
+
+ Emits a "start" message containing the process command line
+ arguments.
+
+`int trace2_cmd_exit(int exit_code)`::
+
+ Emits an "exit" message containing the process exit-code and
+ elapsed time.
++
+Returns the exit-code.
+
+`void trace2_cmd_error(const char *fmt, va_list ap)`::
+
+ Emits an "error" message containing a formatted error message.
+
+`void trace2_cmd_path(const char *pathname)`::
+
+ Emits a "cmd_path" message with the full pathname of the
+ current process.
+
+=== Command Detail Messages
+
+These are concerned with describing the specific Git command
+after the command line, config, and environment are inspected.
+
+`void trace2_cmd_name(const char *name)`::
+
+ Emits a "cmd_name" message with the canonical name of the
+ command, for example "status" or "checkout".
+
+`void trace2_cmd_mode(const char *mode)`::
+
+ Emits a "cmd_mode" message with a qualifier name to further
+ describe the current git command.
++
+This message is intended to be used with git commands having multiple
+major modes. For example, a "checkout" command can check out a new
+branch or it can check out a single file, so the checkout code could
+emit a cmd_mode message of "branch" or "file".
+
+`void trace2_cmd_alias(const char *alias, const char **argv_expansion)`::
+
+ Emits an "alias" message containing the alias used and the
+ argument expansion.
+
+`void trace2_def_param(const char *parameter, const char *value)`::
+
+ Emits a "def_param" message containing a key/value pair.
++
+This message is intended to report some global aspect of the current
+command, such as a configuration setting or command line switch that
+significantly affects program performance or behavior, such as
+`core.abbrev`, `status.showUntrackedFiles`, or `--no-ahead-behind`.
+
+`void trace2_cmd_list_config()`::
+
+	Emits "def_param" messages for "important" configuration
+ settings.
++
+The environment variable `GIT_TR2_CONFIG_PARAMS` can be set to a
+list of patterns of important configuration settings, for example:
+`core.*,remote.*.url`. This function will iterate over all config
+settings and emit a "def_param" message for each match.
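For instance (the pattern list is the one quoted above):

------------
$ export GIT_TR2=~/log.normal
$ export GIT_TR2_CONFIG_PARAMS='core.*,remote.*.url'
$ git status
------------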
+
+`void trace2_cmd_set_config(const char *key, const char *value)`::
+
+ Emits a "def_param" message for a specific configuration
+ setting IFF it matches the `GIT_TR2_CONFIG_PARAMS` pattern.
++
+This is used to hook into `git_config_set()` and catch any
+configuration changes and update a value previously reported by
+`trace2_cmd_list_config()`.
+
+`void trace2_def_repo(struct repository *repo)`::
+
+ Registers a repository with the Trace2 layer. Assigns a
+ unique "repo-id" to `repo->trace2_repo_id`.
++
+Emits a "worktree" messages containing the repo-id and the worktree
+pathname.
++
+Region and data messages (described later) may refer to this repo-id.
++
+The main/top-level repository will have repo-id value 1 (aka "r1").
++
+The repo-id field is in anticipation of future in-proc submodule
+repositories.
+
+=== Child Process Messages
+
+These are concerned with the various spawned child processes,
+including shell scripts, git commands, editors, pagers, and hooks.
+
+`void trace2_child_start(struct child_process *cmd)`::
+
+ Emits a "child_start" message containing the "child-id",
+ "child-argv", and "child-classification".
++
+Before calling this, set `cmd->trace2_child_class` to a name
+describing the type of child process, for example "editor".
++
+This function assigns a unique "child-id" to `cmd->trace2_child_id`.
+This field is used later during the "child_exit" message to associate
+it with the "child_start" message.
++
+This function should be called before spawning the child process.
+
+`void trace2_child_exit(struct child_process *cmd, int child_exit_code)`::
+
+ Emits a "child_exit" message containing the "child-id",
+ the child's elapsed time and exit-code.
++
+The reported elapsed time includes the process creation overhead and
+time spent waiting for it to exit, so it may be slightly longer than
+the time reported by the child itself.
++
+This function should be called after reaping the child process.
+
+`int trace2_exec(const char *exe, const char **argv)`::
+
+	Emits an "exec" message containing the "exec-id" and the
+	argv of the new process.
++
+This function should be called before calling one of the `exec()`
+variants, such as `execvp()`.
++
+This function returns a unique "exec-id". This value is used later
+if the exec() fails and an "exec_result" message is necessary.
+
+`void trace2_exec_result(int exec_id, int error_code)`::
+
+	Emits an "exec_result" message containing the "exec-id"
+	and the error code.
++
+On Unix-based systems, `exec()` does not return if successful.
+This message is used to indicate that the `exec()` failed and
+that the current program is continuing.
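++
+A minimal sketch of the intended calling pattern (error handling
+simplified; the surrounding code is hypothetical):
++
+----------------
+int exec_id = trace2_exec(exe, argv);
+
+execvp(exe, (char *const *)argv);
+
+/* Only reached if execvp() failed. */
+trace2_exec_result(exec_id, errno);
+----------------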
+
+=== Git Thread Messages
+
+These messages are concerned with Git thread usage.
+
+`void trace2_thread_start(const char *thread_name)`::
+
+ Emits a "thread_start" message.
++
+The `thread_name` field should be a descriptive name, such as the
+unique name of the thread-proc. A unique "thread-id" will be added
+to the name to uniquely identify thread instances.
++
+Region and data messages (described later) may refer to this thread
+name.
++
+This function must be called by the thread-proc of the new thread
+(so that TLS data is properly initialized) and not by the caller
+of `pthread_create()`.
+
+`void trace2_thread_exit()`::
+
+ Emits a "thread_exit" message containing the thread name
+ and the thread elapsed time.
++
+This function must be called by the thread-proc before it returns
+(so that the correct TLS data is used and cleaned up). It should
+not be called by the caller of `pthread_join()`.
+
+=== Region and Data Messages
+
+These are concerned with recording performance data
+over regions or spans of code.
+
+`void trace2_region_enter(const char *category, const char *label, const struct repository *repo)`::
+
+`void trace2_region_enter_printf(const char *category, const char *label, const struct repository *repo, const char *fmt, ...)`::
+
+`void trace2_region_enter_printf_va(const char *category, const char *label, const struct repository *repo, const char *fmt, va_list ap)`::
+
+ Emits a thread-relative "region_enter" message with optional
+ printf string.
++
+This function pushes a new region nesting stack level on the current
+thread and starts a clock for the new stack frame.
++
+The `category` field is an arbitrary category name used to classify
+regions by feature area, such as "status" or "index". At this time
+it is simply printed along with the rest of the message. It may
+be used in the future to filter messages.
++
+The `label` field is an arbitrary label used to describe the activity
+being started, such as "read_recursive" or "do_read_index".
++
+The `repo` field, if set, will be used to get the "repo-id", so that
+recursive operations can be attributed to the correct repository.
+
+`void trace2_region_leave(const char *category, const char *label, const struct repository *repo)`::
+
+`void trace2_region_leave_printf(const char *category, const char *label, const struct repository *repo, const char *fmt, ...)`::
+
+`void trace2_region_leave_printf_va(const char *category, const char *label, const struct repository *repo, const char *fmt, va_list ap)`::
+
+ Emits a thread-relative "region_leave" message with optional
+ printf string.
++
+This function pops the region nesting stack on the current thread
+and reports the elapsed time of the stack frame.
++
+The `category`, `label`, and `repo` fields are the same as above.
+The `category` and `label` do not need to match the corresponding
+"region_enter" message, but it makes the data stream easier to
+understand.
+
+`void trace2_data_string(const char *category, const struct repository *repo, const char *key, const char *value)`::
+
+`void trace2_data_intmax(const char *category, const struct repository *repo, const char *key, intmax_t value)`::
+
+`void trace2_data_json(const char *category, const struct repository *repo, const char *key, const struct json_writer *jw)`::
+
+ Emits a region- and thread-relative "data" or "data_json" message.
++
+This is a key/value pair message containing information about the
+current thread, region stack, and repository. This could be used
+to print the number of files in a directory during a multi-threaded
+recursive tree walk.
+
+`void trace2_printf(const char *fmt, ...)`::
+
+`void trace2_printf_va(const char *fmt, va_list ap)`::
+
+ Emits a region- and thread-relative "printf" message.
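++
+These are intended for ad-hoc debug output, roughly analogous to the
+existing trace_printf() calls. A minimal sketch (the message text is
+only illustrative):
++
+----------------
+trace2_printf("%s: loaded %d entries", path, nr_entries);
+----------------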
+
+== Trace2 Target Formats
+
+=== NORMAL Format
+
+NORMAL format is enabled when the `GIT_TR2` environment variable is
+set.
+
+Events are written as lines of the form:
+
+------------
+[<time> SP <filename>:<line> SP+] <event-name> [[SP] <event-message>] LF
+------------
+
+`<event-name>`::
+
+ is the event name.
+
+`<event-message>`::
+ is a free-form printf message intended for human consumption.
++
+Note that this may contain embedded LF or CRLF characters that are
+not escaped, so the event may spill across multiple lines.
+
+If `GIT_TR2_BRIEF` is true, the `time`, `filename`, and `line` fields
+are omitted.
+
+This target is intended to be more of a summary (like GIT_TRACE) and
+less detailed than the other targets. It ignores thread, region, and
+data messages, for example.
+
+=== PERF Format
+
+PERF format is enabled when the `GIT_TR2_PERF` environment variable
+is set.
+
+Events are written as lines of the form:
+
+------------
+[<time> SP <filename>:<line> SP+
+ BAR SP] d<depth> SP
+ BAR SP <thread-name> SP+
+ BAR SP <event-name> SP+
+ BAR SP [r<repo-id>] SP+
+ BAR SP [<t_abs>] SP+
+ BAR SP [<t_rel>] SP+
+ BAR SP [<category>] SP+
+ BAR SP DOTS* <perf-event-message>
+ LF
+------------
+
+`<depth>`::
+ is the git process depth. This is the number of parent
+ git processes. A top-level git command has depth value "d0".
+ A child of it has depth value "d1". A second level child
+ has depth value "d2" and so on.
+
+`<thread-name>`::
+ is a unique name for the thread. The primary thread
+ is called "main". Other thread names are of the form "th%d:%s"
+ and include a unique number and the name of the thread-proc.
+
+`<event-name>`::
+ is the event name.
+
+`<repo-id>`::
+ when present, is a number indicating the repository
+ in use. A `def_repo` event is emitted when a repository is
+ opened. This defines the repo-id and associated worktree.
+ Subsequent repo-specific events will reference this repo-id.
++
+Currently, this is always "r1" for the main repository.
+This field is in anticipation of in-proc submodules in the future.
+
+`<t_abs>`::
+ when present, is the absolute time in seconds since the
+ program started.
+
+`<t_rel>`::
+ when present, is time in seconds relative to the start of
+ the current region. For a thread-exit event, it is the elapsed
+ time of the thread.
+
+`<category>`::
+ is present on region and data events and is used to
+ indicate a broad category, such as "index" or "status".
+
+`<perf-event-message>`::
+ is a free-form printf message intended for human consumption.
+
+------------
+15:33:33.532712 wt-status.c:2310 | d0 | main | region_enter | r1 | 0.126064 | | status | label:print
+15:33:33.532712 wt-status.c:2331 | d0 | main | region_leave | r1 | 0.127568 | 0.001504 | status | label:print
+------------
+
+If `GIT_TR2_PERF_BRIEF` is true, the `time`, `file`, and `line`
+fields are omitted.
+
+------------
+d0 | main | region_leave | r1 | 0.011717 | 0.009122 | index | label:preload
+------------
+
+The PERF target is intended for interactive performance analysis
+during development and is quite noisy.
+
+=== EVENT Format
+
+EVENT format is enabled when the `GIT_TR2_EVENT` environment
+variable is set.
+
+Each event is a JSON-object containing multiple key/value pairs
+written as a single line and followed by a LF.
+
+------------
+'{' <key> ':' <value> [',' <key> ':' <value>]* '}' LF
+------------
+
+Some key/value pairs are common to all events and some are
+event-specific.
+
+==== Common Key/Value Pairs
+
+The following key/value pairs are common to all events:
+
+------------
+{
+ "event":"version",
+ "sid":"1547659722619736-11614",
+ "thread":"main",
+ "time":"2019-01-16 17:28:42.620713",
+ "file":"common-main.c",
+ "line":38,
+ ...
+}
+------------
+
+`"event":<event>`::
+ is the event name.
+
+`"sid":<sid>`::
+ is the session-id. This is a unique string to identify the
+ process instance to allow all events emitted by a process to
+ be identified. A session-id is used instead of a PID because
+ PIDs are recycled by the OS. For child git processes, the
+ session-id is prepended with the session-id of the parent git
+ process to allow parent-child relationships to be identified
+ during post-processing.
+
+`"thread":<thread>`::
+ is the thread name.
+
+`"time":<time>`::
+ is the UTC time of the event.
+
+`"file":<filename>`::
+ is source file generating the event.
+
+`"line":<line-number>`::
+ is the integer source line number generating the event.
+
+`"repo":<repo-id>`::
+ when present, is the integer repo-id as described previously.
+
+If `GIT_TR2_EVENT_BRIEF` is true, the `file` and `line` fields are omitted
+from all events and the `time` field is only present on the "start" and
+"atexit" events.
+
+==== Event-Specific Key/Value Pairs
+
+`"version"`::
+ This event gives the version of the executable and the EVENT format.
++
+------------
+{
+ "event":"version",
+ ...
+ "evt":"1", # EVENT format version
+ "exe":"2.20.1.155.g426c96fcdb" # git version
+}
+------------
+
+`"start"`::
+ This event contains the complete argv received by main().
++
+------------
+{
+ "event":"start",
+ ...
+ "argv":["git","version"]
+}
+------------
+
+`"exit"`::
+ This event is emitted when git calls `exit()`.
++
+------------
+{
+ "event":"exit",
+ ...
+ "t_abs":0.001227, # elapsed time in seconds
+ "code":0 # exit code
+}
+------------
+
+`"atexit"`::
+ This event is emitted by the Trace2 `atexit` routine during
+ final shutdown. It should be the last event emitted by the
+ process.
++
+(The elapsed time reported here is greater than the time reported in
+the "exit" event because it runs after all other atexit tasks have
+completed.)
++
+------------
+{
+ "event":"atexit",
+ ...
+ "t_abs":0.001227, # elapsed time in seconds
+ "code":0 # exit code
+}
+------------
+
+`"signal"`::
+ This event is emitted when the program is terminated by a user
+ signal. Depending on the platform, the signal event may
+ prevent the "atexit" event from being generated.
++
+------------
+{
+ "event":"signal",
+ ...
+ "t_abs":0.001227, # elapsed time in seconds
+ "signal":13 # SIGTERM, SIGINT, etc.
+}
+------------
+
+`"error"`::
+ This event is emitted when one of the `error()`, `die()`,
+ or `usage()` functions are called.
++
+------------
+{
+ "event":"error",
+ ...
+ "msg":"invalid option: --cahced", # formatted error message
+ "fmt":"invalid option: %s" # error format string
+}
+------------
++
+The error event may be emitted more than once. The format string
+allows post-processors to group errors by type without worrying
+about specific error arguments.
+
+`"cmd_path"`::
+ This event contains the discovered full path of the git
+ executable (on platforms that are configured to resolve it).
++
+------------
+{
+ "event":"cmd_path",
+ ...
+ "path":"C:/work/gfw/git.exe"
+}
+------------
+
+`"cmd_name"`::
+ This event contains the command name for this git process
+ and the hierarchy of commands from parent git processes.
++
+------------
+{
+ "event":"cmd_name",
+ ...
+ "name":"pack-objects",
+ "hierarchy":"push/pack-objects"
+}
+------------
++
+Normally, the "name" field contains the canonical name of the
+command. When a canonical name is not available, one of
+these special values is used:
++
+------------
+"_query_" # "git --html-path"
+"_run_dashed_" # when "git foo" tries to run "git-foo"
+"_run_shell_alias_" # alias expansion to a shell command
+"_run_git_alias_" # alias expansion to a git command
+"_usage_" # usage error
+------------
+
+`"cmd_mode"`::
+ This event, when present, describes the command variant This
+ event may be emitted more than once.
++
+------------
+{
+ "event":"cmd_mode",
+ ...
+ "name":"branch"
+}
+------------
++
+The "name" field is an arbitrary string to describe the command mode.
+For example, checkout can checkout a branch or an individual file.
+And these variations typically have different performance
+characteristics that are not comparable.
+
+`"alias"`::
+ This event is present when an alias is expanded.
++
+------------
+{
+ "event":"alias",
+ ...
+ "alias":"l", # registered alias
+ "argv":["log","--graph"] # alias expansion
+}
+------------
+
+`"child_start"`::
+ This event describes a child process that is about to be
+ spawned.
++
+------------
+{
+ "event":"child_start",
+ ...
+ "child_id":2,
+ "child_class":"?",
+ "use_shell":false,
+ "argv":["git","rev-list","--objects","--stdin","--not","--all","--quiet"]
+
+ "hook_name":"<hook_name>" # present when child_class is "hook"
+ "cd":"<path>" # present when cd is required
+}
+------------
++
+The "child_id" field can be used to match this child_start with the
+corresponding child_exit event.
++
+The "child_class" field is a rough classification, such as "editor",
+"pager", "transport/*", and "hook". Unclassified children are classified
+with "?".
+
+`"child_exit"`::
+ This event is generated after the current process has returned
+ from the waitpid() and collected the exit information from the
+ child.
++
+------------
+{
+ "event":"child_exit",
+ ...
+ "child_id":2,
+ "pid":14708, # child PID
+ "code":0, # child exit-code
+ "t_rel":0.110605 # observed run-time of child process
+}
+------------
++
+Note that the session-id of the child process is not available to
+the current/spawning process, so the child's PID is reported here as
+a hint for post-processing. (But it is only a hint because the child
+process may be a shell script which doesn't have a session-id.)
++
+Note that the `t_rel` field contains the observed run time in seconds
+for the child process (measured from before the fork/exec/spawn to
+after the waitpid()), so it includes OS process creation overhead.
+This time will therefore be slightly larger than the atexit time
+reported by the child process itself.
+
+`"exec"`::
+ This event is generated before git attempts to `exec()`
+ another command rather than starting a child process.
++
+------------
+{
+ "event":"exec",
+ ...
+ "exec_id":0,
+ "exe":"git",
+ "argv":["foo", "bar"]
+}
+------------
++
+The "exec_id" field is a command-unique id and is only useful if the
+`exec()` fails and a corresponding exec_result event is generated.
+
+`"exec_result"`::
+ This event is generated if the `exec()` fails and control
+ returns to the current git command.
++
+------------
+{
+ "event":"exec_result",
+ ...
+ "exec_id":0,
+ "code":1 # error code (errno) from exec()
+}
+------------
+
+`"thread_start"`::
+ This event is generated when a thread is started. It is
+ generated from *within* the new thread's thread-proc (for TLS
+ reasons).
++
+------------
+{
+ "event":"thread_start",
+ ...
+ "thread":"th02:preload_thread" # thread name
+}
+------------
+
+`"thread_exit"`::
+ This event is generated when a thread exits. It is generated
+ from *within* the thread's thread-proc (for TLS reasons).
++
+------------
+{
+ "event":"thread_exit",
+ ...
+ "thread":"th02:preload_thread", # thread name
+ "t_rel":0.007328 # thread elapsed time
+}
+------------
+
+`"def_param"`::
+ This event is generated to log a global parameter.
++
+------------
+{
+ "event":"def_param",
+ ...
+ "param":"core.abbrev",
+ "value":"7"
+}
+------------
+
+`"def_repo"`::
+ This event defines a repo-id and associates it with the root
+ of the worktree.
++
+------------
+{
+ "event":"def_repo",
+ ...
+ "repo":1,
+ "worktree":"/Users/jeffhost/work/gfw"
+}
+------------
++
+As stated earlier, the repo-id is currently always 1, so there will
+only be one def_repo event. Later, if in-proc submodules are
+supported, a def_repo event should be emitted for each submodule
+visited.
+
+`"region_enter"`::
+ This event is generated when entering a region.
++
+------------
+{
+ "event":"region_enter",
+ ...
+ "repo":1, # optional
+ "nesting":1, # current region stack depth
+ "category":"index", # optional
+ "label":"do_read_index", # optional
+ "msg":".git/index" # optional
+}
+------------
++
+The `category` field may be used in a future enhancement to
+do category-based filtering.
++
+The `GIT_TR2_EVENT_NESTING` environment variable can be used to
+filter deeply nested regions and data events. It defaults to "2".
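++
+For example, to capture more deeply nested regions one might run
+(the values and log pathname are only illustrative):
++
+----------------
+$ export GIT_TR2_EVENT_NESTING=5
+$ export GIT_TR2_EVENT=~/log.event
+$ git status
+----------------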
+
+`"region_leave"`::
+ This event is generated when leaving a region.
++
+------------
+{
+ "event":"region_leave",
+ ...
+ "repo":1, # optional
+ "t_rel":0.002876, # time spent in region in seconds
+ "nesting":1, # region stack depth
+ "category":"index", # optional
+ "label":"do_read_index", # optional
+ "msg":".git/index" # optional
+}
+------------
+
+`"data"`::
+ This event is generated to log a thread- and region-local
+ key/value pair.
++
+------------
+{
+ "event":"data",
+ ...
+ "repo":1, # optional
+ "t_abs":0.024107, # absolute elapsed time
+ "t_rel":0.001031, # elapsed time in region/thread
+ "nesting":2, # region stack depth
+ "category":"index",
+ "key":"read/cache_nr",
+ "value":"3552"
+}
+------------
++
+The "value" field may be an integer or a string.
+
+`"data-json"`::
+ This event is generated to log a pre-formatted JSON string
+ containing structured data.
++
+------------
+{
+ "event":"data_json",
+ ...
+ "repo":1, # optional
+ "t_abs":0.015905,
+ "t_rel":0.015905,
+ "nesting":1,
+ "category":"process",
+ "key":"windows/ancestry",
+ "value":["bash.exe","bash.exe"]
+}
+------------
+
+== Example Trace2 API Usage
+
+Here is a hypothetical example showing the intended usage of the
+Trace2 API (without worrying about the actual Git details).
+
+Initialization::
+
+ Initialization happens in `main()`. Behind the scenes, an
+ `atexit` and `signal` handler are registered.
++
+----------------
+int main(int argc, const char **argv)
+{
+ int exit_code;
+
+ trace2_initialize();
+ trace2_cmd_start(argv);
+
+ exit_code = cmd_main(argc, argv);
+
+ trace2_cmd_exit(exit_code);
+
+ return exit_code;
+}
+----------------
+
+Command Details::
+
+ After the basics are established, additional command
+ information can be sent to Trace2 as it is discovered.
++
+----------------
+int cmd_checkout(int argc, const char **argv)
+{
+ trace2_cmd_name("checkout");
+ trace2_cmd_mode("branch");
+ trace2_def_repo(the_repository);
+
+ // emit "def_param" messages for "interesting" config settings.
+ trace2_cmd_list_config();
+
+ if (do_something())
+ trace2_cmd_error("Path '%s': cannot do something", path);
+
+ return 0;
+}
+----------------
+
+Child Processes::
+
+ Wrap code spawning child processes.
++
+----------------
+void run_child(...)
+{
+ int child_exit_code;
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ ...
+ cmd.trace2_child_class = "editor";
+
+ trace2_child_start(&cmd);
+ child_exit_code = spawn_child_and_wait_for_it();
+ trace2_child_exit(&cmd, child_exit_code);
+}
+----------------
++
+For example, the following fetch command spawned ssh, index-pack,
+rev-list, and gc. This example also shows that fetch took
+5.199 seconds, of which 4.932 seconds were spent in ssh.
++
+----------------
+$ export GIT_TR2_BRIEF=1
+$ export GIT_TR2=~/log.normal
+$ git fetch origin
+...
+----------------
++
+----------------
+$ cat ~/log.normal
+version 2.20.1.vfs.1.1.47.g534dbe1ad1
+start git fetch origin
+worktree /Users/jeffhost/work/gfw
+cmd_name fetch (fetch)
+child_start[0] ssh git@github.com ...
+child_start[1] git index-pack ...
+... (Trace2 events from child processes omitted)
+child_exit[1] pid:14707 code:0 elapsed:0.076353
+child_exit[0] pid:14706 code:0 elapsed:4.931869
+child_start[2] git rev-list ...
+... (Trace2 events from child process omitted)
+child_exit[2] pid:14708 code:0 elapsed:0.110605
+child_start[3] git gc --auto
+... (Trace2 events from child process omitted)
+child_exit[3] pid:14709 code:0 elapsed:0.006240
+exit elapsed:5.198503 code:0
+atexit elapsed:5.198541 code:0
+----------------
++
+When a git process is a (direct or indirect) child of another
+git process, it inherits Trace2 context information. This
+allows the child to print the command hierarchy. This example
+shows gc as child[3] of fetch. When the gc process reports
+its name as "gc", it also reports the hierarchy as "fetch/gc".
+(In this example, trace2 messages from the child process are
+indented for clarity.)
++
+----------------
+$ export GIT_TR2_BRIEF=1
+$ export GIT_TR2=~/log.normal
+$ git fetch origin
+...
+----------------
++
+----------------
+$ cat ~/log.normal
+version 2.20.1.160.g5676107ecd.dirty
+start git fetch official
+worktree /Users/jeffhost/work/gfw
+cmd_name fetch (fetch)
+...
+child_start[3] git gc --auto
+ version 2.20.1.160.g5676107ecd.dirty
+ start /Users/jeffhost/work/gfw/git gc --auto
+ worktree /Users/jeffhost/work/gfw
+ cmd_name gc (fetch/gc)
+ exit elapsed:0.001959 code:0
+ atexit elapsed:0.001997 code:0
+child_exit[3] pid:20303 code:0 elapsed:0.007564
+exit elapsed:3.868938 code:0
+atexit elapsed:3.868970 code:0
+----------------
+
+Regions::
+
+	Regions can be used to time an interesting section of code.
++
+----------------
+void wt_status_collect(struct wt_status *s)
+{
+ trace2_region_enter("status", "worktrees", s->repo);
+ wt_status_collect_changes_worktree(s);
+ trace2_region_leave("status", "worktrees", s->repo);
+
+ trace2_region_enter("status", "index", s->repo);
+ wt_status_collect_changes_index(s);
+ trace2_region_leave("status", "index", s->repo);
+
+ trace2_region_enter("status", "untracked", s->repo);
+ wt_status_collect_untracked(s);
+ trace2_region_leave("status", "untracked", s->repo);
+}
+
+void wt_status_print(struct wt_status *s)
+{
+ trace2_region_enter("status", "print", s->repo);
+ switch (s->status_format) {
+ ...
+ }
+ trace2_region_leave("status", "print", s->repo);
+}
+----------------
++
+In this example, scanning for untracked files ran from +0.012568 to
++0.027149 (since the process started) and took 0.014581 seconds.
++
+----------------
+$ export GIT_TR2_PERF_BRIEF=1
+$ export GIT_TR2_PERF=~/log.perf
+$ git status
+...
+
+$ cat ~/log.perf
+d0 | main | version | | | | | 2.20.1.160.g5676107ecd.dirty
+d0 | main | start | | | | | git status
+d0 | main | def_repo | r1 | | | | worktree:/Users/jeffhost/work/gfw
+d0 | main | cmd_name | | | | | status (status)
+...
+d0 | main | region_enter | r1 | 0.010988 | | status | label:worktrees
+d0 | main | region_leave | r1 | 0.011236 | 0.000248 | status | label:worktrees
+d0 | main | region_enter | r1 | 0.011260 | | status | label:index
+d0 | main | region_leave | r1 | 0.012542 | 0.001282 | status | label:index
+d0 | main | region_enter | r1 | 0.012568 | | status | label:untracked
+d0 | main | region_leave | r1 | 0.027149 | 0.014581 | status | label:untracked
+d0 | main | region_enter | r1 | 0.027411 | | status | label:print
+d0 | main | region_leave | r1 | 0.028741 | 0.001330 | status | label:print
+d0 | main | exit | | 0.028778 | | | code:0
+d0 | main | atexit | | 0.028809 | | | code:0
+----------------
++
+Regions may be nested. This causes messages to be indented in the
+PERF target, for example.
+Elapsed times are relative to the start of the corresponding nesting
+level as expected. For example, if we add region messages to:
++
+----------------
+static enum path_treatment read_directory_recursive(struct dir_struct *dir,
+ struct index_state *istate, const char *base, int baselen,
+ struct untracked_cache_dir *untracked, int check_only,
+ int stop_at_first_file, const struct pathspec *pathspec)
+{
+ enum path_treatment state, subdir_state, dir_state = path_none;
+
+ trace2_region_enter_printf("dir", "read_recursive", NULL, "%.*s", baselen, base);
+ ...
+ trace2_region_leave_printf("dir", "read_recursive", NULL, "%.*s", baselen, base);
+ return dir_state;
+}
+----------------
++
+We can further investigate the time spent scanning for untracked files.
++
+----------------
+$ export GIT_TR2_PERF_BRIEF=1
+$ export GIT_TR2_PERF=~/log.perf
+$ git status
+...
+$ cat ~/log.perf
+d0 | main | version | | | | | 2.20.1.162.gb4ccea44db.dirty
+d0 | main | start | | | | | git status
+d0 | main | def_repo | r1 | | | | worktree:/Users/jeffhost/work/gfw
+d0 | main | cmd_name | | | | | status (status)
+...
+d0 | main | region_enter | r1 | 0.015047 | | status | label:untracked
+d0 | main | region_enter | | 0.015132 | | dir | ..label:read_recursive
+d0 | main | region_enter | | 0.016341 | | dir | ....label:read_recursive vcs-svn/
+d0 | main | region_leave | | 0.016422 | 0.000081 | dir | ....label:read_recursive vcs-svn/
+d0 | main | region_enter | | 0.016446 | | dir | ....label:read_recursive xdiff/
+d0 | main | region_leave | | 0.016522 | 0.000076 | dir | ....label:read_recursive xdiff/
+d0 | main | region_enter | | 0.016612 | | dir | ....label:read_recursive git-gui/
+d0 | main | region_enter | | 0.016698 | | dir | ......label:read_recursive git-gui/po/
+d0 | main | region_enter | | 0.016810 | | dir | ........label:read_recursive git-gui/po/glossary/
+d0 | main | region_leave | | 0.016863 | 0.000053 | dir | ........label:read_recursive git-gui/po/glossary/
+...
+d0 | main | region_enter | | 0.031876 | | dir | ....label:read_recursive builtin/
+d0 | main | region_leave | | 0.032270 | 0.000394 | dir | ....label:read_recursive builtin/
+d0 | main | region_leave | | 0.032414 | 0.017282 | dir | ..label:read_recursive
+d0 | main | region_leave | r1 | 0.032454 | 0.017407 | status | label:untracked
+...
+d0 | main | exit | | 0.034279 | | | code:0
+d0 | main | atexit | | 0.034322 | | | code:0
+----------------
++
+Trace2 regions are similar to the existing trace_performance_enter()
+and trace_performance_leave() routines, but are thread safe and
+maintain per-thread stacks of timers.
+
+Data Messages::
+
+ Data messages added to a region.
++
+----------------
+int read_index_from(struct index_state *istate, const char *path,
+ const char *gitdir)
+{
+ trace2_region_enter_printf("index", "do_read_index", the_repository, "%s", path);
+
+ ...
+
+ trace2_data_intmax("index", the_repository, "read/version", istate->version);
+ trace2_data_intmax("index", the_repository, "read/cache_nr", istate->cache_nr);
+
+ trace2_region_leave_printf("index", "do_read_index", the_repository, "%s", path);
+}
+----------------
++
+This example shows that the index contained 3552 entries.
++
+----------------
+$ export GIT_TR2_PERF_BRIEF=1
+$ export GIT_TR2_PERF=~/log.perf
+$ git status
+...
+$ cat ~/log.perf
+d0 | main | version | | | | | 2.20.1.156.gf9916ae094.dirty
+d0 | main | start | | | | | git status
+d0 | main | def_repo | r1 | | | | worktree:/Users/jeffhost/work/gfw
+d0 | main | cmd_name | | | | | status (status)
+d0 | main | region_enter | r1 | 0.001791 | | index | label:do_read_index .git/index
+d0 | main | data | r1 | 0.002494 | 0.000703 | index | ..read/version:2
+d0 | main | data | r1 | 0.002520 | 0.000729 | index | ..read/cache_nr:3552
+d0 | main | region_leave | r1 | 0.002539 | 0.000748 | index | label:do_read_index .git/index
+...
+----------------
+
+Thread Events::
+
+ Thread messages added to a thread-proc.
++
+For example, the multithreaded preload-index code can be
+instrumented with a region around the thread pool and then
+per-thread start and exit events within the threadproc.
++
+----------------
+static void *preload_thread(void *_data)
+{
+ // start the per-thread clock and emit a message.
+ trace2_thread_start("preload_thread");
+
+ // report which chunk of the array this thread was assigned.
+ trace2_data_intmax("index", the_repository, "offset", p->offset);
+ trace2_data_intmax("index", the_repository, "count", nr);
+
+ do {
+ ...
+ } while (--nr > 0);
+ ...
+
+ // report elapsed time taken by this thread.
+ trace2_thread_exit();
+ return NULL;
+}
+
+void preload_index(struct index_state *index,
+ const struct pathspec *pathspec,
+ unsigned int refresh_flags)
+{
+ trace2_region_enter("index", "preload", the_repository);
+
+ for (i = 0; i < threads; i++) {
+ ... /* create thread */
+ }
+
+ for (i = 0; i < threads; i++) {
+ ... /* join thread */
+ }
+
+ trace2_region_leave("index", "preload", the_repository);
+}
+----------------
++
+In this example preload_index() was executed by the `main` thread
+and started the `preload` region. Seven threads, named
+`th01:preload_thread` through `th07:preload_thread`, were started.
+Events from each thread are atomically appended to the shared target
+stream as they occur so they may appear in random order with respect
+other threads. Finally, the main thread waits for the threads to
+finish and leaves the region.
++
+Data events are tagged with the active thread name. They are used
+to report the per-thread parameters.
++
+----------------
+$ export GIT_TR2_PERF_BRIEF=1
+$ export GIT_TR2_PERF=~/log.perf
+$ git status
+...
+$ cat ~/log.perf
+...
+d0 | main | region_enter | r1 | 0.002595 | | index | label:preload
+d0 | th01:preload_thread | thread_start | | 0.002699 | | |
+d0 | th02:preload_thread | thread_start | | 0.002721 | | |
+d0 | th01:preload_thread | data | r1 | 0.002736 | 0.000037 | index | offset:0
+d0 | th02:preload_thread | data | r1 | 0.002751 | 0.000030 | index | offset:2032
+d0 | th03:preload_thread | thread_start | | 0.002711 | | |
+d0 | th06:preload_thread | thread_start | | 0.002739 | | |
+d0 | th01:preload_thread | data | r1 | 0.002766 | 0.000067 | index | count:508
+d0 | th06:preload_thread | data | r1 | 0.002856 | 0.000117 | index | offset:2540
+d0 | th03:preload_thread | data | r1 | 0.002824 | 0.000113 | index | offset:1016
+d0 | th04:preload_thread | thread_start | | 0.002710 | | |
+d0 | th02:preload_thread | data | r1 | 0.002779 | 0.000058 | index | count:508
+d0 | th06:preload_thread | data | r1 | 0.002966 | 0.000227 | index | count:508
+d0 | th07:preload_thread | thread_start | | 0.002741 | | |
+d0 | th07:preload_thread | data | r1 | 0.003017 | 0.000276 | index | offset:3048
+d0 | th05:preload_thread | thread_start | | 0.002712 | | |
+d0 | th05:preload_thread | data | r1 | 0.003067 | 0.000355 | index | offset:1524
+d0 | th05:preload_thread | data | r1 | 0.003090 | 0.000378 | index | count:508
+d0 | th07:preload_thread | data | r1 | 0.003037 | 0.000296 | index | count:504
+d0 | th03:preload_thread | data | r1 | 0.002971 | 0.000260 | index | count:508
+d0 | th04:preload_thread | data | r1 | 0.002983 | 0.000273 | index | offset:508
+d0 | th04:preload_thread | data | r1 | 0.007311 | 0.004601 | index | count:508
+d0 | th05:preload_thread | thread_exit | | 0.008781 | 0.006069 | |
+d0 | th01:preload_thread | thread_exit | | 0.009561 | 0.006862 | |
+d0 | th03:preload_thread | thread_exit | | 0.009742 | 0.007031 | |
+d0 | th06:preload_thread | thread_exit | | 0.009820 | 0.007081 | |
+d0 | th02:preload_thread | thread_exit | | 0.010274 | 0.007553 | |
+d0 | th07:preload_thread | thread_exit | | 0.010477 | 0.007736 | |
+d0 | th04:preload_thread | thread_exit | | 0.011657 | 0.008947 | |
+d0 | main | region_leave | r1 | 0.011717 | 0.009122 | index | label:preload
+...
+d0 | main | exit | | 0.029996 | | | code:0
+d0 | main | atexit | | 0.030027 | | | code:0
+----------------
++
+In this example, the preload region took 0.009122 seconds. The 7 threads
+took between 0.006069 and 0.008947 seconds to work on their portion of
+the index. Thread "th01" worked on 508 items at offset 0. Thread "th02"
+worked on 508 items at offset 2032. Thread "th04" worked on 508 items
+at offset 508.
++
+This example also shows that thread names are assigned in a racy manner
+as each thread starts and allocates TLS storage.
+
+== Future Work
+
+=== Relationship to the Existing Trace API (api-trace.txt)
+
+There are a few issues to resolve before we can completely
+switch to Trace2.
+
+* Updating existing tests that assume GIT_TRACE format messages.
+
+* How to best handle custom GIT_TRACE_<key> messages?
+
+** The GIT_TRACE_<key> mechanism allows each <key> to write to a
+different file (in addition to just stderr).
+
+** Do we want to maintain that ability or simply write to the existing
+Trace2 targets (and convert <key> to a "category")?
* one side of history renames x -> z, and the other renames some file to
x/e, causing the need for the merge to do a transitive rename.
- * one side of history renames x -> z, but also renames all files within
- x. For example, x/a -> z/alpha, x/b -> z/bravo, etc.
+ * one side of history renames x -> z, but also renames all files within x.
+ For example, x/a -> z/alpha, x/b -> z/bravo, etc.
* both 'x' and 'y' being merged into a single directory 'z', with a
directory rename being detected for both x->z and y->z.
An example client/server communication might look like this:
----
- S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n
+ S: 006274730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n
S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n
S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n
- S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n
+ S: 003d74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n
S: 0000
- C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n
- C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n
+ C: 00677d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n
+ C: 006874730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n
C: 0000
C: [PACKDATA]
Git Protocol Capabilities
=========================
+NOTE: this document describes capabilities for versions 0 and 1 of the pack
+protocol. For version 2, please refer to the link:protocol-v2.html[protocol-v2]
+doc.
+
Servers SHOULD support all capabilities defined in this document.
On the very first line of the initial server response of either
purposes, and MUST NOT be used to programmatically assume the presence
or absence of particular features.
+symref
+------
+
+This parameterized capability is used to inform the receiver which symbolic ref
+points to which ref; for example, "symref=HEAD:refs/heads/master" tells the
+receiver that HEAD points to master. This capability can be repeated to
+represent multiple symrefs.
+
+Servers SHOULD include this capability for the HEAD symref if it is one of the
+refs being sent.
+
+Clients MAY use the parameters from this capability to select the proper initial
+branch when cloning a repository.
+
shallow
-------
- Git Wire Protocol, Version 2
-==============================
+Git Wire Protocol, Version 2
+============================
This document presents a specification for a version 2 of Git's wire
protocol. Protocol v2 will improve upon v1 in the following ways:
has completed, a client can reuse the connection and request that other
commands be executed.
- Packet-Line Framing
----------------------
+Packet-Line Framing
+-------------------
All communication is done using packet-line framing, just as in v1. See
`Documentation/technical/pack-protocol.txt` and
* '0000' Flush Packet (flush-pkt) - indicates the end of a message
* '0001' Delimiter Packet (delim-pkt) - separates sections of a message
- Initial Client Request
-------------------------
+Initial Client Request
+----------------------
In general a client can request to speak protocol v2 by sending
`version=2` through the respective side-channel for the transport being
found in `pack-protocol.txt` and `http-protocol.txt`. In all cases the
response from the server is the capability advertisement.
- Git Transport
-~~~~~~~~~~~~~~~
+Git Transport
+~~~~~~~~~~~~~
When using the git:// transport, you can request to use protocol v2 by
sending "version=2" as an extra parameter:
003egit-upload-pack /project.git\0host=myserver.com\0\0version=2\0
- SSH and File Transport
-~~~~~~~~~~~~~~~~~~~~~~~~
+SSH and File Transport
+~~~~~~~~~~~~~~~~~~~~~~
When using either the ssh:// or file:// transport, the GIT_PROTOCOL
environment variable must be set explicitly to include "version=2".
- HTTP Transport
-~~~~~~~~~~~~~~~~
+HTTP Transport
+~~~~~~~~~~~~~~
When using the http:// or https:// transport a client makes a "smart"
info/refs request as described in `http-protocol.txt` and requests that
Subsequent requests are then made directly to the service
`$GIT_URL/git-upload-pack`. (This works the same for git-receive-pack).
- Capability Advertisement
---------------------------
+Capability Advertisement
+------------------------
A server which decides to communicate (based on a request from a client)
using protocol version 2, notifies the client by sending a version string
key = 1*(ALPHA | DIGIT | "-_")
value = 1*(ALPHA | DIGIT | " -_.,?\/{}[]()<>!@#$%^&*+=:;")
- Command Request
------------------
+Command Request
+---------------
After receiving the capability advertisement, a client can then issue a
request to select the command it wants with any particular capabilities
optionally send an empty request consisting of just a flush-pkt to
indicate that no more requests will be made.
- Capabilities
---------------
+Capabilities
+------------
There are two different types of capabilities: normal capabilities,
which can be used to convey information or alter the behavior of a
permits simple round-robin load-balancing on the server side, without
needing to worry about state management.
- agent
-~~~~~~~
+agent
+~~~~~
The server can advertise the `agent` capability with a value `X` (in the
form `agent=X`) to notify the client that the server is running version
and debugging purposes, and MUST NOT be used to programmatically assume
the presence or absence of particular features.
- ls-refs
-~~~~~~~~~
+ls-refs
+~~~~~~~
`ls-refs` is the command used to request a reference advertisement in v2.
Unlike the current reference advertisement, ls-refs takes in arguments
symref = "symref-target:" symref-target
peeled = "peeled:" obj-id
- fetch
-~~~~~~~
+fetch
+~~~~~
`fetch` is the command used to fetch a packfile in v2. It can be looked
at as a modified version of the v1 fetch where the ref-advertisement is
2 - progress messages
3 - fatal error message just before stream aborts
- server-option
-~~~~~~~~~~~~~~~
+server-option
+~~~~~~~~~~~~~
If advertised, indicates that any number of server specific options can be
included in a request. This is done by sending each option as a
where <address> may be a path, a server and path, or an arbitrary
URL-like string recognized by the specific remote helper being
-invoked. See linkgit:gitremote-helpers[1] for details.
+invoked. See linkgit:gitremote-helpers[7] for details.
If there are a large number of similarly-named remote repositories and
you want to use a different format for them (such that the URLs you
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v2.21.0
+DEF_VER=v2.21.GIT
LF='
'
#
# Define DEVELOPER to enable more compiler warnings. Compiler version
# and family are auto detected, but could be overridden by defining
-# COMPILER_FEATURES (see config.mak.dev)
+# COMPILER_FEATURES (see config.mak.dev). You can still set
+# CFLAGS="..." in combination with DEVELOPER enables, whether that's
+# for tweaking something unrelated (e.g. optimization level), or for
+# selectively overriding something DEVELOPER or one of the DEVOPTS
+# (see just below) brings in.
#
# When DEVELOPER is set, DEVOPTS can be used to control compiler
# options. This variable contains keywords separated by
@$(SHELL_PATH) ./GIT-VERSION-GEN
-include GIT-VERSION-FILE
-# CFLAGS and LDFLAGS are for the users to override from the command line.
-
-CFLAGS = -g -O2 -Wall
-LDFLAGS =
-ALL_CFLAGS = $(CPPFLAGS) $(CFLAGS)
-ALL_LDFLAGS = $(LDFLAGS)
-STRIP ?= strip
-
-# Create as necessary, replace existing, make ranlib unneeded.
-ARFLAGS = rcs
-
+# Set our default configuration.
+#
# Among the variables below, these:
# gitexecdir
# template_dir
export prefix bindir sharedir sysconfdir gitwebdir perllibdir localedir
+# Set our default programs
CC = cc
AR = ar
RM = rm -f
XGETTEXT = xgettext
MSGFMT = msgfmt
CURL_CONFIG = curl-config
-PTHREAD_LIBS = -lpthread
-PTHREAD_CFLAGS =
GCOV = gcov
+STRIP = strip
SPATCH = spatch
export TCL_PATH TCLTK_PATH
-# user customisation variable for 'sparse' target
-SPARSE_FLAGS ?=
-# internal/platform customisation variable for 'sparse'
-SP_EXTRA_FLAGS =
-
-SPATCH_FLAGS = --all-includes --patch .
-
-
-
-### --- END CONFIGURATION SECTION ---
-
-# Those must not be GNU-specific; they are shared with perl/ which may
-# be built by a different compiler. (Note that this is an artifact now
-# but it still might be nice to keep that distinction.)
-BASIC_CFLAGS = -I.
-BASIC_LDFLAGS =
+# Set our default LIBS variables
+PTHREAD_LIBS = -lpthread
# Guard against environment variables
BUILTIN_OBJS =
SCRIPT_SH += git-merge-resolve.sh
SCRIPT_SH += git-mergetool.sh
SCRIPT_SH += git-quiltimport.sh
-SCRIPT_SH += git-legacy-rebase.sh
+SCRIPT_SH += git-legacy-stash.sh
SCRIPT_SH += git-remote-testgit.sh
SCRIPT_SH += git-request-pull.sh
-SCRIPT_SH += git-stash.sh
SCRIPT_SH += git-submodule.sh
SCRIPT_SH += git-web--browse.sh
TEST_BUILTINS_OBJS += test-submodule-config.o
TEST_BUILTINS_OBJS += test-submodule-nested-repo-config.o
TEST_BUILTINS_OBJS += test-subprocess.o
+TEST_BUILTINS_OBJS += test-trace2.o
TEST_BUILTINS_OBJS += test-urlmatch-normalization.o
TEST_BUILTINS_OBJS += test-xml-encode.o
TEST_BUILTINS_OBJS += test-wildmatch.o
GENERATED_H += command-list.h
-LIB_H = $(shell $(FIND) . \
+LIB_H := $(shell git ls-files '*.h' ':!t/' ':!Documentation/' 2>/dev/null || \
+ $(FIND) . \
-name .git -prune -o \
-name t -prune -o \
-name Documentation -prune -o \
LIB_OBJS += thread-utils.o
LIB_OBJS += tmp-objdir.o
LIB_OBJS += trace.o
+LIB_OBJS += trace2.o
+LIB_OBJS += trace2/tr2_cfg.o
+LIB_OBJS += trace2/tr2_cmd_name.o
+LIB_OBJS += trace2/tr2_dst.o
+LIB_OBJS += trace2/tr2_sid.o
+LIB_OBJS += trace2/tr2_tbuf.o
+LIB_OBJS += trace2/tr2_tgt_event.o
+LIB_OBJS += trace2/tr2_tgt_normal.o
+LIB_OBJS += trace2/tr2_tgt_perf.o
+LIB_OBJS += trace2/tr2_tls.o
LIB_OBJS += trailer.o
LIB_OBJS += transport.o
LIB_OBJS += transport-helper.o
BUILTIN_OBJS += builtin/show-branch.o
BUILTIN_OBJS += builtin/show-index.o
BUILTIN_OBJS += builtin/show-ref.o
+BUILTIN_OBJS += builtin/stash.o
BUILTIN_OBJS += builtin/stripspace.o
BUILTIN_OBJS += builtin/submodule--helper.o
BUILTIN_OBJS += builtin/symbolic-ref.o
DC_SHA1_SUBMODULE = auto
endif
+# Set CFLAGS, LDFLAGS and other *FLAGS variables. These might be
+# tweaked by config.* below as well as the command-line, both of
+# which'll override these defaults.
+CFLAGS = -g -O2 -Wall
+LDFLAGS =
+BASIC_CFLAGS = -I.
+BASIC_LDFLAGS =
+
+# library flags
+ARFLAGS = rcs
+PTHREAD_CFLAGS =
+
+# For the 'sparse' target
+SPARSE_FLAGS ?=
+SP_EXTRA_FLAGS =
+
+# For the 'coccicheck' target
+SPATCH_FLAGS = --all-includes --patch .
+
include config.mak.uname
-include config.mak.autogen
-include config.mak
include config.mak.dev
endif
+ALL_CFLAGS = $(DEVELOPER_CFLAGS) $(CPPFLAGS) $(CFLAGS)
+ALL_LDFLAGS = $(LDFLAGS)
+
comma := ,
empty :=
space := $(empty) $(empty)
BASIC_CFLAGS += -fno-omit-frame-pointer
ifneq ($(filter undefined,$(SANITIZERS)),)
BASIC_CFLAGS += -DNO_UNALIGNED_LOADS
+BASIC_CFLAGS += -DSHA1DC_FORCE_ALIGNED_ACCESS
endif
ifneq ($(filter leak,$(SANITIZERS)),)
BASIC_CFLAGS += -DSUPPRESS_ANNOTATED_LEAKS
LIB_OBJS += compat/inet_pton.o
BASIC_CFLAGS += -DNO_INET_PTON
endif
-ifndef NO_UNIX_SOCKETS
+ifdef NO_UNIX_SOCKETS
+ BASIC_CFLAGS += -DNO_UNIX_SOCKETS
+else
LIB_OBJS += unix-socket.o
PROGRAM_OBJS += credential-cache.o
PROGRAM_OBJS += credential-cache--daemon.o
# should _not_ be included here, since they are necessary even when
# building an object for the first time.
-$(OBJECTS): $(LIB_H)
+$(OBJECTS): $(LIB_H) $(GENERATED_H)
endif
exec-cmd.sp exec-cmd.s exec-cmd.o: GIT-PREFIX
sparse: $(SP_OBJ)
GEN_HDRS := command-list.h unicode-width.h
-EXCEPT_HDRS := $(GEN_HDRS) compat% xdiff%
+EXCEPT_HDRS := $(GEN_HDRS) compat/% xdiff/%
+ifndef GCRYPT_SHA256
+ EXCEPT_HDRS += sha256/gcrypt.h
+endif
CHK_HDRS = $(filter-out $(EXCEPT_HDRS),$(patsubst ./%,%,$(LIB_H)))
HCO = $(patsubst %.h,%.hco,$(CHK_HDRS))
.PHONY: check-docs
check-docs::
$(MAKE) -C Documentation lint-docs
- @(for v in $(ALL_COMMANDS); \
+ @(for v in $(patsubst %$X,%,$(ALL_COMMANDS)); \
do \
case "$$v" in \
git-merge-octopus | git-merge-ours | git-merge-recursive | \
( \
sed -e '1,/^### command list/d' \
-e '/^#/d' \
+ -e '/guide$$/d' \
-e 's/[ ].*//' \
-e 's/^/listed /' command-list.txt; \
$(MAKE) -C Documentation print-man1 | \
grep '\.txt$$' | \
- sed -e 's|Documentation/|documented |' \
+ sed -e 's|^|documented |' \
-e 's/\.txt//'; \
) | while read how cmd; \
do \
- case " $(ALL_COMMANDS) " in \
+ case " $(patsubst %$X,%,$(ALL_COMMANDS)) " in \
*" $$cmd "*) ;; \
*) echo "removed but $$how: $$cmd" ;; \
esac; \
-Documentation/RelNotes/2.21.0.txt
\ No newline at end of file
+Documentation/RelNotes/2.22.0.txt
\ No newline at end of file
state->ws_error_action = correct_ws_error;
return 0;
}
+ /*
+ * Please update $__git_whitespacelist in git-completion.bash
+ * when you add new options.
+ */
return error(_("unrecognized whitespace option '%s'"), option);
}
static void write_global_extended_header(struct archiver_args *args)
{
- const unsigned char *sha1 = args->commit_sha1;
+ const struct object_id *oid = args->commit_oid;
struct strbuf ext_header = STRBUF_INIT;
struct ustar_header header;
unsigned int mode;
- if (sha1)
+ if (oid)
strbuf_append_ext_header(&ext_header, "comment",
- sha1_to_hex(sha1), 40);
+ oid_to_hex(oid),
+ the_hash_algo->hexsz);
if (args->time > USTAR_MAX_MTIME) {
strbuf_append_ext_header_uint(&ext_header, "mtime",
args->time);
write_or_die(1, &locator64, ZIP64_DIR_TRAILER_LOCATOR_SIZE);
}
-static void write_zip_trailer(const unsigned char *sha1)
+static void write_zip_trailer(const struct object_id *oid)
{
struct zip_dir_trailer trailer;
int clamped = 0;
copy_le16_clamp(trailer.entries, zip_dir_entries, &clamped);
copy_le32(trailer.size, zip_dir.len);
copy_le32_clamp(trailer.offset, zip_offset, &clamped);
- copy_le16(trailer.comment_length, sha1 ? GIT_SHA1_HEXSZ : 0);
+ copy_le16(trailer.comment_length, oid ? the_hash_algo->hexsz : 0);
write_or_die(1, zip_dir.buf, zip_dir.len);
if (clamped)
write_zip64_trailer();
write_or_die(1, &trailer, ZIP_DIR_TRAILER_SIZE);
- if (sha1)
- write_or_die(1, sha1_to_hex(sha1), GIT_SHA1_HEXSZ);
+ if (oid)
+ write_or_die(1, oid_to_hex(oid), the_hash_algo->hexsz);
}
static void dos_time(timestamp_t *timestamp, int *dos_date, int *dos_time)
err = write_archive_entries(args, write_zip_entry);
if (!err)
- write_zip_trailer(args->commit_sha1);
+ write_zip_trailer(args->commit_oid);
strbuf_release(&zip_dir);
int remote)
{
const char *name = argv[0];
- const unsigned char *commit_sha1;
+ const struct object_id *commit_oid;
time_t archive_time;
struct tree *tree;
const struct commit *commit;
commit = lookup_commit_reference_gently(ar_args->repo, &oid, 1);
if (commit) {
- commit_sha1 = commit->object.oid.hash;
+ commit_oid = &commit->object.oid;
archive_time = commit->date;
} else {
- commit_sha1 = NULL;
+ commit_oid = NULL;
archive_time = time(NULL);
}
tree = parse_tree_indirect(&tree_oid);
}
ar_args->tree = tree;
- ar_args->commit_sha1 = commit_sha1;
+ ar_args->commit_oid = commit_oid;
ar_args->commit = commit;
ar_args->time = archive_time;
}
const char *base;
size_t baselen;
struct tree *tree;
- const unsigned char *commit_sha1;
+ const struct object_id *commit_oid;
const struct commit *commit;
timestamp_t time;
struct pathspec pathspec;
* Like info/exclude and .gitignore, the attribute information can
* come from many places.
*
- * (1) .gitattribute file of the same directory;
- * (2) .gitattribute file of the parent directory if (1) does not have
+ * (1) .gitattributes file of the same directory;
+ * (2) .gitattributes file of the parent directory if (1) does not have
* any match; this goes recursively upwards, just like .gitignore.
* (3) $GIT_DIR/info/attributes, which overrides both of the above.
*
* In the same file, later entries override the earlier match, so in the
* global list, we would have entries from info/attributes the earliest
- * (reading the file from top to bottom), .gitattribute of the root
+ * (reading the file from top to bottom), .gitattributes of the root
* directory (again, reading the file from top to bottom) down to the
* current directory, and then scan the list backwards to find the first match.
* This is exactly the same as what is_excluded() does in dir.c to deal with
* set of attribute definitions, followed by the contents
* of $(prefix)/etc/gitattributes and a file specified by
* core.attributesfile. Then, contents from
- * .gitattribute files from directories closer to the
+ * .gitattributes files from directories closer to the
* root to the ones in deeper directories are pushed
* to the stack. Finally, at the very top of the stack
* we always keep the contents of $GIT_DIR/info/attributes.
const char *prefix,
struct commit *commit)
{
+ const char *argv[] = {
+ "diff-tree", "--pretty", "--stat", "--summary", "--cc", NULL
+ };
struct rev_info opt;
- /* diff-tree init */
+ git_config(git_diff_ui_config, NULL);
repo_init_revisions(r, &opt, prefix);
- git_config(git_diff_basic_config, NULL); /* no "diff" UI options */
- opt.abbrev = 0;
- opt.diff = 1;
- /* This is what "--pretty" does */
- opt.verbose_header = 1;
- opt.use_terminator = 0;
- opt.commit_format = CMIT_FMT_DEFAULT;
-
- /* diff-tree init */
- if (!opt.diffopt.output_format)
- opt.diffopt.output_format = DIFF_FORMAT_RAW;
-
- setup_revisions(0, NULL, &opt, NULL);
+ setup_revisions(ARRAY_SIZE(argv) - 1, argv, &opt, NULL);
log_tree_commit(&opt, commit);
}
origin = make_origin(commit, path);
- ident = fmt_ident("Not Committed Yet", "not.committed.yet", NULL, 0);
+ ident = fmt_ident("Not Committed Yet", "not.committed.yet",
+ WANT_BLANK_IDENT, NULL, 0);
strbuf_addstr(&msg, "tree 0000000000000000000000000000000000000000\n");
for (parent = commit->parents; parent; parent = parent->next)
strbuf_addf(&msg, "parent %s\n",
}
for (i = 0; i < num_sg; i++) {
if (sg_origin[i]) {
- drop_origin_blob(sg_origin[i]);
+ if (!sg_origin[i]->suspects)
+ drop_origin_blob(sg_origin[i]);
blame_origin_decref(sg_origin[i]);
}
}
extern int cmd_show_branch(int argc, const char **argv, const char *prefix);
extern int cmd_show_index(int argc, const char **argv, const char *prefix);
extern int cmd_status(int argc, const char **argv, const char *prefix);
+extern int cmd_stash(int argc, const char **argv, const char *prefix);
extern int cmd_stripspace(int argc, const char **argv, const char *prefix);
extern int cmd_submodule__helper(int argc, const char **argv, const char *prefix);
extern int cmd_symbolic_ref(int argc, const char **argv, const char *prefix);
cp.in = xopen(am_path(state, "rewritten"), O_RDONLY);
cp.stdout_to_stderr = 1;
+ cp.trace2_hook_name = "post-rewrite";
ret = run_command(&cp);
while (!strbuf_getline_lf(&sb, fp)) {
struct object_id from_obj, to_obj;
+ const char *p;
- if (sb.len != GIT_SHA1_HEXSZ * 2 + 1) {
+ if (sb.len != the_hash_algo->hexsz * 2 + 1) {
ret = error(invalid_line, sb.buf);
goto finish;
}
- if (get_oid_hex(sb.buf, &from_obj)) {
+ if (parse_oid_hex(sb.buf, &from_obj, &p)) {
ret = error(invalid_line, sb.buf);
goto finish;
}
- if (sb.buf[GIT_SHA1_HEXSZ] != ' ') {
+ if (*p != ' ') {
ret = error(invalid_line, sb.buf);
goto finish;
}
- if (get_oid_hex(sb.buf + GIT_SHA1_HEXSZ + 1, &to_obj)) {
+ if (get_oid_hex(p + 1, &to_obj)) {
ret = error(invalid_line, sb.buf);
goto finish;
}
* review them with extra care to spot mismerges.
*/
struct rev_info rev_info;
- const char *diff_filter_str = "--diff-filter=AM";
repo_init_revisions(the_repository, &rev_info, NULL);
rev_info.diffopt.output_format = DIFF_FORMAT_NAME_STATUS;
- diff_opt_parse(&rev_info.diffopt, &diff_filter_str, 1, rev_info.prefix);
+ rev_info.diffopt.filter |= diff_filter_bit('A');
+ rev_info.diffopt.filter |= diff_filter_bit('M');
add_pending_oid(&rev_info, "HEAD", &our_tree, 0);
diff_setup_done(&rev_info.diffopt);
run_diff_index(&rev_info, 1);
}
author = fmt_ident(state->author_name, state->author_email,
+ WANT_AUTHOR_IDENT,
state->ignore_date ? NULL : state->author_date,
IDENT_STRICT);
*opt_value = PATCH_FORMAT_HG;
else if (!strcmp(arg, "mboxrd"))
*opt_value = PATCH_FORMAT_MBOXRD;
+ /*
+ * Please update $__git_patchformat in git-completion.bash
+ * when you add new options
+ */
else
return error(_("Invalid value for --patch-format: %s"), arg);
return 0;
* and are only included here to get included in the "-h"
* output:
*/
- { OPTION_LOWLEVEL_CALLBACK, 0, "indent-heuristic", NULL, NULL, N_("Use an experimental heuristic to improve diffs"), PARSE_OPT_NOARG, parse_opt_unknown_cb },
+ { OPTION_LOWLEVEL_CALLBACK, 0, "indent-heuristic", NULL, NULL, N_("Use an experimental heuristic to improve diffs"), PARSE_OPT_NOARG, NULL, 0, parse_opt_unknown_cb },
OPT_BIT(0, "minimal", &xdl_opts, N_("Spend extra cycles to find better match"), XDF_NEED_MINIMAL),
OPT_STRING('S', NULL, &revs_file, N_("file"), N_("Use revisions from <file> instead of calling git-rev-list")),
free(to_free);
}
+static void print_current_branch_name(void)
+{
+ int flags;
+ const char *refname = resolve_ref_unsafe("HEAD", 0, NULL, &flags);
+ const char *shortname;
+ if (!refname)
+ die(_("could not resolve HEAD"));
+ else if (!(flags & REF_ISSYMREF))
+ return;
+ else if (skip_prefix(refname, "refs/heads/", &shortname))
+ puts(shortname);
+ else
+ die(_("HEAD (%s) points outside of refs/heads/"), refname);
+}
+
static void reject_rebase_or_bisect_branch(const char *target)
{
struct worktree **worktrees = get_worktrees(0);
int cmd_branch(int argc, const char **argv, const char *prefix)
{
int delete = 0, rename = 0, copy = 0, force = 0, list = 0;
+ int show_current = 0;
int reflog = 0, edit_description = 0;
int quiet = 0, unset_upstream = 0;
const char *new_upstream = NULL;
OPT_BIT('c', "copy", ©, N_("copy a branch and its reflog"), 1),
OPT_BIT('C', NULL, ©, N_("copy a branch, even if target exists"), 2),
OPT_BOOL('l', "list", &list, N_("list branch names")),
+ OPT_BOOL(0, "show-current", &show_current, N_("show current branch name")),
OPT_BOOL(0, "create-reflog", &reflog, N_("create the branch's reflog")),
OPT_BOOL(0, "edit-description", &edit_description,
N_("edit the description for the branch")),
OPT_MERGED(&filter, N_("print only branches that are merged")),
OPT_NO_MERGED(&filter, N_("print only branches that are not merged")),
OPT_COLUMN(0, "column", &colopts, N_("list branches in columns")),
- OPT_CALLBACK(0 , "sort", sorting_tail, N_("key"),
- N_("field name to sort on"), &parse_opt_ref_sorting),
+ OPT_REF_SORT(sorting_tail),
{
OPTION_CALLBACK, 0, "points-at", &filter.points_at, N_("object"),
N_("print only branches of the object"), 0, parse_opt_object_name
argc = parse_options(argc, argv, prefix, options, builtin_branch_usage,
0);
- if (!delete && !rename && !copy && !edit_description && !new_upstream && !unset_upstream && argc == 0)
+ if (!delete && !rename && !copy && !edit_description && !new_upstream &&
+ !show_current && !unset_upstream && argc == 0)
list = 1;
if (filter.with_commit || filter.merge != REF_FILTER_MERGED_NONE || filter.points_at.nr ||
filter.no_commit)
list = 1;
- if (!!delete + !!rename + !!copy + !!new_upstream +
+ if (!!delete + !!rename + !!copy + !!new_upstream + !!show_current +
list + unset_upstream > 1)
usage_with_options(builtin_branch_usage, options);
if (!argc)
die(_("branch name required"));
return delete_branches(argc, argv, delete > 1, filter.kind, quiet);
+ } else if (show_current) {
+ print_current_branch_name();
+ return 0;
} else if (list) {
/* git branch --local also shows HEAD when it is detached */
if ((filter.kind & FILTER_REFS_BRANCHES) && filter.detached)
int ignore_other_worktrees;
int show_progress;
int count_checkout_paths;
+ int overlay_mode;
/*
* If new checkout options are added, skip_merge_working_tree
* should be updated accordingly.
return pos;
}
-static int check_stage(int stage, const struct cache_entry *ce, int pos)
+static int check_stage(int stage, const struct cache_entry *ce, int pos,
+ int overlay_mode)
{
while (pos < active_nr &&
!strcmp(active_cache[pos]->name, ce->name)) {
return 0;
pos++;
}
+ if (!overlay_mode)
+ return 0;
if (stage == 2)
return error(_("path '%s' does not have our version"), ce->name);
else
}
static int checkout_stage(int stage, const struct cache_entry *ce, int pos,
- const struct checkout *state, int *nr_checkouts)
+ const struct checkout *state, int *nr_checkouts,
+ int overlay_mode)
{
while (pos < active_nr &&
!strcmp(active_cache[pos]->name, ce->name)) {
NULL, nr_checkouts);
pos++;
}
+ if (!overlay_mode) {
+ unlink_entry(ce);
+ return 0;
+ }
if (stage == 2)
return error(_("path '%s' does not have our version"), ce->name);
else
return status;
}
+static void mark_ce_for_checkout_overlay(struct cache_entry *ce,
+ char *ps_matched,
+ const struct checkout_opts *opts)
+{
+ ce->ce_flags &= ~CE_MATCHED;
+ if (!opts->ignore_skipworktree && ce_skip_worktree(ce))
+ return;
+ if (opts->source_tree && !(ce->ce_flags & CE_UPDATE))
+ /*
+ * "git checkout tree-ish -- path", but this entry
+ * is in the original index but is not in tree-ish
+ * or does not match the pathspec; it will not be
+ * checked out to the working tree. We will not do
+ * anything to this entry at all.
+ */
+ return;
+ /*
+ * Either this entry came from the tree-ish we are
+ * checking the paths out of, or we are checking out
+ * of the index.
+ *
+ * If it comes from the tree-ish, we already know it
+ * matches the pathspec and could just stamp
+ * CE_MATCHED to it from update_some(). But we still
+ * need ps_matched and read_tree_recursive (and
+ * eventually tree_entry_interesting) cannot fill
+ * ps_matched yet. Once it can, we can avoid calling
+ * match_pathspec() for _all_ entries when
+ * opts->source_tree != NULL.
+ */
+ if (ce_path_match(&the_index, ce, &opts->pathspec, ps_matched))
+ ce->ce_flags |= CE_MATCHED;
+}
+
+static void mark_ce_for_checkout_no_overlay(struct cache_entry *ce,
+ char *ps_matched,
+ const struct checkout_opts *opts)
+{
+ ce->ce_flags &= ~CE_MATCHED;
+ if (!opts->ignore_skipworktree && ce_skip_worktree(ce))
+ return;
+ if (ce_path_match(&the_index, ce, &opts->pathspec, ps_matched)) {
+ ce->ce_flags |= CE_MATCHED;
+ if (opts->source_tree && !(ce->ce_flags & CE_UPDATE))
+ /*
+ * In overlay mode, but the path is not in
+ * tree-ish, which means we should remove it
+ * from the index and the working tree.
+ */
+ ce->ce_flags |= CE_REMOVE | CE_WT_REMOVE;
+ }
+}
+
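The two marking helpers above differ only in how they treat an index entry that matches the pathspec but is absent from the given tree-ish: overlay mode leaves the entry untouched, while no-overlay mode flags it for removal from both the index and the working tree. A toy sketch of that decision using plain flags (no real index entries, values chosen only for illustration):

#include <stdio.h>

#define CE_MATCHED   (1 << 0)
#define CE_REMOVE    (1 << 1)
#define CE_WT_REMOVE (1 << 2)

/* How to flag a pathspec-matching entry, depending on whether it
 * exists in the tree-ish being checked out. */
static unsigned mark_entry(int overlay_mode, int in_source_tree)
{
	if (in_source_tree)
		return CE_MATCHED;      /* normal checkout of this path */
	if (overlay_mode)
		return 0;               /* overlay: leave the entry alone */
	return CE_MATCHED | CE_REMOVE | CE_WT_REMOVE;  /* no-overlay: delete it */
}

int main(void)
{
	printf("overlay,    missing from tree: 0x%x\n", mark_entry(1, 0));
	printf("no-overlay, missing from tree: 0x%x\n", mark_entry(0, 0));
	return 0;
}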
static int checkout_paths(const struct checkout_opts *opts,
const char *revision)
{
struct lock_file lock_file = LOCK_INIT;
int nr_checkouts = 0, nr_unmerged = 0;
+ trace2_cmd_mode(opts->patch_mode ? "patch" : "path");
+
if (opts->track != BRANCH_TRACK_UNSPECIFIED)
die(_("'%s' cannot be used with updating paths"), "--track");
* Make sure all pathspecs participated in locating the paths
* to be checked out.
*/
- for (pos = 0; pos < active_nr; pos++) {
- struct cache_entry *ce = active_cache[pos];
- ce->ce_flags &= ~CE_MATCHED;
- if (!opts->ignore_skipworktree && ce_skip_worktree(ce))
- continue;
- if (opts->source_tree && !(ce->ce_flags & CE_UPDATE))
- /*
- * "git checkout tree-ish -- path", but this entry
- * is in the original index; it will not be checked
- * out to the working tree and it does not matter
- * if pathspec matched this entry. We will not do
- * anything to this entry at all.
- */
- continue;
- /*
- * Either this entry came from the tree-ish we are
- * checking the paths out of, or we are checking out
- * of the index.
- *
- * If it comes from the tree-ish, we already know it
- * matches the pathspec and could just stamp
- * CE_MATCHED to it from update_some(). But we still
- * need ps_matched and read_tree_recursive (and
- * eventually tree_entry_interesting) cannot fill
- * ps_matched yet. Once it can, we can avoid calling
- * match_pathspec() for _all_ entries when
- * opts->source_tree != NULL.
- */
- if (ce_path_match(&the_index, ce, &opts->pathspec, ps_matched))
- ce->ce_flags |= CE_MATCHED;
- }
+ for (pos = 0; pos < active_nr; pos++)
+ if (opts->overlay_mode)
+ mark_ce_for_checkout_overlay(active_cache[pos],
+ ps_matched,
+ opts);
+ else
+ mark_ce_for_checkout_no_overlay(active_cache[pos],
+ ps_matched,
+ opts);
- if (report_path_error(ps_matched, &opts->pathspec, opts->prefix)) {
+ if (report_path_error(ps_matched, &opts->pathspec)) {
free(ps_matched);
return 1;
}
if (opts->force) {
warning(_("path '%s' is unmerged"), ce->name);
} else if (opts->writeout_stage) {
- errs |= check_stage(opts->writeout_stage, ce, pos);
+ errs |= check_stage(opts->writeout_stage, ce, pos, opts->overlay_mode);
} else if (opts->merge) {
errs |= check_stages((1<<2) | (1<<3), ce, pos);
} else {
if (opts->writeout_stage)
errs |= checkout_stage(opts->writeout_stage,
ce, pos,
- &state, &nr_checkouts);
+ &state,
+ &nr_checkouts, opts->overlay_mode);
else if (opts->merge)
errs |= checkout_merged(pos, &state,
&nr_unmerged);
pos = skip_same_name(ce, pos) - 1;
}
}
+ remove_marked_cache_entries(&the_index, 1);
+ remove_scheduled_dirs();
errs |= finish_delayed_checkout(&state, &nr_checkouts);
if (opts->count_checkout_paths) {
* opts->show_progress only impacts output so doesn't require a merge
*/
+ /*
+ * opts->overlay_mode cannot be used with switching branches so is
+ * not tested here
+ */
+
/*
* If we aren't creating a new branch any changes or updates will
* happen in the existing branch. Since that could only be updating
topts.initial_checkout = is_cache_unborn();
topts.update = 1;
topts.merge = 1;
- topts.gently = opts->merge && old_branch_info->commit;
+ topts.quiet = opts->merge && old_branch_info->commit;
topts.verbose_update = opts->show_progress;
topts.fn = twoway_merge;
if (opts->overwrite_ignore) {
*/
struct tree *result;
struct tree *work;
+ struct tree *old_tree;
struct merge_options o;
+ struct strbuf sb = STRBUF_INIT;
+
if (!opts->merge)
return 1;
*/
if (!old_branch_info->commit)
return 1;
+ old_tree = get_commit_tree(old_branch_info->commit);
+
+ if (repo_index_has_changes(the_repository, old_tree, &sb))
+ die(_("cannot continue with staged changes in "
+ "the following files:\n%s"), sb.buf);
+ strbuf_release(&sb);
+
+ if (repo_index_has_changes(the_repository,
+ get_commit_tree(old_branch_info->commit),
+ &sb))
+ warning(_("staged changes in the following files may be lost: %s"),
+ sb.buf);
+ strbuf_release(&sb);
/* Do more real merge */
ret = merge_trees(&o,
get_commit_tree(new_branch_info->commit),
work,
- get_commit_tree(old_branch_info->commit),
+ old_tree,
&result);
if (ret < 0)
exit(128);
void *path_to_free;
struct object_id rev;
int flag, writeout_error = 0;
+
+ trace2_cmd_mode("branch");
+
memset(&old_branch_info, 0, sizeof(old_branch_info));
old_branch_info.path = path_to_free = resolve_refdup("HEAD", 0, &rev, &flag);
if (old_branch_info.path)
int status;
struct strbuf branch_ref = STRBUF_INIT;
+ trace2_cmd_mode("unborn");
+
if (!opts->new_branch)
die(_("You are on a branch yet to be born"));
strbuf_addf(&branch_ref, "refs/heads/%s", opts->new_branch);
die(_("'%s' cannot be used with switching branches"),
"--patch");
+ if (!opts->overlay_mode)
+ die(_("'%s' cannot be used with switching branches"),
+ "--no-overlay");
+
if (opts->writeout_stage)
die(_("'%s' cannot be used with switching branches"),
"--ours/--theirs");
"checkout", "control recursive updating of submodules",
PARSE_OPT_OPTARG, option_parse_recurse_submodules_worktree_updater },
OPT_BOOL(0, "progress", &opts.show_progress, N_("force progress reporting")),
+ OPT_BOOL(0, "overlay", &opts.overlay_mode, N_("use overlay mode (default)")),
OPT_END(),
};
opts.overwrite_ignore = 1;
opts.prefix = prefix;
opts.show_progress = -1;
+ opts.overlay_mode = -1;
git_config(git_checkout_config, &opts);
if ((!!opts.new_branch + !!opts.new_branch_force + !!opts.new_orphan_branch) > 1)
die(_("-b, -B and --orphan are mutually exclusive"));
+ if (opts.overlay_mode == 1 && opts.patch_mode)
+ die(_("-p and --overlay are mutually exclusive"));
+
/*
* From here on, new_branch will contain the branch to be checked out,
* and new_branch_force and new_orphan_branch will tell us which one of
{
struct commit_graph *graph = NULL;
char *graph_name;
+ int open_ok;
+ int fd;
+ struct stat st;
static struct option builtin_commit_graph_verify_options[] = {
OPT_STRING(0, "object-dir", &opts.obj_dir,
opts.obj_dir = get_object_directory();
graph_name = get_commit_graph_filename(opts.obj_dir);
- graph = load_commit_graph_one(graph_name);
+ open_ok = open_commit_graph(graph_name, &fd, &st);
+ if (!open_ok && errno == ENOENT)
+ return 0;
+ if (!open_ok)
+ die_errno(_("Could not open commit-graph '%s'"), graph_name);
+ graph = load_commit_graph_one_fd_st(fd, &st);
FREE_AND_NULL(graph_name);
if (!graph)
- return 0;
+ return 1;
UNLEAK(graph);
return verify_commit_graph(the_repository, graph);
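With this change, `git commit-graph verify` distinguishes "the file simply is not there" (nothing to verify, exit 0) from "the file exists but cannot be opened or parsed" (an error). The general open-and-check-ENOENT pattern, as a small standalone program:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".git/objects/info/commit-graph";
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		if (errno == ENOENT)
			return 0;       /* file absent: nothing to verify */
		perror(path);           /* any other failure is a real error */
		return 1;
	}
	puts("file exists; a real tool would verify its contents here");
	close(fd);
	return 0;
}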
{
struct commit_graph *graph = NULL;
char *graph_name;
+ int open_ok;
+ int fd;
+ struct stat st;
static struct option builtin_commit_graph_read_options[] = {
OPT_STRING(0, "object-dir", &opts.obj_dir,
opts.obj_dir = get_object_directory();
graph_name = get_commit_graph_filename(opts.obj_dir);
- graph = load_commit_graph_one(graph_name);
+ open_ok = open_commit_graph(graph_name, &fd, &st);
+ if (!open_ok)
+ die_errno(_("Could not open commit-graph '%s'"), graph_name);
+
+ graph = load_commit_graph_one_fd_st(fd, &st);
if (!graph)
- die("graph file %s does not exist", graph_name);
+ return 1;
FREE_AND_NULL(graph_name);
#include "builtin.h"
#include "utf8.h"
#include "gpg-interface.h"
+#include "parse-options.h"
-static const char commit_tree_usage[] = "git commit-tree [(-p <sha1>)...] [-S[<keyid>]] [-m <message>] [-F <file>] <sha1>";
+static const char * const commit_tree_usage[] = {
+ N_("git commit-tree [(-p <parent>)...] [-S[<keyid>]] [(-m <message>)...] "
+ "[(-F <file>)...] <tree>"),
+ NULL
+};
static const char *sign_commit;
struct commit_list *parents;
for (parents = *parents_p; parents; parents = parents->next) {
if (parents->item == parent) {
- error("duplicate parent %s ignored", oid_to_hex(oid));
+ error(_("duplicate parent %s ignored"), oid_to_hex(oid));
return;
}
parents_p = &parents->next;
return git_default_config(var, value, cb);
}
+static int parse_parent_arg_callback(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct object_id oid;
+ struct commit_list **parents = opt->value;
+
+ BUG_ON_OPT_NEG_NOARG(unset, arg);
+
+ if (get_oid_commit(arg, &oid))
+ die(_("not a valid object name %s"), arg);
+
+ assert_oid_type(&oid, OBJ_COMMIT);
+ new_parent(lookup_commit(the_repository, &oid), parents);
+ return 0;
+}
+
+static int parse_message_arg_callback(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct strbuf *buf = opt->value;
+
+ BUG_ON_OPT_NEG_NOARG(unset, arg);
+
+ if (buf->len)
+ strbuf_addch(buf, '\n');
+ strbuf_addstr(buf, arg);
+ strbuf_complete_line(buf);
+
+ return 0;
+}
+
+static int parse_file_arg_callback(const struct option *opt,
+ const char *arg, int unset)
+{
+ int fd;
+ struct strbuf *buf = opt->value;
+
+ BUG_ON_OPT_NEG_NOARG(unset, arg);
+
+ if (buf->len)
+ strbuf_addch(buf, '\n');
+ if (!strcmp(arg, "-"))
+ fd = 0;
+ else {
+ fd = open(arg, O_RDONLY);
+ if (fd < 0)
+ die_errno(_("git commit-tree: failed to open '%s'"), arg);
+ }
+ if (strbuf_read(buf, fd, 0) < 0)
+ die_errno(_("git commit-tree: failed to read '%s'"), arg);
+ if (fd && close(fd))
+ die_errno(_("git commit-tree: failed to close '%s'"), arg);
+
+ return 0;
+}
+
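Both -m and -F now feed the callbacks above, which append to one buffer: chunks are separated by a blank line, every chunk is newline-terminated, and "-F -" reads standard input. A standalone sketch of the same accumulation rule, using a fixed-size stdio buffer for brevity instead of Git's strbuf API:

#include <stdio.h>
#include <string.h>

static char message[4096];

/* Append one chunk: separate chunks with a blank line and make sure
 * each one ends in a newline, mirroring the -m/-F callbacks above. */
static void append_chunk(const char *chunk)
{
	size_t len;

	if (message[0])
		strncat(message, "\n", sizeof(message) - strlen(message) - 1);
	strncat(message, chunk, sizeof(message) - strlen(message) - 1);
	len = strlen(message);
	if (len && message[len - 1] != '\n')
		strncat(message, "\n", sizeof(message) - strlen(message) - 1);
}

int main(void)
{
	append_chunk("subject given with -m");
	append_chunk("body that would come from a -F file (or stdin for -F -)");
	fputs(message, stdout);
	return 0;
}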
int cmd_commit_tree(int argc, const char **argv, const char *prefix)
{
- int i, got_tree = 0;
+ static struct strbuf buffer = STRBUF_INIT;
struct commit_list *parents = NULL;
struct object_id tree_oid;
struct object_id commit_oid;
- struct strbuf buffer = STRBUF_INIT;
+
+ struct option options[] = {
+ { OPTION_CALLBACK, 'p', NULL, &parents, N_("parent"),
+ N_("id of a parent commit object"), PARSE_OPT_NONEG,
+ parse_parent_arg_callback },
+ { OPTION_CALLBACK, 'm', NULL, &buffer, N_("message"),
+ N_("commit message"), PARSE_OPT_NONEG,
+ parse_message_arg_callback },
+ { OPTION_CALLBACK, 'F', NULL, &buffer, N_("file"),
+ N_("read commit log message from file"), PARSE_OPT_NONEG,
+ parse_file_arg_callback },
+ { OPTION_STRING, 'S', "gpg-sign", &sign_commit, N_("key-id"),
+ N_("GPG sign commit"), PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
+ OPT_END()
+ };
git_config(commit_tree_config, NULL);
if (argc < 2 || !strcmp(argv[1], "-h"))
- usage(commit_tree_usage);
-
- for (i = 1; i < argc; i++) {
- const char *arg = argv[i];
- if (!strcmp(arg, "-p")) {
- struct object_id oid;
- if (argc <= ++i)
- usage(commit_tree_usage);
- if (get_oid_commit(argv[i], &oid))
- die("Not a valid object name %s", argv[i]);
- assert_oid_type(&oid, OBJ_COMMIT);
- new_parent(lookup_commit(the_repository, &oid),
- &parents);
- continue;
- }
+ usage_with_options(commit_tree_usage, options);
- if (!strcmp(arg, "--gpg-sign")) {
- sign_commit = "";
- continue;
- }
+ argc = parse_options(argc, argv, prefix, options, commit_tree_usage, 0);
- if (skip_prefix(arg, "-S", &sign_commit) ||
- skip_prefix(arg, "--gpg-sign=", &sign_commit))
- continue;
+ if (argc != 1)
+ die(_("must give exactly one tree"));
- if (!strcmp(arg, "--no-gpg-sign")) {
- sign_commit = NULL;
- continue;
- }
-
- if (!strcmp(arg, "-m")) {
- if (argc <= ++i)
- usage(commit_tree_usage);
- if (buffer.len)
- strbuf_addch(&buffer, '\n');
- strbuf_addstr(&buffer, argv[i]);
- strbuf_complete_line(&buffer);
- continue;
- }
-
- if (!strcmp(arg, "-F")) {
- int fd;
-
- if (argc <= ++i)
- usage(commit_tree_usage);
- if (buffer.len)
- strbuf_addch(&buffer, '\n');
- if (!strcmp(argv[i], "-"))
- fd = 0;
- else {
- fd = open(argv[i], O_RDONLY);
- if (fd < 0)
- die_errno("git commit-tree: failed to open '%s'",
- argv[i]);
- }
- if (strbuf_read(&buffer, fd, 0) < 0)
- die_errno("git commit-tree: failed to read '%s'",
- argv[i]);
- if (fd && close(fd))
- die_errno("git commit-tree: failed to close '%s'",
- argv[i]);
- continue;
- }
-
- if (get_oid_tree(arg, &tree_oid))
- die("Not a valid object name %s", arg);
- if (got_tree)
- die("Cannot give more than one trees");
- got_tree = 1;
- }
+ if (get_oid_tree(argv[0], &tree_oid))
+ die(_("not a valid object name %s"), argv[0]);
if (!buffer.len) {
if (strbuf_read(&buffer, 0, 0) < 0)
- die_errno("git commit-tree: failed to read");
+ die_errno(_("git commit-tree: failed to read"));
}
if (commit_tree(buffer.buf, buffer.len, &tree_oid, parents, &commit_oid,
* and return the paths that match the given pattern in list.
*/
static int list_paths(struct string_list *list, const char *with_tree,
- const char *prefix, const struct pathspec *pattern)
+ const struct pathspec *pattern)
{
int i, ret;
char *m;
item->util = item; /* better a valid pointer than a fake one */
}
- ret = report_path_error(m, pattern, prefix);
+ ret = report_path_error(m, pattern);
free(m);
return ret;
}
die(_("cannot do a partial commit during a cherry-pick."));
}
- if (list_paths(&partial, !current_head ? NULL : "HEAD", prefix, &pathspec))
+ if (list_paths(&partial, !current_head ? NULL : "HEAD", &pathspec))
exit(1);
discard_cache();
set_ident_var(&date, strbuf_detach(&date_buf, NULL));
}
- strbuf_addstr(author_ident, fmt_ident(name, email, date, IDENT_STRICT));
+ strbuf_addstr(author_ident, fmt_ident(name, email, WANT_AUTHOR_IDENT, date,
+ IDENT_STRICT));
assert_split_ident(&author, author_ident);
export_one("GIT_AUTHOR_NAME", author.name_begin, author.name_end, 0);
export_one("GIT_AUTHOR_EMAIL", author.mail_begin, author.mail_end, 0);
s->show_untracked_files = SHOW_NORMAL_UNTRACKED_FILES;
else if (!strcmp(untracked_files_arg, "all"))
s->show_untracked_files = SHOW_ALL_UNTRACKED_FILES;
+ /*
+ * Please update $__git_untracked_file_modes in
+ * git-completion.bash when you add new options
+ */
else
die(_("Invalid untracked files mode '%s'"), untracked_files_arg);
}
else if (!strcmp(cleanup_arg, "scissors"))
cleanup_mode = use_editor ? COMMIT_MSG_CLEANUP_SCISSORS :
COMMIT_MSG_CLEANUP_SPACE;
+ /*
+ * Please update _git_commit() in git-completion.bash when you
+ * add new options.
+ */
else
die(_("Invalid cleanup mode %s"), cleanup_arg);
handle_untracked_files_arg(s);
if (all && argc > 0)
- die(_("Paths with -a does not make sense."));
+ die(_("paths '%s ...' with -a does not make sense"),
+ argv[0]);
if (status_format != STATUS_FORMAT_NONE)
dry_run = 1;
}
static const char diff_tree_usage[] =
-"git diff-tree [--stdin] [-m] [-c] [--cc] [-s] [-v] [--pretty] [-t] [-r] [--root] "
+"git diff-tree [--stdin] [-m] [-c | --cc] [-s] [-v] [--pretty] [-t] [-r] [--root] "
"[<common-diff-options>] <tree-ish> [<tree-ish>] [<path>...]\n"
" -r diff recursively\n"
+" -c show combined diff for merge commits\n"
+" --cc show combined diff for merge commits removing uninteresting hunks\n"
+" --combined-all-paths\n"
+" show name of file in all parents for combined diffs\n"
" --root include the initial commit as diff against /dev/null\n"
COMMON_DIFF_OPTIONS_HELP;
repo_init_revisions(the_repository, &rev, prefix);
- if (no_index && argc != i + 2) {
- if (no_index == DIFF_NO_INDEX_IMPLICIT) {
- /*
- * There was no --no-index and there were not two
- * paths. It is possible that the user intended
- * to do an inside-repository operation.
- */
- fprintf(stderr, "Not a git repository\n");
- fprintf(stderr,
- "To compare two paths outside a working tree:\n");
- }
- /* Give the usage message for non-repository usage and exit. */
- usagef("git diff %s <path> <path>",
- no_index == DIFF_NO_INDEX_EXPLICIT ?
- "--no-index" : "[--no-index]");
-
- }
- if (no_index)
- /* If this is a no-index diff, just run it and exit there. */
- diff_no_index(the_repository, &rev, argc, argv);
-
- /* Otherwise, we are doing the usual "git" diff */
- rev.diffopt.skip_stat_unmatch = !!diff_auto_refresh_index;
-
- /* Scale to real terminal size and respect statGraphWidth config */
+ /* Set up defaults that will apply to both no-index and regular diffs. */
rev.diffopt.stat_width = -1;
rev.diffopt.stat_graph_width = -1;
-
- /* Default to let external and textconv be used */
rev.diffopt.flags.allow_external = 1;
rev.diffopt.flags.allow_textconv = 1;
+ /* If this is a no-index diff, just run it and exit there. */
+ if (no_index)
+ exit(diff_no_index(&rev, no_index == DIFF_NO_INDEX_IMPLICIT,
+ argc, argv));
+
+
+ /*
+ * Otherwise, we are doing the usual "git" diff; set up any
+ * further defaults that apply to regular diffs.
+ */
+ rev.diffopt.skip_stat_unmatch = !!diff_auto_refresh_index;
+
/*
* Default to intent-to-add entries invisible in the
* index. This makes them show up as new files in diff-files
*mode2 = (int)strtol(p + 1, &p, 8);
if (*p != ' ')
return error("expected ' ', got '%c'", *p);
- if (get_oid_hex(++p, oid1))
- return error("expected object ID, got '%s'", p + 1);
- p += GIT_SHA1_HEXSZ;
+ if (parse_oid_hex(++p, oid1, (const char **)&p))
+ return error("expected object ID, got '%s'", p);
if (*p != ' ')
return error("expected ' ', got '%c'", *p);
- if (get_oid_hex(++p, oid2))
- return error("expected object ID, got '%s'", p + 1);
- p += GIT_SHA1_HEXSZ;
+ if (parse_oid_hex(++p, oid2, (const char **)&p))
+ return error("expected object ID, got '%s'", p);
if (*p != ' ')
return error("expected ' ', got '%c'", *p);
*status = *++p;
int cmd_difftool(int argc, const char **argv, const char *prefix)
{
int use_gui_tool = 0, dir_diff = 0, prompt = -1, symlinks = 0,
- tool_help = 0;
+ tool_help = 0, no_index = 0;
static char *difftool_cmd = NULL, *extcmd = NULL;
struct option builtin_difftool_options[] = {
OPT_BOOL('g', "gui", &use_gui_tool,
"tool returns a non - zero exit code")),
OPT_STRING('x', "extcmd", &extcmd, N_("command"),
N_("specify a custom command for viewing diffs")),
+ OPT_ARGUMENT("no-index", &no_index, N_("passed to `diff`")),
OPT_END()
};
if (tool_help)
return print_tool_help();
- /* NEEDSWORK: once we no longer spawn anything, remove this */
- setenv(GIT_DIR_ENVIRONMENT, absolute_path(get_git_dir()), 1);
- setenv(GIT_WORK_TREE_ENVIRONMENT, absolute_path(get_git_work_tree()), 1);
+ if (!no_index && !startup_info->have_repository)
+ die(_("difftool requires worktree or --no-index"));
+
+ if (!no_index) {
+ setup_work_tree();
+ setenv(GIT_DIR_ENVIRONMENT, absolute_path(get_git_dir()), 1);
+ setenv(GIT_WORK_TREE_ENVIRONMENT, absolute_path(get_git_work_tree()), 1);
+ }
if (use_gui_tool && diff_gui_tool && *diff_gui_tool)
setenv("GIT_DIFF_TOOL", diff_gui_tool, 1);
BUG("unknown protocol version");
}
- ref = fetch_pack(&args, fd, conn, ref, dest, sought, nr_sought,
+ ref = fetch_pack(&args, fd, ref, sought, nr_sought,
&shallow, pack_lockfile_ptr, version);
if (pack_lockfile) {
printf("lock %s\n", pack_lockfile);
sigchain_push_common(unlock_pack_on_signal);
atexit(unlock_pack);
+ sigchain_push(SIGPIPE, SIG_IGN);
exit_code = do_fetch(gtransport, &rs);
+ sigchain_pop(SIGPIPE);
refspec_clear(&rs);
transport_disconnect(gtransport);
gtransport = NULL;
OPT_INTEGER( 0 , "count", &maxcount, N_("show only <n> matched refs")),
OPT_STRING( 0 , "format", &format.format, N_("format"), N_("format to use for the output")),
OPT__COLOR(&format.use_color, N_("respect format colors")),
- OPT_CALLBACK(0 , "sort", sorting_tail, N_("key"),
- N_("field name to sort on"), &parse_opt_ref_sorting),
+ OPT_REF_SORT(sorting_tail),
OPT_CALLBACK(0, "points-at", &filter.points_at,
N_("object"), N_("print only refs which points at the given object"),
parse_opt_object_name),
return 0;
}
+static void mark_unreachable_referents(const struct object_id *oid)
+{
+ struct fsck_options options = FSCK_OPTIONS_DEFAULT;
+ struct object *obj = lookup_object(the_repository, oid->hash);
+
+ if (!obj || !(obj->flags & HAS_OBJ))
+ return; /* not part of our original set */
+ if (obj->flags & REACHABLE)
+ return; /* reachable objects already traversed */
+
+ /*
+ * Avoid passing OBJ_NONE to fsck_walk, which will parse the object
+ * (and we want to avoid parsing blobs).
+ */
+ if (obj->type == OBJ_NONE) {
+ enum object_type type = oid_object_info(the_repository,
+ &obj->oid, NULL);
+ if (type > 0)
+ object_as_type(the_repository, obj, type, 0);
+ }
+
+ options.walk = mark_used;
+ fsck_walk(obj, NULL, &options);
+}
+
+static int mark_loose_unreachable_referents(const struct object_id *oid,
+ const char *path,
+ void *data)
+{
+ mark_unreachable_referents(oid);
+ return 0;
+}
+
+static int mark_packed_unreachable_referents(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *data)
+{
+ mark_unreachable_referents(oid);
+ return 0;
+}
+
/*
* Check a single reachable object
*/
/* Traverse the pending reachable objects */
traverse_reachable();
+ /*
+ * With --connectivity-only, we won't have actually opened and marked
+ * unreachable objects with USED. Do that now to make --dangling, etc.
+ * accurate.
+ */
+ if (connectivity_only && (show_dangling || write_lost_and_found)) {
+ /*
+ * Even though we already have a "struct object" for each of
+ * these in memory, we must not iterate over the internal
+ * object hash as we do below. Our loop would potentially
+ * resize the hash, making our iteration invalid.
+ *
+ * Instead, we'll just go back to the source list of objects,
+ * and ignore any that weren't present in our earlier
+ * traversal.
+ */
+ for_each_loose_object(mark_loose_unreachable_referents, NULL, 0);
+ for_each_packed_object(mark_packed_unreachable_referents, NULL, 0);
+ }
+
/* Look up all the requirements, warn about missing objects.. */
max = get_max_object_index();
if (verbose)
raise(signo);
}
+static int gc_config_is_timestamp_never(const char *var)
+{
+ const char *value;
+ timestamp_t expire;
+
+ if (!git_config_get_value(var, &value) && value) {
+ if (parse_expiry_date(value, &expire))
+ die(_("failed to parse '%s' value '%s'"), var, value);
+ return expire == 0;
+ }
+ return 0;
+}
+
static void gc_config(void)
{
const char *value;
pack_refs = git_config_bool("gc.packrefs", value);
}
+ if (gc_config_is_timestamp_never("gc.reflogexpire") &&
+ gc_config_is_timestamp_never("gc.reflogexpireunreachable"))
+ prune_reflogs = 0;
+
git_config_get_int("gc.aggressivewindow", &aggressive_window);
git_config_get_int("gc.aggressivedepth", &aggressive_depth);
git_config_get_int("gc.auto", &gc_auto_threshold);
int auto_threshold;
int num_loose = 0;
int needed = 0;
-
- if (gc_auto_threshold <= 0)
- return 0;
+ const unsigned hexsz_loose = the_hash_algo->hexsz - 2;
dir = opendir(git_path("objects/17"));
if (!dir)
auto_threshold = DIV_ROUND_UP(gc_auto_threshold, 256);
while ((ent = readdir(dir)) != NULL) {
- if (strspn(ent->d_name, "0123456789abcdef") != 38 ||
- ent->d_name[38] != '\0')
+ if (strspn(ent->d_name, "0123456789abcdef") != hexsz_loose ||
+ ent->d_name[hexsz_loose] != '\0')
continue;
if (++num_loose > auto_threshold) {
needed = 1;
static void gc_before_repack(void)
{
+ /*
+ * We may be called twice, as both the pre- and
+ * post-daemonized phases will call us, but running these
+ * commands more than once is pointless and wasteful.
+ */
+ static int done = 0;
+ if (done++)
+ return;
+
if (pack_refs && run_command_v_opt(pack_refs_cmd.argv, RUN_GIT_CMD))
die(FAILED_RUN, pack_refs_cmd.argv[0]);
if (prune_reflogs && run_command_v_opt(reflog.argv, RUN_GIT_CMD))
die(FAILED_RUN, reflog.argv[0]);
-
- pack_refs = 0;
- prune_reflogs = 0;
}
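Because gc can call gc_before_repack() once before and once after daemonizing, the new static `done` flag turns the second call into a no-op instead of re-running pack-refs and reflog expiry. The run-once idiom in isolation:

#include <stdio.h>

/* Expensive preparation that is pointless to repeat within one process. */
static void prepare_once(void)
{
	static int done;

	if (done++)
		return;         /* already ran in this process */
	puts("running preparation steps");
}

int main(void)
{
	prepare_once();         /* does the work */
	prepare_once();         /* silently skipped */
	return 0;
}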
int cmd_gc(int argc, const char **argv, const char *prefix)
char *content = buffer + RECORDSIZE;
const char *comment;
ssize_t n;
+ long len;
+ char *end;
if (argc != 1)
usage(builtin_get_tar_commit_id_usage);
die_errno("git get-tar-commit-id: EOF before reading tar header");
if (header->typeflag[0] != 'g')
return 1;
- if (!skip_prefix(content, "52 comment=", &comment))
+
+ len = strtol(content, &end, 10);
+ if (errno == ERANGE || end == content || len < 0)
+ return 1;
+ if (!skip_prefix(end, " comment=", &comment))
+ return 1;
+ len -= comment - content;
+ if (len < 1 || !(len % 2) ||
+ hash_algo_by_length((len - 1) / 2) == GIT_HASH_UNKNOWN)
return 1;
- if (write_in_full(1, comment, 41) < 0)
+ if (write_in_full(1, comment, len) < 0)
die_errno("git get-tar-commit-id: write error");
return 0;
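The record being parsed here is a pax-style extended header of the form "<len> comment=<value>\n", where <len> counts the entire record including the length field itself; parsing the length instead of hard-coding 52 lets hashes longer than SHA-1 fit. A standalone sketch with a made-up SHA-1-sized record (the real code additionally validates the length against Git's known hash algorithms):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* One pax extended-header record: "<len> comment=<value>\n",
	 * where <len> counts the whole record, length field included. */
	const char record[] =
		"52 comment=0123456789abcdef0123456789abcdef01234567\n";
	char *end;
	long len = strtol(record, &end, 10);
	const char *value;

	if (len <= 0 || strncmp(end, " comment=", 9))
		return 1;
	value = end + 9;
	len -= value - record;     /* bytes left: the hex id plus '\n' */
	if (len < 1 || !(len % 2)) /* even hex digits + newline must be odd */
		return 1;
	fwrite(value, 1, (size_t)len, stdout);
	return 0;
}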
return HELP_FORMAT_INFO;
if (!strcmp(format, "web") || !strcmp(format, "html"))
return HELP_FORMAT_WEB;
+ /*
+ * Please update _git_config() in git-completion.bash when you
+ * add new help formats.
+ */
die(_("unrecognized help format '%s'"), format);
}
unsigned i, max, foreign_nr = 0;
max = get_max_object_index();
- for (i = 0; i < max; i++)
+
+ if (verbose)
+ progress = start_delayed_progress(_("Checking objects"), max);
+
+ for (i = 0; i < max; i++) {
foreign_nr += check_object(get_indexed_object(i));
+ display_progress(progress, i + 1);
+ }
+
+ stop_progress(&progress);
return foreign_nr;
}
struct strbuf path = STRBUF_INIT;
struct strbuf template_path = STRBUF_INIT;
size_t template_len;
- struct repository_format template_format;
+ struct repository_format template_format = REPOSITORY_FORMAT_INIT;
struct strbuf err = STRBUF_INIT;
DIR *dir;
char *to_free = NULL;
free(to_free);
strbuf_release(&path);
strbuf_release(&template_path);
+ clear_repository_format(&template_format);
}
static int git_init_db_config(const char *k, const char *v, void *cb)
if (!strcmp(k, "init.templatedir"))
return git_config_pathname(&init_db_template_dir, k, v);
+ if (starts_with(k, "core."))
+ return platform_core_config(k, v, cb);
+
return 0;
}
struct strbuf err = STRBUF_INIT;
/* Just look for `init.templatedir` */
+ init_db_template_dir = NULL; /* re-set in case it was set before */
git_config(git_init_db_config, NULL);
/*
}
startup_info->have_repository = 1;
+ /* Just look for `core.hidedotfiles` */
+ git_config(git_init_db_config, NULL);
+
safe_create_dir(git_dir, 0);
init_is_bare_repository = is_bare_repository();
return DECORATE_SHORT_REFS;
else if (!strcmp(value, "auto"))
return auto_decoration_style();
+ /*
+ * Please update _git_log() in git-completion.bash when you
+ * add new decoration styles.
+ */
return -1;
}
* This gives a rough estimate for how many commits we
* will print out in the list.
*/
-static int estimate_commit_count(struct rev_info *rev, struct commit_list *list)
+static int estimate_commit_count(struct commit_list *list)
{
int n = 0;
switch (simplify_commit(revs, commit)) {
case commit_show:
if (show_header) {
- int n = estimate_commit_count(revs, list);
+ int n = estimate_commit_count(list);
show_early_header(revs, "incomplete", n);
show_header = 0;
}
show_early_output = log_show_early;
}
-static void setup_early_output(struct rev_info *rev)
+static void setup_early_output(void)
{
struct sigaction sa;
static void finish_early_output(struct rev_info *rev)
{
- int n = estimate_commit_count(rev, rev->commits);
+ int n = estimate_commit_count(rev->commits);
signal(SIGALRM, SIG_IGN);
show_early_header(rev, "done", n);
}
int saved_dcctc = 0, close_file = rev->diffopt.close_file;
if (rev->early_output)
- setup_early_output(rev);
+ setup_early_output();
if (prepare_revision_walk(rev))
die(_("revision walk setup failed"));
return cmd_log_walk(&rev);
}
-static void show_tagger(char *buf, int len, struct rev_info *rev)
+static void show_tagger(const char *buf, struct rev_info *rev)
{
struct strbuf out = STRBUF_INIT;
struct pretty_print_context pp = {0};
if (get_oid_with_context(the_repository, obj_name,
GET_OID_RECORD_PATH,
&oidc, &obj_context))
- die(_("Not a valid object name %s"), obj_name);
+ die(_("not a valid object name %s"), obj_name);
if (!obj_context.path ||
!textconv_object(the_repository, obj_context.path,
obj_context.mode, &oidc, 1, &buf, &size)) {
int offset = 0;
if (!buf)
- return error(_("Could not read object %s"), oid_to_hex(oid));
+ return error(_("could not read object %s"), oid_to_hex(oid));
assert(type == OBJ_TAG);
while (offset < size && buf[offset] != '\n') {
int new_offset = offset + 1;
+ const char *ident;
while (new_offset < size && buf[new_offset++] != '\n')
; /* do nothing */
- if (starts_with(buf + offset, "tagger "))
- show_tagger(buf + offset + 7,
- new_offset - offset - 7, rev);
+ if (skip_prefix(buf + offset, "tagger ", &ident))
+ show_tagger(ident, rev);
offset = new_offset;
}
break;
o = parse_object(the_repository, &t->tagged->oid);
if (!o)
- ret = error(_("Could not read object %s"),
+ ret = error(_("could not read object %s"),
oid_to_hex(&t->tagged->oid));
objects[i].item = o;
i--;
ret = cmd_log_walk(&rev);
break;
default:
- ret = error(_("Unknown type: %d"), o->type);
+ ret = error(_("unknown type: %d"), o->type);
}
}
free(objects);
printf("%s\n", filename.buf + outdir_offset);
if ((rev->diffopt.file = fopen(filename.buf, "w")) == NULL) {
- error_errno(_("Cannot open patch file %s"), filename.buf);
+ error_errno(_("cannot open patch file %s"), filename.buf);
strbuf_release(&filename);
return -1;
}
unsigned flags1, flags2;
if (rev->pending.nr != 2)
- die(_("Need exactly one range."));
+ die(_("need exactly one range"));
o1 = rev->pending.objects[0].item;
o2 = rev->pending.objects[1].item;
c2 = lookup_commit_reference(the_repository, &o2->oid);
if ((flags1 & UNINTERESTING) == (flags2 & UNINTERESTING))
- die(_("Not a range."));
+ die(_("not a range"));
init_patch_ids(the_repository, ids);
struct commit *head = list[0];
if (!cmit_fmt_is_mail(rev->commit_format))
- die(_("Cover letter needs email format"));
+ die(_("cover letter needs email format"));
committer = git_committer_info(0);
if (!use_stdout &&
open_next_file(NULL, rev->numbered_files ? NULL : "cover-letter", rev, quiet))
- return;
+ die(_("failed to create cover-letter file"));
log_write_email_headers(rev, head, &pp.after_subject, &need_8bit_cte, 0);
const char **dir = (const char **)opt->value;
BUG_ON_OPT_NEG(unset);
if (*dir)
- die(_("Two output directories?"));
+ die(_("two output directories?"));
*dir = arg;
return 0;
}
*thread = THREAD_SHALLOW;
else if (!strcmp(arg, "deep"))
*thread = THREAD_DEEP;
+ /*
+ * Please update _git_formatpatch() in git-completion.bash
+ * when you add new options.
+ */
else
return 1;
return 0;
if (base_commit && strcmp(base_commit, "auto")) {
base = lookup_commit_reference_by_name(base_commit);
if (!base)
- die(_("Unknown commit %s"), base_commit);
+ die(_("unknown commit %s"), base_commit);
} else if ((base_commit && !strcmp(base_commit, "auto")) || base_auto) {
struct branch *curr_branch = branch_get(NULL);
const char *upstream = branch_get_upstream(curr_branch, NULL);
struct object_id oid;
if (get_oid(upstream, &oid))
- die(_("Failed to resolve '%s' as a valid ref."), upstream);
+ die(_("failed to resolve '%s' as a valid ref"), upstream);
commit = lookup_commit_or_die(&oid, "upstream base");
base_list = get_merge_bases_many(commit, total, list);
/* There should be one and only one merge base. */
if (!base_list || base_list->next)
- die(_("Could not find exact merge base."));
+ die(_("could not find exact merge base"));
base = base_list->item;
free_commit_list(base_list);
} else {
- die(_("Failed to get upstream, if you want to record base commit automatically,\n"
+ die(_("failed to get upstream, if you want to record base commit automatically,\n"
"please use git branch --set-upstream-to to track a remote branch.\n"
- "Or you could specify base commit by --base=<base-commit-id> manually."));
+ "Or you could specify base commit by --base=<base-commit-id> manually"));
}
}
struct commit_list *merge_base;
merge_base = get_merge_bases(rev[2 * i], rev[2 * i + 1]);
if (!merge_base || merge_base->next)
- die(_("Failed to find exact merge base"));
+ die(_("failed to find exact merge base"));
rev[i] = merge_base->item;
}
if (use_stdout)
die(_("standard output, or directory, which one?"));
if (mkdir(output_directory, 0777) < 0 && errno != EEXIST)
- die_errno(_("Could not create directory '%s'"),
+ die_errno(_("could not create directory '%s'"),
output_directory);
}
if (!use_stdout &&
open_next_file(rev.numbered_files ? NULL : commit, NULL, &rev, quiet))
- die(_("Failed to create output files"));
+ die(_("failed to create output files"));
shown = log_tree_commit(&rev, commit);
free_commit_buffer(the_repository->parsed_objects,
commit);
revs.max_parents = 1;
if (add_pending_commit(head, &revs, 0))
- die(_("Unknown commit %s"), head);
+ die(_("unknown commit %s"), head);
if (add_pending_commit(upstream, &revs, UNINTERESTING))
- die(_("Unknown commit %s"), upstream);
+ die(_("unknown commit %s"), upstream);
/* Don't say anything if head and upstream are the same. */
if (revs.pending.nr == 2) {
get_patch_ids(&revs, &ids);
if (limit && add_pending_commit(limit, &revs, UNINTERESTING))
- die(_("Unknown commit %s"), limit);
+ die(_("unknown commit %s"), limit);
/* reverse the list of commits */
if (prepare_revision_walk(&revs))
if (ps_matched) {
int bad;
- bad = report_path_error(ps_matched, &pathspec, prefix);
+ bad = report_path_error(ps_matched, &pathspec);
if (bad)
fprintf(stderr, "Did you forget to 'git add'?\n");
OPT_BIT(0, "refs", &flags, N_("do not show peeled tags"), REF_NORMAL),
OPT_BOOL(0, "get-url", &get_url,
N_("take url.<base>.insteadOf into account")),
- OPT_CALLBACK(0 , "sort", sorting_tail, N_("key"),
- N_("field name to sort on"), &parse_opt_ref_sorting),
+ OPT_REF_SORT(sorting_tail),
OPT_SET_INT_F(0, "exit-code", &status,
N_("exit with exit code 2 if no matching refs are found"),
2, PARSE_OPT_NOCOMPLETE),
return 0;
}
-static int option_read_message(struct parse_opt_ctx_t *ctx,
- const struct option *opt, int unset)
+static enum parse_opt_result option_read_message(struct parse_opt_ctx_t *ctx,
+ const struct option *opt,
+ const char *arg_not_used,
+ int unset)
{
struct strbuf *buf = opt->value;
const char *arg;
+ BUG_ON_OPT_ARG(arg_not_used);
if (unset)
BUG("-F cannot be negated");
option_parse_message),
{ OPTION_LOWLEVEL_CALLBACK, 'F', "file", &merge_msg, N_("path"),
N_("read message from file"), PARSE_OPT_NONEG,
- (parse_opt_cb *) option_read_message },
+ NULL, 0, option_read_message },
OPT__VERBOSITY(&verbosity),
OPT_BOOL(0, "abort", &abort_current_merge,
N_("abort the current in-progress merge")),
#include "config.h"
#include "parse-options.h"
#include "midx.h"
+#include "trace2.h"
static char const * const builtin_multi_pack_index_usage[] = {
N_("git multi-pack-index [--object-dir=<dir>] (write|verify)"),
return 1;
}
+ trace2_cmd_mode(argv[0]);
+
if (!strcmp(argv[0], "write"))
return write_midx_file(opts.object_dir);
if (!strcmp(argv[0], "verify"))
static void name_rev_line(char *p, struct name_ref_data *data)
{
struct strbuf buf = STRBUF_INIT;
- int forty = 0;
+ int counter = 0;
char *p_start;
+ const unsigned hexsz = the_hash_algo->hexsz;
+
for (p_start = p; *p; p++) {
#define ishex(x) (isdigit((x)) || ((x) >= 'a' && (x) <= 'f'))
if (!ishex(*p))
- forty = 0;
- else if (++forty == GIT_SHA1_HEXSZ &&
+ counter = 0;
+ else if (++counter == hexsz &&
!ishex(*(p+1))) {
struct object_id oid;
const char *name = NULL;
char c = *(p+1);
int p_len = p - p_start + 1;
- forty = 0;
+ counter = 0;
*(p+1) = 0;
- if (!get_oid(p - (GIT_SHA1_HEXSZ - 1), &oid)) {
+ if (!get_oid(p - (hexsz - 1), &oid)) {
struct object *o =
lookup_object(the_repository,
oid.hash);
continue;
if (data->name_only)
- printf("%.*s%s", p_len - GIT_SHA1_HEXSZ, p_start, name);
+ printf("%.*s%s", p_len - hexsz, p_start, name);
else
printf("%.*s (%s)", p_len, p_start, name);
p_start = p + 1;
#include "object-store.h"
#include "dir.h"
#include "midx.h"
+#include "trace2.h"
#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
#define SIZE(obj) oe_size(&to_pack, obj)
struct object_entry **base_out)
{
struct object_entry *base;
+ struct object_id base_oid;
if (!base_sha1)
return 0;
* even if it was buried too deep in history to make it into the
* packing list.
*/
- if (thin && bitmap_has_sha1_in_uninteresting(bitmap_git, base_sha1)) {
+ oidread(&base_oid, base_sha1);
+ if (thin && bitmap_has_oid_in_uninteresting(bitmap_git, &base_oid)) {
if (use_delta_islands) {
- struct object_id base_oid;
- hashcpy(base_oid.hash, base_sha1);
if (!in_same_island(&delta->idx.oid, &base_oid))
return 0;
}
}
}
+ trace2_region_enter("pack-objects", "enumerate-objects",
+ the_repository);
prepare_packing_data(the_repository, &to_pack);
if (progress)
if (include_tag && nr_result)
for_each_ref(add_ref_tag, NULL);
stop_progress(&progress_state);
+ trace2_region_leave("pack-objects", "enumerate-objects",
+ the_repository);
if (non_empty && !nr_result)
return 0;
- if (nr_result)
+ if (nr_result) {
+ trace2_region_enter("pack-objects", "prepare-pack",
+ the_repository);
prepare_pack(window, depth);
+ trace2_region_leave("pack-objects", "prepare-pack",
+ the_repository);
+ }
+
+ trace2_region_enter("pack-objects", "write-pack-file", the_repository);
write_pack_file();
+ trace2_region_leave("pack-objects", "write-pack-file", the_repository);
+
if (progress)
fprintf_ln(stderr,
_("Total %"PRIu32" (delta %"PRIu32"),"
struct pack_list *next;
struct packed_git *pack;
struct llist *unique_objects;
- struct llist *all_objects;
+ struct llist *remaining_objects;
+ size_t all_objects_size;
} *local_packs = NULL, *altodb_packs = NULL;
-struct pll {
- struct pll *next;
- struct pack_list *pl;
-};
-
static struct llist_item *free_nodes;
static inline void llist_item_put(struct llist_item *item)
return new_item;
}
-static void llist_free(struct llist *list)
-{
- while ((list->back = list->front)) {
- list->front = list->front->next;
- llist_item_put(list->back);
- }
- free(list);
-}
-
static inline void llist_init(struct llist **list)
{
*list = xmalloc(sizeof(struct llist));
struct llist_item *p1_hint = NULL, *p2_hint = NULL;
const unsigned int hashsz = the_hash_algo->rawsz;
+ if (!p1->unique_objects)
+ p1->unique_objects = llist_copy(p1->remaining_objects);
+ if (!p2->unique_objects)
+ p2->unique_objects = llist_copy(p2->remaining_objects);
+
p1_base = p1->pack->index_data;
p2_base = p2->pack->index_data;
p1_base += 256 * 4 + ((p1->pack->index_version < 2) ? 4 : 8);
}
}
-static void pll_free(struct pll *l)
-{
- struct pll *old;
- struct pack_list *opl;
-
- while (l) {
- old = l;
- while (l->pl) {
- opl = l->pl;
- l->pl = opl->next;
- free(opl);
- }
- l = l->next;
- free(old);
- }
-}
-
-/* all the permutations have to be free()d at the same time,
- * since they refer to each other
- */
-static struct pll * get_permutations(struct pack_list *list, int n)
-{
- struct pll *subset, *ret = NULL, *new_pll = NULL;
-
- if (list == NULL || pack_list_size(list) < n || n == 0)
- return NULL;
-
- if (n == 1) {
- while (list) {
- new_pll = xmalloc(sizeof(*new_pll));
- new_pll->pl = NULL;
- pack_list_insert(&new_pll->pl, list);
- new_pll->next = ret;
- ret = new_pll;
- list = list->next;
- }
- return ret;
- }
-
- while (list->next) {
- subset = get_permutations(list->next, n - 1);
- while (subset) {
- new_pll = xmalloc(sizeof(*new_pll));
- new_pll->pl = subset->pl;
- pack_list_insert(&new_pll->pl, list);
- new_pll->next = ret;
- ret = new_pll;
- subset = subset->next;
- }
- list = list->next;
- }
- return ret;
-}
-
-static int is_superset(struct pack_list *pl, struct llist *list)
-{
- struct llist *diff;
-
- diff = llist_copy(list);
-
- while (pl) {
- llist_sorted_difference_inplace(diff, pl->all_objects);
- if (diff->size == 0) { /* we're done */
- llist_free(diff);
- return 1;
- }
- pl = pl->next;
- }
- llist_free(diff);
- return 0;
-}
-
static size_t sizeof_union(struct packed_git *p1, struct packed_git *p2)
{
size_t ret = 0;
return ret;
}
+static int cmp_remaining_objects(const void *a, const void *b)
+{
+ struct pack_list *pl_a = *((struct pack_list **)a);
+ struct pack_list *pl_b = *((struct pack_list **)b);
+
+ if (pl_a->remaining_objects->size == pl_b->remaining_objects->size) {
+ /* have the same remaining_objects, big pack first */
+ if (pl_a->all_objects_size == pl_b->all_objects_size)
+ return 0;
+ else if (pl_a->all_objects_size < pl_b->all_objects_size)
+ return 1;
+ else
+ return -1;
+ } else if (pl_a->remaining_objects->size < pl_b->remaining_objects->size) {
+ /* sort by remaining objects, more objects first */
+ return 1;
+ } else {
+ return -1;
+ }
+}
+
+/* Sort pack_list, greater size of remaining_objects first */
+static void sort_pack_list(struct pack_list **pl)
+{
+ struct pack_list **ary, *p;
+ int i;
+ size_t n = pack_list_size(*pl);
+
+ if (n < 2)
+ return;
+
+ /* prepare an array of packed_list for easier sorting */
+ ary = xcalloc(n, sizeof(struct pack_list *));
+ for (n = 0, p = *pl; p; p = p->next)
+ ary[n++] = p;
+
+ QSORT(ary, n, cmp_remaining_objects);
+
+ /* link them back again */
+ for (i = 0; i < n - 1; i++)
+ ary[i]->next = ary[i + 1];
+ ary[n - 1]->next = NULL;
+ *pl = ary[0];
+
+ free(ary);
+}
+
+
static void minimize(struct pack_list **min)
{
- struct pack_list *pl, *unique = NULL,
- *non_unique = NULL, *min_perm = NULL;
- struct pll *perm, *perm_all, *perm_ok = NULL, *new_perm;
- struct llist *missing;
- off_t min_perm_size = 0, perm_size;
- int n;
+ struct pack_list *pl, *unique = NULL, *non_unique = NULL;
+ struct llist *missing, *unique_pack_objects;
pl = local_packs;
while (pl) {
missing = llist_copy(all_objects);
pl = unique;
while (pl) {
- llist_sorted_difference_inplace(missing, pl->all_objects);
+ llist_sorted_difference_inplace(missing, pl->remaining_objects);
pl = pl->next;
}
+ *min = unique;
+
/* return if there are no objects missing from the unique set */
if (missing->size == 0) {
- *min = unique;
free(missing);
return;
}
- /* find the permutations which contain all missing objects */
- for (n = 1; n <= pack_list_size(non_unique) && !perm_ok; n++) {
- perm_all = perm = get_permutations(non_unique, n);
- while (perm) {
- if (is_superset(perm->pl, missing)) {
- new_perm = xmalloc(sizeof(struct pll));
- memcpy(new_perm, perm, sizeof(struct pll));
- new_perm->next = perm_ok;
- perm_ok = new_perm;
- }
- perm = perm->next;
- }
- if (perm_ok)
- break;
- pll_free(perm_all);
- }
- if (perm_ok == NULL)
- die("Internal error: No complete sets found!");
-
- /* find the permutation with the smallest size */
- perm = perm_ok;
- while (perm) {
- perm_size = pack_set_bytecount(perm->pl);
- if (!min_perm_size || min_perm_size > perm_size) {
- min_perm_size = perm_size;
- min_perm = perm->pl;
- }
- perm = perm->next;
- }
- *min = min_perm;
- /* add the unique packs to the list */
- pl = unique;
+ unique_pack_objects = llist_copy(all_objects);
+ llist_sorted_difference_inplace(unique_pack_objects, missing);
+
+ /* remove unique pack objects from the non_unique packs */
+ pl = non_unique;
while (pl) {
- pack_list_insert(min, pl);
+ llist_sorted_difference_inplace(pl->remaining_objects, unique_pack_objects);
pl = pl->next;
}
+
+ while (non_unique) {
+ /* sort the non_unique packs, greater size of remaining_objects first */
+ sort_pack_list(&non_unique);
+ if (non_unique->remaining_objects->size == 0)
+ break;
+
+ pack_list_insert(min, non_unique);
+
+ for (pl = non_unique->next; pl && pl->remaining_objects->size > 0; pl = pl->next)
+ llist_sorted_difference_inplace(pl->remaining_objects, non_unique->remaining_objects);
+
+ non_unique = non_unique->next;
+ }
}
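The rewritten minimize() above no longer enumerates permutations of the non-unique packs; it keeps re-sorting them by how many still-needed objects they hold, takes the top pack, and subtracts its objects from the rest. The same greedy covering idea over toy bitmask "packs" (the real code walks sorted object-ID lists, not bitmasks):

#include <stdio.h>

#define NPACKS 4

static int popcount(unsigned v)
{
	int n = 0;
	for (; v; v &= v - 1)
		n++;
	return n;
}

int main(void)
{
	/* Each "pack" covers a set of objects, encoded as bits 0..7. */
	unsigned pack[NPACKS] = { 0x0f, 0x3c, 0xc0, 0x30 };
	unsigned missing = 0xff;        /* objects not yet covered */
	int i;

	while (missing) {
		int best = -1, best_count = 0;

		for (i = 0; i < NPACKS; i++) {
			int count = popcount(pack[i] & missing);
			if (count > best_count) {
				best_count = count;
				best = i;
			}
		}
		if (best < 0)
			break;          /* remaining packs add nothing */
		printf("keep pack %d (covers %d new objects)\n", best, best_count);
		missing &= ~pack[best];
	}
	return 0;
}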
static void load_all_objects(void)
while (pl) {
hint = NULL;
- l = pl->all_objects->front;
+ l = pl->remaining_objects->front;
while (l) {
hint = llist_insert_sorted_unique(all_objects,
l->oid, hint);
/* remove objects present in remote packs */
pl = altodb_packs;
while (pl) {
- llist_sorted_difference_inplace(all_objects, pl->all_objects);
+ llist_sorted_difference_inplace(all_objects, pl->remaining_objects);
pl = pl->next;
}
}
while (alt) {
local = local_packs;
while (local) {
- llist_sorted_difference_inplace(local->unique_objects,
- alt->all_objects);
+ llist_sorted_difference_inplace(local->remaining_objects,
+ alt->remaining_objects);
local = local->next;
}
- llist_sorted_difference_inplace(all_objects, alt->all_objects);
alt = alt->next;
}
}
return NULL;
l.pack = p;
- llist_init(&l.all_objects);
+ llist_init(&l.remaining_objects);
if (open_pack_index(p))
return NULL;
base += 256 * 4 + ((p->index_version < 2) ? 4 : 8);
step = the_hash_algo->rawsz + ((p->index_version < 2) ? 4 : 0);
while (off < p->num_objects * step) {
- llist_insert_back(l.all_objects, (const struct object_id *)(base + off));
+ llist_insert_back(l.remaining_objects, (const struct object_id *)(base + off));
off += step;
}
- /* this list will be pruned in cmp_two_packs later */
- l.unique_objects = llist_copy(l.all_objects);
+ l.all_objects_size = l.remaining_objects->size;
+ l.unique_objects = NULL;
if (p->pack_local)
return pack_list_insert(&local_packs, &l);
else
int cmd_pack_redundant(int argc, const char **argv, const char *prefix)
{
int i;
- struct pack_list *min, *red, *pl;
+ struct pack_list *min = NULL, *red, *pl;
struct llist *ignore;
struct object_id *oid;
char buf[GIT_MAX_HEXSZ + 2]; /* hex hash + \n + \0 */
load_all_objects();
- cmp_local_packs();
if (alt_odb)
scan_alt_odb_packs();
llist_sorted_difference_inplace(all_objects, ignore);
pl = local_packs;
while (pl) {
- llist_sorted_difference_inplace(pl->unique_objects, ignore);
+ llist_sorted_difference_inplace(pl->remaining_objects, ignore);
pl = pl->next;
}
+ cmp_local_packs();
+
minimize(&min);
if (verbose) {
pl = red = pack_list_difference(local_packs, min);
while (pl) {
printf("%s\n%s\n",
- sha1_pack_index_name(pl->pack->sha1),
+ sha1_pack_index_name(pl->pack->hash),
pl->pack->pack_name);
pl = pl->next;
}
argc = parse_options(argc, argv, prefix, prune_packed_options,
prune_packed_usage, 0);
+ if (argc > 0)
+ usage_msg_opt(_("too many arguments"),
+ prune_packed_usage,
+ prune_packed_options);
+
prune_packed_objects(opts);
return 0;
}
return 0;
}
+static void perform_reachability_traversal(struct rev_info *revs)
+{
+ static int initialized;
+ struct progress *progress = NULL;
+
+ if (initialized)
+ return;
+
+ if (show_progress)
+ progress = start_delayed_progress(_("Checking connectivity"), 0);
+ mark_reachable_objects(revs, 1, expire, progress);
+ stop_progress(&progress);
+ initialized = 1;
+}
+
+static int is_object_reachable(const struct object_id *oid,
+ struct rev_info *revs)
+{
+ struct object *obj;
+
+ perform_reachability_traversal(revs);
+
+ obj = lookup_object(the_repository, oid->hash);
+ return obj && (obj->flags & SEEN);
+}
+
static int prune_object(const struct object_id *oid, const char *fullpath,
void *data)
{
+ struct rev_info *revs = data;
struct stat st;
- /*
- * Do we know about this object?
- * It must have been reachable
- */
- if (lookup_object(the_repository, oid->hash))
+ if (is_object_reachable(oid, revs))
return 0;
if (lstat(fullpath, &st)) {
int cmd_prune(int argc, const char **argv, const char *prefix)
{
struct rev_info revs;
- struct progress *progress = NULL;
int exclude_promisor_objects = 0;
const struct option options[] = {
OPT__DRY_RUN(&show_only, N_("do not remove, show only")),
if (show_progress == -1)
show_progress = isatty(2);
- if (show_progress)
- progress = start_delayed_progress(_("Checking connectivity"), 0);
if (exclude_promisor_objects) {
fetch_if_missing = 0;
revs.exclude_promisor_objects = 1;
}
- mark_reachable_objects(&revs, 1, expire, progress);
- stop_progress(&progress);
for_each_loose_file_in_objdir(get_object_directory(), prune_object,
- prune_cruft, prune_subdir, NULL);
+ prune_cruft, prune_subdir, &revs);
prune_packed_objects(show_only ? PRUNE_PACKED_DRY_RUN : 0);
remove_temporary_files(get_object_directory());
remove_temporary_files(s);
free(s);
- if (is_repository_shallow(the_repository))
+ if (is_repository_shallow(the_repository)) {
+ perform_reachability_traversal(&revs);
prune_shallow(show_only ? PRUNE_SHOW_ONLY : 0);
+ }
return 0;
}
return REBASE_MERGES;
else if (!strcmp(value, "interactive") || !strcmp(value, "i"))
return REBASE_INTERACTIVE;
+ /*
+ * Please update _git_config() in git-completion.bash when you
+ * add new rebase modes.
+ */
if (fatal)
die(_("Invalid value for %s: %s"), key, value);
fp = xfopen(filename, "r");
while (strbuf_getline_lf(&sb, fp) != EOF) {
- if (get_oid_hex(sb.buf, &oid))
- continue; /* invalid line: does not start with SHA1 */
- if (starts_with(sb.buf + GIT_SHA1_HEXSZ, "\tnot-for-merge\t"))
+ const char *p;
+ if (parse_oid_hex(sb.buf, &oid, &p))
+ continue; /* invalid line: does not start with object ID */
+ if (starts_with(p, "\tnot-for-merge\t"))
continue; /* ref is not-for-merge */
oid_array_append(merge_heads, &oid);
}
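parse_oid_hex() consumes one object ID of whatever length the active hash algorithm uses and leaves `p` pointing just past it, which is why the hard-coded GIT_SHA1_HEXSZ offsets disappear above. The general "parse a fixed-width hex field and advance the cursor" shape, with the width passed in (a hypothetical helper for illustration, not Git's API):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper: parse exactly `hexsz` hex digits at *s into `out`
 * and advance *s past them; returns 0 on success, -1 if malformed. */
static int parse_hex_field(const char **s, char *out, size_t hexsz)
{
	size_t i;

	for (i = 0; i < hexsz; i++)
		if (!isxdigit((unsigned char)(*s)[i]))
			return -1;
	memcpy(out, *s, hexsz);
	out[hexsz] = '\0';
	*s += hexsz;
	return 0;
}

int main(void)
{
	const char *line =
		"0123456789abcdef0123456789abcdef01234567\tnot-for-merge\tbranch 'x'\n";
	char id[64 + 1];

	if (!parse_hex_field(&line, id, 40))
		printf("id=%s, rest=%s", id, line);
	return 0;
}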
cp.no_stderr = 1;
cp.git_cmd = 1;
- ret = capture_command(&cp, &sb, GIT_SHA1_HEXSZ);
+ ret = capture_command(&cp, &sb, GIT_MAX_HEXSZ);
if (ret)
goto cleanup;
}
/**
- * Given the current HEAD SHA1, the merge head returned from git-fetch and the
+ * Given the current HEAD oid, the merge head returned from git-fetch and the
* fork point calculated by get_rebase_fork_point(), runs git-rebase with the
* appropriate arguments and returns its exit status.
*/
int creation_factor = RANGE_DIFF_CREATION_FACTOR_DEFAULT;
struct diff_options diffopt = { NULL };
int simple_color = -1;
- struct option options[] = {
+ struct option range_diff_options[] = {
OPT_INTEGER(0, "creation-factor", &creation_factor,
N_("Percentage by which creation is weighted")),
OPT_BOOL(0, "no-dual-color", &simple_color,
N_("use simple diff colors")),
OPT_END()
};
- int i, j, res = 0;
+ struct option *options;
+ int res = 0;
struct strbuf range1 = STRBUF_INIT, range2 = STRBUF_INIT;
git_config(git_diff_ui_config, NULL);
repo_diff_setup(the_repository, &diffopt);
+ options = parse_options_concat(range_diff_options, diffopt.parseopts);
argc = parse_options(argc, argv, NULL, options,
- builtin_range_diff_usage, PARSE_OPT_KEEP_UNKNOWN |
- PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0);
-
- for (i = j = 1; i < argc && strcmp("--", argv[i]); ) {
- int c = diff_opt_parse(&diffopt, argv + i, argc - i, prefix);
+ builtin_range_diff_usage, 0);
- if (!c)
- argv[j++] = argv[i++];
- else
- i += c;
- }
- while (i < argc)
- argv[j++] = argv[i++];
- argc = j;
diff_setup_done(&diffopt);
- /* Make sure that there are no unparsed options */
- argc = parse_options(argc, argv, NULL,
- options + ARRAY_SIZE(options) - 1, /* OPT_END */
- builtin_range_diff_usage, 0);
-
/* force color when --dual-color was used */
if (!simple_color)
diffopt.use_color = 1;
error(_("need two commit ranges"));
usage_with_options(builtin_range_diff_usage, options);
}
+ FREE_AND_NULL(options);
res = show_range_diff(range1.buf, range2.buf, creation_factor,
simple_color < 1, &diffopt);
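parse_options_concat() splices the diff machinery's own option table onto range-diff's options, so a single parse_options() call replaces the hand-rolled diff_opt_parse() loop that the hunk above removes. A minimal sketch of joining two sentinel-terminated arrays, with plain structs standing in for struct option:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct opt { const char *name; };       /* .name == NULL marks the end */

static size_t count(const struct opt *a)
{
	size_t n = 0;
	while (a[n].name)
		n++;
	return n;
}

/* Allocate a new array holding a, then b, then one terminating sentinel. */
static struct opt *concat(const struct opt *a, const struct opt *b)
{
	size_t na = count(a), nb = count(b);
	struct opt *out = calloc(na + nb + 1, sizeof(*out));

	if (!out)
		return NULL;
	memcpy(out, a, na * sizeof(*out));
	memcpy(out + na, b, nb * sizeof(*out));
	return out;                     /* caller frees */
}

int main(void)
{
	struct opt mine[] = { { "creation-factor" }, { "no-dual-color" }, { NULL } };
	struct opt diff[] = { { "stat" }, { "patch" }, { NULL } };
	struct opt *all = concat(mine, diff);

	for (size_t i = 0; all && all[i].name; i++)
		puts(all[i].name);
	free(all);
	return 0;
}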
{ OPTION_CALLBACK, 0, "recurse-submodules", NULL,
"checkout", "control recursive updating of submodules",
PARSE_OPT_OPTARG, option_parse_recurse_submodules_worktree_updater },
+ OPT__QUIET(&opts.quiet, N_("suppress feedback messages")),
OPT_END()
};
static GIT_PATH_FUNC(path_squash_onto, "rebase-merge/squash-onto")
static GIT_PATH_FUNC(path_interactive, "rebase-merge/interactive")
+static int add_exec_commands(struct string_list *commands)
+{
+ const char *todo_file = rebase_path_todo();
+ struct todo_list todo_list = TODO_LIST_INIT;
+ int res;
+
+ if (strbuf_read_file(&todo_list.buf, todo_file, 0) < 0)
+ return error_errno(_("could not read '%s'."), todo_file);
+
+ if (todo_list_parse_insn_buffer(the_repository, todo_list.buf.buf,
+ &todo_list)) {
+ todo_list_release(&todo_list);
+ return error(_("unusable todo list: '%s'"), todo_file);
+ }
+
+ todo_list_add_exec_commands(&todo_list, commands);
+ res = todo_list_write_to_file(the_repository, &todo_list,
+ todo_file, NULL, NULL, -1, 0);
+ todo_list_release(&todo_list);
+
+ if (res)
+ return error_errno(_("could not write '%s'."), todo_file);
+ return 0;
+}
+
+static int rearrange_squash_in_todo_file(void)
+{
+ const char *todo_file = rebase_path_todo();
+ struct todo_list todo_list = TODO_LIST_INIT;
+ int res = 0;
+
+ if (strbuf_read_file(&todo_list.buf, todo_file, 0) < 0)
+ return error_errno(_("could not read '%s'."), todo_file);
+ if (todo_list_parse_insn_buffer(the_repository, todo_list.buf.buf,
+ &todo_list)) {
+ todo_list_release(&todo_list);
+ return error(_("unusable todo list: '%s'"), todo_file);
+ }
+
+ res = todo_list_rearrange_squash(&todo_list);
+ if (!res)
+ res = todo_list_write_to_file(the_repository, &todo_list,
+ todo_file, NULL, NULL, -1, 0);
+
+ todo_list_release(&todo_list);
+
+ if (res)
+ return error_errno(_("could not write '%s'."), todo_file);
+ return 0;
+}
+
+static int transform_todo_file(unsigned flags)
+{
+ const char *todo_file = rebase_path_todo();
+ struct todo_list todo_list = TODO_LIST_INIT;
+ int res;
+
+ if (strbuf_read_file(&todo_list.buf, todo_file, 0) < 0)
+ return error_errno(_("could not read '%s'."), todo_file);
+
+ if (todo_list_parse_insn_buffer(the_repository, todo_list.buf.buf,
+ &todo_list)) {
+ todo_list_release(&todo_list);
+ return error(_("unusable todo list: '%s'"), todo_file);
+ }
+
+ res = todo_list_write_to_file(the_repository, &todo_list, todo_file,
+ NULL, NULL, -1, flags);
+ todo_list_release(&todo_list);
+
+ if (res)
+ return error_errno(_("could not write '%s'."), todo_file);
+ return 0;
+}
+
+static int edit_todo_file(unsigned flags)
+{
+ const char *todo_file = rebase_path_todo();
+ struct todo_list todo_list = TODO_LIST_INIT,
+ new_todo = TODO_LIST_INIT;
+ int res = 0;
+
+ if (strbuf_read_file(&todo_list.buf, todo_file, 0) < 0)
+ return error_errno(_("could not read '%s'."), todo_file);
+
+ strbuf_stripspace(&todo_list.buf, 1);
+ res = edit_todo_list(the_repository, &todo_list, &new_todo, NULL, NULL, flags);
+ if (!res && todo_list_write_to_file(the_repository, &new_todo, todo_file,
+ NULL, NULL, -1, flags & ~(TODO_LIST_SHORTEN_IDS)))
+ res = error_errno(_("could not write '%s'"), todo_file);
+
+ todo_list_release(&todo_list);
+ todo_list_release(&new_todo);
+
+ return res;
+}
+
static int get_revision_ranges(const char *upstream, const char *onto,
const char **head_hash,
char **revisions, char **shortrevisions)
const char *onto, const char *onto_name,
const char *squash_onto, const char *head_name,
const char *restrict_revision, char *raw_strategies,
- const char *cmd, unsigned autosquash)
+ struct string_list *commands, unsigned autosquash)
{
int ret;
const char *head_hash = NULL;
char *revisions = NULL, *shortrevisions = NULL;
struct argv_array make_script_args = ARGV_ARRAY_INIT;
- FILE *todo_list;
+ struct todo_list todo_list = TODO_LIST_INIT;
if (prepare_branch_to_be_rebased(opts, switch_to))
return -1;
if (!upstream && squash_onto)
write_file(path_squash_onto(), "%s\n", squash_onto);
- todo_list = fopen(rebase_path_todo(), "w");
- if (!todo_list) {
- free(revisions);
- free(shortrevisions);
-
- return error_errno(_("could not open %s"), rebase_path_todo());
- }
-
argv_array_pushl(&make_script_args, "", revisions, NULL);
if (restrict_revision)
argv_array_push(&make_script_args, restrict_revision);
- ret = sequencer_make_script(the_repository, todo_list,
+ ret = sequencer_make_script(the_repository, &todo_list.buf,
make_script_args.argc, make_script_args.argv,
flags);
- fclose(todo_list);
if (ret)
error(_("could not generate todo list"));
else {
discard_cache();
- ret = complete_action(the_repository, opts, flags,
- shortrevisions, onto_name, onto,
- head_hash, cmd, autosquash);
+ if (todo_list_parse_insn_buffer(the_repository, todo_list.buf.buf,
+ &todo_list))
+ BUG("unusable todo list");
+
+ ret = complete_action(the_repository, opts, flags, shortrevisions, onto_name,
+ onto, head_hash, commands, autosquash, &todo_list);
}
free(revisions);
free(shortrevisions);
+ todo_list_release(&todo_list);
argv_array_clear(&make_script_args);
return ret;
const char *onto = NULL, *onto_name = NULL, *restrict_revision = NULL,
*squash_onto = NULL, *upstream = NULL, *head_name = NULL,
*switch_to = NULL, *cmd = NULL;
+ struct string_list commands = STRING_LIST_INIT_DUP;
char *raw_strategies = NULL;
enum {
NONE = 0, CONTINUE, SKIP, EDIT_TODO, SHOW_CURRENT_PATCH,
warning(_("--[no-]rebase-cousins has no effect without "
"--rebase-merges"));
+ if (cmd && *cmd) {
+ string_list_split(&commands, cmd, '\n', -1);
+
+ /*
+ * rebase.c adds a newline to cmd after every command,
+ * so here the last command is always empty.
+ */
+ string_list_remove_empty_items(&commands, 0);
+ }
+
switch (command) {
case NONE:
if (!onto && !upstream)
ret = do_interactive_rebase(&opts, flags, switch_to, upstream, onto,
onto_name, squash_onto, head_name, restrict_revision,
- raw_strategies, cmd, autosquash);
+ raw_strategies, &commands, autosquash);
break;
case SKIP: {
struct string_list merge_rr = STRING_LIST_INIT_DUP;
break;
}
case EDIT_TODO:
- ret = edit_todo_list(the_repository, flags);
+ ret = edit_todo_file(flags);
break;
case SHOW_CURRENT_PATCH: {
struct child_process cmd = CHILD_PROCESS_INIT;
}
case SHORTEN_OIDS:
case EXPAND_OIDS:
- ret = transform_todos(the_repository, flags);
+ ret = transform_todo_file(flags);
break;
case CHECK_TODO_LIST:
- ret = check_todo_list(the_repository);
+ ret = check_todo_list_from_file(the_repository);
break;
case REARRANGE_SQUASH:
- ret = rearrange_squash(the_repository);
+ ret = rearrange_squash_in_todo_file();
break;
case ADD_EXEC:
- ret = sequencer_add_exec_commands(the_repository, cmd);
+ ret = add_exec_commands(&commands);
break;
default:
BUG("invalid command '%d'", command);
}
+ string_list_clear(&commands, 0);
return !!ret;
}
REBASE_PRESERVE_MERGES
};
-static int use_builtin_rebase(void)
-{
- struct child_process cp = CHILD_PROCESS_INIT;
- struct strbuf out = STRBUF_INIT;
- int ret, env = git_env_bool("GIT_TEST_REBASE_USE_BUILTIN", -1);
-
- if (env != -1)
- return env;
-
- argv_array_pushl(&cp.args,
- "config", "--bool", "rebase.usebuiltin", NULL);
- cp.git_cmd = 1;
- if (capture_command(&cp, &out, 6)) {
- strbuf_release(&out);
- return 1;
- }
-
- strbuf_trim(&out);
- ret = !strcmp("true", out.buf);
- strbuf_release(&out);
- return ret;
-}
-
struct rebase_options {
enum rebase_type type;
const char *state_dir;
char *strategy, *strategy_opts;
struct strbuf git_format_patch_opt;
int reschedule_failed_exec;
+ int use_legacy_rebase;
};
static int is_interactive(struct rebase_options *opts)
#define RESET_HEAD_HARD (1<<1)
#define RESET_HEAD_RUN_POST_CHECKOUT_HOOK (1<<2)
#define RESET_HEAD_REFS_ONLY (1<<3)
+#define RESET_ORIG_HEAD (1<<4)
static int reset_head(struct object_id *oid, const char *action,
const char *switch_to_branch, unsigned flags,
unsigned reset_hard = flags & RESET_HEAD_HARD;
unsigned run_hook = flags & RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
unsigned refs_only = flags & RESET_HEAD_REFS_ONLY;
+ unsigned update_orig_head = flags & RESET_ORIG_HEAD;
struct object_id head_oid;
struct tree_desc desc[2] = { { NULL }, { NULL } };
struct lock_file lock = LOCK_INIT;
strbuf_addf(&msg, "%s: ", reflog_action ? reflog_action : "rebase");
prefix_len = msg.len;
- if (!get_oid("ORIG_HEAD", &oid_old_orig))
- old_orig = &oid_old_orig;
- if (!get_oid("HEAD", &oid_orig)) {
- orig = &oid_orig;
- if (!reflog_orig_head) {
- strbuf_addstr(&msg, "updating ORIG_HEAD");
- reflog_orig_head = msg.buf;
- }
- update_ref(reflog_orig_head, "ORIG_HEAD", orig, old_orig, 0,
- UPDATE_REFS_MSG_ON_ERR);
- } else if (old_orig)
- delete_ref(NULL, "ORIG_HEAD", old_orig, 0);
+ if (update_orig_head) {
+ if (!get_oid("ORIG_HEAD", &oid_old_orig))
+ old_orig = &oid_old_orig;
+ if (!get_oid("HEAD", &oid_orig)) {
+ orig = &oid_orig;
+ if (!reflog_orig_head) {
+ strbuf_addstr(&msg, "updating ORIG_HEAD");
+ reflog_orig_head = msg.buf;
+ }
+ update_ref(reflog_orig_head, "ORIG_HEAD", orig,
+ old_orig, 0, UPDATE_REFS_MSG_ON_ERR);
+ } else if (old_orig)
+ delete_ref(NULL, "ORIG_HEAD", old_orig, 0);
+ }
+
if (!reflog_head) {
strbuf_setlen(&msg, prefix_len);
strbuf_addstr(&msg, "updating HEAD");
detach_head ? REF_NO_DEREF : 0,
UPDATE_REFS_MSG_ON_ERR);
else {
- ret = update_ref(reflog_orig_head, switch_to_branch, oid,
+ ret = update_ref(reflog_head, switch_to_branch, oid,
NULL, 0, UPDATE_REFS_MSG_ON_ERR);
if (!ret)
ret = create_symref("HEAD", switch_to_branch,
return 0;
}
+ if (!strcmp(var, "rebase.usebuiltin")) {
+ opts->use_legacy_rebase = !git_config_bool(var, value);
+ return 0;
+ }
+
return git_default_config(var, value, data);
}
ACTION_EDIT_TODO,
ACTION_SHOW_CURRENT_PATCH,
} action = NO_ACTION;
+ static const char *action_names[] = { N_("undefined"),
+ N_("continue"),
+ N_("skip"),
+ N_("abort"),
+ N_("quit"),
+ N_("edit_todo"),
+ N_("show_current_patch"),
+ NULL };
const char *gpg_sign = NULL;
struct string_list exec = STRING_LIST_INIT_NODUP;
const char *rebase_merges = NULL;
PARSE_OPT_NOARG | PARSE_OPT_NONEG,
parse_opt_interactive },
OPT_SET_INT('p', "preserve-merges", &options.type,
- N_("try to recreate merges instead of ignoring "
- "them"), REBASE_PRESERVE_MERGES),
+ N_("(DEPRECATED) try to recreate merges instead of "
+ "ignoring them"), REBASE_PRESERVE_MERGES),
OPT_BOOL(0, "rerere-autoupdate",
&options.allow_rerere_autoupdate,
N_("allow rerere to update index with resolved "
};
int i;
- /*
- * NEEDSWORK: Once the builtin rebase has been tested enough
- * and git-legacy-rebase.sh is retired to contrib/, this preamble
- * can be removed.
- */
-
- if (!use_builtin_rebase()) {
- const char *path = mkpath("%s/git-legacy-rebase",
- git_exec_path());
-
- if (sane_execvp(path, (char **)argv) < 0)
- die_errno(_("could not exec %s"), path);
- else
- BUG("sane_execvp() returned???");
- }
-
if (argc == 2 && !strcmp(argv[1], "-h"))
usage_with_options(builtin_rebase_usage,
builtin_rebase_options);
git_config(rebase_config, &options);
+ if (options.use_legacy_rebase ||
+ !git_env_bool("GIT_TEST_REBASE_USE_BUILTIN", -1))
+ warning(_("the rebase.useBuiltin support has been removed!\n"
+ "See its entry in 'git help config' for details."));
+
strbuf_reset(&buf);
strbuf_addf(&buf, "%s/applying", apply_dir());
if(file_exists(buf.buf))
usage_with_options(builtin_rebase_usage,
builtin_rebase_options);
+ if (options.type == REBASE_PRESERVE_MERGES)
+ warning(_("git rebase --preserve-merges is deprecated. "
+ "Use --rebase-merges instead."));
+
if (action != NO_ACTION && !in_progress)
die(_("No rebase in progress?"));
setenv(GIT_REFLOG_ACTION_ENVIRONMENT, "rebase", 0);
die(_("The --edit-todo action can only be used during "
"interactive rebase."));
+ if (trace2_is_enabled()) {
+ if (is_interactive(&options))
+ trace2_cmd_mode("interactive");
+ else if (exec.nr)
+ trace2_cmd_mode("interactive-exec");
+ else
+ trace2_cmd_mode(action_names[action]);
+ }
+
switch (action) {
case ACTION_CONTINUE: {
struct object_id head;
strbuf_addf(&msg, "%s: checkout %s",
getenv(GIT_REFLOG_ACTION_ENVIRONMENT), options.onto_name);
if (reset_head(&options.onto->object.oid, "checkout", NULL,
- RESET_HEAD_DETACH | RESET_HEAD_RUN_POST_CHECKOUT_HOOK,
+ RESET_HEAD_DETACH | RESET_ORIG_HEAD |
+ RESET_HEAD_RUN_POST_CHECKOUT_HOOK,
NULL, msg.buf))
die(_("Could not detach HEAD"));
strbuf_release(&msg);
strbuf_addf(&msg, "rebase finished: %s onto %s",
options.head_name ? options.head_name : "detached HEAD",
oid_to_hex(&options.onto->object.oid));
- reset_head(NULL, "Fast-forwarded", options.head_name, 0,
- "HEAD", msg.buf);
+ reset_head(NULL, "Fast-forwarded", options.head_name,
+ RESET_HEAD_REFS_ONLY, "HEAD", msg.buf);
strbuf_release(&msg);
ret = !!finish_rebase(&options);
goto cleanup;
proc.argv = argv;
proc.in = -1;
proc.stdout_to_stderr = 1;
+ proc.trace2_hook_name = hook_name;
+
if (feed_state->push_options) {
int i;
for (i = 0; i < feed_state->push_options->nr; i++)
proc.stdout_to_stderr = 1;
proc.err = use_sideband ? -1 : 0;
proc.argv = argv;
+ proc.trace2_hook_name = "update";
code = start_command(&proc);
if (code)
proc.no_stdin = 1;
proc.stdout_to_stderr = 1;
proc.err = use_sideband ? -1 : 0;
+ proc.trace2_hook_name = "post-update";
if (!start_command(&proc)) {
if (use_sideband)
}
}
-static void check_aliased_update(struct command *cmd, struct string_list *list)
+static void check_aliased_update_internal(struct command *cmd,
+ struct string_list *list,
+ const char *dst_name, int flag)
{
- struct strbuf buf = STRBUF_INIT;
- const char *dst_name;
struct string_list_item *item;
struct command *dst_cmd;
- int flag;
-
- strbuf_addf(&buf, "%s%s", get_git_namespace(), cmd->ref_name);
- dst_name = resolve_ref_unsafe(buf.buf, 0, NULL, &flag);
- strbuf_release(&buf);
if (!(flag & REF_ISSYMREF))
return;
"inconsistent aliased update";
}
+static void check_aliased_update(struct command *cmd, struct string_list *list)
+{
+ struct strbuf buf = STRBUF_INIT;
+ const char *dst_name;
+ int flag;
+
+ strbuf_addf(&buf, "%s%s", get_git_namespace(), cmd->ref_name);
+ dst_name = resolve_ref_unsafe(buf.buf, 0, NULL, &flag);
+ check_aliased_update_internal(cmd, list, dst_name, flag);
+ strbuf_release(&buf);
+}
+
static void check_aliased_updates(struct command *commands)
{
struct command *cmd;
data.format = REPLACE_FORMAT_MEDIUM;
else if (!strcmp(format, "long"))
data.format = REPLACE_FORMAT_LONG;
+ /*
+ * Please update _git_replace() in git-completion.bash when
+ * you add a new format.
+ */
else
return error(_("invalid replace format '%s'\n"
"valid formats are 'short', 'medium' and 'long'"),
if (patch_mode) {
if (reset_type != NONE)
die(_("--patch is incompatible with --{hard,mixed,soft}"));
+ trace2_cmd_mode("patch-interactive");
return run_add_interactive(rev, "--patch=reset", &pathspec);
}
if (reset_type == NONE)
reset_type = MIXED; /* by default */
+ if (pathspec.nr)
+ trace2_cmd_mode("path");
+ else
+ trace2_cmd_mode(reset_type_names[reset_type]);
+
if (reset_type != SOFT && (reset_type != MIXED || get_git_work_tree()))
setup_work_tree();
int flags = quiet ? REFRESH_QUIET : REFRESH_IN_PORCELAIN;
if (read_from_tree(&pathspec, &oid, intent_to_add))
return 1;
+ the_index.updated_skipworktree = 1;
if (!quiet && get_git_work_tree()) {
uint64_t t_begin, t_delta_in_ms;
static int finish_object(struct object *obj, const char *name, void *cb_data)
{
struct rev_list_info *info = cb_data;
- if (!has_object_file(&obj->oid)) {
+ if (oid_object_info_extended(the_repository, &obj->oid, NULL, 0) < 0) {
finish_object__ma(obj);
return 1;
}
--- /dev/null
+#define USE_THE_INDEX_COMPATIBILITY_MACROS
+#include "builtin.h"
+#include "config.h"
+#include "parse-options.h"
+#include "refs.h"
+#include "lockfile.h"
+#include "cache-tree.h"
+#include "unpack-trees.h"
+#include "merge-recursive.h"
+#include "argv-array.h"
+#include "run-command.h"
+#include "dir.h"
+#include "rerere.h"
+#include "revision.h"
+#include "log-tree.h"
+#include "diffcore.h"
+#include "exec-cmd.h"
+
+#define INCLUDE_ALL_FILES 2
+
+static const char * const git_stash_usage[] = {
+ N_("git stash list [<options>]"),
+ N_("git stash show [<options>] [<stash>]"),
+ N_("git stash drop [-q|--quiet] [<stash>]"),
+ N_("git stash ( pop | apply ) [--index] [-q|--quiet] [<stash>]"),
+ N_("git stash branch <branchname> [<stash>]"),
+ N_("git stash clear"),
+ N_("git stash [push [-p|--patch] [-k|--[no-]keep-index] [-q|--quiet]\n"
+ " [-u|--include-untracked] [-a|--all] [-m|--message <message>]\n"
+ " [--] [<pathspec>...]]"),
+ N_("git stash save [-p|--patch] [-k|--[no-]keep-index] [-q|--quiet]\n"
+ " [-u|--include-untracked] [-a|--all] [<message>]"),
+ NULL
+};
+
+static const char * const git_stash_list_usage[] = {
+ N_("git stash list [<options>]"),
+ NULL
+};
+
+static const char * const git_stash_show_usage[] = {
+ N_("git stash show [<options>] [<stash>]"),
+ NULL
+};
+
+static const char * const git_stash_drop_usage[] = {
+ N_("git stash drop [-q|--quiet] [<stash>]"),
+ NULL
+};
+
+static const char * const git_stash_pop_usage[] = {
+ N_("git stash pop [--index] [-q|--quiet] [<stash>]"),
+ NULL
+};
+
+static const char * const git_stash_apply_usage[] = {
+ N_("git stash apply [--index] [-q|--quiet] [<stash>]"),
+ NULL
+};
+
+static const char * const git_stash_branch_usage[] = {
+ N_("git stash branch <branchname> [<stash>]"),
+ NULL
+};
+
+static const char * const git_stash_clear_usage[] = {
+ N_("git stash clear"),
+ NULL
+};
+
+static const char * const git_stash_store_usage[] = {
+ N_("git stash store [-m|--message <message>] [-q|--quiet] <commit>"),
+ NULL
+};
+
+static const char * const git_stash_push_usage[] = {
+ N_("git stash [push [-p|--patch] [-k|--[no-]keep-index] [-q|--quiet]\n"
+ " [-u|--include-untracked] [-a|--all] [-m|--message <message>]\n"
+ " [--] [<pathspec>...]]"),
+ NULL
+};
+
+static const char * const git_stash_save_usage[] = {
+ N_("git stash save [-p|--patch] [-k|--[no-]keep-index] [-q|--quiet]\n"
+ " [-u|--include-untracked] [-a|--all] [<message>]"),
+ NULL
+};
+
+static const char *ref_stash = "refs/stash";
+static struct strbuf stash_index_path = STRBUF_INIT;
+
+/*
+ * w_commit is set to the commit containing the working tree
+ * b_commit is set to the base commit
+ * i_commit is set to the commit containing the index tree
+ * u_commit is set to the commit containing the untracked files tree
+ * w_tree is set to the working tree
+ * b_tree is set to the base tree
+ * i_tree is set to the index tree
+ * u_tree is set to the untracked files tree
+ */
+struct stash_info {
+ struct object_id w_commit;
+ struct object_id b_commit;
+ struct object_id i_commit;
+ struct object_id u_commit;
+ struct object_id w_tree;
+ struct object_id b_tree;
+ struct object_id i_tree;
+ struct object_id u_tree;
+ struct strbuf revision;
+ int is_stash_ref;
+ int has_u;
+};
+
+static void free_stash_info(struct stash_info *info)
+{
+ strbuf_release(&info->revision);
+}
+
+static void assert_stash_like(struct stash_info *info, const char *revision)
+{
+ if (get_oidf(&info->b_commit, "%s^1", revision) ||
+ get_oidf(&info->w_tree, "%s:", revision) ||
+ get_oidf(&info->b_tree, "%s^1:", revision) ||
+ get_oidf(&info->i_tree, "%s^2:", revision))
+ die(_("'%s' is not a stash-like commit"), revision);
+}
+
+static int get_stash_info(struct stash_info *info, int argc, const char **argv)
+{
+ int ret;
+ char *end_of_rev;
+ char *expanded_ref;
+ const char *revision;
+ const char *commit = NULL;
+ struct object_id dummy;
+ struct strbuf symbolic = STRBUF_INIT;
+
+ if (argc > 1) {
+ int i;
+ struct strbuf refs_msg = STRBUF_INIT;
+
+ for (i = 0; i < argc; i++)
+ strbuf_addf(&refs_msg, " '%s'", argv[i]);
+
+ fprintf_ln(stderr, _("Too many revisions specified:%s"),
+ refs_msg.buf);
+ strbuf_release(&refs_msg);
+
+ return -1;
+ }
+
+ if (argc == 1)
+ commit = argv[0];
+
+ strbuf_init(&info->revision, 0);
+ if (!commit) {
+ if (!ref_exists(ref_stash)) {
+ free_stash_info(info);
+ fprintf_ln(stderr, _("No stash entries found."));
+ return -1;
+ }
+
+ strbuf_addf(&info->revision, "%s@{0}", ref_stash);
+ } else if (strspn(commit, "0123456789") == strlen(commit)) {
+ strbuf_addf(&info->revision, "%s@{%s}", ref_stash, commit);
+ } else {
+ strbuf_addstr(&info->revision, commit);
+ }
+
+ revision = info->revision.buf;
+
+ if (get_oid(revision, &info->w_commit)) {
+ error(_("%s is not a valid reference"), revision);
+ free_stash_info(info);
+ return -1;
+ }
+
+ assert_stash_like(info, revision);
+
+ info->has_u = !get_oidf(&info->u_tree, "%s^3:", revision);
+
+ end_of_rev = strchrnul(revision, '@');
+ strbuf_add(&symbolic, revision, end_of_rev - revision);
+
+ ret = dwim_ref(symbolic.buf, symbolic.len, &dummy, &expanded_ref);
+ strbuf_release(&symbolic);
+ switch (ret) {
+ case 0: /* Not found, but valid ref */
+ info->is_stash_ref = 0;
+ break;
+ case 1:
+ info->is_stash_ref = !strcmp(expanded_ref, ref_stash);
+ break;
+ default: /* Invalid or ambiguous */
+ free_stash_info(info);
+ }
+
+ free(expanded_ref);
+ return !(ret == 0 || ret == 1);
+}
+
+static int do_clear_stash(void)
+{
+ struct object_id obj;
+ if (get_oid(ref_stash, &obj))
+ return 0;
+
+ return delete_ref(NULL, ref_stash, &obj, 0);
+}
+
+static int clear_stash(int argc, const char **argv, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_clear_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (argc)
+ return error(_("git stash clear with parameters is "
+ "unimplemented"));
+
+ return do_clear_stash();
+}
+
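+/*
+ * Read the given tree into the index using a one-way unpack_trees()
+ * merge; `update` controls whether the working tree is updated and
+ * `reset` whether existing changes may be overwritten.
+ */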
+static int reset_tree(struct object_id *i_tree, int update, int reset)
+{
+ int nr_trees = 1;
+ struct unpack_trees_options opts;
+ struct tree_desc t[MAX_UNPACK_TREES];
+ struct tree *tree;
+ struct lock_file lock_file = LOCK_INIT;
+
+ read_cache_preload(NULL);
+ if (refresh_cache(REFRESH_QUIET))
+ return -1;
+
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
+
+ memset(&opts, 0, sizeof(opts));
+
+ tree = parse_tree_indirect(i_tree);
+ if (parse_tree(tree))
+ return -1;
+
+ init_tree_desc(t, tree->buffer, tree->size);
+
+ opts.head_idx = 1;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
+ opts.merge = 1;
+ opts.reset = reset;
+ opts.update = update;
+ opts.fn = oneway_merge;
+
+ if (unpack_trees(nr_trees, t, &opts))
+ return -1;
+
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
+ return error(_("unable to write new index file"));
+
+ return 0;
+}
+
+static int diff_tree_binary(struct strbuf *out, struct object_id *w_commit)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ const char *w_commit_hex = oid_to_hex(w_commit);
+
+ /*
+ * Diff-tree would not be very hard to replace with a native function,
+ * however it should be done together with apply_cached.
+ */
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "diff-tree", "--binary", NULL);
+ argv_array_pushf(&cp.args, "%s^2^..%s^2", w_commit_hex, w_commit_hex);
+
+ return pipe_command(&cp, NULL, 0, out, 0, NULL, 0);
+}
+
+static int apply_cached(struct strbuf *out)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+ * Apply currently only reads either from stdin or a file, thus
+ * apply_all_patches would have to be updated to optionally take a
+ * buffer.
+ */
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "apply", "--cached", NULL);
+ return pipe_command(&cp, out->buf, out->len, NULL, 0, NULL, 0);
+}
+
+static int reset_head(void)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+ * Reset is overall quite simple, however there is no current public
+ * API for resetting.
+ */
+ cp.git_cmd = 1;
+ argv_array_push(&cp.args, "reset");
+
+ return run_command(&cp);
+}
+
+static void add_diff_to_buf(struct diff_queue_struct *q,
+ struct diff_options *options,
+ void *data)
+{
+ int i;
+
+ for (i = 0; i < q->nr; i++) {
+ strbuf_addstr(data, q->queue[i]->one->path);
+
+ /* NUL-terminate: will be fed to update-index -z */
+ strbuf_addch(data, '\0');
+ }
+}
+
+static int get_newly_staged(struct strbuf *out, struct object_id *c_tree)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ const char *c_tree_hex = oid_to_hex(c_tree);
+
+ /*
+ * diff-index is very similar to diff-tree above, and should be
+ * converted together with update_index.
+ */
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "diff-index", "--cached", "--name-only",
+ "--diff-filter=A", NULL);
+ argv_array_push(&cp.args, c_tree_hex);
+ return pipe_command(&cp, NULL, 0, out, 0, NULL, 0);
+}
+
+static int update_index(struct strbuf *out)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+ * Update-index is very complicated and may need to have a public
+ * function exposed in order to remove this forking.
+ */
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "update-index", "--add", "--stdin", NULL);
+ return pipe_command(&cp, out->buf, out->len, NULL, 0, NULL, 0);
+}
+
+static int restore_untracked(struct object_id *u_tree)
+{
+ int res;
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+ * We need to restore files from a given index, but without
+ * affecting the current index, so we use GIT_INDEX_FILE with
+ * run_command to fork processes that will not interfere.
+ */
+ cp.git_cmd = 1;
+ argv_array_push(&cp.args, "read-tree");
+ argv_array_push(&cp.args, oid_to_hex(u_tree));
+ argv_array_pushf(&cp.env_array, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+ if (run_command(&cp)) {
+ remove_path(stash_index_path.buf);
+ return -1;
+ }
+
+ child_process_init(&cp);
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "checkout-index", "--all", NULL);
+ argv_array_pushf(&cp.env_array, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+
+ res = run_command(&cp);
+ remove_path(stash_index_path.buf);
+ return res;
+}
+
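+/*
+ * Apply a stash: optionally re-create the stashed index (--index),
+ * restore any saved untracked files, then merge the stashed
+ * working-tree state into the current tree with merge-recursive.
+ */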
+static int do_apply_stash(const char *prefix, struct stash_info *info,
+ int index, int quiet)
+{
+ int ret;
+ int has_index = index;
+ struct merge_options o;
+ struct object_id c_tree;
+ struct object_id index_tree;
+ struct commit *result;
+ const struct object_id *bases[1];
+
+ read_cache_preload(NULL);
+ if (refresh_cache(REFRESH_QUIET))
+ return -1;
+
+ if (write_cache_as_tree(&c_tree, 0, NULL))
+ return error(_("cannot apply a stash in the middle of a merge"));
+
+ if (index) {
+ if (oideq(&info->b_tree, &info->i_tree) ||
+ oideq(&c_tree, &info->i_tree)) {
+ has_index = 0;
+ } else {
+ struct strbuf out = STRBUF_INIT;
+
+ if (diff_tree_binary(&out, &info->w_commit)) {
+ strbuf_release(&out);
+ return error(_("could not generate diff %s^!."),
+ oid_to_hex(&info->w_commit));
+ }
+
+ ret = apply_cached(&out);
+ strbuf_release(&out);
+ if (ret)
+ return error(_("conflicts in index."
+ "Try without --index."));
+
+ discard_cache();
+ read_cache();
+ if (write_cache_as_tree(&index_tree, 0, NULL))
+ return error(_("could not save index tree"));
+
+ reset_head();
+ }
+ }
+
+ if (info->has_u && restore_untracked(&info->u_tree))
+ return error(_("could not restore untracked files from stash"));
+
+ init_merge_options(&o, the_repository);
+
+ o.branch1 = "Updated upstream";
+ o.branch2 = "Stashed changes";
+
+ if (oideq(&info->b_tree, &c_tree))
+ o.branch1 = "Version stash was based on";
+
+ if (quiet)
+ o.verbosity = 0;
+
+ if (o.verbosity >= 3)
+ printf_ln(_("Merging %s with %s"), o.branch1, o.branch2);
+
+ bases[0] = &info->b_tree;
+
+ ret = merge_recursive_generic(&o, &c_tree, &info->w_tree, 1, bases,
+ &result);
+ if (ret) {
+ rerere(0);
+
+ if (index)
+ fprintf_ln(stderr, _("Index was not unstashed."));
+
+ return ret;
+ }
+
+ if (has_index) {
+ if (reset_tree(&index_tree, 0, 0))
+ return -1;
+ } else {
+ struct strbuf out = STRBUF_INIT;
+
+ if (get_newly_staged(&out, &c_tree)) {
+ strbuf_release(&out);
+ return -1;
+ }
+
+ if (reset_tree(&c_tree, 0, 1)) {
+ strbuf_release(&out);
+ return -1;
+ }
+
+ ret = update_index(&out);
+ strbuf_release(&out);
+ if (ret)
+ return -1;
+
+ discard_cache();
+ }
+
+ if (quiet) {
+ if (refresh_cache(REFRESH_QUIET))
+ warning("could not refresh index");
+ } else {
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+ * Status is quite simple and could be replaced with calls to
+ * wt_status in the future, but it adds complexities which may
+ * require more tests.
+ */
+ cp.git_cmd = 1;
+ cp.dir = prefix;
+ argv_array_push(&cp.args, "status");
+ run_command(&cp);
+ }
+
+ return 0;
+}
+
+static int apply_stash(int argc, const char **argv, const char *prefix)
+{
+ int ret;
+ int quiet = 0;
+ int index = 0;
+ struct stash_info info;
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("be quiet, only report errors")),
+ OPT_BOOL(0, "index", &index,
+ N_("attempt to recreate the index")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_apply_usage, 0);
+
+ if (get_stash_info(&info, argc, argv))
+ return -1;
+
+ ret = do_apply_stash(prefix, &info, index, quiet);
+ free_stash_info(&info);
+ return ret;
+}
+
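+/*
+ * Drop a single stash entry by deleting its reflog entry; if that was
+ * the last entry, refs/stash itself is cleared.
+ */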
+static int do_drop_stash(struct stash_info *info, int quiet)
+{
+ int ret;
+ struct child_process cp_reflog = CHILD_PROCESS_INIT;
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+ * reflog does not provide a simple function for deleting refs. One will
+ * need to be added to avoid implementing too much reflog code here.
+ */
+
+ cp_reflog.git_cmd = 1;
+ argv_array_pushl(&cp_reflog.args, "reflog", "delete", "--updateref",
+ "--rewrite", NULL);
+ argv_array_push(&cp_reflog.args, info->revision.buf);
+ ret = run_command(&cp_reflog);
+ if (!ret) {
+ if (!quiet)
+ printf_ln(_("Dropped %s (%s)"), info->revision.buf,
+ oid_to_hex(&info->w_commit));
+ } else {
+ return error(_("%s: Could not drop stash entry"),
+ info->revision.buf);
+ }
+
+ /*
+ * This could easily be replaced by get_oid, but currently it will throw
+ * a fatal error when a reflog is empty, which we can not recover from.
+ */
+ cp.git_cmd = 1;
+ /* Even though --quiet is specified, rev-parse still outputs the hash */
+ cp.no_stdout = 1;
+ argv_array_pushl(&cp.args, "rev-parse", "--verify", "--quiet", NULL);
+ argv_array_pushf(&cp.args, "%s@{0}", ref_stash);
+ ret = run_command(&cp);
+
+ /* do_clear_stash if we just dropped the last stash entry */
+ if (ret)
+ do_clear_stash();
+
+ return 0;
+}
+
+static void assert_stash_ref(struct stash_info *info)
+{
+ if (!info->is_stash_ref) {
+ error(_("'%s' is not a stash reference"), info->revision.buf);
+ free_stash_info(info);
+ exit(1);
+ }
+}
+
+static int drop_stash(int argc, const char **argv, const char *prefix)
+{
+ int ret;
+ int quiet = 0;
+ struct stash_info info;
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("be quiet, only report errors")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_drop_usage, 0);
+
+ if (get_stash_info(&info, argc, argv))
+ return -1;
+
+ assert_stash_ref(&info);
+
+ ret = do_drop_stash(&info, quiet);
+ free_stash_info(&info);
+ return ret;
+}
+
+static int pop_stash(int argc, const char **argv, const char *prefix)
+{
+ int ret;
+ int index = 0;
+ int quiet = 0;
+ struct stash_info info;
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("be quiet, only report errors")),
+ OPT_BOOL(0, "index", &index,
+ N_("attempt to recreate the index")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_pop_usage, 0);
+
+ if (get_stash_info(&info, argc, argv))
+ return -1;
+
+ assert_stash_ref(&info);
+ if ((ret = do_apply_stash(prefix, &info, index, quiet)))
+ printf_ln(_("The stash entry is kept in case "
+ "you need it again."));
+ else
+ ret = do_drop_stash(&info, quiet);
+
+ free_stash_info(&info);
+ return ret;
+}
+
+static int branch_stash(int argc, const char **argv, const char *prefix)
+{
+ int ret;
+ const char *branch = NULL;
+ struct stash_info info;
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct option options[] = {
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_branch_usage, 0);
+
+ if (!argc) {
+ fprintf_ln(stderr, _("No branch name specified"));
+ return -1;
+ }
+
+ branch = argv[0];
+
+ if (get_stash_info(&info, argc - 1, argv + 1))
+ return -1;
+
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "checkout", "-b", NULL);
+ argv_array_push(&cp.args, branch);
+ argv_array_push(&cp.args, oid_to_hex(&info.b_commit));
+ ret = run_command(&cp);
+ if (!ret)
+ ret = do_apply_stash(prefix, &info, 1, 0);
+ if (!ret && info.is_stash_ref)
+ ret = do_drop_stash(&info, 0);
+
+ free_stash_info(&info);
+
+ return ret;
+}
+
+static int list_stash(int argc, const char **argv, const char *prefix)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct option options[] = {
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_list_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+
+ if (!ref_exists(ref_stash))
+ return 0;
+
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "log", "--format=%gd: %gs", "-g",
+ "--first-parent", "-m", NULL);
+ argv_array_pushv(&cp.args, argv);
+ argv_array_push(&cp.args, ref_stash);
+ argv_array_push(&cp.args, "--");
+ return run_command(&cp);
+}
+
+static int show_stat = 1;
+static int show_patch;
+
+static int git_stash_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "stash.showstat")) {
+ show_stat = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "stash.showpatch")) {
+ show_patch = git_config_bool(var, value);
+ return 0;
+ }
+ return git_default_config(var, value, cb);
+}
+
+static int show_stash(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ int opts = 0;
+ int ret = 0;
+ struct stash_info info;
+ struct rev_info rev;
+ struct argv_array stash_args = ARGV_ARRAY_INIT;
+ struct option options[] = {
+ OPT_END()
+ };
+
+ init_diff_ui_defaults();
+ git_config(git_diff_ui_config, NULL);
+ init_revisions(&rev, prefix);
+
+ for (i = 1; i < argc; i++) {
+ if (argv[i][0] != '-')
+ argv_array_push(&stash_args, argv[i]);
+ else
+ opts++;
+ }
+
+ ret = get_stash_info(&info, stash_args.argc, stash_args.argv);
+ argv_array_clear(&stash_args);
+ if (ret)
+ return -1;
+
+ /*
+ * The config settings are applied only if no options were passed.
+ */
+ if (!opts) {
+ git_config(git_stash_config, NULL);
+ if (show_stat)
+ rev.diffopt.output_format = DIFF_FORMAT_DIFFSTAT;
+
+ if (show_patch)
+ rev.diffopt.output_format |= DIFF_FORMAT_PATCH;
+
+ if (!show_stat && !show_patch) {
+ free_stash_info(&info);
+ return 0;
+ }
+ }
+
+ argc = setup_revisions(argc, argv, &rev, NULL);
+ if (argc > 1) {
+ free_stash_info(&info);
+ usage_with_options(git_stash_show_usage, options);
+ }
+ if (!rev.diffopt.output_format) {
+ rev.diffopt.output_format = DIFF_FORMAT_PATCH;
+ diff_setup_done(&rev.diffopt);
+ }
+
+ rev.diffopt.flags.recursive = 1;
+ setup_diff_pager(&rev.diffopt);
+ diff_tree_oid(&info.b_commit, &info.w_commit, "", &rev.diffopt);
+ log_tree_diff_flush(&rev);
+
+ free_stash_info(&info);
+ return diff_result_code(&rev.diffopt, 0);
+}
+
+static int do_store_stash(const struct object_id *w_commit, const char *stash_msg,
+ int quiet)
+{
+ if (!stash_msg)
+ stash_msg = "Created via \"git stash store\".";
+
+ if (update_ref(stash_msg, ref_stash, w_commit, NULL,
+ REF_FORCE_CREATE_REFLOG,
+ quiet ? UPDATE_REFS_QUIET_ON_ERR :
+ UPDATE_REFS_MSG_ON_ERR)) {
+ if (!quiet) {
+ fprintf_ln(stderr, _("Cannot update %s with %s"),
+ ref_stash, oid_to_hex(w_commit));
+ }
+ return -1;
+ }
+
+ return 0;
+}
+
+static int store_stash(int argc, const char **argv, const char *prefix)
+{
+ int quiet = 0;
+ const char *stash_msg = NULL;
+ struct object_id obj;
+ struct object_context dummy;
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("be quiet")),
+ OPT_STRING('m', "message", &stash_msg, "message",
+ N_("stash message")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_store_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+
+ if (argc != 1) {
+ if (!quiet)
+ fprintf_ln(stderr, _("\"git stash store\" requires one "
+ "<commit> argument"));
+ return -1;
+ }
+
+ if (get_oid_with_context(the_repository,
+ argv[0], quiet ? GET_OID_QUIETLY : 0, &obj,
+ &dummy)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot update %s with %s"),
+ ref_stash, argv[0]);
+ return -1;
+ }
+
+ return do_store_stash(&obj, stash_msg, quiet);
+}
+
+static void add_pathspecs(struct argv_array *args,
+ const struct pathspec *ps) {
+ int i;
+
+ for (i = 0; i < ps->nr; i++)
+ argv_array_push(args, ps->items[i].original);
+}
+
+/*
+ * `untracked_files` will be filled with the names of untracked files.
+ * The return value is:
+ *
+ * = 0 if there are not any untracked files
+ * > 0 if there are untracked files
+ */
+static int get_untracked_files(const struct pathspec *ps, int include_untracked,
+ struct strbuf *untracked_files)
+{
+ int i;
+ int max_len;
+ int found = 0;
+ char *seen;
+ struct dir_struct dir;
+
+ memset(&dir, 0, sizeof(dir));
+ if (include_untracked != INCLUDE_ALL_FILES)
+ setup_standard_excludes(&dir);
+
+ seen = xcalloc(ps->nr, 1);
+
+ max_len = fill_directory(&dir, the_repository->index, ps);
+ for (i = 0; i < dir.nr; i++) {
+ struct dir_entry *ent = dir.entries[i];
+ if (dir_path_match(&the_index, ent, ps, max_len, seen)) {
+ found++;
+ strbuf_addstr(untracked_files, ent->name);
+ /* NUL-terminate: will be fed to update-index -z */
+ strbuf_addch(untracked_files, '\0');
+ }
+ free(ent);
+ }
+
+ free(seen);
+ free(dir.entries);
+ free(dir.ignored);
+ clear_directory(&dir);
+ return found;
+}
+
+/*
+ * The return value of `check_changes_tracked_files()` can be:
+ *
+ * < 0 if there was an error
+ * = 0 if there are no changes.
+ * > 0 if there are changes.
+ */
+static int check_changes_tracked_files(const struct pathspec *ps)
+{
+ int result;
+ struct rev_info rev;
+ struct object_id dummy;
+ int ret = 0;
+
+ /* No initial commit. */
+ if (get_oid("HEAD", &dummy))
+ return -1;
+
+ if (read_cache() < 0)
+ return -1;
+
+ init_revisions(&rev, NULL);
+ copy_pathspec(&rev.prune_data, ps);
+
+ rev.diffopt.flags.quick = 1;
+ rev.diffopt.flags.ignore_submodules = 1;
+ rev.abbrev = 0;
+
+ add_head_to_pending(&rev);
+ diff_setup_done(&rev.diffopt);
+
+ result = run_diff_index(&rev, 1);
+ if (diff_result_code(&rev.diffopt, result)) {
+ ret = 1;
+ goto done;
+ }
+
+ object_array_clear(&rev.pending);
+ result = run_diff_files(&rev, 0);
+ if (diff_result_code(&rev.diffopt, result)) {
+ ret = 1;
+ goto done;
+ }
+
+done:
+ clear_pathspec(&rev.prune_data);
+ return ret;
+}
+
+/*
+ * The function will fill `untracked_files` with the names of untracked files.
+ * It will return 1 if there were any changes and 0 if there were not.
+ */
+static int check_changes(const struct pathspec *ps, int include_untracked,
+ struct strbuf *untracked_files)
+{
+ int ret = 0;
+ if (check_changes_tracked_files(ps))
+ ret = 1;
+
+ if (include_untracked && get_untracked_files(ps, include_untracked,
+ untracked_files))
+ ret = 1;
+
+ return ret;
+}
+
+static int save_untracked_files(struct stash_info *info, struct strbuf *msg,
+ struct strbuf files)
+{
+ int ret = 0;
+ struct strbuf untracked_msg = STRBUF_INIT;
+ struct child_process cp_upd_index = CHILD_PROCESS_INIT;
+ struct index_state istate = { NULL };
+
+ cp_upd_index.git_cmd = 1;
+ argv_array_pushl(&cp_upd_index.args, "update-index", "-z", "--add",
+ "--remove", "--stdin", NULL);
+ argv_array_pushf(&cp_upd_index.env_array, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+
+ strbuf_addf(&untracked_msg, "untracked files on %s\n", msg->buf);
+ if (pipe_command(&cp_upd_index, files.buf, files.len, NULL, 0,
+ NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (write_index_as_tree(&info->u_tree, &istate, stash_index_path.buf, 0,
+ NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (commit_tree(untracked_msg.buf, untracked_msg.len,
+ &info->u_tree, NULL, &info->u_commit, NULL, NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+done:
+ discard_index(&istate);
+ strbuf_release(&untracked_msg);
+ remove_path(stash_index_path.buf);
+ return ret;
+}
+
+static int stash_patch(struct stash_info *info, const struct pathspec *ps,
+ struct strbuf *out_patch, int quiet)
+{
+ int ret = 0;
+ struct child_process cp_read_tree = CHILD_PROCESS_INIT;
+ struct child_process cp_add_i = CHILD_PROCESS_INIT;
+ struct child_process cp_diff_tree = CHILD_PROCESS_INIT;
+ struct index_state istate = { NULL };
+
+ remove_path(stash_index_path.buf);
+
+ cp_read_tree.git_cmd = 1;
+ argv_array_pushl(&cp_read_tree.args, "read-tree", "HEAD", NULL);
+ argv_array_pushf(&cp_read_tree.env_array, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+ if (run_command(&cp_read_tree)) {
+ ret = -1;
+ goto done;
+ }
+
+ /* Find out what the user wants. */
+ cp_add_i.git_cmd = 1;
+ argv_array_pushl(&cp_add_i.args, "add--interactive", "--patch=stash",
+ "--", NULL);
+ add_pathspecs(&cp_add_i.args, ps);
+ argv_array_pushf(&cp_add_i.env_array, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+ if (run_command(&cp_add_i)) {
+ ret = -1;
+ goto done;
+ }
+
+ /* State of the working tree. */
+ if (write_index_as_tree(&info->w_tree, &istate, stash_index_path.buf, 0,
+ NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_diff_tree.git_cmd = 1;
+ argv_array_pushl(&cp_diff_tree.args, "diff-tree", "-p", "HEAD",
+ oid_to_hex(&info->w_tree), "--", NULL);
+ if (pipe_command(&cp_diff_tree, NULL, 0, out_patch, 0, NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (!out_patch->len) {
+ if (!quiet)
+ fprintf_ln(stderr, _("No changes selected"));
+ ret = 1;
+ }
+
+done:
+ discard_index(&istate);
+ remove_path(stash_index_path.buf);
+ return ret;
+}
+
+static int stash_working_tree(struct stash_info *info, const struct pathspec *ps)
+{
+ int ret = 0;
+ struct rev_info rev;
+ struct child_process cp_upd_index = CHILD_PROCESS_INIT;
+ struct strbuf diff_output = STRBUF_INIT;
+ struct index_state istate = { NULL };
+
+ init_revisions(&rev, NULL);
+ copy_pathspec(&rev.prune_data, ps);
+
+ set_alternate_index_output(stash_index_path.buf);
+ if (reset_tree(&info->i_tree, 0, 0)) {
+ ret = -1;
+ goto done;
+ }
+ set_alternate_index_output(NULL);
+
+ rev.diffopt.output_format = DIFF_FORMAT_CALLBACK;
+ rev.diffopt.format_callback = add_diff_to_buf;
+ rev.diffopt.format_callback_data = &diff_output;
+
+ if (read_cache_preload(&rev.diffopt.pathspec) < 0) {
+ ret = -1;
+ goto done;
+ }
+
+ add_pending_object(&rev, parse_object(the_repository, &info->b_commit),
+ "");
+ if (run_diff_index(&rev, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_upd_index.git_cmd = 1;
+ argv_array_pushl(&cp_upd_index.args, "update-index", "-z", "--add",
+ "--remove", "--stdin", NULL);
+ argv_array_pushf(&cp_upd_index.env_array, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+
+ if (pipe_command(&cp_upd_index, diff_output.buf, diff_output.len,
+ NULL, 0, NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (write_index_as_tree(&info->w_tree, &istate, stash_index_path.buf, 0,
+ NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+done:
+ discard_index(&istate);
+ UNLEAK(rev);
+ object_array_clear(&rev.pending);
+ clear_pathspec(&rev.prune_data);
+ strbuf_release(&diff_output);
+ remove_path(stash_index_path.buf);
+ return ret;
+}
+
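+/*
+ * Build the stash commits: an "index on ..." commit for the staged
+ * state, an optional commit for untracked files, and the final
+ * working-tree commit with those as parents.  refs/stash itself is
+ * not updated here.
+ */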
+static int do_create_stash(const struct pathspec *ps, struct strbuf *stash_msg_buf,
+ int include_untracked, int patch_mode,
+ struct stash_info *info, struct strbuf *patch,
+ int quiet)
+{
+ int ret = 0;
+ int flags = 0;
+ int untracked_commit_option = 0;
+ const char *head_short_sha1 = NULL;
+ const char *branch_ref = NULL;
+ const char *branch_name = "(no branch)";
+ struct commit *head_commit = NULL;
+ struct commit_list *parents = NULL;
+ struct strbuf msg = STRBUF_INIT;
+ struct strbuf commit_tree_label = STRBUF_INIT;
+ struct strbuf untracked_files = STRBUF_INIT;
+
+ prepare_fallback_ident("git stash", "git@stash");
+
+ read_cache_preload(NULL);
+ refresh_cache(REFRESH_QUIET);
+
+ if (get_oid("HEAD", &info->b_commit)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("You do not have "
+ "the initial commit yet"));
+ ret = -1;
+ goto done;
+ } else {
+ head_commit = lookup_commit(the_repository, &info->b_commit);
+ }
+
+ if (!check_changes(ps, include_untracked, &untracked_files)) {
+ ret = 1;
+ goto done;
+ }
+
+ branch_ref = resolve_ref_unsafe("HEAD", 0, NULL, &flags);
+ if (flags & REF_ISSYMREF)
+ branch_name = strrchr(branch_ref, '/') + 1;
+ head_short_sha1 = find_unique_abbrev(&head_commit->object.oid,
+ DEFAULT_ABBREV);
+ strbuf_addf(&msg, "%s: %s ", branch_name, head_short_sha1);
+ pp_commit_easy(CMIT_FMT_ONELINE, head_commit, &msg);
+
+ strbuf_addf(&commit_tree_label, "index on %s\n", msg.buf);
+ commit_list_insert(head_commit, &parents);
+ if (write_cache_as_tree(&info->i_tree, 0, NULL) ||
+ commit_tree(commit_tree_label.buf, commit_tree_label.len,
+ &info->i_tree, parents, &info->i_commit, NULL, NULL)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save the current "
+ "index state"));
+ ret = -1;
+ goto done;
+ }
+
+ if (include_untracked) {
+ if (save_untracked_files(info, &msg, untracked_files)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save "
+ "the untracked files"));
+ ret = -1;
+ goto done;
+ }
+ untracked_commit_option = 1;
+ }
+ if (patch_mode) {
+ ret = stash_patch(info, ps, patch, quiet);
+ if (ret < 0) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save the current "
+ "worktree state"));
+ goto done;
+ } else if (ret > 0) {
+ goto done;
+ }
+ } else {
+ if (stash_working_tree(info, ps)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save the current "
+ "worktree state"));
+ ret = -1;
+ goto done;
+ }
+ }
+
+ if (!stash_msg_buf->len)
+ strbuf_addf(stash_msg_buf, "WIP on %s", msg.buf);
+ else
+ strbuf_insertf(stash_msg_buf, 0, "On %s: ", branch_name);
+
+ /*
+ * `parents` will be empty after calling `commit_tree()`, so there is
+ * no need to call `free_commit_list()`.
+ */
+ parents = NULL;
+ if (untracked_commit_option)
+ commit_list_insert(lookup_commit(the_repository,
+ &info->u_commit),
+ &parents);
+ commit_list_insert(lookup_commit(the_repository, &info->i_commit),
+ &parents);
+ commit_list_insert(head_commit, &parents);
+
+ if (commit_tree(stash_msg_buf->buf, stash_msg_buf->len, &info->w_tree,
+ parents, &info->w_commit, NULL, NULL)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot record "
+ "working tree state"));
+ ret = -1;
+ goto done;
+ }
+
+done:
+ strbuf_release(&commit_tree_label);
+ strbuf_release(&msg);
+ strbuf_release(&untracked_files);
+ return ret;
+}
+
+static int create_stash(int argc, const char **argv, const char *prefix)
+{
+ int ret = 0;
+ struct strbuf stash_msg_buf = STRBUF_INIT;
+ struct stash_info info;
+ struct pathspec ps;
+
+ /* Starting with argv[1], since argv[0] is "create" */
+ strbuf_join_argv(&stash_msg_buf, argc - 1, ++argv, ' ');
+
+ memset(&ps, 0, sizeof(ps));
+ if (!check_changes_tracked_files(&ps))
+ return 0;
+
+ ret = do_create_stash(&ps, &stash_msg_buf, 0, 0, &info,
+ NULL, 0);
+ if (!ret)
+ printf_ln("%s", oid_to_hex(&info.w_commit));
+
+ strbuf_release(&stash_msg_buf);
+ return ret;
+}
+
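+/*
+ * Create and store a stash entry, then remove the stashed changes from
+ * the working tree and index (honouring --keep-index, --patch and
+ * --include-untracked/--all).
+ */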
+static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int quiet,
+ int keep_index, int patch_mode, int include_untracked)
+{
+ int ret = 0;
+ struct stash_info info;
+ struct strbuf patch = STRBUF_INIT;
+ struct strbuf stash_msg_buf = STRBUF_INIT;
+ struct strbuf untracked_files = STRBUF_INIT;
+
+ if (patch_mode && keep_index == -1)
+ keep_index = 1;
+
+ if (patch_mode && include_untracked) {
+ fprintf_ln(stderr, _("Can't use --patch and --include-untracked"
+ " or --all at the same time"));
+ ret = -1;
+ goto done;
+ }
+
+ read_cache_preload(NULL);
+ if (!include_untracked && ps->nr) {
+ int i;
+ char *ps_matched = xcalloc(ps->nr, 1);
+
+ for (i = 0; i < active_nr; i++)
+ ce_path_match(&the_index, active_cache[i], ps,
+ ps_matched);
+
+ if (report_path_error(ps_matched, ps)) {
+ fprintf_ln(stderr, _("Did you forget to 'git add'?"));
+ ret = -1;
+ free(ps_matched);
+ goto done;
+ }
+ free(ps_matched);
+ }
+
+ if (refresh_cache(REFRESH_QUIET)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (!check_changes(ps, include_untracked, &untracked_files)) {
+ if (!quiet)
+ printf_ln(_("No local changes to save"));
+ goto done;
+ }
+
+ if (!reflog_exists(ref_stash) && do_clear_stash()) {
+ ret = -1;
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot initialize stash"));
+ goto done;
+ }
+
+ if (stash_msg)
+ strbuf_addstr(&stash_msg_buf, stash_msg);
+ if (do_create_stash(ps, &stash_msg_buf, include_untracked, patch_mode,
+ &info, &patch, quiet)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (do_store_stash(&info.w_commit, stash_msg_buf.buf, 1)) {
+ ret = -1;
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save the current status"));
+ goto done;
+ }
+
+ if (!quiet)
+ printf_ln(_("Saved working directory and index state %s"),
+ stash_msg_buf.buf);
+
+ if (!patch_mode) {
+ if (include_untracked && !ps->nr) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "clean", "--force",
+ "--quiet", "-d", NULL);
+ if (include_untracked == INCLUDE_ALL_FILES)
+ argv_array_push(&cp.args, "-x");
+ if (run_command(&cp)) {
+ ret = -1;
+ goto done;
+ }
+ }
+ discard_cache();
+ if (ps->nr) {
+ struct child_process cp_add = CHILD_PROCESS_INIT;
+ struct child_process cp_diff = CHILD_PROCESS_INIT;
+ struct child_process cp_apply = CHILD_PROCESS_INIT;
+ struct strbuf out = STRBUF_INIT;
+
+ cp_add.git_cmd = 1;
+ argv_array_push(&cp_add.args, "add");
+ if (!include_untracked)
+ argv_array_push(&cp_add.args, "-u");
+ if (include_untracked == INCLUDE_ALL_FILES)
+ argv_array_push(&cp_add.args, "--force");
+ argv_array_push(&cp_add.args, "--");
+ add_pathspecs(&cp_add.args, ps);
+ if (run_command(&cp_add)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_diff.git_cmd = 1;
+ argv_array_pushl(&cp_diff.args, "diff-index", "-p",
+ "--cached", "--binary", "HEAD", "--",
+ NULL);
+ add_pathspecs(&cp_diff.args, ps);
+ if (pipe_command(&cp_diff, NULL, 0, &out, 0, NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_apply.git_cmd = 1;
+ argv_array_pushl(&cp_apply.args, "apply", "--index",
+ "-R", NULL);
+ if (pipe_command(&cp_apply, out.buf, out.len, NULL, 0,
+ NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+ } else {
+ struct child_process cp = CHILD_PROCESS_INIT;
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "reset", "--hard", "-q",
+ NULL);
+ if (run_command(&cp)) {
+ ret = -1;
+ goto done;
+ }
+ }
+
+ if (keep_index == 1 && !is_null_oid(&info.i_tree)) {
+ struct child_process cp_ls = CHILD_PROCESS_INIT;
+ struct child_process cp_checkout = CHILD_PROCESS_INIT;
+ struct strbuf out = STRBUF_INIT;
+
+ if (reset_tree(&info.i_tree, 0, 1)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_ls.git_cmd = 1;
+ argv_array_pushl(&cp_ls.args, "ls-files", "-z",
+ "--modified", "--", NULL);
+
+ add_pathspecs(&cp_ls.args, ps);
+ if (pipe_command(&cp_ls, NULL, 0, &out, 0, NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_checkout.git_cmd = 1;
+ argv_array_pushl(&cp_checkout.args, "checkout-index",
+ "-z", "--force", "--stdin", NULL);
+ if (pipe_command(&cp_checkout, out.buf, out.len, NULL,
+ 0, NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+ }
+ goto done;
+ } else {
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "apply", "-R", NULL);
+
+ if (pipe_command(&cp, patch.buf, patch.len, NULL, 0, NULL, 0)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot remove "
+ "worktree changes"));
+ ret = -1;
+ goto done;
+ }
+
+ if (keep_index < 1) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "reset", "-q", "--", NULL);
+ add_pathspecs(&cp.args, ps);
+ if (run_command(&cp)) {
+ ret = -1;
+ goto done;
+ }
+ }
+ goto done;
+ }
+
+done:
+ strbuf_release(&stash_msg_buf);
+ return ret;
+}
+
+static int push_stash(int argc, const char **argv, const char *prefix)
+{
+ int keep_index = -1;
+ int patch_mode = 0;
+ int include_untracked = 0;
+ int quiet = 0;
+ const char *stash_msg = NULL;
+ struct pathspec ps;
+ struct option options[] = {
+ OPT_BOOL('k', "keep-index", &keep_index,
+ N_("keep index")),
+ OPT_BOOL('p', "patch", &patch_mode,
+ N_("stash in patch mode")),
+ OPT__QUIET(&quiet, N_("quiet mode")),
+ OPT_BOOL('u', "include-untracked", &include_untracked,
+ N_("include untracked files in stash")),
+ OPT_SET_INT('a', "all", &include_untracked,
+ N_("include ignore files"), 2),
+ OPT_STRING('m', "message", &stash_msg, N_("message"),
+ N_("stash message")),
+ OPT_END()
+ };
+
+ if (argc)
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_push_usage,
+ 0);
+
+ parse_pathspec(&ps, 0, PATHSPEC_PREFER_FULL | PATHSPEC_PREFIX_ORIGIN,
+ prefix, argv);
+ return do_push_stash(&ps, stash_msg, quiet, keep_index, patch_mode,
+ include_untracked);
+}
+
+static int save_stash(int argc, const char **argv, const char *prefix)
+{
+ int keep_index = -1;
+ int patch_mode = 0;
+ int include_untracked = 0;
+ int quiet = 0;
+ int ret = 0;
+ const char *stash_msg = NULL;
+ struct pathspec ps;
+ struct strbuf stash_msg_buf = STRBUF_INIT;
+ struct option options[] = {
+ OPT_BOOL('k', "keep-index", &keep_index,
+ N_("keep index")),
+ OPT_BOOL('p', "patch", &patch_mode,
+ N_("stash in patch mode")),
+ OPT__QUIET(&quiet, N_("quiet mode")),
+ OPT_BOOL('u', "include-untracked", &include_untracked,
+ N_("include untracked files in stash")),
+ OPT_SET_INT('a', "all", &include_untracked,
+ N_("include ignore files"), 2),
+ OPT_STRING('m', "message", &stash_msg, "message",
+ N_("stash message")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_save_usage,
+ PARSE_OPT_KEEP_DASHDASH);
+
+ if (argc)
+ stash_msg = strbuf_join_argv(&stash_msg_buf, argc, argv, ' ');
+
+ memset(&ps, 0, sizeof(ps));
+ ret = do_push_stash(&ps, stash_msg, quiet, keep_index,
+ patch_mode, include_untracked);
+
+ strbuf_release(&stash_msg_buf);
+ return ret;
+}
+
+static int use_builtin_stash(void)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct strbuf out = STRBUF_INIT;
+ int ret, env = git_env_bool("GIT_TEST_STASH_USE_BUILTIN", -1);
+
+ if (env != -1)
+ return env;
+
+ argv_array_pushl(&cp.args,
+ "config", "--bool", "stash.usebuiltin", NULL);
+ cp.git_cmd = 1;
+ if (capture_command(&cp, &out, 6)) {
+ strbuf_release(&out);
+ return 1;
+ }
+
+ strbuf_trim(&out);
+ ret = !strcmp("true", out.buf);
+ strbuf_release(&out);
+ return ret;
+}
+
+int cmd_stash(int argc, const char **argv, const char *prefix)
+{
+ int i = -1;
+ pid_t pid = getpid();
+ const char *index_file;
+ struct argv_array args = ARGV_ARRAY_INIT;
+
+ struct option options[] = {
+ OPT_END()
+ };
+
+ if (!use_builtin_stash()) {
+ const char *path = mkpath("%s/git-legacy-stash",
+ git_exec_path());
+
+ if (sane_execvp(path, (char **)argv) < 0)
+ die_errno(_("could not exec %s"), path);
+ else
+ BUG("sane_execvp() returned???");
+ }
+
+ prefix = setup_git_directory();
+ trace_repo_setup(prefix);
+ setup_work_tree();
+
+ git_config(git_diff_basic_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, options, git_stash_usage,
+ PARSE_OPT_KEEP_UNKNOWN | PARSE_OPT_KEEP_DASHDASH);
+
+ index_file = get_index_file();
+ strbuf_addf(&stash_index_path, "%s.stash.%" PRIuMAX, index_file,
+ (uintmax_t)pid);
+
+ if (!argc)
+ return !!push_stash(0, NULL, prefix);
+ else if (!strcmp(argv[0], "apply"))
+ return !!apply_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "clear"))
+ return !!clear_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "drop"))
+ return !!drop_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "pop"))
+ return !!pop_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "branch"))
+ return !!branch_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "list"))
+ return !!list_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "show"))
+ return !!show_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "store"))
+ return !!store_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "create"))
+ return !!create_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "push"))
+ return !!push_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "save"))
+ return !!save_stash(argc, argv, prefix);
+ else if (*argv[0] != '-')
+ usage_msg_opt(xstrfmt(_("unknown subcommand: %s"), argv[0]),
+ git_stash_usage, options);
+
+ if (strcmp(argv[0], "-p")) {
+ while (++i < argc && strcmp(argv[i], "--")) {
+ /*
+ * `akpqu` is a string which contains all short options,
+ * except `-m` which is verified separately.
+ */
+ if ((strlen(argv[i]) == 2) && *argv[i] == '-' &&
+ strchr("akpqu", argv[i][1]))
+ continue;
+
+ if (!strcmp(argv[i], "--all") ||
+ !strcmp(argv[i], "--keep-index") ||
+ !strcmp(argv[i], "--no-keep-index") ||
+ !strcmp(argv[i], "--patch") ||
+ !strcmp(argv[i], "--quiet") ||
+ !strcmp(argv[i], "--include-untracked"))
+ continue;
+
+ /*
+ * `-m` and `--message=` are verified separately because
+ * they need to be immediately followed by a string
+ * (i.e.`-m"foobar"` or `--message="foobar"`).
+ */
+ if (starts_with(argv[i], "-m") ||
+ starts_with(argv[i], "--message="))
+ continue;
+
+ usage_with_options(git_stash_usage, options);
+ }
+ }
+
+ argv_array_push(&args, "push");
+ argv_array_pushv(&args, argv);
+ return !!push_stash(args.argc, args.argv, prefix);
+}
i++;
}
- if (ps_matched && report_path_error(ps_matched, pathspec, prefix))
+ if (ps_matched && report_path_error(ps_matched, pathspec))
result = -1;
free(ps_matched);
{
int i;
- run_processes_parallel(suc->max_jobs,
- update_clone_get_next_task,
- update_clone_start_failure,
- update_clone_task_finished,
- suc);
+ run_processes_parallel_tr2(suc->max_jobs, update_clone_get_next_task,
+ update_clone_start_failure,
+ update_clone_task_finished, suc, "submodule",
+ "parallel/update");
/*
* We saved the output and put it out all at once now.
static int module_config(int argc, const char **argv, const char *prefix)
{
enum {
- CHECK_WRITEABLE = 1
+ CHECK_WRITEABLE = 1,
+ DO_UNSET = 2
} command = 0;
struct option module_config_options[] = {
OPT_CMDMODE(0, "check-writeable", &command,
N_("check if it is safe to write to the .gitmodules file"),
CHECK_WRITEABLE),
+ OPT_CMDMODE(0, "unset", &command,
+ N_("unset the config in the .gitmodules file"),
+ DO_UNSET),
OPT_END()
};
const char *const git_submodule_helper_usage[] = {
- N_("git submodule--helper config name [value]"),
+ N_("git submodule--helper config <name> [<value>]"),
+ N_("git submodule--helper config --unset <name>"),
N_("git submodule--helper config --check-writeable"),
NULL
};
return is_writing_gitmodules_ok() ? 0 : -1;
/* Equivalent to ACTION_GET in builtin/config.c */
- if (argc == 2)
+ if (argc == 2 && command != DO_UNSET)
return print_config_from_gitmodules(the_repository, argv[1]);
/* Equivalent to ACTION_SET in builtin/config.c */
- if (argc == 3) {
+ if (argc == 3 || (argc == 2 && command == DO_UNSET)) {
+ const char *value = (argc == 3) ? argv[2] : NULL;
+
if (!is_writing_gitmodules_ok())
die(_("please make sure that the .gitmodules file is in the working tree"));
- return config_set_in_gitmodules_file_gently(argv[1], argv[2]);
+ return config_set_in_gitmodules_file_gently(argv[1], value);
}
usage_with_options(git_submodule_helper_usage, module_config_options);
OPT_WITHOUT(&filter.no_commit, N_("print only tags that don't contain the commit")),
OPT_MERGED(&filter, N_("print only tags that are merged")),
OPT_NO_MERGED(&filter, N_("print only tags that are not merged")),
- OPT_CALLBACK(0 , "sort", sorting_tail, N_("key"),
- N_("field name to sort on"), &parse_opt_ref_sorting),
+ OPT_REF_SORT(sorting_tail),
{
OPTION_CALLBACK, 0, "points-at", &filter.points_at, N_("object"),
N_("print only tags of the object"), PARSE_OPT_LASTARG_DEFAULT,
}
static int do_reupdate(int ac, const char **av,
- const char *prefix, int prefix_length)
+ const char *prefix)
{
/* Read HEAD and run update-index on paths that are
* merged and already different between index and HEAD.
return 0;
}
-static int cacheinfo_callback(struct parse_opt_ctx_t *ctx,
- const struct option *opt, int unset)
+static enum parse_opt_result cacheinfo_callback(
+ struct parse_opt_ctx_t *ctx, const struct option *opt,
+ const char *arg, int unset)
{
struct object_id oid;
unsigned int mode;
const char *path;
BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
if (!parse_new_style_cacheinfo(ctx->argv[1], &mode, &oid, &path)) {
if (add_cacheinfo(mode, &oid, path, 0))
return 0;
}
-static int stdin_cacheinfo_callback(struct parse_opt_ctx_t *ctx,
- const struct option *opt, int unset)
+static enum parse_opt_result stdin_cacheinfo_callback(
+ struct parse_opt_ctx_t *ctx, const struct option *opt,
+ const char *arg, int unset)
{
int *nul_term_line = opt->value;
BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
if (ctx->argc != 1)
return error("option '%s' must be the last argument", opt->long_name);
return 0;
}
-static int stdin_callback(struct parse_opt_ctx_t *ctx,
- const struct option *opt, int unset)
+static enum parse_opt_result stdin_callback(
+ struct parse_opt_ctx_t *ctx, const struct option *opt,
+ const char *arg, int unset)
{
int *read_from_stdin = opt->value;
BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
if (ctx->argc != 1)
return error("option '%s' must be the last argument", opt->long_name);
return 0;
}
-static int unresolve_callback(struct parse_opt_ctx_t *ctx,
- const struct option *opt, int unset)
+static enum parse_opt_result unresolve_callback(
+ struct parse_opt_ctx_t *ctx, const struct option *opt,
+ const char *arg, int unset)
{
int *has_errors = opt->value;
const char *prefix = startup_info->prefix;
BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
/* consume remaining arguments. */
*has_errors = do_unresolve(ctx->argc, ctx->argv,
return 0;
}
-static int reupdate_callback(struct parse_opt_ctx_t *ctx,
- const struct option *opt, int unset)
+static enum parse_opt_result reupdate_callback(
+ struct parse_opt_ctx_t *ctx, const struct option *opt,
+ const char *arg, int unset)
{
int *has_errors = opt->value;
const char *prefix = startup_info->prefix;
BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
/* consume remaining arguments. */
setup_work_tree();
- *has_errors = do_reupdate(ctx->argc, ctx->argv,
- prefix, prefix ? strlen(prefix) : 0);
+ *has_errors = do_reupdate(ctx->argc, ctx->argv, prefix);
if (*has_errors)
active_cache_changed = 0;
N_("add the specified entry to the index"),
PARSE_OPT_NOARG | /* disallow --cacheinfo=<mode> form */
PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP,
- (parse_opt_cb *) cacheinfo_callback},
+ NULL, 0,
+ cacheinfo_callback},
{OPTION_CALLBACK, 0, "chmod", &set_executable_bit, "(+|-)x",
N_("override the executable bit of the listed files"),
PARSE_OPT_NONEG,
{OPTION_LOWLEVEL_CALLBACK, 0, "stdin", &read_from_stdin, NULL,
N_("read list of paths to be updated from standard input"),
PARSE_OPT_NONEG | PARSE_OPT_NOARG,
- (parse_opt_cb *) stdin_callback},
+ NULL, 0, stdin_callback},
{OPTION_LOWLEVEL_CALLBACK, 0, "index-info", &nul_term_line, NULL,
N_("add entries from standard input to the index"),
PARSE_OPT_NONEG | PARSE_OPT_NOARG,
- (parse_opt_cb *) stdin_cacheinfo_callback},
+ NULL, 0, stdin_cacheinfo_callback},
{OPTION_LOWLEVEL_CALLBACK, 0, "unresolve", &has_errors, NULL,
N_("repopulate stages #2 and #3 for the listed paths"),
PARSE_OPT_NONEG | PARSE_OPT_NOARG,
- (parse_opt_cb *) unresolve_callback},
+ NULL, 0, unresolve_callback},
{OPTION_LOWLEVEL_CALLBACK, 'g', "again", &has_errors, NULL,
N_("only update entries that differ from HEAD"),
PARSE_OPT_NONEG | PARSE_OPT_NOARG,
- (parse_opt_cb *) reupdate_callback},
+ NULL, 0, reupdate_callback},
OPT_BIT(0, "ignore-missing", &refresh_args.flags,
N_("ignore files missing from worktree"),
REFRESH_IGNORE_MISSING),
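
The conversion above shows the new low-level parse-options callback contract: callbacks return `enum parse_opt_result`, receive the argument explicitly, and assert their declared constraints with BUG_ON_OPT_NEG()/BUG_ON_OPT_ARG(). A minimal sketch of such a callback, with a hypothetical option name and assuming git's parse-options.h:

#include "parse-options.h"

static enum parse_opt_result example_callback(struct parse_opt_ctx_t *ctx,
					      const struct option *opt,
					      const char *arg, int unset)
{
	int *flag = opt->value;

	BUG_ON_OPT_NEG(unset);	/* declared with PARSE_OPT_NONEG */
	BUG_ON_OPT_ARG(arg);	/* declared with PARSE_OPT_NOARG */
	*flag = 1;
	return 0;		/* PARSE_OPT_DONE */
}

/* registered with the same full-initializer shape used above:
 *	{OPTION_LOWLEVEL_CALLBACK, 0, "example", &flag, NULL,
 *	 N_("set the example flag"),
 *	 PARSE_OPT_NONEG | PARSE_OPT_NOARG,
 *	 NULL, 0, example_callback},
 */
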
if (entries < 0)
die("cache corrupted");
+ the_index.updated_skipworktree = 1;
+
/*
* Custom copy of parse_options() because we want to handle
* filename arguments as they come.
struct strbuf sb_git = STRBUF_INIT, sb_repo = STRBUF_INIT;
struct strbuf sb = STRBUF_INIT;
const char *name;
- struct stat st;
struct child_process cp = CHILD_PROCESS_INIT;
struct argv_array child_env = ARGV_ARRAY_INIT;
- int counter = 0, len, ret;
+ unsigned int counter = 0;
+ int len, ret;
struct strbuf symref = STRBUF_INIT;
struct commit *commit = NULL;
int is_branch = 0;
if (safe_create_leading_directories_const(sb_repo.buf))
die_errno(_("could not create leading directories of '%s'"),
sb_repo.buf);
- while (!stat(sb_repo.buf, &st)) {
+
+ while (mkdir(sb_repo.buf, 0777)) {
counter++;
+ if ((errno != EEXIST) || !counter /* overflow */)
+ die_errno(_("could not create directory of '%s'"),
+ sb_repo.buf);
strbuf_setlen(&sb_repo, len);
strbuf_addf(&sb_repo, "%d", counter);
}
atexit(remove_junk);
sigchain_push_common(remove_junk_on_signal);
- if (mkdir(sb_repo.buf, 0777))
- die_errno(_("could not create directory of '%s'"), sb_repo.buf);
junk_git_dir = xstrdup(sb_repo.buf);
is_junk = 1;
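
Letting mkdir() itself report EEXIST closes the window in which another process could grab the candidate name between a stat() probe and the actual creation; only EEXIST triggers a retry with an incremented suffix, and counter wrap-around is treated as a hard failure. A standalone POSIX sketch of the same pattern (not git code; names are illustrative):

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Create "<base>" or "<base><n>", whichever mkdir() wins first. */
static int make_unique_dir(const char *base, char *out, size_t outlen)
{
	unsigned int counter = 0;

	snprintf(out, outlen, "%s", base);
	while (mkdir(out, 0777)) {
		counter++;
		if (errno != EEXIST || !counter)	/* give up on overflow */
			return -1;
		snprintf(out, outlen, "%s%u", base, counter);
	}
	return 0;	/* "out" now names a directory we created */
}
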
cp.dir = path;
cp.env = env;
cp.argv = NULL;
+ cp.trace2_hook_name = "post-checkout";
argv_array_pushl(&cp.args, absolute_path(hook),
oid_to_hex(&null_oid),
oid_to_hex(&commit->object.oid),
#include "gettext.h"
#include "convert.h"
#include "trace.h"
+#include "trace2.h"
#include "string-list.h"
#include "pack-revindex.h"
#include "hash.h"
struct cache_time timestamp;
unsigned name_hash_initialized : 1,
initialized : 1,
- drop_cache_tree : 1;
+ drop_cache_tree : 1,
+ updated_workdir : 1,
+ updated_skipworktree : 1;
struct hashmap name_hash;
struct hashmap dir_hash;
struct object_id oid;
/* Remove entry, return true if there are more entries to go. */
extern int remove_index_entry_at(struct index_state *, int pos);
-extern void remove_marked_cache_entries(struct index_state *istate);
+extern void remove_marked_cache_entries(struct index_state *istate, int invalidate);
extern int remove_file_from_index(struct index_state *, const char *path);
#define ADD_CACHE_VERBOSE 1
#define ADD_CACHE_PRETEND 2
extern const char *core_partial_clone_filter_default;
extern int repository_format_worktree_config;
+/*
+ * You _have_ to initialize a `struct repository_format` using
+ * `= REPOSITORY_FORMAT_INIT` before calling `read_repository_format()`.
+ */
struct repository_format {
int version;
int precious_objects;
struct string_list unknown_extensions;
};
+/*
+ * Always use this to initialize a `struct repository_format`
+ * to a well-defined, default state before calling
+ * `read_repository_format()`.
+ */
+#define REPOSITORY_FORMAT_INIT \
+{ \
+ .version = -1, \
+ .is_bare = -1, \
+ .hash_algo = GIT_HASH_SHA1, \
+ .unknown_extensions = STRING_LIST_INIT_DUP, \
+}
+
/*
* Read the repository format characteristics from the config file "path" into
- * "format" struct. Returns the numeric version. On error, -1 is returned,
- * format->version is set to -1, and all other fields in the struct are
- * undefined.
+ * "format" struct. Returns the numeric version. On error, or if no version is
+ * found in the configuration, -1 is returned, format->version is set to -1,
+ * and all other fields in the struct are set to the default configuration
+ * (REPOSITORY_FORMAT_INIT). Always initialize the struct using
+ * REPOSITORY_FORMAT_INIT before calling this function.
*/
int read_repository_format(struct repository_format *format, const char *path);
+/*
+ * Free the memory held onto by `format`, but not the struct itself.
+ * (No need to use this after `read_repository_format()` fails.)
+ */
+void clear_repository_format(struct repository_format *format);
+
/*
* Verify that the repository described by repository_format is something we
* can read. If it is, return 0. Otherwise, return -1, and "err" will describe
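
Taken together, the comments above describe a fixed calling convention: initialize, read, verify, then clear. A minimal sketch of that sequence (within git's own tree; the function name is hypothetical):

static int report_repo_version(const char *config_path)
{
	struct repository_format format = REPOSITORY_FORMAT_INIT;
	struct strbuf err = STRBUF_INIT;
	int version = read_repository_format(&format, config_path);

	if (version < 0)
		return -1;	/* fields were reset to their defaults */
	if (verify_repository_format(&format, &err) < 0) {
		error("%s", err.buf);
		strbuf_release(&err);
		return -1;
	}
	printf("repositoryformatversion = %d\n", format.version);
	clear_repository_format(&format);	/* frees unknown_extensions etc. */
	strbuf_release(&err);
	return 0;
}
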
};
extern int get_oid(const char *str, struct object_id *oid);
+extern int get_oidf(struct object_id *oid, const char *fmt, ...);
extern int get_oid_commit(const char *str, struct object_id *oid);
extern int get_oid_committish(const char *str, struct object_id *oid);
extern int get_oid_tree(const char *str, struct object_id *oid);
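
get_oidf() is a printf-style convenience around get_oid(): it formats the object name first, then resolves it. A small sketch (the "HEAD:Makefile" object name is only an example):

static void print_blob_at_head(void)
{
	struct object_id oid;

	/* format the object name, then resolve it exactly as get_oid() would */
	if (get_oidf(&oid, "%s:%s", "HEAD", "Makefile"))
		die("could not resolve HEAD:Makefile");
	printf("%s\n", oid_to_hex(&oid));
}
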
#define IDENT_STRICT 1
#define IDENT_NO_DATE 2
#define IDENT_NO_NAME 4
+
+enum want_ident {
+ WANT_BLANK_IDENT,
+ WANT_AUTHOR_IDENT,
+ WANT_COMMITTER_IDENT
+};
+
extern const char *git_author_info(int);
extern const char *git_committer_info(int);
-extern const char *fmt_ident(const char *name, const char *email, const char *date_str, int);
-extern const char *fmt_name(const char *name, const char *email);
+extern const char *fmt_ident(const char *name, const char *email,
+ enum want_ident whose_ident,
+ const char *date_str, int);
+extern const char *fmt_name(enum want_ident);
extern const char *ident_default_name(void);
extern const char *ident_default_email(void);
extern const char *git_editor(void);
extern const char *git_pager(int stdout_is_tty);
extern int is_terminal_dumb(void);
extern int git_ident_config(const char *, const char *, void *);
+/*
+ * Prepare an ident to fall back on if the user didn't configure it.
+ */
+void prepare_fallback_ident(const char *name, const char *email);
extern void reset_ident_date(void);
struct ident_split {
extern int checkout_entry(struct cache_entry *ce, const struct checkout *state, char *topath, int *nr_checkouts);
extern void enable_delayed_checkout(struct checkout *state);
extern int finish_delayed_checkout(struct checkout *state, int *nr_checkouts);
+/*
+ * Unlink the last component and schedule the leading directories for
+ * removal, such that empty directories get removed.
+ */
+extern void unlink_entry(const struct cache_entry *ce);
struct cache_def {
struct strbuf path;
+++ /dev/null
-#include "cache.h"
-
-int main(int ac, char **av)
-{
- int i;
- int dirty, clean, racy;
-
- dirty = clean = racy = 0;
- read_cache();
- for (i = 0; i < active_nr; i++) {
- struct cache_entry *ce = active_cache[i];
- struct stat st;
-
- if (lstat(ce->name, &st)) {
- error_errno("lstat(%s)", ce->name);
- continue;
- }
-
- if (ce_match_stat(ce, &st, 0))
- dirty++;
- else if (ce_match_stat(ce, &st, CE_MATCH_RACY_IS_DIRTY))
- racy++;
- else
- clean++;
- }
- printf("dirty %d, clean %d, racy %d\n", dirty, clean, racy);
- return 0;
-}
Documentation)
sudo apt-get -q update
sudo apt-get -q -y install asciidoc xmlto
+
+ test -n "$ALREADY_HAVE_ASCIIDOCTOR" ||
+ gem install --version 1.5.8 asciidoctor
;;
esac
+++ /dev/null
-#!/usr/bin/env bash
-#
-# Script to trigger the Git for Windows build and test run.
-# Set the $GFW_CI_TOKEN as environment variable.
-# Pass the branch (only branches on https://github.com/git/git are
-# supported) and a commit hash.
-#
-
-. ${0%/*}/lib.sh
-
-test $# -ne 2 && echo "Unexpected number of parameters" && exit 1
-test -z "$GFW_CI_TOKEN" && echo "GFW_CI_TOKEN not defined" && exit
-
-BRANCH=$1
-COMMIT=$2
-
-gfwci () {
- local CURL_ERROR_CODE HTTP_CODE
- CONTENT_FILE=$(mktemp -t "git-windows-ci-XXXXXX")
- while test -z $HTTP_CODE
- do
- HTTP_CODE=$(curl \
- -H "Authentication: Bearer $GFW_CI_TOKEN" \
- --silent --retry 5 --write-out '%{HTTP_CODE}' \
- --output >(sed "$(printf '1s/^\xef\xbb\xbf//')" >$CONTENT_FILE) \
- "https://git-for-windows-ci.azurewebsites.net/api/TestNow?$1" \
- )
- CURL_ERROR_CODE=$?
- # The GfW CI web app sometimes returns HTTP errors of
- # "502 bad gateway" or "503 service unavailable".
- # We also need to check the HTTP content because the GfW web
- # app seems to pass through (error) results from other Azure
- # calls with HTTP code 200.
- # Wait a little and retry if we detect this error. More info:
- # https://docs.microsoft.com/en-in/azure/app-service-web/app-service-web-troubleshoot-http-502-http-503
- if test $HTTP_CODE -eq 502 ||
- test $HTTP_CODE -eq 503 ||
- grep "502 - Web server received an invalid response" $CONTENT_FILE >/dev/null
- then
- sleep 10
- HTTP_CODE=
- fi
- done
- cat $CONTENT_FILE
- rm $CONTENT_FILE
- if test $CURL_ERROR_CODE -ne 0
- then
- return $CURL_ERROR_CODE
- fi
- if test "$HTTP_CODE" -ge 400 && test "$HTTP_CODE" -lt 600
- then
- return 127
- fi
-}
-
-# Trigger build job
-BUILD_ID=$(gfwci "action=trigger&branch=$BRANCH&commit=$COMMIT&skipTests=false")
-if test $? -ne 0
-then
- echo "Unable to trigger Visual Studio Team Services Build"
- echo "$BUILD_ID"
- exit 1
-fi
-
-# Check if the $BUILD_ID contains a number
-case $BUILD_ID in
-''|*[!0-9]*) echo "Unexpected build number: $BUILD_ID" && exit 1
-esac
-
-echo "Visual Studio Team Services Build #${BUILD_ID}"
-
-# Tracing execued commands would produce too much noise in the waiting
-# loop below.
-set +x
-
-# Wait until build job finished
-STATUS=
-RESULT=
-while true
-do
- LAST_STATUS=$STATUS
- STATUS=$(gfwci "action=status&buildId=$BUILD_ID")
- test "$STATUS" = "$LAST_STATUS" || printf "\nStatus: %s " "$STATUS"
- printf "."
-
- case "$STATUS" in
- inProgress|postponed|notStarted) sleep 10 ;; # continue
- "completed: succeeded") RESULT="success"; break;; # success
- "completed: failed") break;; # failure
- *) echo "Unhandled status: $STATUS"; break;; # unknown
- esac
-done
-
-# Print log
-echo ""
-echo ""
-set -x
-gfwci "action=log&buildId=$BUILD_ID" | cut -c 30-
-
-# Set exit code for TravisCI
-test "$RESULT" = "success"
-
-save_good_tree
. ${0%/*}/lib.sh
-test -n "$ALREADY_HAVE_ASCIIDOCTOR" ||
-gem install asciidoctor
+filter_log () {
+ sed -e '/^GIT_VERSION = /d' \
+ -e '/^ \* new asciidoc flags$/d' \
+ "$1"
+}
make check-builtins
make check-docs
# Build docs with AsciiDoc
-make doc > >(tee stdout.log) 2> >(tee stderr.log >&2)
-! test -s stderr.log
+make doc > >(tee stdout.log) 2> >(tee stderr.raw >&2)
+cat stderr.raw
+filter_log stderr.raw >stderr.log
+test ! -s stderr.log
test -s Documentation/git.html
test -s Documentation/git.xml
test -s Documentation/git.1
grep '<meta name="generator" content="AsciiDoc ' Documentation/git.html
-rm -f stdout.log stderr.log
+rm -f stdout.log stderr.log stderr.raw
check_unignored_build_artifacts
# Build docs with AsciiDoctor
make clean
-make USE_ASCIIDOCTOR=1 doc > >(tee stdout.log) 2> >(tee stderr.log >&2)
-sed '/^GIT_VERSION = / d' stderr.log
-! test -s stderr.log
+make USE_ASCIIDOCTOR=1 doc > >(tee stdout.log) 2> >(tee stderr.raw >&2)
+cat stderr.raw
+filter_log stderr.raw >stderr.log
+test ! -s stderr.log
test -s Documentation/git.html
grep '<meta name="generator" content="Asciidoctor ' Documentation/git.html
-rm -f stdout.log stderr.log
+rm -f stdout.log stderr.log stderr.raw
check_unignored_build_artifacts
save_good_tree
two->path, strlen(two->path), two->mode);
}
-static struct combine_diff_path *intersect_paths(struct combine_diff_path *curr, int n, int num_parent)
+static int filename_changed(char status)
+{
+ return status == 'R' || status == 'C';
+}
+
+static struct combine_diff_path *intersect_paths(
+ struct combine_diff_path *curr,
+ int n,
+ int num_parent,
+ int combined_all_paths)
{
struct diff_queue_struct *q = &diff_queued_diff;
struct combine_diff_path *p, **tail = &curr;
- int i, cmp;
+ int i, j, cmp;
if (!n) {
for (i = 0; i < q->nr; i++) {
oidcpy(&p->parent[n].oid, &q->queue[i]->one->oid);
p->parent[n].mode = q->queue[i]->one->mode;
p->parent[n].status = q->queue[i]->status;
+
+ if (combined_all_paths &&
+ filename_changed(p->parent[n].status)) {
+ strbuf_init(&p->parent[n].path, 0);
+ strbuf_addstr(&p->parent[n].path,
+ q->queue[i]->one->path);
+ }
*tail = p;
tail = &p->next;
}
if (cmp < 0) {
/* p->path not in q->queue[]; drop it */
*tail = p->next;
+ for (j = 0; j < num_parent; j++)
+ if (combined_all_paths &&
+ filename_changed(p->parent[j].status))
+ strbuf_release(&p->parent[j].path);
free(p);
continue;
}
oidcpy(&p->parent[n].oid, &q->queue[i]->one->oid);
p->parent[n].mode = q->queue[i]->one->mode;
p->parent[n].status = q->queue[i]->status;
+ if (combined_all_paths &&
+ filename_changed(p->parent[n].status))
+ strbuf_addstr(&p->parent[n].path,
+ q->queue[i]->one->path);
tail = &p->next;
i++;
if (!show_file_header)
return;
- if (added)
- dump_quoted_path("--- ", "", "/dev/null",
- line_prefix, c_meta, c_reset);
- else
- dump_quoted_path("--- ", a_prefix, elem->path,
- line_prefix, c_meta, c_reset);
+ if (rev->combined_all_paths) {
+ for (i = 0; i < num_parent; i++) {
+ char *path = filename_changed(elem->parent[i].status)
+ ? elem->parent[i].path.buf : elem->path;
+ if (elem->parent[i].status == DIFF_STATUS_ADDED)
+ dump_quoted_path("--- ", "", "/dev/null",
+ line_prefix, c_meta, c_reset);
+ else
+ dump_quoted_path("--- ", a_prefix, path,
+ line_prefix, c_meta, c_reset);
+ }
+ } else {
+ if (added)
+ dump_quoted_path("--- ", "", "/dev/null",
+ line_prefix, c_meta, c_reset);
+ else
+ dump_quoted_path("--- ", a_prefix, elem->path,
+ line_prefix, c_meta, c_reset);
+ }
if (deleted)
dump_quoted_path("+++ ", "", "/dev/null",
line_prefix, c_meta, c_reset);
putchar(inter_name_termination);
}
+ for (i = 0; i < num_parent; i++)
+ if (rev->combined_all_paths) {
+ if (filename_changed(p->parent[i].status))
+ write_name_quoted(p->parent[i].path.buf, stdout,
+ inter_name_termination);
+ else
+ write_name_quoted(p->path, stdout,
+ inter_name_termination);
+ }
write_name_quoted(p->path, stdout, line_termination);
}
/* find set of paths that every parent touches */
static struct combine_diff_path *find_paths_generic(const struct object_id *oid,
- const struct oid_array *parents, struct diff_options *opt)
+ const struct oid_array *parents,
+ struct diff_options *opt,
+ int combined_all_paths)
{
struct combine_diff_path *paths = NULL;
int i, num_parent = parents->nr;
opt->output_format = DIFF_FORMAT_NO_OUTPUT;
diff_tree_oid(&parents->oid[i], oid, "", opt);
diffcore_std(opt);
- paths = intersect_paths(paths, i, num_parent);
+ paths = intersect_paths(paths, i, num_parent,
+ combined_all_paths);
/* if showing diff, show it in requested order */
if (opt->output_format != DIFF_FORMAT_NO_OUTPUT &&
* diff(sha1,parent_i) for all i to do the job, specifically
* for parent0.
*/
- paths = find_paths_generic(oid, parents, &diffopts);
+ paths = find_paths_generic(oid, parents, &diffopts,
+ rev->combined_all_paths);
}
else {
int stat_opt;
while (paths) {
struct combine_diff_path *tmp = paths;
paths = paths->next;
+ for (i = 0; i < num_parent; i++)
+ if (rev->combined_all_paths &&
+ filename_changed(tmp->parent[i].status))
+ strbuf_release(&tmp->parent[i].path);
free(tmp);
}
return 1;
}
-struct commit_graph *load_commit_graph_one(const char *graph_file)
+int open_commit_graph(const char *graph_file, int *fd, struct stat *st)
+{
+ *fd = git_open(graph_file);
+ if (*fd < 0)
+ return 0;
+ if (fstat(*fd, st)) {
+ close(*fd);
+ return 0;
+ }
+ return 1;
+}
+
+struct commit_graph *load_commit_graph_one_fd_st(int fd, struct stat *st)
{
void *graph_map;
size_t graph_size;
- struct stat st;
struct commit_graph *ret;
- int fd = git_open(graph_file);
- if (fd < 0)
- return NULL;
- if (fstat(fd, &st)) {
- close(fd);
- return NULL;
- }
- graph_size = xsize_t(st.st_size);
+ graph_size = xsize_t(st->st_size);
if (graph_size < GRAPH_MIN_SIZE) {
close(fd);
- die(_("graph file %s is too small"), graph_file);
+ error(_("commit-graph file is too small"));
+ return NULL;
}
graph_map = xmmap(NULL, graph_size, PROT_READ, MAP_PRIVATE, fd, 0);
ret = parse_commit_graph(graph_map, fd, graph_size);
if (!ret) {
munmap(graph_map, graph_size);
close(fd);
- exit(1);
}
return ret;
}
+static int verify_commit_graph_lite(struct commit_graph *g)
+{
+ /*
+ * Basic validation shared between parse_commit_graph()
+ * which'll be called every time the graph is used, and the
+ * much more expensive verify_commit_graph() used by
+ * "commit-graph verify".
+ *
+ * There should only be very basic checks here to ensure that
+ * we don't e.g. segfault in fill_commit_in_graph(), but
+ * because this is a very hot codepath nothing that e.g. loops
+ * over g->num_commits, or runs a checksum on the commit-graph
+ * itself.
+ */
+ if (!g->chunk_oid_fanout) {
+ error("commit-graph is missing the OID Fanout chunk");
+ return 1;
+ }
+ if (!g->chunk_oid_lookup) {
+ error("commit-graph is missing the OID Lookup chunk");
+ return 1;
+ }
+ if (!g->chunk_commit_data) {
+ error("commit-graph is missing the Commit Data chunk");
+ return 1;
+ }
+
+ return 0;
+}
+
struct commit_graph *parse_commit_graph(void *graph_map, int fd,
size_t graph_size)
{
graph_signature = get_be32(data);
if (graph_signature != GRAPH_SIGNATURE) {
- error(_("graph signature %X does not match signature %X"),
+ error(_("commit-graph signature %X does not match signature %X"),
graph_signature, GRAPH_SIGNATURE);
return NULL;
}
graph_version = *(unsigned char*)(data + 4);
if (graph_version != GRAPH_VERSION) {
- error(_("graph version %X does not match version %X"),
+ error(_("commit-graph version %X does not match version %X"),
graph_version, GRAPH_VERSION);
return NULL;
}
hash_version = *(unsigned char*)(data + 5);
if (hash_version != oid_version()) {
- error(_("hash version %X does not match version %X"),
+ error(_("commit-graph hash version %X does not match version %X"),
hash_version, oid_version());
return NULL;
}
if (data + graph_size - chunk_lookup <
GRAPH_CHUNKLOOKUP_WIDTH) {
- error(_("chunk lookup table entry missing; graph file may be incomplete"));
+ error(_("commit-graph chunk lookup table entry missing; file may be incomplete"));
free(graph);
return NULL;
}
chunk_lookup += GRAPH_CHUNKLOOKUP_WIDTH;
if (chunk_offset > graph_size - the_hash_algo->rawsz) {
- error(_("improper chunk offset %08x%08x"), (uint32_t)(chunk_offset >> 32),
+ error(_("commit-graph improper chunk offset %08x%08x"), (uint32_t)(chunk_offset >> 32),
(uint32_t)chunk_offset);
free(graph);
return NULL;
}
if (chunk_repeated) {
- error(_("chunk id %08x appears multiple times"), chunk_id);
+ error(_("commit-graph chunk id %08x appears multiple times"), chunk_id);
free(graph);
return NULL;
}
last_chunk_offset = chunk_offset;
}
+ if (verify_commit_graph_lite(graph))
+ return NULL;
+
return graph;
}
+static struct commit_graph *load_commit_graph_one(const char *graph_file)
+{

+ struct stat st;
+ int fd;
+ int open_ok = open_commit_graph(graph_file, &fd, &st);
+
+ if (!open_ok)
+ return NULL;
+
+ return load_commit_graph_one_fd_st(fd, &st);
+}
+
static void prepare_commit_graph_one(struct repository *r, const char *obj_dir)
{
char *graph_name;
struct object_directory *odb;
int config_value;
+ if (git_env_bool(GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD, 0))
+ die("dying as requested by the '%s' variable on commit-graph load!",
+ GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD);
+
if (r->objects->commit_graph_attempted)
return !!r->objects->commit_graph;
r->objects->commit_graph_attempted = 1;
uint32_t packedDate[2];
display_progress(progress, ++*progress_cnt);
- parse_commit(*list);
+ parse_commit_no_graph(*list);
hashwrite(f, get_commit_tree_oid(*list)->hash, hash_len);
parent = (*list)->parents;
display_progress(progress, i + 1);
commit = lookup_commit(the_repository, &oids->list[i]);
- if (commit && !parse_commit(commit))
+ if (commit && !parse_commit_no_graph(commit))
add_missing_parents(oids, commit);
}
stop_progress(&progress);
continue;
commits.list[commits.nr] = lookup_commit(the_repository, &oids.list[i]);
- parse_commit(commits.list[commits.nr]);
+ parse_commit_no_graph(commits.list[commits.nr]);
for (parent = commits.list[commits.nr]->parents;
parent; parent = parent->next)
return 1;
}
- verify_commit_graph_error = 0;
-
- if (!g->chunk_oid_fanout)
- graph_report("commit-graph is missing the OID Fanout chunk");
- if (!g->chunk_oid_lookup)
- graph_report("commit-graph is missing the OID Lookup chunk");
- if (!g->chunk_commit_data)
- graph_report("commit-graph is missing the Commit Data chunk");
-
+ verify_commit_graph_error = verify_commit_graph_lite(g);
if (verify_commit_graph_error)
return verify_commit_graph_error;
hashcpy(cur_oid.hash, g->chunk_oid_lookup + g->hash_len * i);
if (i && oidcmp(&prev_oid, &cur_oid) >= 0)
- graph_report("commit-graph has incorrect OID order: %s then %s",
+ graph_report(_("commit-graph has incorrect OID order: %s then %s"),
oid_to_hex(&prev_oid),
oid_to_hex(&cur_oid));
uint32_t fanout_value = get_be32(g->chunk_oid_fanout + cur_fanout_pos);
if (i != fanout_value)
- graph_report("commit-graph has incorrect fanout value: fanout[%d] = %u != %u",
+ graph_report(_("commit-graph has incorrect fanout value: fanout[%d] = %u != %u"),
cur_fanout_pos, fanout_value, i);
cur_fanout_pos++;
}
graph_commit = lookup_commit(r, &cur_oid);
if (!parse_commit_in_graph_one(r, g, graph_commit))
- graph_report("failed to parse %s from commit-graph",
+ graph_report(_("failed to parse commit %s from commit-graph"),
oid_to_hex(&cur_oid));
}
uint32_t fanout_value = get_be32(g->chunk_oid_fanout + cur_fanout_pos);
if (g->num_commits != fanout_value)
- graph_report("commit-graph has incorrect fanout value: fanout[%d] = %u != %u",
+ graph_report(_("commit-graph has incorrect fanout value: fanout[%d] = %u != %u"),
cur_fanout_pos, fanout_value, i);
cur_fanout_pos++;
graph_commit = lookup_commit(r, &cur_oid);
odb_commit = (struct commit *)create_object(r, cur_oid.hash, alloc_commit_node(r));
if (parse_commit_internal(odb_commit, 0, 0)) {
- graph_report("failed to parse %s from object database",
+ graph_report(_("failed to parse commit %s from object database for commit-graph"),
oid_to_hex(&cur_oid));
continue;
}
if (!oideq(&get_commit_tree_in_graph_one(r, g, graph_commit)->object.oid,
get_commit_tree_oid(odb_commit)))
- graph_report("root tree OID for commit %s in commit-graph is %s != %s",
+ graph_report(_("root tree OID for commit %s in commit-graph is %s != %s"),
oid_to_hex(&cur_oid),
oid_to_hex(get_commit_tree_oid(graph_commit)),
oid_to_hex(get_commit_tree_oid(odb_commit)));
while (graph_parents) {
if (odb_parents == NULL) {
- graph_report("commit-graph parent list for commit %s is too long",
+ graph_report(_("commit-graph parent list for commit %s is too long"),
oid_to_hex(&cur_oid));
break;
}
if (!oideq(&graph_parents->item->object.oid, &odb_parents->item->object.oid))
- graph_report("commit-graph parent for %s is %s != %s",
+ graph_report(_("commit-graph parent for %s is %s != %s"),
oid_to_hex(&cur_oid),
oid_to_hex(&graph_parents->item->object.oid),
oid_to_hex(&odb_parents->item->object.oid));
}
if (odb_parents != NULL)
- graph_report("commit-graph parent list for commit %s terminates early",
+ graph_report(_("commit-graph parent list for commit %s terminates early"),
oid_to_hex(&cur_oid));
if (!graph_commit->generation) {
if (generation_zero == GENERATION_NUMBER_EXISTS)
- graph_report("commit-graph has generation number zero for commit %s, but non-zero elsewhere",
+ graph_report(_("commit-graph has generation number zero for commit %s, but non-zero elsewhere"),
oid_to_hex(&cur_oid));
generation_zero = GENERATION_ZERO_EXISTS;
} else if (generation_zero == GENERATION_ZERO_EXISTS)
- graph_report("commit-graph has non-zero generation number for commit %s, but zero elsewhere",
+ graph_report(_("commit-graph has non-zero generation number for commit %s, but zero elsewhere"),
oid_to_hex(&cur_oid));
if (generation_zero == GENERATION_ZERO_EXISTS)
max_generation--;
if (graph_commit->generation != max_generation + 1)
- graph_report("commit-graph generation for commit %s is %u != %u",
+ graph_report(_("commit-graph generation for commit %s is %u != %u"),
oid_to_hex(&cur_oid),
graph_commit->generation,
max_generation + 1);
if (graph_commit->date != odb_commit->date)
- graph_report("commit date for commit %s in commit-graph is %"PRItime" != %"PRItime,
+ graph_report(_("commit date for commit %s in commit-graph is %"PRItime" != %"PRItime),
oid_to_hex(&cur_oid),
graph_commit->date,
odb_commit->date);
#include "cache.h"
#define GIT_TEST_COMMIT_GRAPH "GIT_TEST_COMMIT_GRAPH"
+#define GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD "GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD"
struct commit;
char *get_commit_graph_filename(const char *obj_dir);
+int open_commit_graph(const char *graph_file, int *fd, struct stat *st);
/*
* Given a commit struct, try to fill the commit struct info, including:
const unsigned char *chunk_extra_edges;
};
-struct commit_graph *load_commit_graph_one(const char *graph_file);
+struct commit_graph *load_commit_graph_one_fd_st(int fd, struct stat *st);
struct commit_graph *parse_commit_graph(void *graph_map, int fd,
size_t graph_size);
{
return repo_parse_commit_gently(r, item, 0);
}
+
+static inline int parse_commit_no_graph(struct commit *commit)
+{
+ return repo_parse_commit_internal(the_repository, commit, 0, 0);
+}
+
#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
#define parse_commit_internal(item, quiet, use) repo_parse_commit_internal(the_repository, item, quiet, use)
#define parse_commit_gently(item, quiet) repo_parse_commit_gently(the_repository, item, quiet)
extern const char *setup_temporary_shallow(const struct oid_array *extra);
extern void advertise_shallow_grafts(int);
+/*
+ * Initialize with prepare_shallow_info() or zero-initialize (equivalent to
+ * prepare_shallow_info with a NULL oid_array).
+ */
struct shallow_info {
struct oid_array *shallow;
int *ours, nr_ours;
int main(int argc, const char **argv)
{
+ int result;
+
/*
* Always open file descriptors 0/1/2 to avoid clobbering files
* in die(). It also avoids messing up when the pipes are dup'ed
* onto stdin/stdout/stderr in the child processes we spawn.
*/
sanitize_stdfds();
+ restore_sigpipe_to_default();
+
+ trace2_initialize();
+ trace2_cmd_start(argv);
+ trace2_collect_process_info();
git_resolve_executable_dir(argv[0]);
attr_start();
- restore_sigpipe_to_default();
+ result = cmd_main(argc, argv);
+
+ trace2_cmd_exit(result);
- return cmd_main(argc, argv);
+ return result;
}
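
With the lifetime events wired into main() as above, individual commands can add their own spans; a hedged sketch using the trace2 region API (the category and label strings here are made up):

#include "cache.h"
#include "trace2.h"

static void expensive_step(struct repository *repo)
{
	trace2_region_enter("demo", "expensive-step", repo);
	/* ... the actual work would go here ... */
	trace2_region_leave("demo", "expensive-step", repo);
}
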
+#ifndef COMPAT_BSWAP_H
+#define COMPAT_BSWAP_H
+
/*
* Let's make sure we always have a sane definition for ntohl()/htonl().
* Some libraries define those as a function call, just to perform byte
}
#endif
+
+#endif /* COMPAT_BSWAP_H */
return 0;
prog = path_lookup(interpr, 1);
if (prog) {
+ int exec_id;
int argc = 0;
const char **argv2;
while (argv[argc]) argc++;
ALLOC_ARRAY(argv2, argc + 1);
argv2[0] = (char *)cmd; /* full path to the script file */
memcpy(&argv2[1], &argv[1], sizeof(*argv) * argc);
+ exec_id = trace2_exec(prog, argv2);
pid = mingw_spawnv(prog, argv2, 1);
if (pid >= 0) {
int status;
if (waitpid(pid, &status, 0) < 0)
status = 255;
+ trace2_exec_result(exec_id, status);
exit(status);
}
+ trace2_exec_result(exec_id, -1);
pid = 1; /* indicate that we tried but failed */
free(prog);
free(argv2);
/* check if git_command is a shell script */
if (!try_shell_exec(cmd, argv)) {
int pid, status;
+ int exec_id;
+ exec_id = trace2_exec(cmd, (const char **)argv);
pid = mingw_spawnv(cmd, (const char **)argv, 0);
- if (pid < 0)
+ if (pid < 0) {
+ trace2_exec_result(exec_id, -1);
return -1;
+ }
if (waitpid(pid, &status, 0) < 0)
status = 255;
+ trace2_exec_result(exec_id, status);
exit(status);
}
return -1;
errno = EINVAL;
return -1;
}
-/* bash cannot reliably detect negative return codes as failure */
-#define exit(code) exit((code) & 0xff)
+
#define sigemptyset(x) (void)0
static inline int sigaddset(sigset_t *set, int signum)
{ return 0; }
--- /dev/null
+#include "../../cache.h"
+#include "../../json-writer.h"
+#include <Psapi.h>
+#include <tlHelp32.h>
+
+/*
+ * An arbitrarily chosen value to limit the size of the ancestor
+ * array built in git_processes().
+ */
+#define NR_PIDS_LIMIT 10
+
+/*
+ * Find the process data for the given PID in the given snapshot
+ * and update the PROCESSENTRY32 data.
+ */
+static int find_pid(DWORD pid, HANDLE hSnapshot, PROCESSENTRY32 *pe32)
+{
+ pe32->dwSize = sizeof(PROCESSENTRY32);
+
+ if (Process32First(hSnapshot, pe32)) {
+ do {
+ if (pe32->th32ProcessID == pid)
+ return 1;
+ } while (Process32Next(hSnapshot, pe32));
+ }
+ return 0;
+}
+
+/*
+ * Accumulate JSON array of our parent processes:
+ * [
+ * exe-name-parent,
+ * exe-name-grand-parent,
+ * ...
+ * ]
+ *
+ * Note: we only report the filename of the process executable; the
+ * only way to get its full pathname is to use OpenProcess()
+ *       and GetModuleFileNameEx() or QueryFullProcessImageName()
+ * and that seems rather expensive (on top of the cost of
+ * getting the snapshot).
+ *
+ * Note: we compute the set of parent processes by walking the PPID
+ * link in each visited PROCESSENTRY32 record. This search
+ * stops when an ancestor process is not found in the snapshot
+ * (because it exited before the current or intermediate parent
+ * process exited).
+ *
+ * This search may compute an incorrect result if the PPID link
+ * refers to the PID of an exited parent and that PID has been
+ * recycled and given to a new unrelated process.
+ *
+ * Worse, it is possible for a child or descendant of the
+ * current process to be given the recycled PID and cause a
+ * PPID-cycle. This would cause an infinite loop building our
+ * parent process array.
+ *
+ * Note: for completeness, the "System Idle" process has PID=0 and
+ * PPID=0 and could cause another PPID-cycle. We don't expect
+ * Git to be a descendant of the idle process, but because of
+ * PID recycling, it might be possible to get a PPID link value
+ * of 0. This too would cause an infinite loop.
+ *
+ * Therefore, we keep an array of the visited PPIDs to guard against
+ * cycles.
+ *
+ * We use a fixed-size array rather than ALLOC_GROW to keep things
+ * simple and avoid the alloc/realloc overhead. It is OK if we
+ * truncate the search and return a partial answer.
+ */
+static void get_processes(struct json_writer *jw, HANDLE hSnapshot)
+{
+ PROCESSENTRY32 pe32;
+ DWORD pid;
+ DWORD pid_list[NR_PIDS_LIMIT];
+ int k, nr_pids = 0;
+
+ pid = GetCurrentProcessId();
+ while (find_pid(pid, hSnapshot, &pe32)) {
+ /* Only report parents. Omit self from the JSON output. */
+ if (nr_pids)
+ jw_array_string(jw, pe32.szExeFile);
+
+ /* Check for cycle in snapshot. (Yes, it happened.) */
+ for (k = 0; k < nr_pids; k++)
+ if (pid == pid_list[k]) {
+ jw_array_string(jw, "(cycle)");
+ return;
+ }
+
+ if (nr_pids == NR_PIDS_LIMIT) {
+ jw_array_string(jw, "(truncated)");
+ return;
+ }
+
+ pid_list[nr_pids++] = pid;
+
+ pid = pe32.th32ParentProcessID;
+ }
+}
+
+/*
+ * Emit JSON data for the current and parent processes. Individual
+ * trace2 targets can decide how to actually print it.
+ */
+static void get_ancestry(void)
+{
+ HANDLE hSnapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
+
+ if (hSnapshot != INVALID_HANDLE_VALUE) {
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_array_begin(&jw, 0);
+ get_processes(&jw, hSnapshot);
+ jw_end(&jw);
+
+ trace2_data_json("process", the_repository, "windows/ancestry",
+ &jw);
+
+ jw_release(&jw);
+ CloseHandle(hSnapshot);
+ }
+}
+
+/*
+ * Is a debugger attached to the current process?
+ *
+ * This will catch debug runs (where the debugger started the process).
+ * This is the normal case. Since this code is called during our startup,
+ * it will not report instances where a debugger is attached dynamically
+ * to a running git process, but that is relatively rare.
+ */
+static void get_is_being_debugged(void)
+{
+ if (IsDebuggerPresent())
+ trace2_data_intmax("process", the_repository,
+ "windows/debugger_present", 1);
+}
+
+void trace2_collect_process_info(void)
+{
+ if (!trace2_is_enabled())
+ return;
+
+ get_is_being_debugged();
+ get_ancestry();
+}
}
ret = !wildmatch(pattern.buf + prefix, text.buf + prefix,
- icase ? WM_CASEFOLD : 0);
+ WM_PATHNAME | (icase ? WM_CASEFOLD : 0));
if (!ret && !already_tried_absolute) {
/*
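
Adding WM_PATHNAME makes a single `*` stop at directory separators (only `**` spans them), which is what the gitdir-style patterns here expect. A hedged, self-contained illustration using git's wildmatch(), where WM_MATCH is 0:

#include "cache.h"
#include "wildmatch.h"

static void demo_pathname_matching(void)
{
	if (wildmatch("foo/*/baz", "foo/bar/baz", WM_PATHNAME) != WM_MATCH)
		die("expected a match");
	if (wildmatch("foo/*", "foo/bar/baz", WM_PATHNAME) == WM_MATCH)
		die("'*' should not have crossed the '/'");
	/* "**" still recurses, and WM_CASEFOLD can be OR'ed in as before */
	if (wildmatch("FOO/**/baz", "foo/a/b/baz",
		      WM_PATHNAME | WM_CASEFOLD) != WM_MATCH)
		die("expected a case-insensitive recursive match");
}
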
if (starts_with(var, "core."))
return git_default_core_config(var, value, cb);
- if (starts_with(var, "user."))
+ if (starts_with(var, "user.") ||
+ starts_with(var, "author.") ||
+ starts_with(var, "committer."))
return git_ident_config(var, value, cb);
if (starts_with(var, "i18n."))
void git_config_set(const char *key, const char *value)
{
git_config_set_multivar(key, value, NULL, 0);
+
+ trace2_cmd_set_config(key, value);
}
/*
ifeq ($(filter no-error,$(DEVOPTS)),)
-CFLAGS += -Werror
+DEVELOPER_CFLAGS += -Werror
endif
ifneq ($(filter pedantic,$(DEVOPTS)),)
-CFLAGS += -pedantic
+DEVELOPER_CFLAGS += -pedantic
# don't warn for each N_ use
-CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=0
-endif
-CFLAGS += -Wall
-CFLAGS += -Wdeclaration-after-statement
-CFLAGS += -Wformat-security
-CFLAGS += -Wno-format-zero-length
-CFLAGS += -Wold-style-definition
-CFLAGS += -Woverflow
-CFLAGS += -Wpointer-arith
-CFLAGS += -Wstrict-prototypes
-CFLAGS += -Wunused
-CFLAGS += -Wvla
+DEVELOPER_CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=0
+endif
+DEVELOPER_CFLAGS += -Wall
+DEVELOPER_CFLAGS += -Wdeclaration-after-statement
+DEVELOPER_CFLAGS += -Wformat-security
+DEVELOPER_CFLAGS += -Wno-format-zero-length
+DEVELOPER_CFLAGS += -Wold-style-definition
+DEVELOPER_CFLAGS += -Woverflow
+DEVELOPER_CFLAGS += -Wpointer-arith
+DEVELOPER_CFLAGS += -Wstrict-prototypes
+DEVELOPER_CFLAGS += -Wunused
+DEVELOPER_CFLAGS += -Wvla
ifndef COMPILER_FEATURES
COMPILER_FEATURES := $(shell ./detect-compiler $(CC))
endif
ifneq ($(filter clang4,$(COMPILER_FEATURES)),)
-CFLAGS += -Wtautological-constant-out-of-range-compare
+DEVELOPER_CFLAGS += -Wtautological-constant-out-of-range-compare
endif
ifneq ($(or $(filter gcc6,$(COMPILER_FEATURES)),$(filter clang4,$(COMPILER_FEATURES))),)
-CFLAGS += -Wextra
+DEVELOPER_CFLAGS += -Wextra
# if a function is public, there should be a prototype and the right
# header file should be included. If not, it should be static.
-CFLAGS += -Wmissing-prototypes
+DEVELOPER_CFLAGS += -Wmissing-prototypes
ifeq ($(filter extra-all,$(DEVOPTS)),)
# These are disabled because we have these all over the place.
-CFLAGS += -Wno-empty-body
-CFLAGS += -Wno-missing-field-initializers
-CFLAGS += -Wno-sign-compare
-CFLAGS += -Wno-unused-parameter
+DEVELOPER_CFLAGS += -Wno-empty-body
+DEVELOPER_CFLAGS += -Wno-missing-field-initializers
+DEVELOPER_CFLAGS += -Wno-sign-compare
+DEVELOPER_CFLAGS += -Wno-unused-parameter
endif
endif
# not worth fixing since newer compilers correctly stop complaining
ifneq ($(filter gcc4,$(COMPILER_FEATURES)),)
ifeq ($(filter gcc5,$(COMPILER_FEATURES)),)
-CFLAGS += -Wno-uninitialized
+DEVELOPER_CFLAGS += -Wno-uninitialized
endif
endif
BASIC_CFLAGS = -nologo -I. -I../zlib -Icompat/vcbuild -Icompat/vcbuild/include -DWIN32 -D_CONSOLE -DHAVE_STRING_H -D_CRT_SECURE_NO_WARNINGS -D_CRT_NONSTDC_NO_DEPRECATE
COMPAT_OBJS = compat/msvc.o compat/winansi.o \
compat/win32/pthread.o compat/win32/syslog.o \
+ compat/win32/trace2_win32_process_info.o \
compat/win32/dirent.o
COMPAT_CFLAGS = -D__USE_MINGW_ACCESS -DNOGDI -DHAVE_STRING_H -Icompat -Icompat/regex -Icompat/win32 -DSTRIP_EXTENSION=\".exe\"
BASIC_LDFLAGS = -IGNORE:4217 -IGNORE:4049 -NOLOGO -SUBSYSTEM:CONSOLE
NO_STRTOUMAX = YesPlease
NO_MKDTEMP = YesPlease
NO_SVN_TESTS = YesPlease
- NO_PERL_MAKEMAKER = YesPlease
RUNTIME_PREFIX = YesPlease
HAVE_WPGMPTR = YesWeDo
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
COMPAT_CFLAGS += -DNOGDI -Icompat -Icompat/win32
COMPAT_CFLAGS += -DSTRIP_EXTENSION=\".exe\"
COMPAT_OBJS += compat/mingw.o compat/winansi.o \
+ compat/win32/trace2_win32_process_info.o \
compat/win32/path-utils.o \
compat/win32/pthread.o compat/win32/syslog.o \
compat/win32/dirent.o
conn = NULL;
} else if (protocol == PROTO_GIT) {
conn = git_connect_git(fd, hostandport, path, prog, version, flags);
+ conn->trace2_child_class = "transport/git";
} else {
struct strbuf cmd = STRBUF_INIT;
const char *const *var;
strbuf_release(&cmd);
return NULL;
}
+ conn->trace2_child_class = "transport/ssh";
fill_ssh_args(conn, ssh_host, port, version, flags);
} else {
transport_check_allowed("file");
+ conn->trace2_child_class = "transport/file";
if (version > 0) {
argv_array_pushf(&conn->env_array, GIT_PROTOCOL_ENVIRONMENT "=version=%d",
version);
--- /dev/null
+@@
+expression str;
+identifier x, flexname;
+@@
+- FLEX_ALLOC_MEM(x, flexname, str, strlen(str));
++ FLEX_ALLOC_STR(x, flexname, str);
+
+@@
+expression str;
+identifier x, ptrname;
+@@
+- FLEXPTR_ALLOC_MEM(x, ptrname, str, strlen(str));
++ FLEXPTR_ALLOC_STR(x, ptrname, str);
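
The new semantic patch rewrites the strlen()-based spelling into the FLEX_ALLOC_STR()/FLEXPTR_ALLOC_STR() shorthands. A sketch of what both spellings allocate, using git's flex-array helpers (the struct is hypothetical):

struct pathname_entry {
	int flags;
	char name[FLEX_ARRAY];	/* struct and trailing string in one block */
};

static struct pathname_entry *make_entry(const char *path)
{
	struct pathname_entry *e;

	/* before the cocci rewrite: FLEX_ALLOC_MEM(e, name, path, strlen(path)); */
	FLEX_ALLOC_STR(e, name, path);	/* copies "path" and NUL-terminates it */
	e->flags = 0;
	return e;
}
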
__git_merge_strategies=$(__git_list_merge_strategies)
}
+__git_merge_strategy_options="ours theirs subtree subtree= patience
+ histogram diff-algorithm= ignore-space-change ignore-all-space
+ ignore-space-at-eol renormalize no-renormalize no-renames
+ find-renames find-renames= rename-threshold="
+
__git_complete_revlist_file ()
{
local dequoted_word pfx ls ref cur_="$cur"
-s|--strategy)
__gitcomp "$__git_merge_strategies"
return 0
+ ;;
+ -X)
+ __gitcomp "$__git_merge_strategy_options"
+ return 0
+ ;;
esac
case "$cur" in
--strategy=*)
__gitcomp "$__git_merge_strategies" "" "${cur##--strategy=}"
return 0
;;
+ --strategy-option=*)
+ __gitcomp "$__git_merge_strategy_options" "" "${cur##--strategy-option=}"
+ return 0
+ ;;
esac
return 1
}
__git_compute_all_commands ()
{
test -n "$__git_all_commands" ||
- __git_all_commands=$(git --list-cmds=main,others,alias,nohelpers)
+ __git_all_commands=$(__git --list-cmds=main,others,alias,nohelpers)
}
# Lists all set config variables starting with the given section prefix,
}
__git_whitespacelist="nowarn warn error error-all fix"
+__git_patchformat="mbox stgit stgit-series hg mboxrd"
__git_am_inprogress_options="--skip --continue --resolved --abort --quit --show-current-patch"
_git_am ()
__gitcomp "$__git_whitespacelist" "" "${cur##--whitespace=}"
return
;;
+ --patch-format=*)
+ __gitcomp "$__git_patchformat" "" "${cur##--patch-format=}"
+ return
+ ;;
--*)
__gitcomp_builtin am "" \
"$__git_am_inprogress_options"
_git_add ()
{
case "$cur" in
+ --chmod=*)
+ __gitcomp "+x -x" "" "${cur##--chmod=}"
+ return
+ ;;
--*)
__gitcomp_builtin add
return
esac
}
+__git_ref_fieldlist="refname objecttype objectsize objectname upstream push HEAD symref"
+
_git_branch ()
{
local i c=1 only_local_ref="n" has_r="n"
__gitcomp "$__git_cherry_pick_inprogress_options"
return
fi
+
+ __git_complete_strategy && return
+
case "$cur" in
--*)
__gitcomp_builtin cherry-pick "" \
}
__git_mergetools_common="diffuse diffmerge ecmerge emerge kdiff3 meld opendiff
- tkdiff vimdiff gvimdiff xxdiff araxis p4merge bc codecompare
+ tkdiff vimdiff gvimdiff xxdiff araxis p4merge bc
+ codecompare smerge
"
_git_difftool ()
__gitcomp "$__git_fetch_recurse_submodules" "" "${cur##--recurse-submodules=}"
return
;;
+ --filter=*)
+ __gitcomp "blob:none blob:limit= sparse:oid= sparse:path=" "" "${cur##--filter=}"
+ return
+ ;;
--*)
__gitcomp_builtin fetch
return
esac
if test -n "$GIT_TESTING_ALL_COMMAND_LIST"
then
- __gitcomp "$GIT_TESTING_ALL_COMMAND_LIST $(git --list-cmds=alias,list-guide) gitk"
+ __gitcomp "$GIT_TESTING_ALL_COMMAND_LIST $(__git --list-cmds=alias,list-guide) gitk"
else
- __gitcomp "$(git --list-cmds=main,nohelpers,alias,list-guide) gitk"
+ __gitcomp "$(__git --list-cmds=main,nohelpers,alias,list-guide) gitk"
fi
}
--all-match --invert-grep
"
-__git_log_pretty_formats="oneline short medium full fuller email raw format:"
-__git_log_date_formats="relative iso8601 rfc2822 short local default raw"
+__git_log_pretty_formats="oneline short medium full fuller email raw format: mboxrd"
+__git_log_date_formats="relative iso8601 iso8601-strict rfc2822 short local default raw unix format:"
_git_log ()
{
return
;;
diff.submodule)
- __gitcomp "log short"
+ __gitcomp "$__git_diff_submodule_formats"
return
;;
help.format)
_git_replace ()
{
case "$cur" in
+ --format=*)
+ __gitcomp "short medium long" "" "${cur##--format=}"
+ return
+ ;;
--*)
__gitcomp_builtin replace
return
__gitcomp "$__git_revert_inprogress_options"
return
fi
+ __git_complete_strategy && return
case "$cur" in
--*)
__gitcomp_builtin revert "" \
{
__git_has_doubledash && return
- local subcommands="add status init deinit update summary foreach sync"
+ local subcommands="add status init deinit update set-branch summary foreach sync absorbgitdirs"
local subcommand="$(__git_find_on_cmdline "$subcommands")"
if [ -z "$subcommand" ]; then
case "$cur" in
--force --rebase --merge --reference --depth --recursive --jobs
"
;;
+ set-branch,--*)
+ __gitcomp "--default --branch"
+ ;;
summary,--*)
__gitcomp "--cached --files --summary-limit"
;;
then
__gitcomp "$GIT_TESTING_PORCELAIN_COMMAND_LIST"
else
- __gitcomp "$(git --list-cmds=list-mainporcelain,others,nohelpers,alias,list-complete,config)"
+ __gitcomp "$(__git --list-cmds=list-mainporcelain,others,nohelpers,alias,list-complete,config)"
fi
;;
esac
git subtree merge --prefix=<prefix> <commit>
git subtree pull --prefix=<prefix> <repository> <ref>
git subtree push --prefix=<prefix> <repository> <ref>
-git subtree split --prefix=<prefix> <commit...>
+git subtree split --prefix=<prefix> <commit>
--
h,help show the help
q quiet
fi
}
+ensure_single_rev () {
+ if test $# -ne 1
+ then
+ die "You must provide exactly one revision. Got: '$@'"
+ fi
+}
while test $# -gt 0
do
then
revs=$(git rev-parse $default --revs-only "$@") || exit $?
dirs=$(git rev-parse --no-revs --no-flags "$@") || exit $?
+ ensure_single_rev $revs
if test -n "$dirs"
then
die "Error: Use --prefix instead of bare filenames."
}
cmd_add_commit () {
- revs=$(git rev-parse $default --revs-only "$@") || exit $?
- set -- $revs
- rev="$1"
+ rev=$(git rev-parse $default --revs-only "$@") || exit $?
+ ensure_single_rev $rev
debug "Adding $dir as '$rev'..."
git read-tree --prefix="$dir" $rev || exit $?
}
cmd_merge () {
- revs=$(git rev-parse $default --revs-only "$@") || exit $?
+ rev=$(git rev-parse $default --revs-only "$@") || exit $?
+ ensure_single_rev $rev
ensure_clean
- set -- $revs
- if test $# -ne 1
- then
- die "You must provide exactly one revision. Got: '$revs'"
- fi
- rev="$1"
-
if test -n "$squash"
then
first_split="$(find_latest_squash "$dir")"
if (start_async(&async))
return 0; /* error was already reported */
- if (strbuf_read(&nbuf, async.out, len) < 0) {
+ if (strbuf_read(&nbuf, async.out, 0) < 0) {
err = error(_("read from external filter '%s' failed"), cmd);
}
if (close(async.out)) {
return DATE_UNIX;
if (skip_prefix(format, "format", end))
return DATE_STRFTIME;
+ /*
+ * Please update $__git_log_date_formats in
+ * git-completion.bash when you add new formats.
+ */
die("unknown date format %s", format);
}
exit(128);
diff_set_mnemonic_prefix(&revs->diffopt, "c/", cached ? "i/" : "w/");
- diffcore_fix_diff_index(&revs->diffopt);
+ diffcore_fix_diff_index();
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
trace_performance_leave("diff-index");
#include "revision.h"
#include "log-tree.h"
#include "builtin.h"
+#include "parse-options.h"
#include "string-list.h"
#include "dir.h"
}
}
-void diff_no_index(struct repository *r,
- struct rev_info *revs,
- int argc, const char **argv)
+static const char * const diff_no_index_usage[] = {
+ N_("git diff --no-index [<options>] <path> <path>"),
+ NULL
+};
+
+int diff_no_index(struct rev_info *revs,
+ int implicit_no_index,
+ int argc, const char **argv)
{
- int i;
+ int i, no_index;
const char *paths[2];
struct strbuf replacement = STRBUF_INIT;
const char *prefix = revs->prefix;
-
- /*
- * FIXME: --no-index should not look at index and we should be
- * able to pass NULL repo. Maybe later.
- */
- repo_diff_setup(r, &revs->diffopt);
- for (i = 1; i < argc - 2; ) {
- int j;
- if (!strcmp(argv[i], "--no-index"))
- i++;
- else if (!strcmp(argv[i], "--"))
- i++;
- else {
- j = diff_opt_parse(&revs->diffopt, argv + i, argc - i,
- revs->prefix);
- if (j <= 0)
- die("invalid diff option/value: %s", argv[i]);
- i += j;
- }
+ struct option no_index_options[] = {
+ OPT_BOOL_F(0, "no-index", &no_index, "",
+ PARSE_OPT_NONEG | PARSE_OPT_HIDDEN),
+ OPT_END(),
+ };
+ struct option *options;
+
+ options = parse_options_concat(no_index_options,
+ revs->diffopt.parseopts);
+ argc = parse_options(argc, argv, revs->prefix, options,
+ diff_no_index_usage, 0);
+ if (argc != 2) {
+ if (implicit_no_index)
+ warning(_("Not a git repository. Use --no-index to "
+ "compare two paths outside a working tree"));
+ usage_with_options(diff_no_index_usage, options);
}
-
+ FREE_AND_NULL(options);
for (i = 0; i < 2; i++) {
const char *p = argv[argc - 2 + i];
if (!strcmp(p, "-"))
revs->diffopt.flags.exit_with_status = 1;
if (queue_diff(&revs->diffopt, paths[0], paths[1]))
- exit(1);
+ return 1;
diff_set_mnemonic_prefix(&revs->diffopt, "1/", "2/");
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
* The return code for --no-index imitates diff(1):
* 0 = no changes, 1 = changes, else error
*/
- exit(diff_result_code(&revs->diffopt, 0));
+ return diff_result_code(&revs->diffopt, 0);
}
#include "argv-array.h"
#include "graph.h"
#include "packfile.h"
+#include "parse-options.h"
#include "help.h"
+#include "fetch-object.h"
#ifdef NO_FAST_WORKING_DIRECTORY
#define FAST_WORKING_DIRECTORY 0
[DIFF_FILE_NEW_BOLD] = "newBold",
};
-static NORETURN void die_want_option(const char *option_name)
-{
- die(_("option '%s' requires a value"), option_name);
-}
-
define_list_config_array_extra(color_diff_slots, {"plain"});
static int parse_diff_color_slot(const char *var)
options->submodule_format = DIFF_SUBMODULE_SHORT;
else if (!strcmp(value, "diff"))
options->submodule_format = DIFF_SUBMODULE_INLINE_DIFF;
+ /*
+ * Please update $__git_diff_submodule_formats in
+ * git-completion.bash when you add new formats.
+ */
else
return -1;
return 0;
return XDF_PATIENCE_DIFF;
else if (!strcasecmp(value, "histogram"))
return XDF_HISTOGRAM_DIFF;
+ /*
+ * Please update $__git_diff_algorithms in git-completion.bash
+ * when you add new algorithms.
+ */
return -1;
}
return ws_blank_line(line, len, ecbdata->ws_rule);
}
-static void emit_add_line(const char *reset,
- struct emit_callback *ecbdata,
+static void emit_add_line(struct emit_callback *ecbdata,
const char *line, int len)
{
unsigned flags = WSEH_NEW | ecbdata->ws_rule;
emit_diff_symbol(ecbdata->opt, DIFF_SYMBOL_PLUS, line, len, flags);
}
-static void emit_del_line(const char *reset,
- struct emit_callback *ecbdata,
+static void emit_del_line(struct emit_callback *ecbdata,
const char *line, int len)
{
unsigned flags = WSEH_OLD | ecbdata->ws_rule;
emit_diff_symbol(ecbdata->opt, DIFF_SYMBOL_MINUS, line, len, flags);
}
-static void emit_context_line(const char *reset,
- struct emit_callback *ecbdata,
+static void emit_context_line(struct emit_callback *ecbdata,
const char *line, int len)
{
unsigned flags = WSEH_CONTEXT | ecbdata->ws_rule;
int prefix, const char *data, int size)
{
const char *endp = NULL;
- const char *reset = diff_get_color(ecb->color_diff, DIFF_RESET);
while (0 < size) {
int len;
len = endp ? (endp - data + 1) : size;
if (prefix != '+') {
ecb->lno_in_preimage++;
- emit_del_line(reset, ecb, data, len);
+ emit_del_line(ecb, data, len);
} else {
ecb->lno_in_postimage++;
- emit_add_line(reset, ecb, data, len);
+ emit_add_line(ecb, data, len);
}
size -= len;
data += len;
return msgbuf->buf;
}
-static unsigned long sane_truncate_line(struct emit_callback *ecb, char *line, unsigned long len)
+static unsigned long sane_truncate_line(char *line, unsigned long len)
{
const char *cp;
unsigned long allot;
static void fn_out_consume(void *priv, char *line, unsigned long len)
{
struct emit_callback *ecbdata = priv;
- const char *reset = diff_get_color(ecbdata->color_diff, DIFF_RESET);
struct diff_options *o = ecbdata->opt;
o->found_changes = 1;
if (line[0] == '@') {
if (ecbdata->diff_words)
diff_words_flush(ecbdata);
- len = sane_truncate_line(ecbdata, line, len);
+ len = sane_truncate_line(line, len);
find_lno(line, ecbdata);
emit_hunk_header(ecbdata, line, len);
return;
switch (line[0]) {
case '+':
ecbdata->lno_in_postimage++;
- emit_add_line(reset, ecbdata, line + 1, len - 1);
+ emit_add_line(ecbdata, line + 1, len - 1);
break;
case '-':
ecbdata->lno_in_preimage++;
- emit_del_line(reset, ecbdata, line + 1, len - 1);
+ emit_del_line(ecbdata, line + 1, len - 1);
break;
case ' ':
ecbdata->lno_in_postimage++;
ecbdata->lno_in_preimage++;
- emit_context_line(reset, ecbdata, line + 1, len - 1);
+ emit_context_line(ecbdata, line + 1, len - 1);
break;
default:
/* incomplete line at the end */
struct diff_filespec *one,
struct diff_filespec *two,
const char *xfrm_msg,
- int complete_rewrite,
struct diff_options *o)
{
struct argv_array argv = ARGV_ARRAY_INIT;
}
if (pgm) {
- run_external_diff(pgm, name, other, one, two, xfrm_msg,
- complete_rewrite, o);
+ run_external_diff(pgm, name, other, one, two, xfrm_msg, o);
return;
}
if (one && two)
builtin_checkdiff(name, other, attr_path, p->one, p->two, o);
}
+static void prep_parse_options(struct diff_options *options);
+
void repo_diff_setup(struct repository *r, struct diff_options *options)
{
memcpy(options, &default_diff_options, sizeof(*options));
options->color_moved = diff_color_moved_default;
options->color_moved_ws_handling = diff_color_moved_ws_default;
+
+ prep_parse_options(options);
}
void diff_setup_done(struct diff_options *options)
if (!options->use_color || external_diff())
options->color_moved = 0;
-}
-static int opt_arg(const char *arg, int arg_short, const char *arg_long, int *val)
-{
- char c, *eq;
- int len;
-
- if (*arg != '-')
- return 0;
- c = *++arg;
- if (!c)
- return 0;
- if (c == arg_short) {
- c = *++arg;
- if (!c)
- return 1;
- if (val && isdigit(c)) {
- char *end;
- int n = strtoul(arg, &end, 10);
- if (*end)
- return 0;
- *val = n;
- return 1;
- }
- return 0;
- }
- if (c != '-')
- return 0;
- arg++;
- eq = strchrnul(arg, '=');
- len = eq - arg;
- if (!len || strncmp(arg, arg_long, len))
- return 0;
- if (*eq) {
- int n;
- char *end;
- if (!isdigit(*++eq))
- return 0;
- n = strtoul(eq, &end, 10);
- if (*end)
- return 0;
- *val = n;
- }
- return 1;
-}
-
-static int diff_scoreopt_parse(const char *opt);
-
-static inline int short_opt(char opt, const char **argv,
- const char **optarg)
-{
- const char *arg = argv[0];
- if (arg[0] != '-' || arg[1] != opt)
- return 0;
- if (arg[2] != '\0') {
- *optarg = arg + 2;
- return 1;
- }
- if (!argv[1])
- die("Option '%c' requires a value", opt);
- *optarg = argv[1];
- return 2;
+ FREE_AND_NULL(options->parseopts);
}
int parse_long_opt(const char *opt, const char **argv,
return 2;
}
-static int stat_opt(struct diff_options *options, const char **av)
+static int diff_opt_stat(const struct option *opt, const char *value, int unset)
{
- const char *arg = av[0];
- char *end;
+ struct diff_options *options = opt->value;
int width = options->stat_width;
int name_width = options->stat_name_width;
int graph_width = options->stat_graph_width;
int count = options->stat_count;
- int argcount = 1;
+ char *end;
- if (!skip_prefix(arg, "--stat", &arg))
- BUG("stat option does not begin with --stat: %s", arg);
- end = (char *)arg;
+ BUG_ON_OPT_NEG(unset);
- switch (*arg) {
- case '-':
- if (skip_prefix(arg, "-width", &arg)) {
- if (*arg == '=')
- width = strtoul(arg + 1, &end, 10);
- else if (!*arg && !av[1])
- die_want_option("--stat-width");
- else if (!*arg) {
- width = strtoul(av[1], &end, 10);
- argcount = 2;
- }
- } else if (skip_prefix(arg, "-name-width", &arg)) {
- if (*arg == '=')
- name_width = strtoul(arg + 1, &end, 10);
- else if (!*arg && !av[1])
- die_want_option("--stat-name-width");
- else if (!*arg) {
- name_width = strtoul(av[1], &end, 10);
- argcount = 2;
- }
- } else if (skip_prefix(arg, "-graph-width", &arg)) {
- if (*arg == '=')
- graph_width = strtoul(arg + 1, &end, 10);
- else if (!*arg && !av[1])
- die_want_option("--stat-graph-width");
- else if (!*arg) {
- graph_width = strtoul(av[1], &end, 10);
- argcount = 2;
- }
- } else if (skip_prefix(arg, "-count", &arg)) {
- if (*arg == '=')
- count = strtoul(arg + 1, &end, 10);
- else if (!*arg && !av[1])
- die_want_option("--stat-count");
- else if (!*arg) {
- count = strtoul(av[1], &end, 10);
- argcount = 2;
- }
+ if (!strcmp(opt->long_name, "stat")) {
+ if (value) {
+ width = strtoul(value, &end, 10);
+ if (*end == ',')
+ name_width = strtoul(end+1, &end, 10);
+ if (*end == ',')
+ count = strtoul(end+1, &end, 10);
+ if (*end)
+ return error(_("invalid --stat value: %s"), value);
}
- break;
- case '=':
- width = strtoul(arg+1, &end, 10);
- if (*end == ',')
- name_width = strtoul(end+1, &end, 10);
- if (*end == ',')
- count = strtoul(end+1, &end, 10);
- }
+ } else if (!strcmp(opt->long_name, "stat-width")) {
+ width = strtoul(value, &end, 10);
+ if (*end)
+ return error(_("%s expects a numerical value"),
+ opt->long_name);
+ } else if (!strcmp(opt->long_name, "stat-name-width")) {
+ name_width = strtoul(value, &end, 10);
+ if (*end)
+ return error(_("%s expects a numerical value"),
+ opt->long_name);
+ } else if (!strcmp(opt->long_name, "stat-graph-width")) {
+ graph_width = strtoul(value, &end, 10);
+ if (*end)
+ return error(_("%s expects a numerical value"),
+ opt->long_name);
+ } else if (!strcmp(opt->long_name, "stat-count")) {
+ count = strtoul(value, &end, 10);
+ if (*end)
+ return error(_("%s expects a numerical value"),
+ opt->long_name);
+ } else
+ BUG("%s should not get here", opt->long_name);
- /* Important! This checks all the error cases! */
- if (*end)
- return 0;
options->output_format |= DIFF_FORMAT_DIFFSTAT;
options->stat_name_width = name_width;
options->stat_graph_width = graph_width;
options->stat_width = width;
options->stat_count = count;
- return argcount;
+ return 0;
}
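
The chained strtoul() calls above mean a single --stat value can carry up to three comma-separated numbers (width, name width, count). A standalone sketch of that split, outside of git:

#include <stdio.h>
#include <stdlib.h>

/* Split a --stat value of the form <width>[,<name-width>[,<count>]]. */
int main(void)
{
	const char *value = "80,30,16";		/* e.g. --stat=80,30,16 */
	char *end;
	unsigned long width, name_width = 0, count = 0;

	width = strtoul(value, &end, 10);
	if (*end == ',')
		name_width = strtoul(end + 1, &end, 10);
	if (*end == ',')
		count = strtoul(end + 1, &end, 10);
	if (*end) {
		fprintf(stderr, "invalid --stat value: %s\n", value);
		return 1;
	}
	printf("width=%lu name-width=%lu count=%lu\n", width, name_width, count);
	return 0;
}
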
static int parse_dirstat_opt(struct diff_options *options, const char *params)
return 1;
}
-static int parse_submodule_opt(struct diff_options *options, const char *value)
-{
- if (parse_submodule_params(options, value))
- die(_("Failed to parse --submodule option parameter: '%s'"),
- value);
- return 1;
-}
-
static const char diff_status_letters[] = {
DIFF_STATUS_ADDED,
DIFF_STATUS_COPIED,
return opt->filter & filter_bit[(int) status];
}
-static int parse_diff_filter_opt(const char *optarg, struct diff_options *opt)
+unsigned diff_filter_bit(char status)
{
+ prepare_filter_bits();
+ return filter_bit[(int) status];
+}
+
+static int diff_opt_diff_filter(const struct option *option,
+ const char *optarg, int unset)
+{
+ struct diff_options *opt = option->value;
int i, optch;
+ BUG_ON_OPT_NEG(unset);
prepare_filter_bits();
/*
bit = (0 <= optch && optch <= 'Z') ? filter_bit[optch] : 0;
if (!bit)
- return optarg[i];
+ return error(_("unknown change class '%c' in --diff-filter=%s"),
+ optarg[i], optarg);
if (negate)
opt->filter &= ~bit;
else
*fmt |= DIFF_FORMAT_PATCH;
}
-static int parse_ws_error_highlight_opt(struct diff_options *opt, const char *arg)
+static int diff_opt_ws_error_highlight(const struct option *option,
+ const char *arg, int unset)
{
+ struct diff_options *opt = option->value;
int val = parse_ws_error_highlight(arg);
- if (val < 0) {
- error("unknown value after ws-error-highlight=%.*s",
- -1 - val, arg);
- return 0;
- }
+ BUG_ON_OPT_NEG(unset);
+ if (val < 0)
+ return error(_("unknown value after ws-error-highlight=%.*s"),
+ -1 - val, arg);
opt->ws_error_highlight = val;
- return 1;
+ return 0;
}
-static int parse_objfind_opt(struct diff_options *opt, const char *arg)
+static int diff_opt_find_object(const struct option *option,
+ const char *arg, int unset)
{
+ struct diff_options *opt = option->value;
struct object_id oid;
+ BUG_ON_OPT_NEG(unset);
if (get_oid(arg, &oid))
- return error("unable to resolve '%s'", arg);
+ return error(_("unable to resolve '%s'"), arg);
if (!opt->objfind)
opt->objfind = xcalloc(1, sizeof(*opt->objfind));
opt->flags.recursive = 1;
opt->flags.tree_in_recursive = 1;
oidset_insert(opt->objfind, &oid);
- return 1;
+ return 0;
}
-int diff_opt_parse(struct diff_options *options,
- const char **av, int ac, const char *prefix)
+static int diff_opt_anchored(const struct option *opt,
+ const char *arg, int unset)
{
- const char *arg = av[0];
- const char *optarg;
- int argcount;
+ struct diff_options *options = opt->value;
- if (!prefix)
- prefix = "";
+ BUG_ON_OPT_NEG(unset);
+ options->xdl_opts = DIFF_WITH_ALG(options, PATIENCE_DIFF);
+ ALLOC_GROW(options->anchors, options->anchors_nr + 1,
+ options->anchors_alloc);
+ options->anchors[options->anchors_nr++] = xstrdup(arg);
+ return 0;
+}
- /* Output format options */
- if (!strcmp(arg, "-p") || !strcmp(arg, "-u") || !strcmp(arg, "--patch")
- || opt_arg(arg, 'U', "unified", &options->context))
- enable_patch_output(&options->output_format);
- else if (!strcmp(arg, "--raw"))
- options->output_format |= DIFF_FORMAT_RAW;
- else if (!strcmp(arg, "--patch-with-raw")) {
- enable_patch_output(&options->output_format);
- options->output_format |= DIFF_FORMAT_RAW;
- } else if (!strcmp(arg, "--numstat"))
- options->output_format |= DIFF_FORMAT_NUMSTAT;
- else if (!strcmp(arg, "--shortstat"))
- options->output_format |= DIFF_FORMAT_SHORTSTAT;
- else if (skip_prefix(arg, "-X", &arg) ||
- skip_to_optional_arg(arg, "--dirstat", &arg))
- return parse_dirstat_opt(options, arg);
- else if (!strcmp(arg, "--cumulative"))
- return parse_dirstat_opt(options, "cumulative");
- else if (skip_to_optional_arg(arg, "--dirstat-by-file", &arg)) {
- parse_dirstat_opt(options, "files");
- return parse_dirstat_opt(options, arg);
- }
- else if (!strcmp(arg, "--check"))
- options->output_format |= DIFF_FORMAT_CHECKDIFF;
- else if (!strcmp(arg, "--summary"))
- options->output_format |= DIFF_FORMAT_SUMMARY;
- else if (!strcmp(arg, "--patch-with-stat")) {
- enable_patch_output(&options->output_format);
- options->output_format |= DIFF_FORMAT_DIFFSTAT;
- } else if (!strcmp(arg, "--name-only"))
- options->output_format |= DIFF_FORMAT_NAME;
- else if (!strcmp(arg, "--name-status"))
- options->output_format |= DIFF_FORMAT_NAME_STATUS;
- else if (!strcmp(arg, "-s") || !strcmp(arg, "--no-patch"))
- options->output_format |= DIFF_FORMAT_NO_OUTPUT;
- else if (starts_with(arg, "--stat"))
- /* --stat, --stat-width, --stat-name-width, or --stat-count */
- return stat_opt(options, av);
- else if (!strcmp(arg, "--compact-summary")) {
- options->flags.stat_with_summary = 1;
- options->output_format |= DIFF_FORMAT_DIFFSTAT;
- } else if (!strcmp(arg, "--no-compact-summary"))
- options->flags.stat_with_summary = 0;
- else if (skip_prefix(arg, "--output-indicator-new=", &arg))
- options->output_indicators[OUTPUT_INDICATOR_NEW] = arg[0];
- else if (skip_prefix(arg, "--output-indicator-old=", &arg))
- options->output_indicators[OUTPUT_INDICATOR_OLD] = arg[0];
- else if (skip_prefix(arg, "--output-indicator-context=", &arg))
- options->output_indicators[OUTPUT_INDICATOR_CONTEXT] = arg[0];
-
- /* renames options */
- else if (starts_with(arg, "-B") ||
- skip_to_optional_arg(arg, "--break-rewrites", NULL)) {
- if ((options->break_opt = diff_scoreopt_parse(arg)) == -1)
- return error("invalid argument to -B: %s", arg+2);
- }
- else if (starts_with(arg, "-M") ||
- skip_to_optional_arg(arg, "--find-renames", NULL)) {
- if ((options->rename_score = diff_scoreopt_parse(arg)) == -1)
- return error("invalid argument to -M: %s", arg+2);
- options->detect_rename = DIFF_DETECT_RENAME;
- }
- else if (!strcmp(arg, "-D") || !strcmp(arg, "--irreversible-delete")) {
- options->irreversible_delete = 1;
- }
- else if (starts_with(arg, "-C") ||
- skip_to_optional_arg(arg, "--find-copies", NULL)) {
- if (options->detect_rename == DIFF_DETECT_COPY)
- options->flags.find_copies_harder = 1;
- if ((options->rename_score = diff_scoreopt_parse(arg)) == -1)
- return error("invalid argument to -C: %s", arg+2);
- options->detect_rename = DIFF_DETECT_COPY;
+static int diff_opt_binary(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+ enable_patch_output(&options->output_format);
+ options->flags.binary = 1;
+ return 0;
+}
+
+static int diff_opt_break_rewrites(const struct option *opt,
+ const char *arg, int unset)
+{
+ int *break_opt = opt->value;
+ int opt1, opt2;
+
+ BUG_ON_OPT_NEG(unset);
+ if (!arg)
+ arg = "";
+ opt1 = parse_rename_score(&arg);
+ if (*arg == 0)
+ opt2 = 0;
+ else if (*arg != '/')
+ return error(_("%s expects <n>/<m> form"), opt->long_name);
+ else {
+ arg++;
+ opt2 = parse_rename_score(&arg);
}
- else if (!strcmp(arg, "--no-renames"))
- options->detect_rename = 0;
- else if (!strcmp(arg, "--rename-empty"))
- options->flags.rename_empty = 1;
- else if (!strcmp(arg, "--no-rename-empty"))
- options->flags.rename_empty = 0;
- else if (skip_to_optional_arg_default(arg, "--relative", &arg, NULL)) {
- options->flags.relative_name = 1;
- if (arg)
- options->prefix = arg;
- }
-
- /* xdiff options */
- else if (!strcmp(arg, "--minimal"))
- DIFF_XDL_SET(options, NEED_MINIMAL);
- else if (!strcmp(arg, "--no-minimal"))
- DIFF_XDL_CLR(options, NEED_MINIMAL);
- else if (!strcmp(arg, "-w") || !strcmp(arg, "--ignore-all-space"))
- DIFF_XDL_SET(options, IGNORE_WHITESPACE);
- else if (!strcmp(arg, "-b") || !strcmp(arg, "--ignore-space-change"))
- DIFF_XDL_SET(options, IGNORE_WHITESPACE_CHANGE);
- else if (!strcmp(arg, "--ignore-space-at-eol"))
- DIFF_XDL_SET(options, IGNORE_WHITESPACE_AT_EOL);
- else if (!strcmp(arg, "--ignore-cr-at-eol"))
- DIFF_XDL_SET(options, IGNORE_CR_AT_EOL);
- else if (!strcmp(arg, "--ignore-blank-lines"))
- DIFF_XDL_SET(options, IGNORE_BLANK_LINES);
- else if (!strcmp(arg, "--indent-heuristic"))
- DIFF_XDL_SET(options, INDENT_HEURISTIC);
- else if (!strcmp(arg, "--no-indent-heuristic"))
- DIFF_XDL_CLR(options, INDENT_HEURISTIC);
- else if (!strcmp(arg, "--patience")) {
- int i;
- options->xdl_opts = DIFF_WITH_ALG(options, PATIENCE_DIFF);
- /*
- * Both --patience and --anchored use PATIENCE_DIFF
- * internally, so remove any anchors previously
- * specified.
- */
- for (i = 0; i < options->anchors_nr; i++)
- free(options->anchors[i]);
- options->anchors_nr = 0;
- } else if (!strcmp(arg, "--histogram"))
- options->xdl_opts = DIFF_WITH_ALG(options, HISTOGRAM_DIFF);
- else if ((argcount = parse_long_opt("diff-algorithm", av, &optarg))) {
- long value = parse_algorithm_value(optarg);
- if (value < 0)
- return error("option diff-algorithm accepts \"myers\", "
- "\"minimal\", \"patience\" and \"histogram\"");
- /* clear out previous settings */
- DIFF_XDL_CLR(options, NEED_MINIMAL);
- options->xdl_opts &= ~XDF_DIFF_ALGORITHM_MASK;
- options->xdl_opts |= value;
- return argcount;
- } else if (skip_prefix(arg, "--anchored=", &arg)) {
- options->xdl_opts = DIFF_WITH_ALG(options, PATIENCE_DIFF);
- ALLOC_GROW(options->anchors, options->anchors_nr + 1,
- options->anchors_alloc);
- options->anchors[options->anchors_nr++] = xstrdup(arg);
- }
-
- /* flags options */
- else if (!strcmp(arg, "--binary")) {
- enable_patch_output(&options->output_format);
- options->flags.binary = 1;
- }
- else if (!strcmp(arg, "--full-index"))
- options->flags.full_index = 1;
- else if (!strcmp(arg, "-a") || !strcmp(arg, "--text"))
- options->flags.text = 1;
- else if (!strcmp(arg, "-R"))
- options->flags.reverse_diff = 1;
- else if (!strcmp(arg, "--find-copies-harder"))
- options->flags.find_copies_harder = 1;
- else if (!strcmp(arg, "--follow"))
- options->flags.follow_renames = 1;
- else if (!strcmp(arg, "--no-follow")) {
- options->flags.follow_renames = 0;
- options->flags.default_follow_renames = 0;
- } else if (skip_to_optional_arg_default(arg, "--color", &arg, "always")) {
- int value = git_config_colorbool(NULL, arg);
- if (value < 0)
- return error("option `color' expects \"always\", \"auto\", or \"never\"");
- options->use_color = value;
- }
- else if (!strcmp(arg, "--no-color"))
- options->use_color = 0;
- else if (!strcmp(arg, "--color-moved")) {
+ if (*arg != 0)
+ return error(_("%s expects <n>/<m> form"), opt->long_name);
+ *break_opt = opt1 | (opt2 << 16);
+ return 0;
+}
+
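diff_opt_break_rewrites() above packs the two -B scores into one int: the first score in the low 16 bits, the optional second score shifted into the high bits. Below is a sketch of the "<n>/<m>" split and the packing, with plain strtoul() standing in for git's parse_rename_score(), which actually maps a percentage or fraction onto the internal score scale:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *arg = "75/80";    /* e.g. from -B75/80 */
    char *end;
    int opt1, opt2 = 0;

    opt1 = strtoul(arg, &end, 10);
    if (*end == '/')
        opt2 = strtoul(end + 1, &end, 10);
    if (*end)    /* anything else is rejected: "expects <n>/<m> form" */
        return 1;
    printf("packed break_opt = 0x%x\n", opt1 | (opt2 << 16));
    return 0;
}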
+static int diff_opt_char(const struct option *opt,
+ const char *arg, int unset)
+{
+ char *value = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (arg[1])
+ return error(_("%s expects a character, got '%s'"),
+ opt->long_name, arg);
+ *value = arg[0];
+ return 0;
+}
+
+static int diff_opt_color_moved(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ if (unset) {
+ options->color_moved = COLOR_MOVED_NO;
+ } else if (!arg) {
if (diff_color_moved_default)
options->color_moved = diff_color_moved_default;
if (options->color_moved == COLOR_MOVED_NO)
options->color_moved = COLOR_MOVED_DEFAULT;
- } else if (!strcmp(arg, "--no-color-moved"))
- options->color_moved = COLOR_MOVED_NO;
- else if (skip_prefix(arg, "--color-moved=", &arg)) {
+ } else {
int cm = parse_color_moved(arg);
if (cm < 0)
- return error("bad --color-moved argument: %s", arg);
+ return error(_("bad --color-moved argument: %s"), arg);
options->color_moved = cm;
- } else if (!strcmp(arg, "--no-color-moved-ws")) {
+ }
+ return 0;
+}
+
+static int diff_opt_color_moved_ws(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+ unsigned cm;
+
+ if (unset) {
options->color_moved_ws_handling = 0;
- } else if (skip_prefix(arg, "--color-moved-ws=", &arg)) {
- unsigned cm = parse_color_moved_ws(arg);
- if (cm & COLOR_MOVED_WS_ERROR)
- return -1;
- options->color_moved_ws_handling = cm;
- } else if (skip_to_optional_arg_default(arg, "--color-words", &options->word_regex, NULL)) {
- options->use_color = 1;
- options->word_diff = DIFF_WORDS_COLOR;
+ return 0;
}
- else if (!strcmp(arg, "--word-diff")) {
- if (options->word_diff == DIFF_WORDS_NONE)
- options->word_diff = DIFF_WORDS_PLAIN;
+
+ cm = parse_color_moved_ws(arg);
+ if (cm & COLOR_MOVED_WS_ERROR)
+ return error(_("invalid mode '%s' in --color-moved-ws"), arg);
+ options->color_moved_ws_handling = cm;
+ return 0;
+}
+
+static int diff_opt_color_words(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ options->use_color = 1;
+ options->word_diff = DIFF_WORDS_COLOR;
+ options->word_regex = arg;
+ return 0;
+}
+
+static int diff_opt_compact_summary(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_ARG(arg);
+ if (unset) {
+ options->flags.stat_with_summary = 0;
+ } else {
+ options->flags.stat_with_summary = 1;
+ options->output_format |= DIFF_FORMAT_DIFFSTAT;
+ }
+ return 0;
+}
+
+static int diff_opt_diff_algorithm(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+ long value = parse_algorithm_value(arg);
+
+ BUG_ON_OPT_NEG(unset);
+ if (value < 0)
+ return error(_("option diff-algorithm accepts \"myers\", "
+ "\"minimal\", \"patience\" and \"histogram\""));
+
+ /* clear out previous settings */
+ DIFF_XDL_CLR(options, NEED_MINIMAL);
+ options->xdl_opts &= ~XDF_DIFF_ALGORITHM_MASK;
+ options->xdl_opts |= value;
+ return 0;
+}
+
+static int diff_opt_dirstat(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (!strcmp(opt->long_name, "cumulative")) {
+ if (arg)
+			BUG("how come --cumulative takes a value?");
+ arg = "cumulative";
+ } else if (!strcmp(opt->long_name, "dirstat-by-file"))
+ parse_dirstat_opt(options, "files");
+ parse_dirstat_opt(options, arg ? arg : "");
+ return 0;
+}
+
+static int diff_opt_find_copies(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (!arg)
+ arg = "";
+ options->rename_score = parse_rename_score(&arg);
+ if (*arg != 0)
+ return error(_("invalid argument to %s"), opt->long_name);
+
+ if (options->detect_rename == DIFF_DETECT_COPY)
+ options->flags.find_copies_harder = 1;
+ else
+ options->detect_rename = DIFF_DETECT_COPY;
+
+ return 0;
+}
+
+static int diff_opt_find_renames(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (!arg)
+ arg = "";
+ options->rename_score = parse_rename_score(&arg);
+ if (*arg != 0)
+ return error(_("invalid argument to %s"), opt->long_name);
+
+ options->detect_rename = DIFF_DETECT_RENAME;
+ return 0;
+}
+
+static int diff_opt_follow(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_ARG(arg);
+ if (unset) {
+ options->flags.follow_renames = 0;
+ options->flags.default_follow_renames = 0;
+ } else {
+ options->flags.follow_renames = 1;
+ }
+ return 0;
+}
+
+static int diff_opt_ignore_submodules(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (!arg)
+ arg = "all";
+ options->flags.override_submodule_config = 1;
+ handle_ignore_submodules_arg(options, arg);
+ return 0;
+}
+
+static int diff_opt_line_prefix(const struct option *opt,
+ const char *optarg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ options->line_prefix = optarg;
+ options->line_prefix_length = strlen(options->line_prefix);
+ graph_setup_line_prefix(options);
+ return 0;
+}
+
+static int diff_opt_no_prefix(const struct option *opt,
+ const char *optarg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(optarg);
+ options->a_prefix = "";
+ options->b_prefix = "";
+ return 0;
+}
+
+static enum parse_opt_result diff_opt_output(struct parse_opt_ctx_t *ctx,
+ const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+ char *path;
+
+ BUG_ON_OPT_NEG(unset);
+ path = prefix_filename(ctx->prefix, arg);
+ options->file = xfopen(path, "w");
+ options->close_file = 1;
+ if (options->use_color != GIT_COLOR_ALWAYS)
+ options->use_color = GIT_COLOR_NEVER;
+ free(path);
+ return 0;
+}
+
+static int diff_opt_patience(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+ int i;
+
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+ options->xdl_opts = DIFF_WITH_ALG(options, PATIENCE_DIFF);
+ /*
+ * Both --patience and --anchored use PATIENCE_DIFF
+ * internally, so remove any anchors previously
+ * specified.
+ */
+ for (i = 0; i < options->anchors_nr; i++)
+ free(options->anchors[i]);
+ options->anchors_nr = 0;
+ return 0;
+}
+
+static int diff_opt_pickaxe_regex(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ options->pickaxe = arg;
+ options->pickaxe_opts |= DIFF_PICKAXE_KIND_G;
+ return 0;
+}
+
+static int diff_opt_pickaxe_string(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ options->pickaxe = arg;
+ options->pickaxe_opts |= DIFF_PICKAXE_KIND_S;
+ return 0;
+}
+
+static int diff_opt_relative(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ options->flags.relative_name = 1;
+ if (arg)
+ options->prefix = arg;
+ return 0;
+}
+
+static int diff_opt_submodule(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (!arg)
+ arg = "log";
+ if (parse_submodule_params(options, arg))
+ return error(_("failed to parse --submodule option parameter: '%s'"),
+ arg);
+ return 0;
+}
+
+static int diff_opt_textconv(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_ARG(arg);
+ if (unset) {
+ options->flags.allow_textconv = 0;
+ } else {
+ options->flags.allow_textconv = 1;
+ options->flags.textconv_set_via_cmdline = 1;
}
- else if (skip_prefix(arg, "--word-diff=", &arg)) {
+ return 0;
+}
+
+static int diff_opt_unified(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+ char *s;
+
+ BUG_ON_OPT_NEG(unset);
+
+ options->context = strtol(arg, &s, 10);
+ if (*s)
+ return error(_("%s expects a numerical value"), "--unified");
+ enable_patch_output(&options->output_format);
+
+ return 0;
+}
+
+static int diff_opt_word_diff(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (arg) {
if (!strcmp(arg, "plain"))
options->word_diff = DIFF_WORDS_PLAIN;
else if (!strcmp(arg, "color")) {
else if (!strcmp(arg, "none"))
options->word_diff = DIFF_WORDS_NONE;
else
- die("bad --word-diff argument: %s", arg);
- }
- else if ((argcount = parse_long_opt("word-diff-regex", av, &optarg))) {
+ return error(_("bad --word-diff argument: %s"), arg);
+ } else {
if (options->word_diff == DIFF_WORDS_NONE)
options->word_diff = DIFF_WORDS_PLAIN;
- options->word_regex = optarg;
- return argcount;
}
- else if (!strcmp(arg, "--exit-code"))
- options->flags.exit_with_status = 1;
- else if (!strcmp(arg, "--quiet"))
- options->flags.quick = 1;
- else if (!strcmp(arg, "--ext-diff"))
- options->flags.allow_external = 1;
- else if (!strcmp(arg, "--no-ext-diff"))
- options->flags.allow_external = 0;
- else if (!strcmp(arg, "--textconv")) {
- options->flags.allow_textconv = 1;
- options->flags.textconv_set_via_cmdline = 1;
- } else if (!strcmp(arg, "--no-textconv"))
- options->flags.allow_textconv = 0;
- else if (skip_to_optional_arg_default(arg, "--ignore-submodules", &arg, "all")) {
- options->flags.override_submodule_config = 1;
- handle_ignore_submodules_arg(options, arg);
- } else if (skip_to_optional_arg_default(arg, "--submodule", &arg, "log"))
- return parse_submodule_opt(options, arg);
- else if (skip_prefix(arg, "--ws-error-highlight=", &arg))
- return parse_ws_error_highlight_opt(options, arg);
- else if (!strcmp(arg, "--ita-invisible-in-index"))
- options->ita_invisible_in_index = 1;
- else if (!strcmp(arg, "--ita-visible-in-index"))
- options->ita_invisible_in_index = 0;
-
- /* misc options */
- else if (!strcmp(arg, "-z"))
- options->line_termination = 0;
- else if ((argcount = short_opt('l', av, &optarg))) {
- options->rename_limit = strtoul(optarg, NULL, 10);
- return argcount;
- }
- else if ((argcount = short_opt('S', av, &optarg))) {
- options->pickaxe = optarg;
- options->pickaxe_opts |= DIFF_PICKAXE_KIND_S;
- return argcount;
- } else if ((argcount = short_opt('G', av, &optarg))) {
- options->pickaxe = optarg;
- options->pickaxe_opts |= DIFF_PICKAXE_KIND_G;
- return argcount;
- }
- else if (!strcmp(arg, "--pickaxe-all"))
- options->pickaxe_opts |= DIFF_PICKAXE_ALL;
- else if (!strcmp(arg, "--pickaxe-regex"))
- options->pickaxe_opts |= DIFF_PICKAXE_REGEX;
- else if ((argcount = short_opt('O', av, &optarg))) {
- options->orderfile = prefix_filename(prefix, optarg);
- return argcount;
- } else if (skip_prefix(arg, "--find-object=", &arg))
- return parse_objfind_opt(options, arg);
- else if ((argcount = parse_long_opt("diff-filter", av, &optarg))) {
- int offending = parse_diff_filter_opt(optarg, options);
- if (offending)
- die("unknown change class '%c' in --diff-filter=%s",
- offending, optarg);
- return argcount;
- }
- else if (!strcmp(arg, "--no-abbrev"))
- options->abbrev = 0;
- else if (!strcmp(arg, "--abbrev"))
- options->abbrev = DEFAULT_ABBREV;
- else if (skip_prefix(arg, "--abbrev=", &arg)) {
- options->abbrev = strtoul(arg, NULL, 10);
- if (options->abbrev < MINIMUM_ABBREV)
- options->abbrev = MINIMUM_ABBREV;
- else if (the_hash_algo->hexsz < options->abbrev)
- options->abbrev = the_hash_algo->hexsz;
- }
- else if ((argcount = parse_long_opt("src-prefix", av, &optarg))) {
- options->a_prefix = optarg;
- return argcount;
- }
- else if ((argcount = parse_long_opt("line-prefix", av, &optarg))) {
- options->line_prefix = optarg;
- options->line_prefix_length = strlen(options->line_prefix);
- graph_setup_line_prefix(options);
- return argcount;
- }
- else if ((argcount = parse_long_opt("dst-prefix", av, &optarg))) {
- options->b_prefix = optarg;
- return argcount;
- }
- else if (!strcmp(arg, "--no-prefix"))
- options->a_prefix = options->b_prefix = "";
- else if (opt_arg(arg, '\0', "inter-hunk-context",
- &options->interhunkcontext))
- ;
- else if (!strcmp(arg, "-W"))
- options->flags.funccontext = 1;
- else if (!strcmp(arg, "--function-context"))
- options->flags.funccontext = 1;
- else if (!strcmp(arg, "--no-function-context"))
- options->flags.funccontext = 0;
- else if ((argcount = parse_long_opt("output", av, &optarg))) {
- char *path = prefix_filename(prefix, optarg);
- options->file = xfopen(path, "w");
- options->close_file = 1;
- if (options->use_color != GIT_COLOR_ALWAYS)
- options->use_color = GIT_COLOR_NEVER;
- free(path);
- return argcount;
- } else
- return 0;
- return 1;
+ return 0;
+}
+
+static int diff_opt_word_diff_regex(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (options->word_diff == DIFF_WORDS_NONE)
+ options->word_diff = DIFF_WORDS_PLAIN;
+ options->word_regex = arg;
+ return 0;
+}
+
+static void prep_parse_options(struct diff_options *options)
+{
+ struct option parseopts[] = {
+ OPT_GROUP(N_("Diff output format options")),
+ OPT_BITOP('p', "patch", &options->output_format,
+ N_("generate patch"),
+ DIFF_FORMAT_PATCH, DIFF_FORMAT_NO_OUTPUT),
+ OPT_BIT_F('s', "no-patch", &options->output_format,
+ N_("suppress diff output"),
+ DIFF_FORMAT_NO_OUTPUT, PARSE_OPT_NONEG),
+ OPT_BITOP('u', NULL, &options->output_format,
+ N_("generate patch"),
+ DIFF_FORMAT_PATCH, DIFF_FORMAT_NO_OUTPUT),
+ OPT_CALLBACK_F('U', "unified", options, N_("<n>"),
+ N_("generate diffs with <n> lines context"),
+ PARSE_OPT_NONEG, diff_opt_unified),
+ OPT_BOOL('W', "function-context", &options->flags.funccontext,
+		 N_("show the whole function as context")),
+ OPT_BIT_F(0, "raw", &options->output_format,
+ N_("generate the diff in raw format"),
+ DIFF_FORMAT_RAW, PARSE_OPT_NONEG),
+ OPT_BITOP(0, "patch-with-raw", &options->output_format,
+ N_("synonym for '-p --raw'"),
+ DIFF_FORMAT_PATCH | DIFF_FORMAT_RAW,
+ DIFF_FORMAT_NO_OUTPUT),
+ OPT_BITOP(0, "patch-with-stat", &options->output_format,
+ N_("synonym for '-p --stat'"),
+ DIFF_FORMAT_PATCH | DIFF_FORMAT_DIFFSTAT,
+ DIFF_FORMAT_NO_OUTPUT),
+ OPT_BIT_F(0, "numstat", &options->output_format,
+ N_("machine friendly --stat"),
+ DIFF_FORMAT_NUMSTAT, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "shortstat", &options->output_format,
+ N_("output only the last line of --stat"),
+ DIFF_FORMAT_SHORTSTAT, PARSE_OPT_NONEG),
+ OPT_CALLBACK_F('X', "dirstat", options, N_("<param1,param2>..."),
+ N_("output the distribution of relative amount of changes for each sub-directory"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_dirstat),
+ OPT_CALLBACK_F(0, "cumulative", options, NULL,
+ N_("synonym for --dirstat=cumulative"),
+ PARSE_OPT_NONEG | PARSE_OPT_NOARG,
+ diff_opt_dirstat),
+ OPT_CALLBACK_F(0, "dirstat-by-file", options, N_("<param1,param2>..."),
+ N_("synonym for --dirstat=files,param1,param2..."),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_dirstat),
+ OPT_BIT_F(0, "check", &options->output_format,
+ N_("warn if changes introduce conflict markers or whitespace errors"),
+ DIFF_FORMAT_CHECKDIFF, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "summary", &options->output_format,
+ N_("condensed summary such as creations, renames and mode changes"),
+ DIFF_FORMAT_SUMMARY, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "name-only", &options->output_format,
+ N_("show only names of changed files"),
+ DIFF_FORMAT_NAME, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "name-status", &options->output_format,
+ N_("show only names and status of changed files"),
+ DIFF_FORMAT_NAME_STATUS, PARSE_OPT_NONEG),
+ OPT_CALLBACK_F(0, "stat", options, N_("<width>[,<name-width>[,<count>]]"),
+ N_("generate diffstat"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG, diff_opt_stat),
+ OPT_CALLBACK_F(0, "stat-width", options, N_("<width>"),
+ N_("generate diffstat with a given width"),
+ PARSE_OPT_NONEG, diff_opt_stat),
+ OPT_CALLBACK_F(0, "stat-name-width", options, N_("<width>"),
+ N_("generate diffstat with a given name width"),
+ PARSE_OPT_NONEG, diff_opt_stat),
+ OPT_CALLBACK_F(0, "stat-graph-width", options, N_("<width>"),
+ N_("generate diffstat with a given graph width"),
+ PARSE_OPT_NONEG, diff_opt_stat),
+ OPT_CALLBACK_F(0, "stat-count", options, N_("<count>"),
+ N_("generate diffstat with limited lines"),
+ PARSE_OPT_NONEG, diff_opt_stat),
+ OPT_CALLBACK_F(0, "compact-summary", options, NULL,
+ N_("generate compact summary in diffstat"),
+ PARSE_OPT_NOARG, diff_opt_compact_summary),
+ OPT_CALLBACK_F(0, "binary", options, NULL,
+ N_("output a binary diff that can be applied"),
+ PARSE_OPT_NONEG | PARSE_OPT_NOARG, diff_opt_binary),
+ OPT_BOOL(0, "full-index", &options->flags.full_index,
+ N_("show full pre- and post-image object names on the \"index\" lines")),
+ OPT_COLOR_FLAG(0, "color", &options->use_color,
+ N_("show colored diff")),
+ OPT_CALLBACK_F(0, "ws-error-highlight", options, N_("<kind>"),
+ N_("highlight whitespace errors in the 'context', 'old' or 'new' lines in the diff"),
+ PARSE_OPT_NONEG, diff_opt_ws_error_highlight),
+ OPT_SET_INT('z', NULL, &options->line_termination,
+ N_("do not munge pathnames and use NULs as output field terminators in --raw or --numstat"),
+ 0),
+ OPT__ABBREV(&options->abbrev),
+ OPT_STRING_F(0, "src-prefix", &options->a_prefix, N_("<prefix>"),
+ N_("show the given source prefix instead of \"a/\""),
+ PARSE_OPT_NONEG),
+ OPT_STRING_F(0, "dst-prefix", &options->b_prefix, N_("<prefix>"),
+		     N_("show the given destination prefix instead of \"b/\""),
+ PARSE_OPT_NONEG),
+ OPT_CALLBACK_F(0, "line-prefix", options, N_("<prefix>"),
+ N_("prepend an additional prefix to every line of output"),
+ PARSE_OPT_NONEG, diff_opt_line_prefix),
+ OPT_CALLBACK_F(0, "no-prefix", options, NULL,
+ N_("do not show any source or destination prefix"),
+ PARSE_OPT_NONEG | PARSE_OPT_NOARG, diff_opt_no_prefix),
+ OPT_INTEGER_F(0, "inter-hunk-context", &options->interhunkcontext,
+ N_("show context between diff hunks up to the specified number of lines"),
+ PARSE_OPT_NONEG),
+ OPT_CALLBACK_F(0, "output-indicator-new",
+ &options->output_indicators[OUTPUT_INDICATOR_NEW],
+ N_("<char>"),
+ N_("specify the character to indicate a new line instead of '+'"),
+ PARSE_OPT_NONEG, diff_opt_char),
+ OPT_CALLBACK_F(0, "output-indicator-old",
+ &options->output_indicators[OUTPUT_INDICATOR_OLD],
+ N_("<char>"),
+ N_("specify the character to indicate an old line instead of '-'"),
+ PARSE_OPT_NONEG, diff_opt_char),
+ OPT_CALLBACK_F(0, "output-indicator-context",
+ &options->output_indicators[OUTPUT_INDICATOR_CONTEXT],
+ N_("<char>"),
+ N_("specify the character to indicate a context instead of ' '"),
+ PARSE_OPT_NONEG, diff_opt_char),
+
+ OPT_GROUP(N_("Diff rename options")),
+ OPT_CALLBACK_F('B', "break-rewrites", &options->break_opt, N_("<n>[/<m>]"),
+ N_("break complete rewrite changes into pairs of delete and create"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_break_rewrites),
+ OPT_CALLBACK_F('M', "find-renames", options, N_("<n>"),
+ N_("detect renames"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_find_renames),
+ OPT_SET_INT_F('D', "irreversible-delete", &options->irreversible_delete,
+ N_("omit the preimage for deletes"),
+ 1, PARSE_OPT_NONEG),
+ OPT_CALLBACK_F('C', "find-copies", options, N_("<n>"),
+ N_("detect copies"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_find_copies),
+ OPT_BOOL(0, "find-copies-harder", &options->flags.find_copies_harder,
+ N_("use unmodified files as source to find copies")),
+ OPT_SET_INT_F(0, "no-renames", &options->detect_rename,
+ N_("disable rename detection"),
+ 0, PARSE_OPT_NONEG),
+ OPT_BOOL(0, "rename-empty", &options->flags.rename_empty,
+ N_("use empty blobs as rename source")),
+ OPT_CALLBACK_F(0, "follow", options, NULL,
+ N_("continue listing the history of a file beyond renames"),
+ PARSE_OPT_NOARG, diff_opt_follow),
+ OPT_INTEGER('l', NULL, &options->rename_limit,
+ N_("prevent rename/copy detection if the number of rename/copy targets exceeds given limit")),
+
+ OPT_GROUP(N_("Diff algorithm options")),
+ OPT_BIT(0, "minimal", &options->xdl_opts,
+ N_("produce the smallest possible diff"),
+ XDF_NEED_MINIMAL),
+ OPT_BIT_F('w', "ignore-all-space", &options->xdl_opts,
+ N_("ignore whitespace when comparing lines"),
+ XDF_IGNORE_WHITESPACE, PARSE_OPT_NONEG),
+ OPT_BIT_F('b', "ignore-space-change", &options->xdl_opts,
+ N_("ignore changes in amount of whitespace"),
+ XDF_IGNORE_WHITESPACE_CHANGE, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "ignore-space-at-eol", &options->xdl_opts,
+ N_("ignore changes in whitespace at EOL"),
+ XDF_IGNORE_WHITESPACE_AT_EOL, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "ignore-cr-at-eol", &options->xdl_opts,
+			  N_("ignore carriage-return at the end of line"),
+ XDF_IGNORE_CR_AT_EOL, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "ignore-blank-lines", &options->xdl_opts,
+ N_("ignore changes whose lines are all blank"),
+ XDF_IGNORE_BLANK_LINES, PARSE_OPT_NONEG),
+ OPT_BIT(0, "indent-heuristic", &options->xdl_opts,
+ N_("heuristic to shift diff hunk boundaries for easy reading"),
+ XDF_INDENT_HEURISTIC),
+ OPT_CALLBACK_F(0, "patience", options, NULL,
+ N_("generate diff using the \"patience diff\" algorithm"),
+ PARSE_OPT_NONEG | PARSE_OPT_NOARG,
+ diff_opt_patience),
+ OPT_BITOP(0, "histogram", &options->xdl_opts,
+ N_("generate diff using the \"histogram diff\" algorithm"),
+ XDF_HISTOGRAM_DIFF, XDF_DIFF_ALGORITHM_MASK),
+ OPT_CALLBACK_F(0, "diff-algorithm", options, N_("<algorithm>"),
+ N_("choose a diff algorithm"),
+ PARSE_OPT_NONEG, diff_opt_diff_algorithm),
+ OPT_CALLBACK_F(0, "anchored", options, N_("<text>"),
+ N_("generate diff using the \"anchored diff\" algorithm"),
+ PARSE_OPT_NONEG, diff_opt_anchored),
+ OPT_CALLBACK_F(0, "word-diff", options, N_("<mode>"),
+ N_("show word diff, using <mode> to delimit changed words"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG, diff_opt_word_diff),
+ OPT_CALLBACK_F(0, "word-diff-regex", options, N_("<regex>"),
+ N_("use <regex> to decide what a word is"),
+ PARSE_OPT_NONEG, diff_opt_word_diff_regex),
+ OPT_CALLBACK_F(0, "color-words", options, N_("<regex>"),
+ N_("equivalent to --word-diff=color --word-diff-regex=<regex>"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG, diff_opt_color_words),
+ OPT_CALLBACK_F(0, "color-moved", options, N_("<mode>"),
+			       N_("moved lines of code are colored differently"),
+ PARSE_OPT_OPTARG, diff_opt_color_moved),
+ OPT_CALLBACK_F(0, "color-moved-ws", options, N_("<mode>"),
+			       N_("how whitespace is ignored in --color-moved"),
+ 0, diff_opt_color_moved_ws),
+
+ OPT_GROUP(N_("Diff other options")),
+ OPT_CALLBACK_F(0, "relative", options, N_("<prefix>"),
+ N_("when run from subdir, exclude changes outside and show relative paths"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_relative),
+ OPT_BOOL('a', "text", &options->flags.text,
+ N_("treat all files as text")),
+ OPT_BOOL('R', NULL, &options->flags.reverse_diff,
+ N_("swap two inputs, reverse the diff")),
+ OPT_BOOL(0, "exit-code", &options->flags.exit_with_status,
+ N_("exit with 1 if there were differences, 0 otherwise")),
+ OPT_BOOL(0, "quiet", &options->flags.quick,
+ N_("disable all output of the program")),
+ OPT_BOOL(0, "ext-diff", &options->flags.allow_external,
+ N_("allow an external diff helper to be executed")),
+ OPT_CALLBACK_F(0, "textconv", options, NULL,
+ N_("run external text conversion filters when comparing binary files"),
+ PARSE_OPT_NOARG, diff_opt_textconv),
+ OPT_CALLBACK_F(0, "ignore-submodules", options, N_("<when>"),
+ N_("ignore changes to submodules in the diff generation"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_ignore_submodules),
+ OPT_CALLBACK_F(0, "submodule", options, N_("<format>"),
+ N_("specify how differences in submodules are shown"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_submodule),
+ OPT_SET_INT_F(0, "ita-invisible-in-index", &options->ita_invisible_in_index,
+ N_("hide 'git add -N' entries from the index"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "ita-visible-in-index", &options->ita_invisible_in_index,
+ N_("treat 'git add -N' entries as real in the index"),
+ 0, PARSE_OPT_NONEG),
+ OPT_CALLBACK_F('S', NULL, options, N_("<string>"),
+ N_("look for differences that change the number of occurrences of the specified string"),
+ 0, diff_opt_pickaxe_string),
+ OPT_CALLBACK_F('G', NULL, options, N_("<regex>"),
+ N_("look for differences that change the number of occurrences of the specified regex"),
+ 0, diff_opt_pickaxe_regex),
+ OPT_BIT_F(0, "pickaxe-all", &options->pickaxe_opts,
+ N_("show all changes in the changeset with -S or -G"),
+ DIFF_PICKAXE_ALL, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "pickaxe-regex", &options->pickaxe_opts,
+ N_("treat <string> in -S as extended POSIX regular expression"),
+ DIFF_PICKAXE_REGEX, PARSE_OPT_NONEG),
+ OPT_FILENAME('O', NULL, &options->orderfile,
+ N_("control the order in which files appear in the output")),
+ OPT_CALLBACK_F(0, "find-object", options, N_("<object-id>"),
+ N_("look for differences that change the number of occurrences of the specified object"),
+ PARSE_OPT_NONEG, diff_opt_find_object),
+ OPT_CALLBACK_F(0, "diff-filter", options, N_("[(A|C|D|M|R|T|U|X|B)...[*]]"),
+ N_("select files by diff type"),
+ PARSE_OPT_NONEG, diff_opt_diff_filter),
+ { OPTION_CALLBACK, 0, "output", options, N_("<file>"),
+		  N_("output to a specific file"),
+ PARSE_OPT_NONEG, NULL, 0, diff_opt_output },
+
+ OPT_END()
+ };
+
+ ALLOC_ARRAY(options->parseopts, ARRAY_SIZE(parseopts));
+ memcpy(options->parseopts, parseopts, sizeof(parseopts));
+}
+
+int diff_opt_parse(struct diff_options *options,
+ const char **av, int ac, const char *prefix)
+{
+ if (!prefix)
+ prefix = "";
+
+ ac = parse_options(ac, av, prefix, options->parseopts, NULL,
+ PARSE_OPT_KEEP_DASHDASH |
+ PARSE_OPT_KEEP_UNKNOWN |
+ PARSE_OPT_NO_INTERNAL_HELP |
+ PARSE_OPT_ONE_SHOT |
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ return ac;
}
int parse_rename_score(const char **cp_p)
return (int)((num >= scale) ? MAX_SCORE : (MAX_SCORE * num / scale));
}
-static int diff_scoreopt_parse(const char *opt)
-{
- int opt1, opt2, cmd;
-
- if (*opt++ != '-')
- return -1;
- cmd = *opt++;
- if (cmd == '-') {
- /* convert the long-form arguments into short-form versions */
- if (skip_prefix(opt, "break-rewrites", &opt)) {
- if (*opt == 0 || *opt++ == '=')
- cmd = 'B';
- } else if (skip_prefix(opt, "find-copies", &opt)) {
- if (*opt == 0 || *opt++ == '=')
- cmd = 'C';
- } else if (skip_prefix(opt, "find-renames", &opt)) {
- if (*opt == 0 || *opt++ == '=')
- cmd = 'M';
- }
- }
- if (cmd != 'M' && cmd != 'C' && cmd != 'B')
- return -1; /* that is not a -M, -C, or -B option */
-
- opt1 = parse_rename_score(&opt);
- if (cmd != 'B')
- opt2 = 0;
- else {
- if (*opt == 0)
- opt2 = 0;
- else if (*opt != '/')
- return -1; /* we expect -B80/99 or -B80 */
- else {
- opt++;
- opt2 = parse_rename_score(&opt);
- }
- }
- if (*opt != 0)
- return -1;
- return opt1 | (opt2 << 16);
-}
-
struct diff_queue_struct diff_queued_diff;
void diff_q(struct diff_queue_struct *queue, struct diff_filepair *dp)
return strcmp(name_a, name_b);
}
-void diffcore_fix_diff_index(struct diff_options *options)
+void diffcore_fix_diff_index(void)
{
struct diff_queue_struct *q = &diff_queued_diff;
QSORT(q->queue, q->nr, diffnamecmp);
}
+static void add_if_missing(struct repository *r,
+ struct oid_array *to_fetch,
+ const struct diff_filespec *filespec)
+{
+ if (filespec && filespec->oid_valid &&
+ oid_object_info_extended(r, &filespec->oid, NULL,
+ OBJECT_INFO_FOR_PREFETCH))
+ oid_array_append(to_fetch, &filespec->oid);
+}
+
void diffcore_std(struct diff_options *options)
{
+ if (options->repo == the_repository &&
+ repository_format_partial_clone) {
+ /*
+ * Prefetch the diff pairs that are about to be flushed.
+ */
+ int i;
+ struct diff_queue_struct *q = &diff_queued_diff;
+ struct oid_array to_fetch = OID_ARRAY_INIT;
+
+ for (i = 0; i < q->nr; i++) {
+ struct diff_filepair *p = q->queue[i];
+ add_if_missing(options->repo, &to_fetch, p->one);
+ add_if_missing(options->repo, &to_fetch, p->two);
+ }
+ if (to_fetch.nr)
+ /*
+ * NEEDSWORK: Consider deduplicating the OIDs sent.
+ */
+ fetch_objects(repository_format_partial_clone,
+ to_fetch.oid, to_fetch.nr);
+ oid_array_clear(&to_fetch);
+ }
+
/* NOTE please keep the following in sync with diff_tree_combined() */
if (options->skip_stat_unmatch)
diffcore_skip_stat_unmatch(options);
#include "object.h"
#include "oidset.h"
-struct rev_info;
+struct combine_diff_path;
+struct commit;
+struct diff_filespec;
struct diff_options;
struct diff_queue_struct;
-struct strbuf;
-struct diff_filespec;
-struct userdiff_driver;
struct oid_array;
-struct commit;
-struct combine_diff_path;
+struct option;
struct repository;
+struct rev_info;
+struct strbuf;
+struct userdiff_driver;
typedef int (*pathchange_fn_t)(struct diff_options *options,
struct combine_diff_path *path);
#define DIFF_FLAGS_INIT { 0 }
struct diff_flags {
- unsigned recursive:1;
- unsigned tree_in_recursive:1;
- unsigned binary:1;
- unsigned text:1;
- unsigned full_index:1;
- unsigned silent_on_remove:1;
- unsigned find_copies_harder:1;
- unsigned follow_renames:1;
- unsigned rename_empty:1;
- unsigned has_changes:1;
- unsigned quick:1;
- unsigned no_index:1;
- unsigned allow_external:1;
- unsigned exit_with_status:1;
- unsigned reverse_diff:1;
- unsigned check_failed:1;
- unsigned relative_name:1;
- unsigned ignore_submodules:1;
- unsigned dirstat_cumulative:1;
- unsigned dirstat_by_file:1;
- unsigned allow_textconv:1;
- unsigned textconv_set_via_cmdline:1;
- unsigned diff_from_contents:1;
- unsigned dirty_submodules:1;
- unsigned ignore_untracked_in_submodules:1;
- unsigned ignore_dirty_submodules:1;
- unsigned override_submodule_config:1;
- unsigned dirstat_by_line:1;
- unsigned funccontext:1;
- unsigned default_follow_renames:1;
- unsigned stat_with_summary:1;
- unsigned suppress_diff_headers:1;
- unsigned dual_color_diffed_diffs:1;
+ unsigned recursive;
+ unsigned tree_in_recursive;
+ unsigned binary;
+ unsigned text;
+ unsigned full_index;
+ unsigned silent_on_remove;
+ unsigned find_copies_harder;
+ unsigned follow_renames;
+ unsigned rename_empty;
+ unsigned has_changes;
+ unsigned quick;
+ unsigned no_index;
+ unsigned allow_external;
+ unsigned exit_with_status;
+ unsigned reverse_diff;
+ unsigned check_failed;
+ unsigned relative_name;
+ unsigned ignore_submodules;
+ unsigned dirstat_cumulative;
+ unsigned dirstat_by_file;
+ unsigned allow_textconv;
+ unsigned textconv_set_via_cmdline;
+ unsigned diff_from_contents;
+ unsigned dirty_submodules;
+ unsigned ignore_untracked_in_submodules;
+ unsigned ignore_dirty_submodules;
+ unsigned override_submodule_config;
+ unsigned dirstat_by_line;
+ unsigned funccontext;
+ unsigned default_follow_renames;
+ unsigned stat_with_summary;
+ unsigned suppress_diff_headers;
+ unsigned dual_color_diffed_diffs;
};
static inline void diff_flags_or(struct diff_flags *a,
unsigned color_moved_ws_handling;
struct repository *repo;
+ struct option *parseopts;
};
+unsigned diff_filter_bit(char status);
+
void diff_emit_submodule_del(struct diff_options *o, const char *line);
void diff_emit_submodule_add(struct diff_options *o, const char *line);
void diff_emit_submodule_untracked(struct diff_options *o, const char *path);
char status;
unsigned int mode;
struct object_id oid;
+ struct strbuf path;
} parent[FLEX_ARRAY];
};
#define combine_diff_path_size(n, l) \
#define DIFF_PICKAXE_IGNORE_CASE 32
void diffcore_std(struct diff_options *);
-void diffcore_fix_diff_index(struct diff_options *);
+void diffcore_fix_diff_index(void);
#define COMMON_DIFF_OPTIONS_HELP \
"\ncommon diff options:\n" \
int diff_result_code(struct diff_options *, int);
-void diff_no_index(struct repository *, struct rev_info *, int, const char **);
+int diff_no_index(struct rev_info *,
+ int implicit_no_index, int, const char **);
int index_differs_from(struct repository *r, const char *def,
const struct diff_flags *flags,
}
int report_path_error(const char *ps_matched,
- const struct pathspec *pathspec,
- const char *prefix)
+ const struct pathspec *pathspec)
{
/*
* Make sure all pathspec matched; otherwise it is an error.
struct stat_data info_exclude_stat;
struct stat_data excludes_file_stat;
uint32_t dir_flags;
- unsigned char info_exclude_sha1[20];
- unsigned char excludes_file_sha1[20];
- char exclude_per_dir[FLEX_ARRAY];
};
#define ouc_offset(x) offsetof(struct ondisk_untracked_cache, x)
-#define ouc_size(len) (ouc_offset(exclude_per_dir) + len + 1)
struct write_data {
int index; /* number of written untracked_cache_dir */
struct write_data wd;
unsigned char varbuf[16];
int varint_len;
- size_t len = strlen(untracked->exclude_per_dir);
+ const unsigned hashsz = the_hash_algo->rawsz;
- FLEX_ALLOC_MEM(ouc, exclude_per_dir, untracked->exclude_per_dir, len);
+ ouc = xcalloc(1, sizeof(*ouc));
stat_data_to_disk(&ouc->info_exclude_stat, &untracked->ss_info_exclude.stat);
stat_data_to_disk(&ouc->excludes_file_stat, &untracked->ss_excludes_file.stat);
- hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.oid.hash);
- hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.oid.hash);
ouc->dir_flags = htonl(untracked->dir_flags);
varint_len = encode_varint(untracked->ident.len, varbuf);
strbuf_add(out, varbuf, varint_len);
strbuf_addbuf(out, &untracked->ident);
- strbuf_add(out, ouc, ouc_size(len));
+ strbuf_add(out, ouc, sizeof(*ouc));
+ strbuf_add(out, untracked->ss_info_exclude.oid.hash, hashsz);
+ strbuf_add(out, untracked->ss_excludes_file.oid.hash, hashsz);
+ strbuf_add(out, untracked->exclude_per_dir, strlen(untracked->exclude_per_dir) + 1);
FREE_AND_NULL(ouc);
if (!untracked->root) {
int ident_len;
ssize_t len;
const char *exclude_per_dir;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ const unsigned offset = sizeof(struct ondisk_untracked_cache);
+ const unsigned exclude_per_dir_offset = offset + 2 * hashsz;
if (sz <= 1 || end[-1] != '\0')
return NULL;
ident = (const char *)next;
next += ident_len;
- if (next + ouc_size(0) > end)
+ if (next + exclude_per_dir_offset + 1 > end)
return NULL;
uc = xcalloc(1, sizeof(*uc));
strbuf_add(&uc->ident, ident, ident_len);
load_oid_stat(&uc->ss_info_exclude,
next + ouc_offset(info_exclude_stat),
- next + ouc_offset(info_exclude_sha1));
+ next + offset);
load_oid_stat(&uc->ss_excludes_file,
next + ouc_offset(excludes_file_stat),
- next + ouc_offset(excludes_file_sha1));
+ next + offset + hashsz);
uc->dir_flags = get_be32(next + ouc_offset(dir_flags));
- exclude_per_dir = (const char *)next + ouc_offset(exclude_per_dir);
+ exclude_per_dir = (const char *)next + exclude_per_dir_offset;
uc->exclude_per_dir = xstrdup(exclude_per_dir);
/* NUL after exclude_per_dir is covered by sizeof(*ouc) */
- next += ouc_size(strlen(exclude_per_dir));
+ next += exclude_per_dir_offset + strlen(exclude_per_dir) + 1;
if (next >= end)
goto done2;
const struct pathspec *pathspec,
const char *name, int namelen,
int prefix, char *seen, int is_dir);
-extern int report_path_error(const char *ps_matched, const struct pathspec *pathspec, const char *prefix);
+extern int report_path_error(const char *ps_matched, const struct pathspec *pathspec);
extern int within_depth(const char *name, int namelen, int depth, int max_depth);
extern int fill_directory(struct dir_struct *dir,
p.argv = args;
p.env = env;
p.use_shell = 1;
+ p.trace2_child_class = "editor";
if (start_command(&p) < 0)
return error("unable to start editor '%s'", editor);
static struct strbuf path = STRBUF_INIT;
struct stat st;
+ if (ce->ce_flags & CE_WT_REMOVE) {
+ if (topath)
+ /*
+ * No content and thus no path to create, so we have
+ * no pathname to return.
+ */
+ BUG("Can't remove entry to a path");
+ unlink_entry(ce);
+ return 0;
+ }
+
if (topath)
return write_entry(ce, topath, state, 1);
(*nr_checkouts)++;
return write_entry(ce, path.buf, state, 0);
}
+
+void unlink_entry(const struct cache_entry *ce)
+{
+ const struct submodule *sub = submodule_from_ce(ce);
+ if (sub) {
+ /* state.force is set at the caller. */
+ submodule_move_head(ce->name, "HEAD", NULL,
+ SUBMODULE_MOVE_HEAD_FORCE);
+ }
+ if (!check_leading_path(ce->name, ce_namelen(ce)))
+ return;
+ if (remove_or_warn(ce->ce_mode, ce->name))
+ return;
+ schedule_dir_for_removal(ce->name, ce_namelen(ce));
+}
return -1;
}
+ trace2_cmd_path(buf->buf);
+
return 0;
}
*/
#define NO_DELTA S_ISUID
+/*
+ * The amount of additional space required in order to write an object into the
+ * current pack. This is the hash lengths at the end of the pack, plus the
+ * length of one object ID.
+ */
+#define PACK_SIZE_THRESHOLD (the_hash_algo->rawsz * 3)
+
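A quick, illustrative check of the arithmetic (not part of the patch): with SHA-1's 20-byte raw hash the threshold comes out to 60 bytes, exactly the literal that the auto-checkpoint hunks below stop hard-coding; with SHA-256 it grows to 96.

#include <stdio.h>

int main(void)
{
    unsigned sha1_rawsz = 20, sha256_rawsz = 32;

    printf("SHA-1 threshold:   %u\n", sha1_rawsz * 3);      /* 60 */
    printf("SHA-256 threshold: %u\n", sha256_rawsz * 3);    /* 96 */
    return 0;
}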
struct object_entry {
struct pack_idx_entry idx;
struct object_entry *next;
if (c != last)
die("internal consistency error creating the index");
- tmpfile = write_idx_file(NULL, idx, object_count, &pack_idx_opts, pack_data->sha1);
+ tmpfile = write_idx_file(NULL, idx, object_count, &pack_idx_opts,
+ pack_data->hash);
free(idx);
return tmpfile;
}
struct strbuf name = STRBUF_INIT;
int keep_fd;
- odb_pack_name(&name, pack_data->sha1, "keep");
+ odb_pack_name(&name, pack_data->hash, "keep");
keep_fd = odb_pack_keep(name.buf);
if (keep_fd < 0)
die_errno("cannot create keep file");
if (close(keep_fd))
die_errno("failed to write keep file");
- odb_pack_name(&name, pack_data->sha1, "pack");
+ odb_pack_name(&name, pack_data->hash, "pack");
if (finalize_object_file(pack_data->pack_name, name.buf))
die("cannot store pack file");
- odb_pack_name(&name, pack_data->sha1, "idx");
+ odb_pack_name(&name, pack_data->hash, "idx");
if (finalize_object_file(curr_index_name, name.buf))
die("cannot store index file");
free((void *)curr_index_name);
for (k = 0; k < pack_id; k++) {
struct packed_git *p = all_packs[k];
- odb_pack_name(&name, p->sha1, "keep");
+ odb_pack_name(&name, p->hash, "keep");
unlink_or_warn(name.buf);
}
strbuf_release(&name);
close_pack_windows(pack_data);
finalize_hashfile(pack_file, cur_pack_oid.hash, 0);
- fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1,
- pack_data->pack_name, object_count,
- cur_pack_oid.hash, pack_size);
+ fixup_pack_header_footer(pack_data->pack_fd, pack_data->hash,
+ pack_data->pack_name, object_count,
+ cur_pack_oid.hash, pack_size);
if (object_count <= unpack_limit) {
if (!loosen_small_pack(pack_data)) {
git_deflate_end(&s);
/* Determine if we should auto-checkpoint. */
- if ((max_packsize && (pack_size + 60 + s.total_out) > max_packsize)
- || (pack_size + 60 + s.total_out) < pack_size) {
+ if ((max_packsize
+ && (pack_size + PACK_SIZE_THRESHOLD + s.total_out) > max_packsize)
+ || (pack_size + PACK_SIZE_THRESHOLD + s.total_out) < pack_size) {
/* This new object needs to *not* have the current pack_id. */
e->pack_id = pack_id + 1;
int status = Z_OK;
/* Determine if we should auto-checkpoint. */
- if ((max_packsize && (pack_size + 60 + len) > max_packsize)
- || (pack_size + 60 + len) < pack_size)
+ if ((max_packsize
+ && (pack_size + PACK_SIZE_THRESHOLD + len) > max_packsize)
+ || (pack_size + PACK_SIZE_THRESHOLD + len) < pack_size)
cycle_packfile();
hashfile_checkpoint(pack_file, &checkpoint);
c += e->name->str_len + 1;
hashcpy(e->versions[0].oid.hash, (unsigned char *)c);
hashcpy(e->versions[1].oid.hash, (unsigned char *)c);
- c += GIT_SHA1_RAWSZ;
+ c += the_hash_algo->rawsz;
}
free(buf);
}
strbuf_addf(b, "%o %s%c",
(unsigned int)(e->versions[v].mode & ~NO_DELTA),
e->name->str_dat, '\0');
- strbuf_add(b, e->versions[v].oid.hash, GIT_SHA1_RAWSZ);
+ strbuf_add(b, e->versions[v].oid.hash, the_hash_algo->rawsz);
}
}
}
for (;;) {
- const char *p;
-
if (unread_command_buf) {
unread_command_buf = 0;
} else {
rc->prev->next = rc;
cmd_tail = rc;
}
- if (skip_prefix(command_buf.buf, "get-mark ", &p)) {
- parse_get_mark(p);
- continue;
- }
- if (skip_prefix(command_buf.buf, "cat-blob ", &p)) {
- parse_cat_blob(p);
- continue;
- }
if (command_buf.buf[0] == '#')
continue;
return 0;
unsigned int i, tmp_hex_oid_len, tmp_fullpath_len;
uintmax_t num_notes = 0;
struct object_id oid;
- char realpath[60];
+ /* hex oid + '/' between each pair of hex digits + NUL */
+ char realpath[GIT_MAX_HEXSZ + ((GIT_MAX_HEXSZ / 2) - 1) + 1];
+ const unsigned hexsz = the_hash_algo->hexsz;
if (!root->tree)
load_tree(root);
* of 2 chars.
*/
if (!e->versions[1].mode ||
- tmp_hex_oid_len > GIT_SHA1_HEXSZ ||
+ tmp_hex_oid_len > hexsz ||
e->name->str_len % 2)
continue;
tmp_fullpath_len += e->name->str_len;
fullpath[tmp_fullpath_len] = '\0';
- if (tmp_hex_oid_len == GIT_SHA1_HEXSZ && !get_oid_hex(hex_oid, &oid)) {
+ if (tmp_hex_oid_len == hexsz && !get_oid_hex(hex_oid, &oid)) {
/* This is a note entry */
if (fanout == 0xff) {
/* Counting mode, no rename */
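The realpath sizing above replaces the old fixed char realpath[60] with a bound derived from the notes fanout layout: the hex object name with a '/' between each pair of digits, plus a terminating NUL. A small sketch, not part of the patch, reproducing that arithmetic for both hash sizes:

#include <stdio.h>

/* hex oid + '/' between each pair of hex digits + NUL */
static unsigned fanout_path_len(unsigned hexsz)
{
    return hexsz + (hexsz / 2 - 1) + 1;
}

int main(void)
{
    printf("SHA-1:   %u\n", fanout_path_len(40));    /* 60, the old fixed size */
    printf("SHA-256: %u\n", fanout_path_len(64));    /* 96 */
    return 0;
}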
strbuf_addstr(&uq, p);
p = uq.buf;
}
- read_next_command();
- parse_and_store_blob(&last_blob, &oid, 0);
+ while (read_next_command() != EOF) {
+ const char *v;
+ if (skip_prefix(command_buf.buf, "cat-blob ", &v))
+ parse_cat_blob(v);
+ else {
+ parse_and_store_blob(&last_blob, &oid, 0);
+ break;
+ }
+ }
} else {
enum object_type expected = S_ISDIR(mode) ?
OBJ_TREE: OBJ_BLOB;
struct object_entry *oe;
struct branch *s;
struct object_id oid, commit_oid;
- char path[60];
+ char path[GIT_MAX_RAWSZ * 3];
uint16_t inline_data = 0;
unsigned char new_fanout;
char *buf = read_object_with_reference(&commit_oid,
commit_type, &size,
&commit_oid);
- if (!buf || size < 46)
+ if (!buf || size < the_hash_algo->hexsz + 6)
die("Not a valid commit: %s", p);
free(buf);
} else
static void parse_from_commit(struct branch *b, char *buf, unsigned long size)
{
- if (!buf || size < GIT_SHA1_HEXSZ + 6)
+ if (!buf || size < the_hash_algo->hexsz + 6)
die("Not a valid commit: %s", oid_to_hex(&b->oid));
if (memcmp("tree ", buf, 5)
|| get_oid_hex(buf + 5, &b->branch_tree.versions[1].oid))
char *buf = read_object_with_reference(&n->oid,
commit_type,
&size, &n->oid);
- if (!buf || size < 46)
+ if (!buf || size < the_hash_algo->hexsz + 6)
die("Not a valid commit: %s", from);
free(buf);
} else
file_change_deleteall(b);
else if (skip_prefix(command_buf.buf, "ls ", &v))
parse_ls(v, b);
+ else if (skip_prefix(command_buf.buf, "cat-blob ", &v))
+ parse_cat_blob(v);
else {
unread_command_buf = 1;
break;
die("Unknown mark: %s", command_buf.buf);
xsnprintf(output, sizeof(output), "%s\n", oid_to_hex(&oe->idx.oid));
- cat_blob_write(output, GIT_SHA1_HEXSZ + 1);
+ cat_blob_write(output, the_hash_algo->hexsz + 1);
}
static void parse_cat_blob(const char *p)
{
unsigned long size;
char *buf = NULL;
+ const unsigned hexsz = the_hash_algo->hexsz;
+
if (!oe) {
enum object_type type = oid_object_info(the_repository, oid,
NULL);
/* Peel one layer. */
switch (oe->type) {
case OBJ_TAG:
- if (size < GIT_SHA1_HEXSZ + strlen("object ") ||
+ if (size < hexsz + strlen("object ") ||
get_oid_hex(buf + strlen("object "), oid))
die("Invalid SHA1 in tag: %s", command_buf.buf);
break;
case OBJ_COMMIT:
- if (size < GIT_SHA1_HEXSZ + strlen("tree ") ||
+ if (size < hexsz + strlen("tree ") ||
get_oid_hex(buf + strlen("tree "), oid))
die("Invalid SHA1 in commit: %s", command_buf.buf);
}
return e;
}
-static void print_ls(int mode, const unsigned char *sha1, const char *path)
+static void print_ls(int mode, const unsigned char *hash, const char *path)
{
static struct strbuf line = STRBUF_INIT;
/* mode SP type SP object_name TAB path LF */
strbuf_reset(&line);
strbuf_addf(&line, "%06o %s %s\t",
- mode & ~NO_DELTA, type, sha1_to_hex(sha1));
+ mode & ~NO_DELTA, type, hash_to_hex(hash));
quote_c_style(path, &line, NULL, 0);
strbuf_addch(&line, '\n');
}
const char *v;
if (!strcmp("blob", command_buf.buf))
parse_new_blob();
- else if (skip_prefix(command_buf.buf, "ls ", &v))
- parse_ls(v, NULL);
else if (skip_prefix(command_buf.buf, "commit ", &v))
parse_new_commit(v);
else if (skip_prefix(command_buf.buf, "tag ", &v))
parse_new_tag(v);
else if (skip_prefix(command_buf.buf, "reset ", &v))
parse_reset_branch(v);
+ else if (skip_prefix(command_buf.buf, "ls ", &v))
+ parse_ls(v, NULL);
+ else if (skip_prefix(command_buf.buf, "cat-blob ", &v))
+ parse_cat_blob(v);
+ else if (skip_prefix(command_buf.buf, "get-mark ", &v))
+ parse_get_mark(v);
else if (!strcmp("checkpoint", command_buf.buf))
parse_checkpoint();
else if (!strcmp("done", command_buf.buf))
if (args->stateless_rpc) {
send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
packet_flush(fd);
- } else
- write_or_die(fd, buf->buf, buf->len);
+ } else {
+ if (write_in_full(fd, buf->buf, buf->len) < 0)
+ die_errno(_("unable to write to remote"));
+ }
}
static void insert_one_alternate_object(struct fetch_negotiator *negotiator,
/* Send request */
packet_buf_flush(&req_buf);
- write_or_die(fd_out, req_buf.buf, req_buf.len);
+ if (write_in_full(fd_out, req_buf.buf, req_buf.len) < 0)
+ die_errno(_("unable to write request to remote"));
strbuf_release(&req_buf);
return ret;
}
static void receive_shallow_info(struct fetch_pack_args *args,
- struct packet_reader *reader)
+ struct packet_reader *reader,
+ struct oid_array *shallows,
+ struct shallow_info *si)
{
- int line_received = 0;
+ int unshallow_received = 0;
process_section_header(reader, "shallow-info", 0);
while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
if (skip_prefix(reader->line, "shallow ", &arg)) {
if (get_oid_hex(arg, &oid))
die(_("invalid shallow line: %s"), reader->line);
- register_shallow(the_repository, &oid);
- line_received = 1;
+ oid_array_append(shallows, &oid);
continue;
}
if (skip_prefix(reader->line, "unshallow ", &arg)) {
die(_("error in object: %s"), reader->line);
if (unregister_shallow(&oid))
die(_("no shallow found: %s"), reader->line);
- line_received = 1;
+ unshallow_received = 1;
continue;
}
die(_("expected shallow/unshallow, got %s"), reader->line);
reader->status != PACKET_READ_DELIM)
die(_("error processing shallow info: %d"), reader->status);
- if (line_received) {
+ if (args->deepen || unshallow_received) {
+ /*
+ * Treat these as shallow lines caused by our depth settings.
+ * In v0, these lines cannot cause refs to be rejected; do the
+ * same.
+ */
+ int i;
+
+ for (i = 0; i < shallows->nr; i++)
+ register_shallow(the_repository, &shallows->oid[i]);
setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
NULL);
args->deepen = 1;
+ } else if (shallows->nr) {
+ /*
+ * Treat these as shallow lines caused by the remote being
+ * shallow. In v0, remote refs that reach these objects are
+ * rejected (unless --update-shallow is set); do the same.
+ */
+ prepare_shallow_info(si, shallows);
+ if (si->nr_ours || si->nr_theirs)
+ alternate_shallow_file =
+ setup_temporary_shallow(si->shallow);
+ else
+ alternate_shallow_file = NULL;
} else {
alternate_shallow_file = NULL;
}
}
+static int cmp_name_ref(const void *name, const void *ref)
+{
+ return strcmp(name, (*(struct ref **)ref)->name);
+}
+
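receive_wanted_refs() below now resolves each wanted-ref with bsearch() instead of a linear scan. The comparator dereferences twice because the sought array holds pointers to refs, so bsearch() hands the callback a struct ref **; bsearch() also requires the array to already be sorted by name, which the caller is expected to arrange. A self-contained sketch of the pattern, using a cut-down stand-in for git's struct ref:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ref { const char *name; };    /* stand-in, not git's real struct ref */

static int cmp_name_ref(const void *name, const void *ref)
{
    /* each array element is a struct ref *, so bsearch passes a struct ref ** */
    return strcmp(name, (*(struct ref *const *)ref)->name);
}

int main(void)
{
    struct ref maint = { "refs/heads/maint" };
    struct ref master = { "refs/heads/master" };
    struct ref *sought[] = { &maint, &master };    /* already sorted by name */
    struct ref **found;

    found = bsearch("refs/heads/master", sought,
                    sizeof(sought) / sizeof(*sought), sizeof(*sought),
                    cmp_name_ref);
    printf("%s\n", found ? (*found)->name : "not found");
    return 0;
}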
static void receive_wanted_refs(struct packet_reader *reader,
struct ref **sought, int nr_sought)
{
while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
struct object_id oid;
const char *end;
- int i;
+ struct ref **found;
if (parse_oid_hex(reader->line, &oid, &end) || *end++ != ' ')
die(_("expected wanted-ref, got '%s'"), reader->line);
- for (i = 0; i < nr_sought; i++) {
- if (!strcmp(end, sought[i]->name)) {
- oidcpy(&sought[i]->old_oid, &oid);
- break;
- }
- }
-
- if (i == nr_sought)
+ found = bsearch(end, sought, nr_sought, sizeof(*sought),
+ cmp_name_ref);
+ if (!found)
die(_("unexpected wanted-ref: '%s'"), reader->line);
+ oidcpy(&(*found)->old_oid, &oid);
}
if (reader->status != PACKET_READ_DELIM)
int fd[2],
const struct ref *orig_ref,
struct ref **sought, int nr_sought,
+ struct oid_array *shallows,
+ struct shallow_info *si,
char **pack_lockfile)
{
struct ref *ref = copy_ref_list(orig_ref);
case FETCH_GET_PACK:
/* Check for shallow-info section */
if (process_section_header(&reader, "shallow-info", 1))
- receive_shallow_info(args, &reader);
+ receive_shallow_info(args, &reader, shallows, si);
if (process_section_header(&reader, "wanted-refs", 1))
receive_wanted_refs(&reader, sought, nr_sought);
}
struct ref *fetch_pack(struct fetch_pack_args *args,
- int fd[], struct child_process *conn,
+ int fd[],
const struct ref *ref,
- const char *dest,
struct ref **sought, int nr_sought,
struct oid_array *shallow,
char **pack_lockfile,
{
struct ref *ref_cpy;
struct shallow_info si;
+ struct oid_array shallows_scratch = OID_ARRAY_INIT;
fetch_pack_setup();
if (nr_sought)
packet_flush(fd[1]);
die(_("no matching remote head"));
}
- prepare_shallow_info(&si, shallow);
- if (version == protocol_v2)
+ if (version == protocol_v2) {
+ if (shallow->nr)
+ BUG("Protocol V2 does not provide shallows at this point in the fetch");
+ memset(&si, 0, sizeof(si));
ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
+ &shallows_scratch, &si,
pack_lockfile);
- else
+ } else {
+ prepare_shallow_info(&si, shallow);
ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
&si, pack_lockfile);
+ }
reprepare_packed_git(the_repository);
if (!args->cloning && args->deepen) {
update_shallow(args, sought, nr_sought, &si);
cleanup:
clear_shallow_info(&si);
+ oid_array_clear(&shallows_scratch);
return ref_cpy;
}
* marked as such.
*/
struct ref *fetch_pack(struct fetch_pack_args *args,
- int fd[], struct child_process *conn,
+ int fd[],
const struct ref *ref,
- const char *dest,
struct ref **sought,
int nr_sought,
struct oid_array *shallow,
extern int cmd_main(int, const char **);
+/*
+ * Intercept all calls to exit() and route them to trace2 to
+ * optionally emit a message before calling the real exit().
+ */
+int trace2_cmd_exit_fl(const char *file, int line, int code);
+#define exit(code) exit(trace2_cmd_exit_fl(__FILE__, __LINE__, (code)))
+
/*
* You can mark a stack variable with UNLEAK(var) to avoid it being
* reported as a leak by tools like LSAN or valgrind. The argument
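
The exit() interception added above is a plain preprocessor trick: every later exit(code) call is rewritten so the file, line, and exit code pass through trace2_cmd_exit_fl() first, and whatever that function returns is what the real exit() receives. A minimal standalone sketch of the same pattern follows; the log_exit() helper is a hypothetical stand-in for the trace2 call, not part of Git:

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Hypothetical stand-in for trace2_cmd_exit_fl(): report the call site,
     * then hand the exit code back unchanged so the real exit() receives it.
     */
    static int log_exit(const char *file, int line, int code)
    {
            fprintf(stderr, "exiting with %d at %s:%d\n", code, file, line);
            return code;
    }

    /*
     * From here on, every exit(code) expands to exit(log_exit(...)).  The
     * "exit" inside the replacement list is not expanded again (function-like
     * macros are never re-expanded recursively), so the real library function
     * still terminates the process.
     */
    #define exit(code) exit(log_exit(__FILE__, __LINE__, (code)))

    int main(void)
    {
            exit(0);
    }

Because the macro lives in git-compat-util.h, every translation unit that includes it picks up the instrumented exit() without further changes.
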
+++ /dev/null
-#!/bin/sh
-#
-# Copyright (c) 2005 Junio C Hamano.
-#
-
-SUBDIRECTORY_OK=Yes
-OPTIONS_KEEPDASHDASH=
-OPTIONS_STUCKLONG=t
-OPTIONS_SPEC="\
-git rebase [-i] [options] [--exec <cmd>] [--onto <newbase>] [<upstream>] [<branch>]
-git rebase [-i] [options] [--exec <cmd>] [--onto <newbase>] --root [<branch>]
-git rebase --continue | --abort | --skip | --edit-todo
---
- Available options are
-v,verbose! display a diffstat of what changed upstream
-q,quiet! be quiet. implies --no-stat
-autostash automatically stash/stash pop before and after
-fork-point use 'merge-base --fork-point' to refine upstream
-onto=! rebase onto given branch instead of upstream
-r,rebase-merges? try to rebase merges instead of skipping them
-p,preserve-merges! try to recreate merges instead of ignoring them
-s,strategy=! use the given merge strategy
-X,strategy-option=! pass the argument through to the merge strategy
-no-ff! cherry-pick all commits, even if unchanged
-f,force-rebase! cherry-pick all commits, even if unchanged
-m,merge! use merging strategies to rebase
-i,interactive! let the user edit the list of commits to rebase
-x,exec=! add exec lines after each commit of the editable list
-k,keep-empty preserve empty commits during rebase
-allow-empty-message allow rebasing commits with empty messages
-stat! display a diffstat of what changed upstream
-n,no-stat! do not show diffstat of what changed upstream
-verify allow pre-rebase hook to run
-rerere-autoupdate allow rerere to update index with resolved conflicts
-root! rebase all reachable commits up to the root(s)
-autosquash move commits that begin with squash!/fixup! under -i
-signoff add a Signed-off-by: line to each commit
-committer-date-is-author-date! passed to 'git am'
-ignore-date! passed to 'git am'
-whitespace=! passed to 'git apply'
-ignore-whitespace! passed to 'git apply'
-C=! passed to 'git apply'
-S,gpg-sign? GPG-sign commits
- Actions:
-continue! continue
-abort! abort and check out the original branch
-skip! skip current patch and continue
-edit-todo! edit the todo list during an interactive rebase
-quit! abort but keep HEAD where it is
-show-current-patch! show the patch file being applied or merged
-reschedule-failed-exec automatically reschedule failed exec commands
-"
-. git-sh-setup
-set_reflog_action rebase
-require_work_tree_exists
-cd_to_toplevel
-
-LF='
-'
-ok_to_skip_pre_rebase=
-
-squash_onto=
-unset onto
-unset restrict_revision
-cmd=
-strategy=
-strategy_opts=
-do_merge=
-merge_dir="$GIT_DIR"/rebase-merge
-apply_dir="$GIT_DIR"/rebase-apply
-verbose=
-diffstat=
-test "$(git config --bool rebase.stat)" = true && diffstat=t
-autostash="$(git config --bool rebase.autostash || echo false)"
-fork_point=auto
-git_am_opt=
-git_format_patch_opt=
-rebase_root=
-force_rebase=
-allow_rerere_autoupdate=
-# Non-empty if a rebase was in progress when 'git rebase' was invoked
-in_progress=
-# One of {am, merge, interactive}
-type=
-# One of {"$GIT_DIR"/rebase-apply, "$GIT_DIR"/rebase-merge}
-state_dir=
-# One of {'', continue, skip, abort}, as parsed from command line
-action=
-rebase_merges=
-rebase_cousins=
-preserve_merges=
-autosquash=
-keep_empty=
-allow_empty_message=--allow-empty-message
-signoff=
-reschedule_failed_exec=
-test "$(git config --bool rebase.autosquash)" = "true" && autosquash=t
-case "$(git config --bool commit.gpgsign)" in
-true) gpg_sign_opt=-S ;;
-*) gpg_sign_opt= ;;
-esac
-test "$(git config --bool rebase.reschedulefailedexec)" = "true" &&
-reschedule_failed_exec=--reschedule-failed-exec
-. git-rebase--common
-
-read_basic_state () {
- test -f "$state_dir/head-name" &&
- test -f "$state_dir/onto" &&
- head_name=$(cat "$state_dir"/head-name) &&
- onto=$(cat "$state_dir"/onto) &&
- # We always write to orig-head, but interactive rebase used to write to
- # head. Fall back to reading from head to cover for the case that the
- # user upgraded git with an ongoing interactive rebase.
- if test -f "$state_dir"/orig-head
- then
- orig_head=$(cat "$state_dir"/orig-head)
- else
- orig_head=$(cat "$state_dir"/head)
- fi &&
- test -f "$state_dir"/quiet && GIT_QUIET=t
- test -f "$state_dir"/verbose && verbose=t
- test -f "$state_dir"/strategy && strategy="$(cat "$state_dir"/strategy)"
- test -f "$state_dir"/strategy_opts &&
- strategy_opts="$(cat "$state_dir"/strategy_opts)"
- test -f "$state_dir"/allow_rerere_autoupdate &&
- allow_rerere_autoupdate="$(cat "$state_dir"/allow_rerere_autoupdate)"
- test -f "$state_dir"/gpg_sign_opt &&
- gpg_sign_opt="$(cat "$state_dir"/gpg_sign_opt)"
- test -f "$state_dir"/signoff && {
- signoff="$(cat "$state_dir"/signoff)"
- force_rebase=t
- }
- test -f "$state_dir"/reschedule-failed-exec &&
- reschedule_failed_exec=t
-}
-
-finish_rebase () {
- rm -f "$(git rev-parse --git-path REBASE_HEAD)"
- apply_autostash &&
- { git gc --auto || true; } &&
- rm -rf "$state_dir"
-}
-
-run_interactive () {
- GIT_CHERRY_PICK_HELP="$resolvemsg"
- export GIT_CHERRY_PICK_HELP
-
- test -n "$keep_empty" && keep_empty="--keep-empty"
- test -n "$rebase_merges" && rebase_merges="--rebase-merges"
- test -n "$rebase_cousins" && rebase_cousins="--rebase-cousins"
- test -n "$autosquash" && autosquash="--autosquash"
- test -n "$verbose" && verbose="--verbose"
- test -n "$force_rebase" && force_rebase="--no-ff"
- test -n "$restrict_revision" && \
- restrict_revision="--restrict-revision=^$restrict_revision"
- test -n "$upstream" && upstream="--upstream=$upstream"
- test -n "$onto" && onto="--onto=$onto"
- test -n "$squash_onto" && squash_onto="--squash-onto=$squash_onto"
- test -n "$onto_name" && onto_name="--onto-name=$onto_name"
- test -n "$head_name" && head_name="--head-name=$head_name"
- test -n "$strategy" && strategy="--strategy=$strategy"
- test -n "$strategy_opts" && strategy_opts="--strategy-opts=$strategy_opts"
- test -n "$switch_to" && switch_to="--switch-to=$switch_to"
- test -n "$cmd" && cmd="--cmd=$cmd"
- test -n "$action" && action="--$action"
-
- exec git rebase--interactive "$action" "$keep_empty" "$rebase_merges" "$rebase_cousins" \
- "$upstream" "$onto" "$squash_onto" "$restrict_revision" \
- "$allow_empty_message" "$autosquash" "$verbose" \
- "$force_rebase" "$onto_name" "$head_name" "$strategy" \
- "$strategy_opts" "$cmd" "$switch_to" \
- "$allow_rerere_autoupdate" "$gpg_sign_opt" "$signoff" \
- "$reschedule_failed_exec"
-}
-
-run_specific_rebase () {
- if [ "$interactive_rebase" = implied ]; then
- GIT_SEQUENCE_EDITOR=:
- export GIT_SEQUENCE_EDITOR
- autosquash=
- fi
-
- if test -n "$interactive_rebase" -a -z "$preserve_merges"
- then
- run_interactive
- else
- . git-rebase--$type
-
- if test -z "$preserve_merges"
- then
- git_rebase__$type
- else
- git_rebase__preserve_merges
- fi
- fi
-
- ret=$?
- if test $ret -eq 0
- then
- finish_rebase
- elif test $ret -eq 2 # special exit status for rebase -p
- then
- apply_autostash &&
- rm -rf "$state_dir" &&
- die "Nothing to do"
- fi
- exit $ret
-}
-
-run_pre_rebase_hook () {
- if test -z "$ok_to_skip_pre_rebase" &&
- test -x "$(git rev-parse --git-path hooks/pre-rebase)"
- then
- "$(git rev-parse --git-path hooks/pre-rebase)" ${1+"$@"} ||
- die "$(gettext "The pre-rebase hook refused to rebase.")"
- fi
-}
-
-test -f "$apply_dir"/applying &&
- die "$(gettext "It looks like 'git am' is in progress. Cannot rebase.")"
-
-if test -d "$apply_dir"
-then
- type=am
- state_dir="$apply_dir"
-elif test -d "$merge_dir"
-then
- type=interactive
- if test -d "$merge_dir"/rewritten
- then
- type=preserve-merges
- interactive_rebase=explicit
- preserve_merges=t
- elif test -f "$merge_dir"/interactive
- then
- interactive_rebase=explicit
- fi
- state_dir="$merge_dir"
-fi
-test -n "$type" && in_progress=t
-
-total_argc=$#
-while test $# != 0
-do
- case "$1" in
- --no-verify)
- ok_to_skip_pre_rebase=yes
- ;;
- --verify)
- ok_to_skip_pre_rebase=
- ;;
- --continue|--skip|--abort|--quit|--edit-todo|--show-current-patch)
- test $total_argc -eq 2 || usage
- action=${1##--}
- ;;
- --onto=*)
- onto="${1#--onto=}"
- ;;
- --exec=*)
- cmd="${cmd}exec ${1#--exec=}${LF}"
- test -z "$interactive_rebase" && interactive_rebase=implied
- ;;
- --interactive)
- interactive_rebase=explicit
- ;;
- --keep-empty)
- keep_empty=yes
- ;;
- --allow-empty-message)
- allow_empty_message=--allow-empty-message
- ;;
- --no-keep-empty)
- keep_empty=
- ;;
- --rebase-merges)
- rebase_merges=t
- test -z "$interactive_rebase" && interactive_rebase=implied
- ;;
- --rebase-merges=*)
- rebase_merges=t
- case "${1#*=}" in
- rebase-cousins) rebase_cousins=t;;
- no-rebase-cousins) rebase_cousins=;;
- *) die "Unknown mode: $1";;
- esac
- test -z "$interactive_rebase" && interactive_rebase=implied
- ;;
- --preserve-merges)
- preserve_merges=t
- test -z "$interactive_rebase" && interactive_rebase=implied
- ;;
- --autosquash)
- autosquash=t
- ;;
- --no-autosquash)
- autosquash=
- ;;
- --fork-point)
- fork_point=t
- ;;
- --no-fork-point)
- fork_point=
- ;;
- --merge)
- do_merge=t
- ;;
- --strategy-option=*)
- strategy_opts="$strategy_opts $(git rev-parse --sq-quote "--${1#--strategy-option=}" | sed -e s/^.//)"
- do_merge=t
- test -z "$strategy" && strategy=recursive
- ;;
- --strategy=*)
- strategy="${1#--strategy=}"
- do_merge=t
- ;;
- --no-stat)
- diffstat=
- ;;
- --stat)
- diffstat=t
- ;;
- --autostash)
- autostash=true
- ;;
- --no-autostash)
- autostash=false
- ;;
- --verbose)
- verbose=t
- diffstat=t
- GIT_QUIET=
- ;;
- --quiet)
- GIT_QUIET=t
- git_am_opt="$git_am_opt -q"
- verbose=
- diffstat=
- ;;
- --whitespace=*)
- git_am_opt="$git_am_opt --whitespace=${1#--whitespace=}"
- case "${1#--whitespace=}" in
- fix|strip)
- force_rebase=t
- ;;
- warn|nowarn|error|error-all)
- ;; # okay, known whitespace option
- *)
- die "fatal: Invalid whitespace option: '${1#*=}'"
- ;;
- esac
- ;;
- --ignore-whitespace)
- git_am_opt="$git_am_opt $1"
- ;;
- --signoff)
- signoff=--signoff
- ;;
- --no-signoff)
- signoff=
- ;;
- --committer-date-is-author-date|--ignore-date)
- git_am_opt="$git_am_opt $1"
- force_rebase=t
- ;;
- -C*[!0-9]*)
- die "fatal: switch \`C' expects a numerical value"
- ;;
- -C*)
- git_am_opt="$git_am_opt $1"
- ;;
- --root)
- rebase_root=t
- ;;
- --force-rebase|--no-ff)
- force_rebase=t
- ;;
- --rerere-autoupdate|--no-rerere-autoupdate)
- allow_rerere_autoupdate="$1"
- ;;
- --gpg-sign)
- gpg_sign_opt=-S
- ;;
- --gpg-sign=*)
- gpg_sign_opt="-S${1#--gpg-sign=}"
- ;;
- --reschedule-failed-exec)
- reschedule_failed_exec=--reschedule-failed-exec
- ;;
- --no-reschedule-failed-exec)
- reschedule_failed_exec=
- ;;
- --)
- shift
- break
- ;;
- *)
- usage
- ;;
- esac
- shift
-done
-test $# -gt 2 && usage
-
-if test -n "$action"
-then
- test -z "$in_progress" && die "$(gettext "No rebase in progress?")"
- # Only interactive rebase uses detailed reflog messages
- if test -n "$interactive_rebase" && test "$GIT_REFLOG_ACTION" = rebase
- then
- GIT_REFLOG_ACTION="rebase -i ($action)"
- export GIT_REFLOG_ACTION
- fi
-fi
-
-if test "$action" = "edit-todo" && test -z "$interactive_rebase"
-then
- die "$(gettext "The --edit-todo action can only be used during interactive rebase.")"
-fi
-
-case "$action" in
-continue)
- # Sanity check
- git rev-parse --verify HEAD >/dev/null ||
- die "$(gettext "Cannot read HEAD")"
- git update-index --ignore-submodules --refresh &&
- git diff-files --quiet --ignore-submodules || {
- echo "$(gettext "You must edit all merge conflicts and then
-mark them as resolved using git add")"
- exit 1
- }
- read_basic_state
- run_specific_rebase
- ;;
-skip)
- output git reset --hard HEAD || exit $?
- read_basic_state
- run_specific_rebase
- ;;
-abort)
- git rerere clear
- read_basic_state
- case "$head_name" in
- refs/*)
- git symbolic-ref -m "rebase: aborting" HEAD $head_name ||
- die "$(eval_gettext "Could not move back to \$head_name")"
- ;;
- esac
- output git reset --hard $orig_head
- finish_rebase
- exit
- ;;
-quit)
- exec rm -rf "$state_dir"
- ;;
-edit-todo)
- run_specific_rebase
- ;;
-show-current-patch)
- run_specific_rebase
- die "BUG: run_specific_rebase is not supposed to return here"
- ;;
-esac
-
-# Make sure no rebase is in progress
-if test -n "$in_progress"
-then
- state_dir_base=${state_dir##*/}
- cmd_live_rebase="git rebase (--continue | --abort | --skip)"
- cmd_clear_stale_rebase="rm -fr \"$state_dir\""
- die "
-$(eval_gettext 'It seems that there is already a $state_dir_base directory, and
-I wonder if you are in the middle of another rebase. If that is the
-case, please try
- $cmd_live_rebase
-If that is not the case, please
- $cmd_clear_stale_rebase
-and run me again. I am stopping in case you still have something
-valuable there.')"
-fi
-
-if test -n "$rebase_root" && test -z "$onto"
-then
- test -z "$interactive_rebase" && interactive_rebase=implied
-fi
-
-if test -n "$keep_empty"
-then
- test -z "$interactive_rebase" && interactive_rebase=implied
-fi
-
-actually_interactive=
-if test -n "$interactive_rebase"
-then
- if test -z "$preserve_merges"
- then
- type=interactive
- else
- type=preserve-merges
- fi
- actually_interactive=t
- state_dir="$merge_dir"
-elif test -n "$do_merge"
-then
- interactive_rebase=implied
- type=interactive
- state_dir="$merge_dir"
-else
- type=am
- state_dir="$apply_dir"
-fi
-
-if test -t 2 && test -z "$GIT_QUIET"
-then
- git_format_patch_opt="$git_format_patch_opt --progress"
-fi
-
-incompatible_opts=$(echo " $git_am_opt " | \
- sed -e 's/ -q / /g' -e 's/^ \(.*\) $/\1/')
-if test -n "$incompatible_opts"
-then
- if test -n "$actually_interactive" || test "$do_merge"
- then
- die "$(gettext "fatal: cannot combine am options with either interactive or merge options")"
- fi
-fi
-
-if test -n "$signoff"
-then
- test -n "$preserve_merges" &&
- die "$(gettext "fatal: cannot combine '--signoff' with '--preserve-merges'")"
- git_am_opt="$git_am_opt $signoff"
- force_rebase=t
-fi
-
-if test -n "$preserve_merges"
-then
- # Note: incompatibility with --signoff handled in signoff block above
- # Note: incompatibility with --interactive is just a strong warning;
- # git-rebase.txt caveats with "unless you know what you are doing"
- test -n "$rebase_merges" &&
- die "$(gettext "fatal: cannot combine '--preserve-merges' with '--rebase-merges'")"
-
- test -n "$reschedule_failed_exec" &&
- die "$(gettext "error: cannot combine '--preserve-merges' with '--reschedule-failed-exec'")"
-fi
-
-if test -n "$rebase_merges"
-then
- test -n "$strategy_opts" &&
- die "$(gettext "fatal: cannot combine '--rebase-merges' with '--strategy-option'")"
- test -n "$strategy" &&
- die "$(gettext "fatal: cannot combine '--rebase-merges' with '--strategy'")"
-fi
-
-if test -z "$rebase_root"
-then
- case "$#" in
- 0)
- if ! upstream_name=$(git rev-parse --symbolic-full-name \
- --verify -q @{upstream} 2>/dev/null)
- then
- . git-parse-remote
- error_on_missing_default_upstream "rebase" "rebase" \
- "against" "git rebase $(gettext '<branch>')"
- fi
-
- test "$fork_point" = auto && fork_point=t
- ;;
- *) upstream_name="$1"
- if test "$upstream_name" = "-"
- then
- upstream_name="@{-1}"
- fi
- shift
- ;;
- esac
- upstream=$(peel_committish "${upstream_name}") ||
- die "$(eval_gettext "invalid upstream '\$upstream_name'")"
- upstream_arg="$upstream_name"
-else
- if test -z "$onto"
- then
- empty_tree=$(git hash-object -t tree /dev/null)
- onto=$(git commit-tree $empty_tree </dev/null)
- squash_onto="$onto"
- fi
- unset upstream_name
- unset upstream
- test $# -gt 1 && usage
- upstream_arg=--root
-fi
-
-# Make sure the branch to rebase onto is valid.
-onto_name=${onto-"$upstream_name"}
-case "$onto_name" in
-*...*)
- if left=${onto_name%...*} right=${onto_name#*...} &&
- onto=$(git merge-base --all ${left:-HEAD} ${right:-HEAD})
- then
- case "$onto" in
- ?*"$LF"?*)
- die "$(eval_gettext "\$onto_name: there are more than one merge bases")"
- ;;
- '')
- die "$(eval_gettext "\$onto_name: there is no merge base")"
- ;;
- esac
- else
- die "$(eval_gettext "\$onto_name: there is no merge base")"
- fi
- ;;
-*)
- onto=$(peel_committish "$onto_name") ||
- die "$(eval_gettext "Does not point to a valid commit: \$onto_name")"
- ;;
-esac
-
-# If the branch to rebase is given, that is the branch we will rebase
-# $branch_name -- branch/commit being rebased, or HEAD (already detached)
-# $orig_head -- commit object name of tip of the branch before rebasing
-# $head_name -- refs/heads/<that-branch> or "detached HEAD"
-switch_to=
-case "$#" in
-1)
- # Is it "rebase other $branchname" or "rebase other $commit"?
- branch_name="$1"
- switch_to="$1"
-
- # Is it a local branch?
- if git show-ref --verify --quiet -- "refs/heads/$branch_name" &&
- orig_head=$(git rev-parse -q --verify "refs/heads/$branch_name")
- then
- head_name="refs/heads/$branch_name"
- # If not is it a valid ref (branch or commit)?
- elif orig_head=$(git rev-parse -q --verify "$branch_name")
- then
- head_name="detached HEAD"
-
- else
- die "$(eval_gettext "fatal: no such branch/commit '\$branch_name'")"
- fi
- ;;
-0)
- # Do not need to switch branches, we are already on it.
- if branch_name=$(git symbolic-ref -q HEAD)
- then
- head_name=$branch_name
- branch_name=$(expr "z$branch_name" : 'zrefs/heads/\(.*\)')
- else
- head_name="detached HEAD"
- branch_name=HEAD
- fi
- orig_head=$(git rev-parse --verify HEAD) || exit
- ;;
-*)
- die "BUG: unexpected number of arguments left to parse"
- ;;
-esac
-
-if test "$fork_point" = t
-then
- new_upstream=$(git merge-base --fork-point "$upstream_name" \
- "${switch_to:-HEAD}")
- if test -n "$new_upstream"
- then
- restrict_revision=$new_upstream
- fi
-fi
-
-if test "$autostash" = true && ! (require_clean_work_tree) 2>/dev/null
-then
- stash_sha1=$(git stash create "autostash") ||
- die "$(gettext 'Cannot autostash')"
-
- mkdir -p "$state_dir" &&
- echo $stash_sha1 >"$state_dir/autostash" &&
- stash_abbrev=$(git rev-parse --short $stash_sha1) &&
- echo "$(eval_gettext 'Created autostash: $stash_abbrev')" &&
- git reset --hard
-fi
-
-require_clean_work_tree "rebase" "$(gettext "Please commit or stash them.")"
-
-# Now we are rebasing commits $upstream..$orig_head (or with --root,
-# everything leading up to $orig_head) on top of $onto
-
-# Check if we are already based on $onto with linear history,
-# but this should be done only when upstream and onto are the same
-# and if this is not an interactive rebase.
-mb=$(git merge-base "$onto" "$orig_head")
-if test -z "$actually_interactive" && test "$upstream" = "$onto" &&
- test "$mb" = "$onto" && test -z "$restrict_revision" &&
- # linear history?
- ! (git rev-list --parents "$onto".."$orig_head" | sane_grep " .* ") > /dev/null
-then
- if test -z "$force_rebase"
- then
- # Lazily switch to the target branch if needed...
- test -z "$switch_to" ||
- GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $switch_to" \
- git checkout -q "$switch_to" --
- if test "$branch_name" = "HEAD" &&
- ! git symbolic-ref -q HEAD
- then
- say "$(eval_gettext "HEAD is up to date.")"
- else
- say "$(eval_gettext "Current branch \$branch_name is up to date.")"
- fi
- finish_rebase
- exit 0
- else
- if test "$branch_name" = "HEAD" &&
- ! git symbolic-ref -q HEAD
- then
- say "$(eval_gettext "HEAD is up to date, rebase forced.")"
- else
- say "$(eval_gettext "Current branch \$branch_name is up to date, rebase forced.")"
- fi
- fi
-fi
-
-# If a hook exists, give it a chance to interrupt
-run_pre_rebase_hook "$upstream_arg" "$@"
-
-if test -n "$diffstat"
-then
- if test -n "$verbose"
- then
- if test -z "$mb"
- then
- echo "$(eval_gettext "Changes to \$onto:")"
- else
- echo "$(eval_gettext "Changes from \$mb to \$onto:")"
- fi
- fi
- mb_tree="${mb:-$(git hash-object -t tree /dev/null)}"
- # We want color (if set), but no pager
- GIT_PAGER='' git diff --stat --summary "$mb_tree" "$onto"
-fi
-
-if test -z "$actually_interactive" && test "$mb" = "$orig_head"
-then
- say "$(eval_gettext "Fast-forwarded \$branch_name to \$onto_name.")"
- GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $onto_name" \
- git checkout -q "$onto^0" || die "could not detach HEAD"
- # If the $onto is a proper descendant of the tip of the branch, then
- # we just fast-forwarded.
- git update-ref ORIG_HEAD $orig_head
- move_to_original_branch
- finish_rebase
- exit 0
-fi
-
-test -n "$interactive_rebase" && run_specific_rebase
-
-# Detach HEAD and reset the tree
-say "$(gettext "First, rewinding head to replay your work on top of it...")"
-
-GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $onto_name" \
- git checkout -q "$onto^0" || die "could not detach HEAD"
-git update-ref ORIG_HEAD $orig_head
-
-if test -n "$rebase_root"
-then
- revisions="$onto..$orig_head"
-else
- revisions="${restrict_revision-$upstream}..$orig_head"
-fi
-
-run_specific_rebase
--- /dev/null
+#!/bin/sh
+# Copyright (c) 2007, Nanako Shiraishi
+
+dashless=$(basename "$0" | sed -e 's/-/ /')
+USAGE="list [<options>]
+ or: $dashless show [<stash>]
+ or: $dashless drop [-q|--quiet] [<stash>]
+ or: $dashless ( pop | apply ) [--index] [-q|--quiet] [<stash>]
+ or: $dashless branch <branchname> [<stash>]
+ or: $dashless save [--patch] [-k|--[no-]keep-index] [-q|--quiet]
+ [-u|--include-untracked] [-a|--all] [<message>]
+ or: $dashless [push [--patch] [-k|--[no-]keep-index] [-q|--quiet]
+ [-u|--include-untracked] [-a|--all] [-m <message>]
+ [-- <pathspec>...]]
+ or: $dashless clear"
+
+SUBDIRECTORY_OK=Yes
+OPTIONS_SPEC=
+START_DIR=$(pwd)
+. git-sh-setup
+require_work_tree
+prefix=$(git rev-parse --show-prefix) || exit 1
+cd_to_toplevel
+
+TMP="$GIT_DIR/.git-stash.$$"
+TMPindex=${GIT_INDEX_FILE-"$(git rev-parse --git-path index)"}.stash.$$
+trap 'rm -f "$TMP-"* "$TMPindex"' 0
+
+ref_stash=refs/stash
+
+if git config --get-colorbool color.interactive; then
+ help_color="$(git config --get-color color.interactive.help 'red bold')"
+ reset_color="$(git config --get-color '' reset)"
+else
+ help_color=
+ reset_color=
+fi
+
+no_changes () {
+ git diff-index --quiet --cached HEAD --ignore-submodules -- "$@" &&
+ git diff-files --quiet --ignore-submodules -- "$@" &&
+ (test -z "$untracked" || test -z "$(untracked_files "$@")")
+}
+
+untracked_files () {
+ if test "$1" = "-z"
+ then
+ shift
+ z=-z
+ else
+ z=
+ fi
+ excl_opt=--exclude-standard
+ test "$untracked" = "all" && excl_opt=
+ git ls-files -o $z $excl_opt -- "$@"
+}
+
+prepare_fallback_ident () {
+ if ! git -c user.useconfigonly=yes var GIT_COMMITTER_IDENT >/dev/null 2>&1
+ then
+ GIT_AUTHOR_NAME="git stash"
+ GIT_AUTHOR_EMAIL=git@stash
+ GIT_COMMITTER_NAME="git stash"
+ GIT_COMMITTER_EMAIL=git@stash
+ export GIT_AUTHOR_NAME
+ export GIT_AUTHOR_EMAIL
+ export GIT_COMMITTER_NAME
+ export GIT_COMMITTER_EMAIL
+ fi
+}
+
+clear_stash () {
+ if test $# != 0
+ then
+ die "$(gettext "git stash clear with parameters is unimplemented")"
+ fi
+ if current=$(git rev-parse --verify --quiet $ref_stash)
+ then
+ git update-ref -d $ref_stash $current
+ fi
+}
+
+maybe_quiet () {
+ case "$1" in
+ --keep-stdout)
+ shift
+ if test -n "$GIT_QUIET"
+ then
+ "$@" 2>/dev/null
+ else
+ "$@"
+ fi
+ ;;
+ *)
+ if test -n "$GIT_QUIET"
+ then
+ "$@" >/dev/null 2>&1
+ else
+ "$@"
+ fi
+ ;;
+ esac
+}
+
+create_stash () {
+
+ prepare_fallback_ident
+
+ stash_msg=
+ untracked=
+ while test $# != 0
+ do
+ case "$1" in
+ -m|--message)
+ shift
+ stash_msg=${1?"BUG: create_stash () -m requires an argument"}
+ ;;
+ -m*)
+ stash_msg=${1#-m}
+ ;;
+ --message=*)
+ stash_msg=${1#--message=}
+ ;;
+ -u|--include-untracked)
+ shift
+ untracked=${1?"BUG: create_stash () -u requires an argument"}
+ ;;
+ --)
+ shift
+ break
+ ;;
+ esac
+ shift
+ done
+
+ git update-index -q --refresh
+ if maybe_quiet no_changes "$@"
+ then
+ exit 0
+ fi
+
+ # state of the base commit
+ if b_commit=$(maybe_quiet --keep-stdout git rev-parse --verify HEAD)
+ then
+ head=$(git rev-list --oneline -n 1 HEAD --)
+ elif test -n "$GIT_QUIET"
+ then
+ exit 1
+ else
+ die "$(gettext "You do not have the initial commit yet")"
+ fi
+
+ if branch=$(git symbolic-ref -q HEAD)
+ then
+ branch=${branch#refs/heads/}
+ else
+ branch='(no branch)'
+ fi
+ msg=$(printf '%s: %s' "$branch" "$head")
+
+ # state of the index
+ i_tree=$(git write-tree) &&
+ i_commit=$(printf 'index on %s\n' "$msg" |
+ git commit-tree $i_tree -p $b_commit) ||
+ die "$(gettext "Cannot save the current index state")"
+
+ if test -n "$untracked"
+ then
+ # Untracked files are stored by themselves in a parentless commit, for
+ # ease of unpacking later.
+ u_commit=$(
+ untracked_files -z "$@" | (
+ GIT_INDEX_FILE="$TMPindex" &&
+ export GIT_INDEX_FILE &&
+ rm -f "$TMPindex" &&
+ git update-index -z --add --remove --stdin &&
+ u_tree=$(git write-tree) &&
+ printf 'untracked files on %s\n' "$msg" | git commit-tree $u_tree &&
+ rm -f "$TMPindex"
+ ) ) || die "$(gettext "Cannot save the untracked files")"
+
+ untracked_commit_option="-p $u_commit";
+ else
+ untracked_commit_option=
+ fi
+
+ if test -z "$patch_mode"
+ then
+
+ # state of the working tree
+ w_tree=$( (
+ git read-tree --index-output="$TMPindex" -m $i_tree &&
+ GIT_INDEX_FILE="$TMPindex" &&
+ export GIT_INDEX_FILE &&
+ git diff-index --name-only -z HEAD -- "$@" >"$TMP-stagenames" &&
+ git update-index -z --add --remove --stdin <"$TMP-stagenames" &&
+ git write-tree &&
+ rm -f "$TMPindex"
+ ) ) ||
+ die "$(gettext "Cannot save the current worktree state")"
+
+ else
+
+ rm -f "$TMP-index" &&
+ GIT_INDEX_FILE="$TMP-index" git read-tree HEAD &&
+
+ # find out what the user wants
+ GIT_INDEX_FILE="$TMP-index" \
+ git add--interactive --patch=stash -- "$@" &&
+
+ # state of the working tree
+ w_tree=$(GIT_INDEX_FILE="$TMP-index" git write-tree) ||
+ die "$(gettext "Cannot save the current worktree state")"
+
+ git diff-tree -p HEAD $w_tree -- >"$TMP-patch" &&
+ test -s "$TMP-patch" ||
+ die "$(gettext "No changes selected")"
+
+ rm -f "$TMP-index" ||
+ die "$(gettext "Cannot remove temporary index (can't happen)")"
+
+ fi
+
+ # create the stash
+ if test -z "$stash_msg"
+ then
+ stash_msg=$(printf 'WIP on %s' "$msg")
+ else
+ stash_msg=$(printf 'On %s: %s' "$branch" "$stash_msg")
+ fi
+ w_commit=$(printf '%s\n' "$stash_msg" |
+ git commit-tree $w_tree -p $b_commit -p $i_commit $untracked_commit_option) ||
+ die "$(gettext "Cannot record working tree state")"
+}
+
+store_stash () {
+ while test $# != 0
+ do
+ case "$1" in
+ -m|--message)
+ shift
+ stash_msg="$1"
+ ;;
+ -m*)
+ stash_msg=${1#-m}
+ ;;
+ --message=*)
+ stash_msg=${1#--message=}
+ ;;
+ -q|--quiet)
+ quiet=t
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+ done
+ test $# = 1 ||
+ die "$(eval_gettext "\"$dashless store\" requires one <commit> argument")"
+
+ w_commit="$1"
+ if test -z "$stash_msg"
+ then
+ stash_msg="Created via \"git stash store\"."
+ fi
+
+ git update-ref --create-reflog -m "$stash_msg" $ref_stash $w_commit
+ ret=$?
+ test $ret != 0 && test -z "$quiet" &&
+ die "$(eval_gettext "Cannot update \$ref_stash with \$w_commit")"
+ return $ret
+}
+
+push_stash () {
+ keep_index=
+ patch_mode=
+ untracked=
+ stash_msg=
+ while test $# != 0
+ do
+ case "$1" in
+ -k|--keep-index)
+ keep_index=t
+ ;;
+ --no-keep-index)
+ keep_index=n
+ ;;
+ -p|--patch)
+ patch_mode=t
+ # only default to keep if we don't already have an override
+ test -z "$keep_index" && keep_index=t
+ ;;
+ -q|--quiet)
+ GIT_QUIET=t
+ ;;
+ -u|--include-untracked)
+ untracked=untracked
+ ;;
+ -a|--all)
+ untracked=all
+ ;;
+ -m|--message)
+ shift
+ test -z ${1+x} && usage
+ stash_msg=$1
+ ;;
+ -m*)
+ stash_msg=${1#-m}
+ ;;
+ --message=*)
+ stash_msg=${1#--message=}
+ ;;
+ --help)
+ show_help
+ ;;
+ --)
+ shift
+ break
+ ;;
+ -*)
+ option="$1"
+ eval_gettextln "error: unknown option for 'stash push': \$option"
+ usage
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+ done
+
+ eval "set $(git rev-parse --sq --prefix "$prefix" -- "$@")"
+
+ if test -n "$patch_mode" && test -n "$untracked"
+ then
+ die "$(gettext "Can't use --patch and --include-untracked or --all at the same time")"
+ fi
+
+ test -n "$untracked" || git ls-files --error-unmatch -- "$@" >/dev/null || exit 1
+
+ git update-index -q --refresh
+ if maybe_quiet no_changes "$@"
+ then
+ say "$(gettext "No local changes to save")"
+ exit 0
+ fi
+
+ git reflog exists $ref_stash ||
+ clear_stash || die "$(gettext "Cannot initialize stash")"
+
+ create_stash -m "$stash_msg" -u "$untracked" -- "$@"
+ store_stash -m "$stash_msg" -q $w_commit ||
+ die "$(gettext "Cannot save the current status")"
+ say "$(eval_gettext "Saved working directory and index state \$stash_msg")"
+
+ if test -z "$patch_mode"
+ then
+ test "$untracked" = "all" && CLEAN_X_OPTION=-x || CLEAN_X_OPTION=
+ if test -n "$untracked" && test $# = 0
+ then
+ git clean --force --quiet -d $CLEAN_X_OPTION
+ fi
+
+ if test $# != 0
+ then
+ test -z "$untracked" && UPDATE_OPTION="-u" || UPDATE_OPTION=
+ test "$untracked" = "all" && FORCE_OPTION="--force" || FORCE_OPTION=
+ git add $UPDATE_OPTION $FORCE_OPTION -- "$@"
+ git diff-index -p --cached --binary HEAD -- "$@" |
+ git apply --index -R
+ else
+ git reset --hard -q
+ fi
+
+ if test "$keep_index" = "t" && test -n "$i_tree"
+ then
+ git read-tree --reset $i_tree
+ git ls-files -z --modified -- "$@" |
+ git checkout-index -z --force --stdin
+ fi
+ else
+ git apply -R < "$TMP-patch" ||
+ die "$(gettext "Cannot remove worktree changes")"
+
+ if test "$keep_index" != "t"
+ then
+ git reset -q -- "$@"
+ fi
+ fi
+}
+
+save_stash () {
+ push_options=
+ while test $# != 0
+ do
+ case "$1" in
+ -q|--quiet)
+ GIT_QUIET=t
+ ;;
+ --)
+ shift
+ break
+ ;;
+ -*)
+ # pass all options through to push_stash
+ push_options="$push_options $1"
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+ done
+
+ stash_msg="$*"
+
+ if test -z "$stash_msg"
+ then
+ push_stash $push_options
+ else
+ push_stash $push_options -m "$stash_msg"
+ fi
+}
+
+have_stash () {
+ git rev-parse --verify --quiet $ref_stash >/dev/null
+}
+
+list_stash () {
+ have_stash || return 0
+ git log --format="%gd: %gs" -g --first-parent -m "$@" $ref_stash --
+}
+
+show_stash () {
+ ALLOW_UNKNOWN_FLAGS=t
+ assert_stash_like "$@"
+
+ if test -z "$FLAGS"
+ then
+ if test "$(git config --bool stash.showStat || echo true)" = "true"
+ then
+ FLAGS=--stat
+ fi
+
+ if test "$(git config --bool stash.showPatch || echo false)" = "true"
+ then
+ FLAGS=${FLAGS}${FLAGS:+ }-p
+ fi
+
+ if test -z "$FLAGS"
+ then
+ return 0
+ fi
+ fi
+
+ git diff ${FLAGS} $b_commit $w_commit
+}
+
+show_help () {
+ exec git help stash
+ exit 1
+}
+
+#
+# Parses the remaining options looking for flags and
+# at most one revision defaulting to ${ref_stash}@{0}
+# if none found.
+#
+# Derives related tree and commit objects from the
+# revision, if one is found.
+#
+# stash records the work tree, and is a merge between the
+# base commit (first parent) and the index tree (second parent).
+#
+# REV is set to the symbolic version of the specified stash-like commit
+# IS_STASH_LIKE is non-blank if ${REV} looks like a stash
+# IS_STASH_REF is non-blank if the ${REV} looks like a stash ref
+# s is set to the SHA1 of the stash commit
+# w_commit is set to the commit containing the working tree
+# b_commit is set to the base commit
+# i_commit is set to the commit containing the index tree
+# u_commit is set to the commit containing the untracked files tree
+# w_tree is set to the working tree
+# b_tree is set to the base tree
+# i_tree is set to the index tree
+# u_tree is set to the untracked files tree
+#
+# GIT_QUIET is set to t if -q is specified
+# INDEX_OPTION is set to --index if --index is specified.
+# FLAGS is set to the remaining flags (if allowed)
+#
+# dies if:
+# * too many revisions specified
+# * no revision is specified and there is no stash stack
+# * a revision is specified which cannot be resolved to a SHA1
+# * a non-existent stash reference is specified
+# * unknown flags were set and ALLOW_UNKNOWN_FLAGS is not "t"
+#
+
+parse_flags_and_rev()
+{
+ test "$PARSE_CACHE" = "$*" && return 0 # optimisation
+ PARSE_CACHE="$*"
+
+ IS_STASH_LIKE=
+ IS_STASH_REF=
+ INDEX_OPTION=
+ s=
+ w_commit=
+ b_commit=
+ i_commit=
+ u_commit=
+ w_tree=
+ b_tree=
+ i_tree=
+ u_tree=
+
+ FLAGS=
+ REV=
+ for opt
+ do
+ case "$opt" in
+ -q|--quiet)
+ GIT_QUIET=-t
+ ;;
+ --index)
+ INDEX_OPTION=--index
+ ;;
+ --help)
+ show_help
+ ;;
+ -*)
+ test "$ALLOW_UNKNOWN_FLAGS" = t ||
+ die "$(eval_gettext "unknown option: \$opt")"
+ FLAGS="${FLAGS}${FLAGS:+ }$opt"
+ ;;
+ *)
+ REV="${REV}${REV:+ }'$opt'"
+ ;;
+ esac
+ done
+
+ eval set -- $REV
+
+ case $# in
+ 0)
+ have_stash || die "$(gettext "No stash entries found.")"
+ set -- ${ref_stash}@{0}
+ ;;
+ 1)
+ :
+ ;;
+ *)
+ die "$(eval_gettext "Too many revisions specified: \$REV")"
+ ;;
+ esac
+
+ case "$1" in
+ *[!0-9]*)
+ :
+ ;;
+ *)
+ set -- "${ref_stash}@{$1}"
+ ;;
+ esac
+
+ REV=$(git rev-parse --symbolic --verify --quiet "$1") || {
+ reference="$1"
+ die "$(eval_gettext "\$reference is not a valid reference")"
+ }
+
+ i_commit=$(git rev-parse --verify --quiet "$REV^2") &&
+ set -- $(git rev-parse "$REV" "$REV^1" "$REV:" "$REV^1:" "$REV^2:" 2>/dev/null) &&
+ s=$1 &&
+ w_commit=$1 &&
+ b_commit=$2 &&
+ w_tree=$3 &&
+ b_tree=$4 &&
+ i_tree=$5 &&
+ IS_STASH_LIKE=t &&
+ test "$ref_stash" = "$(git rev-parse --symbolic-full-name "${REV%@*}")" &&
+ IS_STASH_REF=t
+
+ u_commit=$(git rev-parse --verify --quiet "$REV^3") &&
+ u_tree=$(git rev-parse "$REV^3:" 2>/dev/null)
+}
+
+is_stash_like()
+{
+ parse_flags_and_rev "$@"
+ test -n "$IS_STASH_LIKE"
+}
+
+assert_stash_like() {
+ is_stash_like "$@" || {
+ args="$*"
+ die "$(eval_gettext "'\$args' is not a stash-like commit")"
+ }
+}
+
+is_stash_ref() {
+ is_stash_like "$@" && test -n "$IS_STASH_REF"
+}
+
+assert_stash_ref() {
+ is_stash_ref "$@" || {
+ args="$*"
+ die "$(eval_gettext "'\$args' is not a stash reference")"
+ }
+}
+
+apply_stash () {
+
+ assert_stash_like "$@"
+
+ git update-index -q --refresh || die "$(gettext "unable to refresh index")"
+
+ # current index state
+ c_tree=$(git write-tree) ||
+ die "$(gettext "Cannot apply a stash in the middle of a merge")"
+
+ unstashed_index_tree=
+ if test -n "$INDEX_OPTION" && test "$b_tree" != "$i_tree" &&
+ test "$c_tree" != "$i_tree"
+ then
+ git diff-tree --binary $s^2^..$s^2 | git apply --cached
+ test $? -ne 0 &&
+ die "$(gettext "Conflicts in index. Try without --index.")"
+ unstashed_index_tree=$(git write-tree) ||
+ die "$(gettext "Could not save index tree")"
+ git reset
+ fi
+
+ if test -n "$u_tree"
+ then
+ GIT_INDEX_FILE="$TMPindex" git read-tree "$u_tree" &&
+ GIT_INDEX_FILE="$TMPindex" git checkout-index --all &&
+ rm -f "$TMPindex" ||
+ die "$(gettext "Could not restore untracked files from stash entry")"
+ fi
+
+ eval "
+ GITHEAD_$w_tree='Stashed changes' &&
+ GITHEAD_$c_tree='Updated upstream' &&
+ GITHEAD_$b_tree='Version stash was based on' &&
+ export GITHEAD_$w_tree GITHEAD_$c_tree GITHEAD_$b_tree
+ "
+
+ if test -n "$GIT_QUIET"
+ then
+ GIT_MERGE_VERBOSITY=0 && export GIT_MERGE_VERBOSITY
+ fi
+ if git merge-recursive $b_tree -- $c_tree $w_tree
+ then
+ # No conflict
+ if test -n "$unstashed_index_tree"
+ then
+ git read-tree "$unstashed_index_tree"
+ else
+ a="$TMP-added" &&
+ git diff-index --cached --name-only --diff-filter=A $c_tree >"$a" &&
+ git read-tree --reset $c_tree &&
+ git update-index --add --stdin <"$a" ||
+ die "$(gettext "Cannot unstage modified files")"
+ rm -f "$a"
+ fi
+ squelch=
+ if test -n "$GIT_QUIET"
+ then
+ squelch='>/dev/null 2>&1'
+ fi
+ (cd "$START_DIR" && eval "git status $squelch") || :
+ else
+ # Merge conflict; keep the exit status from merge-recursive
+ status=$?
+ git rerere
+ if test -n "$INDEX_OPTION"
+ then
+ gettextln "Index was not unstashed." >&2
+ fi
+ exit $status
+ fi
+}
+
+pop_stash() {
+ assert_stash_ref "$@"
+
+ if apply_stash "$@"
+ then
+ drop_stash "$@"
+ else
+ status=$?
+ say "$(gettext "The stash entry is kept in case you need it again.")"
+ exit $status
+ fi
+}
+
+drop_stash () {
+ assert_stash_ref "$@"
+
+ git reflog delete --updateref --rewrite "${REV}" &&
+ say "$(eval_gettext "Dropped \${REV} (\$s)")" ||
+ die "$(eval_gettext "\${REV}: Could not drop stash entry")"
+
+ # clear_stash if we just dropped the last stash entry
+ git rev-parse --verify --quiet "$ref_stash@{0}" >/dev/null ||
+ clear_stash
+}
+
+apply_to_branch () {
+ test -n "$1" || die "$(gettext "No branch name specified")"
+ branch=$1
+ shift 1
+
+ set -- --index "$@"
+ assert_stash_like "$@"
+
+ git checkout -b $branch $REV^ &&
+ apply_stash "$@" && {
+ test -z "$IS_STASH_REF" || drop_stash "$@"
+ }
+}
+
+test "$1" = "-p" && set "push" "$@"
+
+PARSE_CACHE='--not-parsed'
+# The default command is "push" if nothing but options are given
+seen_non_option=
+for opt
+do
+ case "$opt" in
+ --) break ;;
+ -*) ;;
+ *) seen_non_option=t; break ;;
+ esac
+done
+
+test -n "$seen_non_option" || set "push" "$@"
+
+# Main command set
+case "$1" in
+list)
+ shift
+ list_stash "$@"
+ ;;
+show)
+ shift
+ show_stash "$@"
+ ;;
+save)
+ shift
+ save_stash "$@"
+ ;;
+push)
+ shift
+ push_stash "$@"
+ ;;
+apply)
+ shift
+ apply_stash "$@"
+ ;;
+clear)
+ shift
+ clear_stash "$@"
+ ;;
+create)
+ shift
+ create_stash -m "$*" && echo "$w_commit"
+ ;;
+store)
+ shift
+ store_stash "$@"
+ ;;
+drop)
+ shift
+ drop_stash "$@"
+ ;;
+pop)
+ shift
+ pop_stash "$@"
+ ;;
+branch)
+ shift
+ apply_to_branch "$@"
+ ;;
+*)
+ case $# in
+ 0)
+ push_stash &&
+ say "$(gettext "(To restore them type \"git stash apply\")")"
+ ;;
+ *)
+ usage
+ esac
+ ;;
+esac
fi
tools="$tools gvimdiff diffuse diffmerge ecmerge"
tools="$tools p4merge araxis bc codecompare"
+ tools="$tools smerge"
fi
case "${VISUAL:-$EDITOR}" in
*vim*)
my(%suppress_cc);
if (@suppress_cc) {
foreach my $entry (@suppress_cc) {
+ # Please update $__git_send_email_suppresscc_options
+ # in git-completion.bash when you add new options.
die sprintf(__("Unknown --suppress-cc field: '%s'\n"), $entry)
unless $entry =~ /^(?:all|cccmd|cc|author|self|sob|body|bodycc|misc-by)$/;
$suppress_cc{$entry} = 1;
if ($confirm_unconfigured) {
$confirm = scalar %suppress_cc ? 'compose' : 'auto';
};
+# Please update $__git_send_email_confirm_options in
+# git-completion.bash when you add new options.
die sprintf(__("Unknown --confirm setting: '%s'\n"), $confirm)
unless $confirm =~ /^(?:auto|cc|compose|always|never)/;
if (/\(define-mail-alias\s+"(\S+?)"\s+"(\S+?)"\)/) {
$aliases{$1} = [ $2 ];
}}}
+ # Please update _git_config() in git-completion.bash when you
+ # add new MUAs.
);
if (@alias_files and $aliasfiletype and defined $parse_alias{$aliasfiletype}) {
case "$1" in
-h)
echo "$LONG_USAGE"
+ case "$0" in *git-legacy-stash) exit 129;; esac
exit
esac
fi
+++ /dev/null
-#!/bin/sh
-# Copyright (c) 2007, Nanako Shiraishi
-
-dashless=$(basename "$0" | sed -e 's/-/ /')
-USAGE="list [<options>]
- or: $dashless show [<stash>]
- or: $dashless drop [-q|--quiet] [<stash>]
- or: $dashless ( pop | apply ) [--index] [-q|--quiet] [<stash>]
- or: $dashless branch <branchname> [<stash>]
- or: $dashless save [--patch] [-k|--[no-]keep-index] [-q|--quiet]
- [-u|--include-untracked] [-a|--all] [<message>]
- or: $dashless [push [--patch] [-k|--[no-]keep-index] [-q|--quiet]
- [-u|--include-untracked] [-a|--all] [-m <message>]
- [-- <pathspec>...]]
- or: $dashless clear"
-
-SUBDIRECTORY_OK=Yes
-OPTIONS_SPEC=
-START_DIR=$(pwd)
-. git-sh-setup
-require_work_tree
-prefix=$(git rev-parse --show-prefix) || exit 1
-cd_to_toplevel
-
-TMP="$GIT_DIR/.git-stash.$$"
-TMPindex=${GIT_INDEX_FILE-"$(git rev-parse --git-path index)"}.stash.$$
-trap 'rm -f "$TMP-"* "$TMPindex"' 0
-
-ref_stash=refs/stash
-
-if git config --get-colorbool color.interactive; then
- help_color="$(git config --get-color color.interactive.help 'red bold')"
- reset_color="$(git config --get-color '' reset)"
-else
- help_color=
- reset_color=
-fi
-
-no_changes () {
- git diff-index --quiet --cached HEAD --ignore-submodules -- "$@" &&
- git diff-files --quiet --ignore-submodules -- "$@" &&
- (test -z "$untracked" || test -z "$(untracked_files "$@")")
-}
-
-untracked_files () {
- if test "$1" = "-z"
- then
- shift
- z=-z
- else
- z=
- fi
- excl_opt=--exclude-standard
- test "$untracked" = "all" && excl_opt=
- git ls-files -o $z $excl_opt -- "$@"
-}
-
-prepare_fallback_ident () {
- if ! git -c user.useconfigonly=yes var GIT_COMMITTER_IDENT >/dev/null 2>&1
- then
- GIT_AUTHOR_NAME="git stash"
- GIT_AUTHOR_EMAIL=git@stash
- GIT_COMMITTER_NAME="git stash"
- GIT_COMMITTER_EMAIL=git@stash
- export GIT_AUTHOR_NAME
- export GIT_AUTHOR_EMAIL
- export GIT_COMMITTER_NAME
- export GIT_COMMITTER_EMAIL
- fi
-}
-
-clear_stash () {
- if test $# != 0
- then
- die "$(gettext "git stash clear with parameters is unimplemented")"
- fi
- if current=$(git rev-parse --verify --quiet $ref_stash)
- then
- git update-ref -d $ref_stash $current
- fi
-}
-
-create_stash () {
-
- prepare_fallback_ident
-
- stash_msg=
- untracked=
- while test $# != 0
- do
- case "$1" in
- -m|--message)
- shift
- stash_msg=${1?"BUG: create_stash () -m requires an argument"}
- ;;
- -m*)
- stash_msg=${1#-m}
- ;;
- --message=*)
- stash_msg=${1#--message=}
- ;;
- -u|--include-untracked)
- shift
- untracked=${1?"BUG: create_stash () -u requires an argument"}
- ;;
- --)
- shift
- break
- ;;
- esac
- shift
- done
-
- git update-index -q --refresh
- if no_changes "$@"
- then
- exit 0
- fi
-
- # state of the base commit
- if b_commit=$(git rev-parse --verify HEAD)
- then
- head=$(git rev-list --oneline -n 1 HEAD --)
- else
- die "$(gettext "You do not have the initial commit yet")"
- fi
-
- if branch=$(git symbolic-ref -q HEAD)
- then
- branch=${branch#refs/heads/}
- else
- branch='(no branch)'
- fi
- msg=$(printf '%s: %s' "$branch" "$head")
-
- # state of the index
- i_tree=$(git write-tree) &&
- i_commit=$(printf 'index on %s\n' "$msg" |
- git commit-tree $i_tree -p $b_commit) ||
- die "$(gettext "Cannot save the current index state")"
-
- if test -n "$untracked"
- then
- # Untracked files are stored by themselves in a parentless commit, for
- # ease of unpacking later.
- u_commit=$(
- untracked_files -z "$@" | (
- GIT_INDEX_FILE="$TMPindex" &&
- export GIT_INDEX_FILE &&
- rm -f "$TMPindex" &&
- git update-index -z --add --remove --stdin &&
- u_tree=$(git write-tree) &&
- printf 'untracked files on %s\n' "$msg" | git commit-tree $u_tree &&
- rm -f "$TMPindex"
- ) ) || die "$(gettext "Cannot save the untracked files")"
-
- untracked_commit_option="-p $u_commit";
- else
- untracked_commit_option=
- fi
-
- if test -z "$patch_mode"
- then
-
- # state of the working tree
- w_tree=$( (
- git read-tree --index-output="$TMPindex" -m $i_tree &&
- GIT_INDEX_FILE="$TMPindex" &&
- export GIT_INDEX_FILE &&
- git diff-index --name-only -z HEAD -- "$@" >"$TMP-stagenames" &&
- git update-index -z --add --remove --stdin <"$TMP-stagenames" &&
- git write-tree &&
- rm -f "$TMPindex"
- ) ) ||
- die "$(gettext "Cannot save the current worktree state")"
-
- else
-
- rm -f "$TMP-index" &&
- GIT_INDEX_FILE="$TMP-index" git read-tree HEAD &&
-
- # find out what the user wants
- GIT_INDEX_FILE="$TMP-index" \
- git add--interactive --patch=stash -- "$@" &&
-
- # state of the working tree
- w_tree=$(GIT_INDEX_FILE="$TMP-index" git write-tree) ||
- die "$(gettext "Cannot save the current worktree state")"
-
- git diff-tree -p HEAD $w_tree -- >"$TMP-patch" &&
- test -s "$TMP-patch" ||
- die "$(gettext "No changes selected")"
-
- rm -f "$TMP-index" ||
- die "$(gettext "Cannot remove temporary index (can't happen)")"
-
- fi
-
- # create the stash
- if test -z "$stash_msg"
- then
- stash_msg=$(printf 'WIP on %s' "$msg")
- else
- stash_msg=$(printf 'On %s: %s' "$branch" "$stash_msg")
- fi
- w_commit=$(printf '%s\n' "$stash_msg" |
- git commit-tree $w_tree -p $b_commit -p $i_commit $untracked_commit_option) ||
- die "$(gettext "Cannot record working tree state")"
-}
-
-store_stash () {
- while test $# != 0
- do
- case "$1" in
- -m|--message)
- shift
- stash_msg="$1"
- ;;
- -m*)
- stash_msg=${1#-m}
- ;;
- --message=*)
- stash_msg=${1#--message=}
- ;;
- -q|--quiet)
- quiet=t
- ;;
- *)
- break
- ;;
- esac
- shift
- done
- test $# = 1 ||
- die "$(eval_gettext "\"$dashless store\" requires one <commit> argument")"
-
- w_commit="$1"
- if test -z "$stash_msg"
- then
- stash_msg="Created via \"git stash store\"."
- fi
-
- git update-ref --create-reflog -m "$stash_msg" $ref_stash $w_commit
- ret=$?
- test $ret != 0 && test -z "$quiet" &&
- die "$(eval_gettext "Cannot update \$ref_stash with \$w_commit")"
- return $ret
-}
-
-push_stash () {
- keep_index=
- patch_mode=
- untracked=
- stash_msg=
- while test $# != 0
- do
- case "$1" in
- -k|--keep-index)
- keep_index=t
- ;;
- --no-keep-index)
- keep_index=n
- ;;
- -p|--patch)
- patch_mode=t
- # only default to keep if we don't already have an override
- test -z "$keep_index" && keep_index=t
- ;;
- -q|--quiet)
- GIT_QUIET=t
- ;;
- -u|--include-untracked)
- untracked=untracked
- ;;
- -a|--all)
- untracked=all
- ;;
- -m|--message)
- shift
- test -z ${1+x} && usage
- stash_msg=$1
- ;;
- -m*)
- stash_msg=${1#-m}
- ;;
- --message=*)
- stash_msg=${1#--message=}
- ;;
- --help)
- show_help
- ;;
- --)
- shift
- break
- ;;
- -*)
- option="$1"
- eval_gettextln "error: unknown option for 'stash push': \$option"
- usage
- ;;
- *)
- break
- ;;
- esac
- shift
- done
-
- eval "set $(git rev-parse --sq --prefix "$prefix" -- "$@")"
-
- if test -n "$patch_mode" && test -n "$untracked"
- then
- die "$(gettext "Can't use --patch and --include-untracked or --all at the same time")"
- fi
-
- test -n "$untracked" || git ls-files --error-unmatch -- "$@" >/dev/null || exit 1
-
- git update-index -q --refresh
- if no_changes "$@"
- then
- say "$(gettext "No local changes to save")"
- exit 0
- fi
-
- git reflog exists $ref_stash ||
- clear_stash || die "$(gettext "Cannot initialize stash")"
-
- create_stash -m "$stash_msg" -u "$untracked" -- "$@"
- store_stash -m "$stash_msg" -q $w_commit ||
- die "$(gettext "Cannot save the current status")"
- say "$(eval_gettext "Saved working directory and index state \$stash_msg")"
-
- if test -z "$patch_mode"
- then
- test "$untracked" = "all" && CLEAN_X_OPTION=-x || CLEAN_X_OPTION=
- if test -n "$untracked" && test $# = 0
- then
- git clean --force --quiet -d $CLEAN_X_OPTION
- fi
-
- if test $# != 0
- then
- test -z "$untracked" && UPDATE_OPTION="-u" || UPDATE_OPTION=
- test "$untracked" = "all" && FORCE_OPTION="--force" || FORCE_OPTION=
- git add $UPDATE_OPTION $FORCE_OPTION -- "$@"
- git diff-index -p --cached --binary HEAD -- "$@" |
- git apply --index -R
- else
- git reset --hard -q
- fi
-
- if test "$keep_index" = "t" && test -n "$i_tree"
- then
- git read-tree --reset $i_tree
- git ls-files -z --modified -- "$@" |
- git checkout-index -z --force --stdin
- fi
- else
- git apply -R < "$TMP-patch" ||
- die "$(gettext "Cannot remove worktree changes")"
-
- if test "$keep_index" != "t"
- then
- git reset -q -- "$@"
- fi
- fi
-}
-
-save_stash () {
- push_options=
- while test $# != 0
- do
- case "$1" in
- --)
- shift
- break
- ;;
- -*)
- # pass all options through to push_stash
- push_options="$push_options $1"
- ;;
- *)
- break
- ;;
- esac
- shift
- done
-
- stash_msg="$*"
-
- if test -z "$stash_msg"
- then
- push_stash $push_options
- else
- push_stash $push_options -m "$stash_msg"
- fi
-}
-
-have_stash () {
- git rev-parse --verify --quiet $ref_stash >/dev/null
-}
-
-list_stash () {
- have_stash || return 0
- git log --format="%gd: %gs" -g --first-parent -m "$@" $ref_stash --
-}
-
-show_stash () {
- ALLOW_UNKNOWN_FLAGS=t
- assert_stash_like "$@"
-
- if test -z "$FLAGS"
- then
- if test "$(git config --bool stash.showStat || echo true)" = "true"
- then
- FLAGS=--stat
- fi
-
- if test "$(git config --bool stash.showPatch || echo false)" = "true"
- then
- FLAGS=${FLAGS}${FLAGS:+ }-p
- fi
-
- if test -z "$FLAGS"
- then
- return 0
- fi
- fi
-
- git diff ${FLAGS} $b_commit $w_commit
-}
-
-show_help () {
- exec git help stash
- exit 1
-}
-
-#
-# Parses the remaining options looking for flags and
-# at most one revision defaulting to ${ref_stash}@{0}
-# if none found.
-#
-# Derives related tree and commit objects from the
-# revision, if one is found.
-#
-# stash records the work tree, and is a merge between the
-# base commit (first parent) and the index tree (second parent).
-#
-# REV is set to the symbolic version of the specified stash-like commit
-# IS_STASH_LIKE is non-blank if ${REV} looks like a stash
-# IS_STASH_REF is non-blank if the ${REV} looks like a stash ref
-# s is set to the SHA1 of the stash commit
-# w_commit is set to the commit containing the working tree
-# b_commit is set to the base commit
-# i_commit is set to the commit containing the index tree
-# u_commit is set to the commit containing the untracked files tree
-# w_tree is set to the working tree
-# b_tree is set to the base tree
-# i_tree is set to the index tree
-# u_tree is set to the untracked files tree
-#
-# GIT_QUIET is set to t if -q is specified
-# INDEX_OPTION is set to --index if --index is specified.
-# FLAGS is set to the remaining flags (if allowed)
-#
-# dies if:
-# * too many revisions specified
-# * no revision is specified and there is no stash stack
-# * a revision is specified which cannot be resolve to a SHA1
-# * a non-existent stash reference is specified
-# * unknown flags were set and ALLOW_UNKNOWN_FLAGS is not "t"
-#
-
-parse_flags_and_rev()
-{
- test "$PARSE_CACHE" = "$*" && return 0 # optimisation
- PARSE_CACHE="$*"
-
- IS_STASH_LIKE=
- IS_STASH_REF=
- INDEX_OPTION=
- s=
- w_commit=
- b_commit=
- i_commit=
- u_commit=
- w_tree=
- b_tree=
- i_tree=
- u_tree=
-
- FLAGS=
- REV=
- for opt
- do
- case "$opt" in
- -q|--quiet)
- GIT_QUIET=-t
- ;;
- --index)
- INDEX_OPTION=--index
- ;;
- --help)
- show_help
- ;;
- -*)
- test "$ALLOW_UNKNOWN_FLAGS" = t ||
- die "$(eval_gettext "unknown option: \$opt")"
- FLAGS="${FLAGS}${FLAGS:+ }$opt"
- ;;
- *)
- REV="${REV}${REV:+ }'$opt'"
- ;;
- esac
- done
-
- eval set -- $REV
-
- case $# in
- 0)
- have_stash || die "$(gettext "No stash entries found.")"
- set -- ${ref_stash}@{0}
- ;;
- 1)
- :
- ;;
- *)
- die "$(eval_gettext "Too many revisions specified: \$REV")"
- ;;
- esac
-
- case "$1" in
- *[!0-9]*)
- :
- ;;
- *)
- set -- "${ref_stash}@{$1}"
- ;;
- esac
-
- REV=$(git rev-parse --symbolic --verify --quiet "$1") || {
- reference="$1"
- die "$(eval_gettext "\$reference is not a valid reference")"
- }
-
- i_commit=$(git rev-parse --verify --quiet "$REV^2") &&
- set -- $(git rev-parse "$REV" "$REV^1" "$REV:" "$REV^1:" "$REV^2:" 2>/dev/null) &&
- s=$1 &&
- w_commit=$1 &&
- b_commit=$2 &&
- w_tree=$3 &&
- b_tree=$4 &&
- i_tree=$5 &&
- IS_STASH_LIKE=t &&
- test "$ref_stash" = "$(git rev-parse --symbolic-full-name "${REV%@*}")" &&
- IS_STASH_REF=t
-
- u_commit=$(git rev-parse --verify --quiet "$REV^3") &&
- u_tree=$(git rev-parse "$REV^3:" 2>/dev/null)
-}
-
-is_stash_like()
-{
- parse_flags_and_rev "$@"
- test -n "$IS_STASH_LIKE"
-}
-
-assert_stash_like() {
- is_stash_like "$@" || {
- args="$*"
- die "$(eval_gettext "'\$args' is not a stash-like commit")"
- }
-}
-
-is_stash_ref() {
- is_stash_like "$@" && test -n "$IS_STASH_REF"
-}
-
-assert_stash_ref() {
- is_stash_ref "$@" || {
- args="$*"
- die "$(eval_gettext "'\$args' is not a stash reference")"
- }
-}
-
-apply_stash () {
-
- assert_stash_like "$@"
-
- git update-index -q --refresh || die "$(gettext "unable to refresh index")"
-
- # current index state
- c_tree=$(git write-tree) ||
- die "$(gettext "Cannot apply a stash in the middle of a merge")"
-
- unstashed_index_tree=
- if test -n "$INDEX_OPTION" && test "$b_tree" != "$i_tree" &&
- test "$c_tree" != "$i_tree"
- then
- git diff-tree --binary $s^2^..$s^2 | git apply --cached
- test $? -ne 0 &&
- die "$(gettext "Conflicts in index. Try without --index.")"
- unstashed_index_tree=$(git write-tree) ||
- die "$(gettext "Could not save index tree")"
- git reset
- fi
-
- if test -n "$u_tree"
- then
- GIT_INDEX_FILE="$TMPindex" git read-tree "$u_tree" &&
- GIT_INDEX_FILE="$TMPindex" git checkout-index --all &&
- rm -f "$TMPindex" ||
- die "$(gettext "Could not restore untracked files from stash entry")"
- fi
-
- eval "
- GITHEAD_$w_tree='Stashed changes' &&
- GITHEAD_$c_tree='Updated upstream' &&
- GITHEAD_$b_tree='Version stash was based on' &&
- export GITHEAD_$w_tree GITHEAD_$c_tree GITHEAD_$b_tree
- "
-
- if test -n "$GIT_QUIET"
- then
- GIT_MERGE_VERBOSITY=0 && export GIT_MERGE_VERBOSITY
- fi
- if git merge-recursive $b_tree -- $c_tree $w_tree
- then
- # No conflict
- if test -n "$unstashed_index_tree"
- then
- git read-tree "$unstashed_index_tree"
- else
- a="$TMP-added" &&
- git diff-index --cached --name-only --diff-filter=A $c_tree >"$a" &&
- git read-tree --reset $c_tree &&
- git update-index --add --stdin <"$a" ||
- die "$(gettext "Cannot unstage modified files")"
- rm -f "$a"
- fi
- squelch=
- if test -n "$GIT_QUIET"
- then
- squelch='>/dev/null 2>&1'
- fi
- (cd "$START_DIR" && eval "git status $squelch") || :
- else
- # Merge conflict; keep the exit status from merge-recursive
- status=$?
- git rerere
- if test -n "$INDEX_OPTION"
- then
- gettextln "Index was not unstashed." >&2
- fi
- exit $status
- fi
-}
-
-pop_stash() {
- assert_stash_ref "$@"
-
- if apply_stash "$@"
- then
- drop_stash "$@"
- else
- status=$?
- say "$(gettext "The stash entry is kept in case you need it again.")"
- exit $status
- fi
-}
-
-drop_stash () {
- assert_stash_ref "$@"
-
- git reflog delete --updateref --rewrite "${REV}" &&
- say "$(eval_gettext "Dropped \${REV} (\$s)")" ||
- die "$(eval_gettext "\${REV}: Could not drop stash entry")"
-
- # clear_stash if we just dropped the last stash entry
- git rev-parse --verify --quiet "$ref_stash@{0}" >/dev/null ||
- clear_stash
-}
-
-apply_to_branch () {
- test -n "$1" || die "$(gettext "No branch name specified")"
- branch=$1
- shift 1
-
- set -- --index "$@"
- assert_stash_like "$@"
-
- git checkout -b $branch $REV^ &&
- apply_stash "$@" && {
- test -z "$IS_STASH_REF" || drop_stash "$@"
- }
-}
-
-test "$1" = "-p" && set "push" "$@"
-
-PARSE_CACHE='--not-parsed'
-# The default command is "push" if nothing but options are given
-seen_non_option=
-for opt
-do
- case "$opt" in
- --) break ;;
- -*) ;;
- *) seen_non_option=t; break ;;
- esac
-done
-
-test -n "$seen_non_option" || set "push" "$@"
-
-# Main command set
-case "$1" in
-list)
- shift
- list_stash "$@"
- ;;
-show)
- shift
- show_stash "$@"
- ;;
-save)
- shift
- save_stash "$@"
- ;;
-push)
- shift
- push_stash "$@"
- ;;
-apply)
- shift
- apply_stash "$@"
- ;;
-clear)
- shift
- clear_stash "$@"
- ;;
-create)
- shift
- create_stash -m "$*" && echo "$w_commit"
- ;;
-store)
- shift
- store_stash "$@"
- ;;
-drop)
- shift
- drop_stash "$@"
- ;;
-pop)
- shift
- pop_stash "$@"
- ;;
-branch)
- shift
- apply_to_branch "$@"
- ;;
-*)
- case $# in
- 0)
- push_stash &&
- say "$(gettext "(To restore them type \"git stash apply\")")"
- ;;
- *)
- usage
- esac
- ;;
-esac
# Copyright (c) 2007 Lars Hjemli
dashless=$(basename "$0" | sed -e 's/-/ /')
-USAGE="[--quiet] add [-b <branch>] [-f|--force] [--name <name>] [--reference <repository>] [--] <repository> [<path>]
+USAGE="[--quiet] [--cached]
+ or: $dashless [--quiet] add [-b <branch>] [-f|--force] [--name <name>] [--reference <repository>] [--] <repository> [<path>]
or: $dashless [--quiet] status [--cached] [--recursive] [--] [<path>...]
or: $dashless [--quiet] init [--] [<path>...]
or: $dashless [--quiet] deinit [-f|--force] (--all| [--] <path>...)
or: $dashless [--quiet] update [--init] [--remote] [-N|--no-fetch] [-f|--force] [--checkout|--merge|--rebase] [--[no-]recommend-shallow] [--reference <repository>] [--recursive] [--] [<path>...]
+ or: $dashless [--quiet] set-branch (--default|--branch <branch>) [--] <path>
or: $dashless [--quiet] summary [--cached|--files] [--summary-limit <n>] [commit] [--] [<path>...]
or: $dashless [--quiet] foreach [--recursive] <command>
or: $dashless [--quiet] sync [--recursive] [--] [<path>...]
# is not reachable from a ref.
is_tip_reachable "$sm_path" "$sha1" ||
fetch_in_submodule "$sm_path" $depth ||
- say "$(eval_gettext "Unable to fetch in submodule path '\$displaypath'")"
+ say "$(eval_gettext "Unable to fetch in submodule path '\$displaypath'; trying to directly fetch \$sha1:")"
# Now we tried the usual fetch, but $sha1 may
# not be reachable from any of the refs
}
}
+#
+# Configures a submodule's default branch
+#
+# $@ = requested path
+#
+cmd_set_branch() {
+ unset_branch=false
+ branch=
+
+ while test $# -ne 0
+ do
+ case "$1" in
+ -q|--quiet)
+ # we don't do anything with this but we need to accept it
+ ;;
+ -d|--default)
+ unset_branch=true
+ ;;
+ -b|--branch)
+ case "$2" in '') usage ;; esac
+ branch=$2
+ shift
+ ;;
+ --)
+ shift
+ break
+ ;;
+ -*)
+ usage
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+ done
+
+ if test $# -ne 1
+ then
+ usage
+ fi
+
+ # we can't use `git submodule--helper name` here because internally, it
+ # hashes the path so a trailing slash could lead to an unintentional no match
+ name="$(git submodule--helper list "$1" | cut -f2)"
+ if test -z "$name"
+ then
+ exit 1
+ fi
+
+ test -n "$branch"; has_branch=$?
+ test "$unset_branch" = true; has_unset_branch=$?
+
+ if test $((!$has_branch != !$has_unset_branch)) -eq 0
+ then
+ usage
+ fi
+
+ if test $has_branch -eq 0
+ then
+ git submodule--helper config submodule."$name".branch "$branch"
+ else
+ git submodule--helper config --unset submodule."$name".branch
+ fi
+}
+
#
# Show commit summary for submodules in index or working tree
#
while test $# != 0 && test -z "$command"
do
case "$1" in
- add | foreach | init | deinit | update | status | summary | sync | absorbgitdirs)
+ add | foreach | init | deinit | update | set-branch | status | summary | sync | absorbgitdirs)
command=$1
;;
-q|--quiet)
fi
fi
-# "-b branch" is accepted only by "add"
-if test -n "$branch" && test "$command" != add
+# "-b branch" is accepted only by "add" and "set-branch"
+if test -n "$branch" && (test "$command" != add || test "$command" != set-branch)
then
usage
fi
usage
fi
-"cmd_$command" "$@"
+"cmd_$(echo $command | sed -e s/-/_/g)" "$@"
{
struct string_list list = STRING_LIST_INIT_DUP;
int i;
+ int nongit;
+
+ /*
+ * Set up the repository so we can pick up any repo-level config (like
+ * completion.commands).
+ */
+ setup_git_directory_gently(&nongit);
while (*spec) {
const char *sep = strchrnul(spec, ',');
git_set_exec_path(cmd + 1);
else {
puts(git_exec_path());
+ trace2_cmd_name("_query_");
exit(0);
}
} else if (!strcmp(cmd, "--html-path")) {
puts(system_path(GIT_HTML_PATH));
+ trace2_cmd_name("_query_");
exit(0);
} else if (!strcmp(cmd, "--man-path")) {
puts(system_path(GIT_MAN_PATH));
+ trace2_cmd_name("_query_");
exit(0);
} else if (!strcmp(cmd, "--info-path")) {
puts(system_path(GIT_INFO_PATH));
+ trace2_cmd_name("_query_");
exit(0);
} else if (!strcmp(cmd, "-p") || !strcmp(cmd, "--paginate")) {
use_pager = 1;
(*argv)++;
(*argc)--;
} else if (skip_prefix(cmd, "--list-cmds=", &cmd)) {
+ trace2_cmd_name("_query_");
if (!strcmp(cmd, "parseopt")) {
struct string_list list = STRING_LIST_INIT_DUP;
int i;
commit_pager_choice();
child.use_shell = 1;
+ child.trace2_child_class = "shell_alias";
argv_array_push(&child.args, alias_string + 1);
argv_array_pushv(&child.args, (*argv) + 1);
+ trace2_cmd_alias(alias_command, child.args.argv);
+ trace2_cmd_list_config();
+ trace2_cmd_name("_run_shell_alias_");
+
ret = run_command(&child);
if (ret >= 0) /* normal exit */
exit(ret);
/* insert after command name */
memcpy(new_argv + count, *argv + 1, sizeof(char *) * *argcp);
+ trace2_cmd_alias(alias_command, new_argv);
+ trace2_cmd_list_config();
+
*argv = new_argv;
*argcp += count - 1;
setup_work_tree();
trace_argv_printf(argv, "trace: built-in: git");
+ trace2_cmd_name(p->cmd);
+ trace2_cmd_list_config();
validate_cache_entries(the_repository->index);
status = p->fn(argc, argv, prefix);
{ "diff-files", cmd_diff_files, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
{ "diff-index", cmd_diff_index, RUN_SETUP | NO_PARSEOPT },
{ "diff-tree", cmd_diff_tree, RUN_SETUP | NO_PARSEOPT },
- { "difftool", cmd_difftool, RUN_SETUP | NEED_WORK_TREE },
+ { "difftool", cmd_difftool, RUN_SETUP_GENTLY },
{ "fast-export", cmd_fast_export, RUN_SETUP },
{ "fetch", cmd_fetch, RUN_SETUP },
{ "fetch-pack", cmd_fetch_pack, RUN_SETUP | NO_PARSEOPT },
{ "show-index", cmd_show_index },
{ "show-ref", cmd_show_ref, RUN_SETUP },
{ "stage", cmd_add, RUN_SETUP | NEED_WORK_TREE },
+ /*
+	 * NEEDSWORK: Until the builtin stash is thoroughly robust and no
+	 * longer needs redirection to the stash shell script, this is kept
+	 * as is; it should then be changed to RUN_SETUP | NEED_WORK_TREE
+ */
+ { "stash", cmd_stash },
{ "status", cmd_status, RUN_SETUP | NEED_WORK_TREE },
{ "stripspace", cmd_stripspace },
{ "submodule--helper", cmd_submodule__helper, RUN_SETUP | SUPPORT_SUPER_PREFIX | NO_PARSEOPT },
cmd.clean_on_exit = 1;
cmd.wait_after_clean = 1;
cmd.silent_exec_failure = 1;
+ cmd.trace2_child_class = "dashed";
+
+ trace2_cmd_name("_run_dashed_");
+ /*
+ * The code in run_command() logs trace2 child_start/child_exit
+ * events, so we do not need to report exec/exec_result events here.
+ */
trace_argv_printf(cmd.args.argv, "trace: exec:");
/*
* the program.
*/
status = run_command(&cmd);
+
+ /*
+ * If the child process ran and we are now going to exit, emit a
+ * generic string as our trace2 command verb to indicate that we
+ * launched a dashed command.
+ */
if (status >= 0)
exit(status);
else if (errno != ENOENT)
if (!done_alias)
handle_builtin(*argcp, *argv);
+#if 0 // TODO In GFW, need to amend a7924b655e940b06cb547c235d6bed9767929673 to include trace2_ and _tr2 lines.
+ else if (get_builtin(**argv)) {
+ struct argv_array args = ARGV_ARRAY_INIT;
+ int i;
+
+ /*
+ * The current process is committed to launching a
+ * child process to run the command named in (**argv)
+ * and exiting. Log a generic string as the trace2
+ * command verb to indicate this. Note that the child
+ * process will log the actual verb when it runs.
+ */
+ trace2_cmd_name("_run_git_alias_");
+
+ if (get_super_prefix())
+ die("%s doesn't support --super-prefix", **argv);
+
+ commit_pager_choice();
+
+ argv_array_push(&args, "git");
+ for (i = 0; i < *argcp; i++)
+ argv_array_push(&args, (*argv)[i]);
+
+ trace_argv_printf(args.argv, "trace: exec:");
+
+ /*
+ * if we fail because the command is not found, it is
+ * OK to return. Otherwise, we just pass along the status code.
+ */
+ i = run_command_v_opt_tr2(args.argv, RUN_SILENT_EXEC_FAILURE |
+ RUN_CLEAN_ON_EXIT, "git_alias");
+ if (i >= 0 || errno != ENOENT)
+ exit(i);
+ die("could not execute builtin %s", **argv);
+ }
+#endif // a7924b655e940b06cb547c235d6bed9767929673
+
/* .. then try the external ones */
execv_dashed_external(*argv);
# Bulgarian translation of gitk po-file.
-# Copyright (C) 2014, 2015 Alexander Shopov <ash@kambanaria.org>.
+# Copyright (C) 2014, 2015, 2019 Alexander Shopov <ash@kambanaria.org>.
# This file is distributed under the same license as the git package.
-# Alexander Shopov <ash@kambanaria.org>, 2014, 2015.
+# Alexander Shopov <ash@kambanaria.org>, 2014, 2015, 2019.
#
#
msgid ""
msgstr ""
"Project-Id-Version: gitk master\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2015-12-19 11:48+0200\n"
-"PO-Revision-Date: 2015-12-19 11:49+0200\n"
+"POT-Creation-Date: 2019-03-04 11:27+0100\n"
+"PO-Revision-Date: 2019-03-04 11:39+0100\n"
"Last-Translator: Alexander Shopov <ash@kambanaria.org>\n"
"Language-Team: Bulgarian <dict@fsa-bg.org>\n"
"Language: bg\n"
msgid "Couldn't get list of unmerged files:"
msgstr "Списъкът с неслети файлове не може да бъде получен:"
-#: gitk:212 gitk:2399
+#: gitk:212 gitk:2403
msgid "Color words"
msgstr "Оцветяване на думите"
-#: gitk:217 gitk:2399 gitk:8239 gitk:8272
+#: gitk:217 gitk:2403 gitk:8249 gitk:8282
msgid "Markup words"
msgstr "Отбелязване на думите"
#: gitk:324
msgid "Error parsing revisions:"
-msgstr "Грешка при разбор на версиите:"
+msgstr "Грешка при анализ на версиите:"
#: gitk:380
msgid "Error executing --argscmd command:"
msgid "Reading"
msgstr "Прочитане"
-#: gitk:496 gitk:4544
+#: gitk:496 gitk:4549
msgid "Reading commits..."
msgstr "Прочитане на подаванията…"
-#: gitk:499 gitk:1637 gitk:4547
+#: gitk:499 gitk:1641 gitk:4552
msgid "No commits selected"
msgstr "Не са избрани подавания"
-#: gitk:1445 gitk:4064 gitk:12469
+#: gitk:1449 gitk:4069 gitk:12583
msgid "Command line"
msgstr "Команден ред"
-#: gitk:1511
+#: gitk:1515
msgid "Can't parse git log output:"
msgstr "Изходът от „git log“ не може да се анализира:"
-#: gitk:1740
+#: gitk:1744
msgid "No commit information available"
msgstr "Липсва информация за подавания"
-#: gitk:1903 gitk:1932 gitk:4334 gitk:9702 gitk:11274 gitk:11554
+#: gitk:1907 gitk:1936 gitk:4339 gitk:9789 gitk:11388 gitk:11668
msgid "OK"
msgstr "Добре"
-#: gitk:1934 gitk:4336 gitk:9215 gitk:9294 gitk:9424 gitk:9473 gitk:9704
-#: gitk:11275 gitk:11555
+#: gitk:1938 gitk:4341 gitk:9225 gitk:9304 gitk:9434 gitk:9520 gitk:9791
+#: gitk:11389 gitk:11669
msgid "Cancel"
msgstr "Отказ"
-#: gitk:2083
+#: gitk:2087
msgid "&Update"
msgstr "&Обновяване"
-#: gitk:2084
+#: gitk:2088
msgid "&Reload"
msgstr "&Презареждане"
-#: gitk:2085
+#: gitk:2089
msgid "Reread re&ferences"
-msgstr "&Наново прочитане на настройките"
+msgstr "&Наново прочитане"
-#: gitk:2086
+#: gitk:2090
msgid "&List references"
msgstr "&Изброяване на указателите"
-#: gitk:2088
+#: gitk:2092
msgid "Start git &gui"
msgstr "&Стартиране на „git gui“"
-#: gitk:2090
+#: gitk:2094
msgid "&Quit"
msgstr "&Спиране на програмата"
-#: gitk:2082
+#: gitk:2086
msgid "&File"
msgstr "&Файл"
-#: gitk:2094
+#: gitk:2098
msgid "&Preferences"
msgstr "&Настройки"
-#: gitk:2093
+#: gitk:2097
msgid "&Edit"
msgstr "&Редактиране"
-#: gitk:2098
+#: gitk:2102
msgid "&New view..."
msgstr "&Нов изглед…"
-#: gitk:2099
+#: gitk:2103
msgid "&Edit view..."
msgstr "&Редактиране на изгледа…"
-#: gitk:2100
+#: gitk:2104
msgid "&Delete view"
msgstr "&Изтриване на изгледа"
-#: gitk:2102
+#: gitk:2106
msgid "&All files"
msgstr "&Всички файлове"
-#: gitk:2097
+#: gitk:2101
msgid "&View"
msgstr "&Изглед"
-#: gitk:2107 gitk:2117
+#: gitk:2111 gitk:2121
msgid "&About gitk"
msgstr "&Относно gitk"
-#: gitk:2108 gitk:2122
+#: gitk:2112 gitk:2126
msgid "&Key bindings"
msgstr "&Клавишни комбинации"
-#: gitk:2106 gitk:2121
+#: gitk:2110 gitk:2125
msgid "&Help"
msgstr "Помо&щ"
-#: gitk:2199 gitk:8671
+#: gitk:2203 gitk:8681
msgid "SHA1 ID:"
msgstr "SHA1:"
-#: gitk:2243
+#: gitk:2247
msgid "Row"
msgstr "Ред"
-#: gitk:2281
+#: gitk:2285
msgid "Find"
msgstr "Търсене"
-#: gitk:2309
+#: gitk:2313
msgid "commit"
msgstr "подаване"
-#: gitk:2313 gitk:2315 gitk:4706 gitk:4729 gitk:4753 gitk:6774 gitk:6846
-#: gitk:6931
+#: gitk:2317 gitk:2319 gitk:4711 gitk:4734 gitk:4758 gitk:6779 gitk:6851
+#: gitk:6936
msgid "containing:"
msgstr "съдържащо:"
-#: gitk:2316 gitk:3545 gitk:3550 gitk:4782
+#: gitk:2320 gitk:3550 gitk:3555 gitk:4787
msgid "touching paths:"
-msgstr "засягащо пътищата:"
+msgstr "в пътищата:"
-#: gitk:2317 gitk:4796
+#: gitk:2321 gitk:4801
msgid "adding/removing string:"
msgstr "добавящо/премахващо низ"
-#: gitk:2318 gitk:4798
+#: gitk:2322 gitk:4803
msgid "changing lines matching:"
msgstr "променящо редове напасващи:"
-#: gitk:2327 gitk:2329 gitk:4785
+#: gitk:2331 gitk:2333 gitk:4790
msgid "Exact"
msgstr "Точно"
-#: gitk:2329 gitk:4873 gitk:6742
+#: gitk:2333 gitk:4878 gitk:6747
msgid "IgnCase"
msgstr "Без регистър"
-#: gitk:2329 gitk:4755 gitk:4871 gitk:6738
+#: gitk:2333 gitk:4760 gitk:4876 gitk:6743
msgid "Regexp"
msgstr "Рег. израз"
-#: gitk:2331 gitk:2332 gitk:4893 gitk:4923 gitk:4930 gitk:6867 gitk:6935
+#: gitk:2335 gitk:2336 gitk:4898 gitk:4928 gitk:4935 gitk:6872 gitk:6940
msgid "All fields"
msgstr "Всички полета"
-#: gitk:2332 gitk:4890 gitk:4923 gitk:6805
+#: gitk:2336 gitk:4895 gitk:4928 gitk:6810
msgid "Headline"
msgstr "Първи ред"
-#: gitk:2333 gitk:4890 gitk:6805 gitk:6935 gitk:7408
+#: gitk:2337 gitk:4895 gitk:6810 gitk:6940 gitk:7413
msgid "Comments"
msgstr "Коментари"
-#: gitk:2333 gitk:4890 gitk:4895 gitk:4930 gitk:6805 gitk:7343 gitk:8849
-#: gitk:8864
+#: gitk:2337 gitk:4895 gitk:4900 gitk:4935 gitk:6810 gitk:7348 gitk:8859
+#: gitk:8874
msgid "Author"
msgstr "Автор"
-#: gitk:2333 gitk:4890 gitk:6805 gitk:7345
+#: gitk:2337 gitk:4895 gitk:6810 gitk:7350
msgid "Committer"
msgstr "Подаващ"
-#: gitk:2367
+#: gitk:2371
msgid "Search"
msgstr "Търсене"
-#: gitk:2375
+#: gitk:2379
msgid "Diff"
msgstr "Разлики"
-#: gitk:2377
+#: gitk:2381
msgid "Old version"
msgstr "Стара версия"
-#: gitk:2379
+#: gitk:2383
msgid "New version"
msgstr "Нова версия"
-#: gitk:2382
+#: gitk:2386
msgid "Lines of context"
msgstr "Контекст в редове"
-#: gitk:2392
+#: gitk:2396
msgid "Ignore space change"
msgstr "Празните знаци без значение"
-#: gitk:2396 gitk:2398 gitk:7978 gitk:8225
+#: gitk:2400 gitk:2402 gitk:7983 gitk:8235
msgid "Line diff"
msgstr "Поредови разлики"
-#: gitk:2463
+#: gitk:2467
msgid "Patch"
msgstr "Кръпка"
-#: gitk:2465
+#: gitk:2469
msgid "Tree"
msgstr "Дърво"
-#: gitk:2635 gitk:2656
+#: gitk:2639 gitk:2660
msgid "Diff this -> selected"
msgstr "Разлики между това и избраното"
-#: gitk:2636 gitk:2657
+#: gitk:2640 gitk:2661
msgid "Diff selected -> this"
msgstr "Разлики между избраното и това"
-#: gitk:2637 gitk:2658
+#: gitk:2641 gitk:2662
msgid "Make patch"
msgstr "Създаване на кръпка"
-#: gitk:2638 gitk:9273
+#: gitk:2642 gitk:9283
msgid "Create tag"
msgstr "Създаване на етикет"
-#: gitk:2639
+#: gitk:2643
msgid "Copy commit summary"
msgstr "Копиране на информацията за подаване"
-#: gitk:2640 gitk:9404
+#: gitk:2644 gitk:9414
msgid "Write commit to file"
msgstr "Запазване на подаването във файл"
-#: gitk:2641 gitk:9461
+#: gitk:2645
msgid "Create new branch"
msgstr "Създаване на нов клон"
-#: gitk:2642
+#: gitk:2646
msgid "Cherry-pick this commit"
msgstr "Отбиране на това подаване"
-#: gitk:2643
+#: gitk:2647
msgid "Reset HEAD branch to here"
msgstr "Привеждане на върха на клона към текущото подаване"
-#: gitk:2644
+#: gitk:2648
msgid "Mark this commit"
msgstr "Отбелязване на това подаване"
-#: gitk:2645
+#: gitk:2649
msgid "Return to mark"
msgstr "Връщане към отбелязаното подаване"
-#: gitk:2646
+#: gitk:2650
msgid "Find descendant of this and mark"
msgstr "Откриване и отбелязване на наследниците"
-#: gitk:2647
+#: gitk:2651
msgid "Compare with marked commit"
msgstr "Сравнение с отбелязаното подаване"
-#: gitk:2648 gitk:2659
+#: gitk:2652 gitk:2663
msgid "Diff this -> marked commit"
msgstr "Разлики между това и отбелязаното"
-#: gitk:2649 gitk:2660
+#: gitk:2653 gitk:2664
msgid "Diff marked commit -> this"
msgstr "Разлики между отбелязаното и това"
-#: gitk:2650
+#: gitk:2654
msgid "Revert this commit"
msgstr "Отмяна на това подаване"
-#: gitk:2666
+#: gitk:2670
msgid "Check out this branch"
msgstr "Изтегляне на този клон"
-#: gitk:2667
+#: gitk:2671
+msgid "Rename this branch"
+msgstr "Преименуване на този клон"
+
+#: gitk:2672
msgid "Remove this branch"
msgstr "Изтриване на този клон"
-#: gitk:2668
+#: gitk:2673
msgid "Copy branch name"
msgstr "Копиране на името на клона"
-#: gitk:2675
+#: gitk:2680
msgid "Highlight this too"
msgstr "Отбелязване и на това"
-#: gitk:2676
+#: gitk:2681
msgid "Highlight this only"
msgstr "Отбелязване само на това"
-#: gitk:2677
+#: gitk:2682
msgid "External diff"
msgstr "Външна програма за разлики"
-#: gitk:2678
+#: gitk:2683
msgid "Blame parent commit"
msgstr "Анотиране на родителското подаване"
-#: gitk:2679
+#: gitk:2684
msgid "Copy path"
msgstr "Копиране на пътя"
-#: gitk:2686
+#: gitk:2691
msgid "Show origin of this line"
msgstr "Показване на произхода на този ред"
-#: gitk:2687
+#: gitk:2692
msgid "Run git gui blame on this line"
msgstr "Изпълнение на „git gui blame“ върху този ред"
-#: gitk:3031
+#: gitk:3036
msgid "About gitk"
msgstr "Относно gitk"
-#: gitk:3033
+#: gitk:3038
msgid ""
"\n"
"Gitk - a commit viewer for git\n"
"\n"
"Използвайте и разпространявайте при условията на ОПЛ на ГНУ"
-#: gitk:3041 gitk:3108 gitk:9890
+#: gitk:3046 gitk:3113 gitk:10004
msgid "Close"
msgstr "Затваряне"
-#: gitk:3062
+#: gitk:3067
msgid "Gitk key bindings"
msgstr "Клавишни комбинации"
-#: gitk:3065
+#: gitk:3070
msgid "Gitk key bindings:"
msgstr "Клавишни комбинации:"
-#: gitk:3067
+#: gitk:3072
#, tcl-format
msgid "<%s-Q>\t\tQuit"
msgstr "<%s-Q>\t\tСпиране на програмата"
-#: gitk:3068
+#: gitk:3073
#, tcl-format
msgid "<%s-W>\t\tClose window"
msgstr "<%s-W>\t\tЗатваряне на прозореца"
-#: gitk:3069
+#: gitk:3074
msgid "<Home>\t\tMove to first commit"
msgstr "<Home>\t\tКъм първото подаване"
-#: gitk:3070
+#: gitk:3075
msgid "<End>\t\tMove to last commit"
msgstr "<End>\t\tКъм последното подаване"
-#: gitk:3071
+#: gitk:3076
msgid "<Up>, p, k\tMove up one commit"
msgstr "<Up>, p, k\tЕдно подаване нагоре"
-#: gitk:3072
+#: gitk:3077
msgid "<Down>, n, j\tMove down one commit"
msgstr "<Down>, n, j\tЕдно подаване надолу"
-#: gitk:3073
+#: gitk:3078
msgid "<Left>, z, h\tGo back in history list"
msgstr "<Left>, z, h\tНазад в историята"
-#: gitk:3074
+#: gitk:3079
msgid "<Right>, x, l\tGo forward in history list"
msgstr "<Right>, x, l\tНапред в историята"
-#: gitk:3075
+#: gitk:3080
#, tcl-format
msgid "<%s-n>\tGo to n-th parent of current commit in history list"
msgstr "<%s-n>\tКъм n-тия родител на текущото подаване в историята"
-#: gitk:3076
+#: gitk:3081
msgid "<PageUp>\tMove up one page in commit list"
msgstr "<PageUp>\tСтраница нагоре в списъка с подаванията"
-#: gitk:3077
+#: gitk:3082
msgid "<PageDown>\tMove down one page in commit list"
msgstr "<PageDown>\tСтраница надолу в списъка с подаванията"
-#: gitk:3078
+#: gitk:3083
#, tcl-format
msgid "<%s-Home>\tScroll to top of commit list"
msgstr "<%s-Home>\tКъм началото на списъка с подаванията"
-#: gitk:3079
+#: gitk:3084
#, tcl-format
msgid "<%s-End>\tScroll to bottom of commit list"
msgstr "<%s-End>\tКъм края на списъка с подаванията"
-#: gitk:3080
+#: gitk:3085
#, tcl-format
msgid "<%s-Up>\tScroll commit list up one line"
msgstr "<%s-Up>\tРед нагоре в списъка с подавания"
-#: gitk:3081
+#: gitk:3086
#, tcl-format
msgid "<%s-Down>\tScroll commit list down one line"
msgstr "<%s-Down>\tРед надолу в списъка с подавания"
-#: gitk:3082
+#: gitk:3087
#, tcl-format
msgid "<%s-PageUp>\tScroll commit list up one page"
msgstr "<%s-PageUp>\tСтраница нагоре в списъка с подавания"
-#: gitk:3083
+#: gitk:3088
#, tcl-format
msgid "<%s-PageDown>\tScroll commit list down one page"
msgstr "<%s-PageDown>\tСтраница надолу в списъка с подавания"
-#: gitk:3084
+#: gitk:3089
msgid "<Shift-Up>\tFind backwards (upwards, later commits)"
msgstr "<Shift-Up>\tТърсене назад (визуално нагоре, исторически — последващи)"
-#: gitk:3085
+#: gitk:3090
msgid "<Shift-Down>\tFind forwards (downwards, earlier commits)"
msgstr ""
"<Shift-Down>\tТърсене напред (визуално надолу, исторически — предхождащи)"
-#: gitk:3086
+#: gitk:3091
msgid "<Delete>, b\tScroll diff view up one page"
msgstr "<Delete>, b\tСтраница нагоре в изгледа за разлики"
-#: gitk:3087
+#: gitk:3092
msgid "<Backspace>\tScroll diff view up one page"
msgstr "<Backspace>\tСтраница надолу в изгледа за разлики"
-#: gitk:3088
+#: gitk:3093
msgid "<Space>\t\tScroll diff view down one page"
msgstr "<Space>\t\tСтраница надолу в изгледа за разлики"
-#: gitk:3089
+#: gitk:3094
msgid "u\t\tScroll diff view up 18 lines"
msgstr "u\t\t18 реда нагоре в изгледа за разлики"
-#: gitk:3090
+#: gitk:3095
msgid "d\t\tScroll diff view down 18 lines"
msgstr "d\t\t18 реда надолу в изгледа за разлики"
-#: gitk:3091
+#: gitk:3096
#, tcl-format
msgid "<%s-F>\t\tFind"
msgstr "<%s-F>\t\tТърсене"
-#: gitk:3092
+#: gitk:3097
#, tcl-format
msgid "<%s-G>\t\tMove to next find hit"
msgstr "<%s-G>\t\tКъм следващата поява"
-#: gitk:3093
+#: gitk:3098
msgid "<Return>\tMove to next find hit"
msgstr "<Return>\tКъм следващата поява"
-#: gitk:3094
+#: gitk:3099
msgid "g\t\tGo to commit"
msgstr "g\t\tКъм последното подаване"
-#: gitk:3095
+#: gitk:3100
msgid "/\t\tFocus the search box"
msgstr "/\t\tФокус върху полето за търсене"
-#: gitk:3096
+#: gitk:3101
msgid "?\t\tMove to previous find hit"
msgstr "?\t\tКъм предишната поява"
-#: gitk:3097
+#: gitk:3102
msgid "f\t\tScroll diff view to next file"
msgstr "f\t\tСледващ файл в изгледа за разлики"
-#: gitk:3098
+#: gitk:3103
#, tcl-format
msgid "<%s-S>\t\tSearch for next hit in diff view"
msgstr "<%s-S>\t\tТърсене на следващата поява в изгледа за разлики"
-#: gitk:3099
+#: gitk:3104
#, tcl-format
msgid "<%s-R>\t\tSearch for previous hit in diff view"
msgstr "<%s-R>\t\tТърсене на предишната поява в изгледа за разлики"
-#: gitk:3100
+#: gitk:3105
#, tcl-format
msgid "<%s-KP+>\tIncrease font size"
msgstr "<%s-KP+>\tПо-голям размер на шрифта"
-#: gitk:3101
+#: gitk:3106
#, tcl-format
msgid "<%s-plus>\tIncrease font size"
msgstr "<%s-plus>\tПо-голям размер на шрифта"
-#: gitk:3102
+#: gitk:3107
#, tcl-format
msgid "<%s-KP->\tDecrease font size"
msgstr "<%s-KP->\tПо-малък размер на шрифта"
-#: gitk:3103
+#: gitk:3108
#, tcl-format
msgid "<%s-minus>\tDecrease font size"
msgstr "<%s-minus>\tПо-малък размер на шрифта"
-#: gitk:3104
+#: gitk:3109
msgid "<F5>\t\tUpdate"
msgstr "<F5>\t\tОбновяване"
-#: gitk:3569 gitk:3578
+#: gitk:3574 gitk:3583
#, tcl-format
msgid "Error creating temporary directory %s:"
msgstr "Грешка при създаването на временната директория „%s“:"
-#: gitk:3591
+#: gitk:3596
#, tcl-format
msgid "Error getting \"%s\" from %s:"
msgstr "Грешка при получаването на „%s“ от %s:"
-#: gitk:3654
+#: gitk:3659
msgid "command failed:"
msgstr "неуспешно изпълнение на команда:"
-#: gitk:3803
+#: gitk:3808
msgid "No such commit"
msgstr "Такова подаване няма"
-#: gitk:3817
+#: gitk:3822
msgid "git gui blame: command failed:"
msgstr "„git gui blame“: неуспешно изпълнение на команда:"
-#: gitk:3848
+#: gitk:3853
#, tcl-format
msgid "Couldn't read merge head: %s"
msgstr "Върхът за сливане не може да бъде прочетен: %s"
-#: gitk:3856
+#: gitk:3861
#, tcl-format
msgid "Error reading index: %s"
msgstr "Грешка при прочитане на индекса: %s"
-#: gitk:3881
+#: gitk:3886
#, tcl-format
msgid "Couldn't start git blame: %s"
msgstr "Командата „git blame“ не може да бъде стартирана: %s"
-#: gitk:3884 gitk:6773
+#: gitk:3889 gitk:6778
msgid "Searching"
msgstr "Търсене"
-#: gitk:3916
+#: gitk:3921
#, tcl-format
msgid "Error running git blame: %s"
msgstr "Грешка при изпълнението на „git blame“: %s"
-#: gitk:3944
+#: gitk:3949
#, tcl-format
msgid "That line comes from commit %s, which is not in this view"
msgstr "Този ред идва от подаването %s, което не е в изгледа"
-#: gitk:3958
+#: gitk:3963
msgid "External diff viewer failed:"
msgstr "Неуспешно изпълнение на външната програма за разлики:"
-#: gitk:4062
+#: gitk:4067
msgid "All files"
msgstr "Всички файлове"
-#: gitk:4086
+#: gitk:4091
msgid "View"
msgstr "Изглед"
-#: gitk:4089
+#: gitk:4094
msgid "Gitk view definition"
msgstr "Дефиниция на изглед в Gitk"
-#: gitk:4093
+#: gitk:4098
msgid "Remember this view"
msgstr "Запазване на този изглед"
-#: gitk:4094
+#: gitk:4099
msgid "References (space separated list):"
msgstr "Указатели (списък с разделител интервал):"
-#: gitk:4095
+#: gitk:4100
msgid "Branches & tags:"
msgstr "Клони и етикети:"
-#: gitk:4096
+#: gitk:4101
msgid "All refs"
msgstr "Всички указатели"
-#: gitk:4097
+#: gitk:4102
msgid "All (local) branches"
msgstr "Всички (локални) клони"
-#: gitk:4098
+#: gitk:4103
msgid "All tags"
msgstr "Всички етикети"
-#: gitk:4099
+#: gitk:4104
msgid "All remote-tracking branches"
msgstr "Всички следящи клони"
-#: gitk:4100
+#: gitk:4105
msgid "Commit Info (regular expressions):"
msgstr "Информация за подаване (рег. изр.):"
-#: gitk:4101
+#: gitk:4106
msgid "Author:"
msgstr "Автор:"
-#: gitk:4102
+#: gitk:4107
msgid "Committer:"
msgstr "Подал:"
-#: gitk:4103
+#: gitk:4108
msgid "Commit Message:"
msgstr "Съобщение при подаване:"
-#: gitk:4104
+#: gitk:4109
msgid "Matches all Commit Info criteria"
msgstr "Съвпадение по всички характеристики на подаването"
-#: gitk:4105
+#: gitk:4110
msgid "Matches no Commit Info criteria"
msgstr "Не съвпада по никоя от характеристиките на подаването"
-#: gitk:4106
+#: gitk:4111
msgid "Changes to Files:"
msgstr "Промени по файловете:"
-#: gitk:4107
+#: gitk:4112
msgid "Fixed String"
msgstr "Дословен низ"
-#: gitk:4108
+#: gitk:4113
msgid "Regular Expression"
msgstr "Регулярен израз"
-#: gitk:4109
+#: gitk:4114
msgid "Search string:"
msgstr "Низ за търсене:"
-#: gitk:4110
+#: gitk:4115
msgid ""
"Commit Dates (\"2 weeks ago\", \"2009-03-17 15:27:38\", \"March 17, 2009 "
"15:27:38\"):"
"Дата на подаване („2 weeks ago“ (преди 2 седмици), „2009-03-17 15:27:38“, "
"„March 17, 2009 15:27:38“):"
-#: gitk:4111
+#: gitk:4116
msgid "Since:"
msgstr "От:"
-#: gitk:4112
+#: gitk:4117
msgid "Until:"
msgstr "До:"
-#: gitk:4113
+#: gitk:4118
msgid "Limit and/or skip a number of revisions (positive integer):"
msgstr ""
"Ограничаване и/или прескачане на определен брой версии (неотрицателно цяло "
"число):"
-#: gitk:4114
+#: gitk:4119
msgid "Number to show:"
msgstr "Брой показани:"
-#: gitk:4115
+#: gitk:4120
msgid "Number to skip:"
msgstr "Брой прескочени:"
-#: gitk:4116
+#: gitk:4121
msgid "Miscellaneous options:"
msgstr "Разни:"
-#: gitk:4117
+#: gitk:4122
msgid "Strictly sort by date"
msgstr "Подреждане по дата"
-#: gitk:4118
+#: gitk:4123
msgid "Mark branch sides"
msgstr "Отбелязване на страните по клона"
-#: gitk:4119
+#: gitk:4124
msgid "Limit to first parent"
msgstr "Само първия родител"
-#: gitk:4120
+#: gitk:4125
msgid "Simple history"
msgstr "Опростена история"
-#: gitk:4121
+#: gitk:4126
msgid "Additional arguments to git log:"
msgstr "Допълнителни аргументи към „git log“:"
-#: gitk:4122
+#: gitk:4127
msgid "Enter files and directories to include, one per line:"
msgstr "Въведете файловете и директориите за включване, по елемент на ред"
-#: gitk:4123
+#: gitk:4128
msgid "Command to generate more commits to include:"
msgstr ""
"Команда за генерирането на допълнителни подавания, които да бъдат включени:"
-#: gitk:4247
+#: gitk:4252
msgid "Gitk: edit view"
msgstr "Gitk: редактиране на изглед"
-#: gitk:4255
+#: gitk:4260
msgid "-- criteria for selecting revisions"
msgstr "— критерии за избор на версии"
-#: gitk:4260
+#: gitk:4265
msgid "View Name"
msgstr "Име на изглед"
-#: gitk:4335
+#: gitk:4340
msgid "Apply (F5)"
msgstr "Прилагане (F5)"
-#: gitk:4373
+#: gitk:4378
msgid "Error in commit selection arguments:"
msgstr "Грешка в аргументите за избор на подавания:"
-#: gitk:4428 gitk:4481 gitk:4943 gitk:4957 gitk:6227 gitk:12410 gitk:12411
+#: gitk:4433 gitk:4486 gitk:4948 gitk:4962 gitk:6232 gitk:12524 gitk:12525
msgid "None"
msgstr "Няма"
-#: gitk:5040 gitk:5045
+#: gitk:5045 gitk:5050
msgid "Descendant"
msgstr "Наследник"
-#: gitk:5041
+#: gitk:5046
msgid "Not descendant"
msgstr "Не е наследник"
-#: gitk:5048 gitk:5053
+#: gitk:5053 gitk:5058
msgid "Ancestor"
msgstr "Предшественик"
-#: gitk:5049
+#: gitk:5054
msgid "Not ancestor"
msgstr "Не е предшественик"
-#: gitk:5343
+#: gitk:5348
msgid "Local changes checked in to index but not committed"
msgstr "Локални промени добавени към индекса, но неподадени"
-#: gitk:5379
+#: gitk:5384
msgid "Local uncommitted changes, not checked in to index"
msgstr "Локални промени извън индекса"
-#: gitk:7153
+#: gitk:7158
msgid "and many more"
msgstr "и още много"
-#: gitk:7156
+#: gitk:7161
msgid "many"
msgstr "много"
-#: gitk:7347
+#: gitk:7352
msgid "Tags:"
msgstr "Етикети:"
-#: gitk:7364 gitk:7370 gitk:8844
+#: gitk:7369 gitk:7375 gitk:8854
msgid "Parent"
msgstr "Родител"
-#: gitk:7375
+#: gitk:7380
msgid "Child"
msgstr "Дете"
-#: gitk:7384
+#: gitk:7389
msgid "Branch"
msgstr "Клон"
-#: gitk:7387
+#: gitk:7392
msgid "Follows"
msgstr "Следва"
-#: gitk:7390
+#: gitk:7395
msgid "Precedes"
msgstr "Предшества"
-#: gitk:7985
+#: gitk:7990
#, tcl-format
msgid "Error getting diffs: %s"
msgstr "Грешка при получаването на разликите: %s"
-#: gitk:8669
+#: gitk:8679
msgid "Goto:"
msgstr "Към ред:"
-#: gitk:8690
+#: gitk:8700
#, tcl-format
msgid "Short SHA1 id %s is ambiguous"
msgstr "Съкратената сума по SHA1 %s не е еднозначна"
-#: gitk:8697
+#: gitk:8707
#, tcl-format
msgid "Revision %s is not known"
msgstr "Непозната версия %s"
-#: gitk:8707
+#: gitk:8717
#, tcl-format
msgid "SHA1 id %s is not known"
msgstr "Непозната сума по SHA1 %s"
-#: gitk:8709
+#: gitk:8719
#, tcl-format
msgid "Revision %s is not in the current view"
msgstr "Версия %s не е в текущия изглед"
-#: gitk:8851 gitk:8866
+#: gitk:8861 gitk:8876
msgid "Date"
msgstr "Дата"
-#: gitk:8854
+#: gitk:8864
msgid "Children"
msgstr "Деца"
-#: gitk:8917
+#: gitk:8927
#, tcl-format
msgid "Reset %s branch to here"
msgstr "Зануляване на клона „%s“ към текущото подаване"
-#: gitk:8919
+#: gitk:8929
msgid "Detached head: can't reset"
msgstr "Несвързан връх: невъзможно зануляване"
-#: gitk:9024 gitk:9030
+#: gitk:9034 gitk:9040
msgid "Skipping merge commit "
msgstr "Пропускане на подаването на сливането"
-#: gitk:9039 gitk:9044
+#: gitk:9049 gitk:9054
msgid "Error getting patch ID for "
msgstr "Грешка при получаването на идентификатора на "
-#: gitk:9040 gitk:9045
+#: gitk:9050 gitk:9055
msgid " - stopping\n"
msgstr " — спиране\n"
-#: gitk:9050 gitk:9053 gitk:9061 gitk:9075 gitk:9084
+#: gitk:9060 gitk:9063 gitk:9071 gitk:9085 gitk:9094
msgid "Commit "
msgstr "Подаване"
-#: gitk:9054
+#: gitk:9064
msgid ""
" is the same patch as\n"
" "
" е същата кръпка като\n"
" "
-#: gitk:9062
+#: gitk:9072
msgid ""
" differs from\n"
" "
" се различава от\n"
" "
-#: gitk:9064
+#: gitk:9074
msgid ""
"Diff of commits:\n"
"\n"
"Разлика между подаванията:\n"
"\n"
-#: gitk:9076 gitk:9085
+#: gitk:9086 gitk:9095
#, tcl-format
msgid " has %s children - stopping\n"
msgstr " има %s деца — спиране\n"
-#: gitk:9104
+#: gitk:9114
#, tcl-format
msgid "Error writing commit to file: %s"
msgstr "Грешка при запазването на подаването във файл: %s"
-#: gitk:9110
+#: gitk:9120
#, tcl-format
msgid "Error diffing commits: %s"
msgstr "Грешка при изчисляването на разликите между подаванията: %s"
-#: gitk:9156
+#: gitk:9166
msgid "Top"
msgstr "Най-горе"
-#: gitk:9157
+#: gitk:9167
msgid "From"
msgstr "От"
-#: gitk:9162
+#: gitk:9172
msgid "To"
msgstr "До"
-#: gitk:9186
+#: gitk:9196
msgid "Generate patch"
msgstr "Генериране на кръпка"
-#: gitk:9188
+#: gitk:9198
msgid "From:"
msgstr "От:"
-#: gitk:9197
+#: gitk:9207
msgid "To:"
msgstr "До:"
-#: gitk:9206
+#: gitk:9216
msgid "Reverse"
msgstr "Обръщане"
-#: gitk:9208 gitk:9418
+#: gitk:9218 gitk:9428
msgid "Output file:"
msgstr "Запазване във файла:"
-#: gitk:9214
+#: gitk:9224
msgid "Generate"
msgstr "Генериране"
-#: gitk:9252
+#: gitk:9262
msgid "Error creating patch:"
msgstr "Грешка при създаването на кръпка:"
-#: gitk:9275 gitk:9406 gitk:9463
+#: gitk:9285 gitk:9416 gitk:9504
msgid "ID:"
msgstr "Идентификатор:"
-#: gitk:9284
+#: gitk:9294
msgid "Tag name:"
msgstr "Име на етикет:"
-#: gitk:9287
+#: gitk:9297
msgid "Tag message is optional"
msgstr "Съобщението за етикет е незадължително"
-#: gitk:9289
+#: gitk:9299
msgid "Tag message:"
msgstr "Съобщение за етикет:"
-#: gitk:9293 gitk:9472
+#: gitk:9303 gitk:9474
msgid "Create"
msgstr "Създаване"
-#: gitk:9311
+#: gitk:9321
msgid "No tag name specified"
msgstr "Липсва име на етикет"
-#: gitk:9315
+#: gitk:9325
#, tcl-format
msgid "Tag \"%s\" already exists"
msgstr "Етикетът „%s“ вече съществува"
-#: gitk:9325
+#: gitk:9335
msgid "Error creating tag:"
msgstr "Грешка при създаването на етикет:"
-#: gitk:9415
+#: gitk:9425
msgid "Command:"
msgstr "Команда:"
-#: gitk:9423
+#: gitk:9433
msgid "Write"
msgstr "Запазване"
-#: gitk:9441
+#: gitk:9451
msgid "Error writing commit:"
msgstr "Грешка при запазването на подаването:"
-#: gitk:9468
+#: gitk:9473
+msgid "Create branch"
+msgstr "Създаване на клон"
+
+#: gitk:9489
+#, tcl-format
+msgid "Rename branch %s"
+msgstr "Преименуване на клона „%s“"
+
+#: gitk:9490
+msgid "Rename"
+msgstr "Преименуване"
+
+#: gitk:9514
msgid "Name:"
msgstr "Име:"
-#: gitk:9491
+#: gitk:9538
msgid "Please specify a name for the new branch"
msgstr "Укажете име за новия клон"
-#: gitk:9496
+#: gitk:9543
#, tcl-format
msgid "Branch '%s' already exists. Overwrite?"
msgstr "Клонът „%s“ вече съществува. Да бъде ли презаписан?"
-#: gitk:9563
+#: gitk:9587
+msgid "Please specify a new name for the branch"
+msgstr "Укажете ново име за клона"
+
+#: gitk:9650
#, tcl-format
msgid "Commit %s is already included in branch %s -- really re-apply it?"
msgstr ""
"Подаването „%s“ вече е включено в клона „%s“ — да бъде ли приложено отново?"
-#: gitk:9568
+#: gitk:9655
msgid "Cherry-picking"
msgstr "Отбиране"
-#: gitk:9577
+#: gitk:9664
#, tcl-format
msgid ""
"Cherry-pick failed because of local changes to file '%s'.\n"
"Неуспешно отбиране, защото във файла „%s“ има локални промени.\n"
"Подайте, занулете или ги скатайте и пробвайте отново."
-#: gitk:9583
+#: gitk:9670
msgid ""
"Cherry-pick failed because of merge conflict.\n"
"Do you wish to run git citool to resolve it?"
"Неуспешно отбиране поради конфликти при сливане.\n"
"Искате ли да ги коригирате чрез „git citool“?"
-#: gitk:9599 gitk:9657
+#: gitk:9686 gitk:9744
msgid "No changes committed"
msgstr "Не са подадени промени"
-#: gitk:9626
+#: gitk:9713
#, tcl-format
msgid "Commit %s is not included in branch %s -- really revert it?"
msgstr "Подаването „%s“ не е включено в клона „%s“. Да бъде ли отменено?"
-#: gitk:9631
+#: gitk:9718
msgid "Reverting"
msgstr "Отмяна"
-#: gitk:9639
+#: gitk:9726
#, tcl-format
msgid ""
"Revert failed because of local changes to the following files:%s Please "
"commit, reset or stash your changes and try again."
msgstr ""
"Неуспешна отмяна, защото във файла „%s“ има локални промени.\n"
-"Подайте, занулете или ги скатайте и пробвайте отново.<"
+"Подайте, занулете или ги скатайте и пробвайте отново."
-#: gitk:9643
+#: gitk:9730
msgid ""
"Revert failed because of merge conflict.\n"
" Do you wish to run git citool to resolve it?"
"Неуспешно отмяна поради конфликти при сливане.\n"
"Искате ли да ги коригирате чрез „git citool“?"
-#: gitk:9686
+#: gitk:9773
msgid "Confirm reset"
msgstr "Потвърждаване на зануляването"
-#: gitk:9688
+#: gitk:9775
#, tcl-format
msgid "Reset branch %s to %s?"
msgstr "Да се занули ли клонът „%s“ към „%s“?"
-#: gitk:9690
+#: gitk:9777
msgid "Reset type:"
msgstr "Вид зануляване:"
-#: gitk:9693
+#: gitk:9780
msgid "Soft: Leave working tree and index untouched"
msgstr "Слабо: работното дърво и индекса остават същите"
-#: gitk:9696
+#: gitk:9783
msgid "Mixed: Leave working tree untouched, reset index"
msgstr "Смесено: работното дърво остава същото, индексът се занулява"
-#: gitk:9699
+#: gitk:9786
msgid ""
"Hard: Reset working tree and index\n"
"(discard ALL local changes)"
msgstr ""
"Силно: зануляване и на работното дърво, и на индекса\n"
-"(*ВСИЧКИ* локални промени ще бъдат безвъзвратно загубени)"
+"(ВСИЧКИ локални промени ще бъдат безвъзвратно загубени)"
-#: gitk:9716
+#: gitk:9803
msgid "Resetting"
msgstr "Зануляване"
-#: gitk:9776
+#: gitk:9876
+#, tcl-format
+msgid "A local branch named %s exists already"
+msgstr "Вече съществува локален клон „%s“."
+
+#: gitk:9884
msgid "Checking out"
msgstr "Изтегляне"
-#: gitk:9829
+#: gitk:9943
msgid "Cannot delete the currently checked-out branch"
msgstr "Текущо изтегленият клон не може да бъде изтрит"
-#: gitk:9835
+#: gitk:9949
#, tcl-format
msgid ""
"The commits on branch %s aren't on any other branch.\n"
"Really delete branch %s?"
msgstr ""
"Подаванията на клона „%s“ не са на никой друг клон.\n"
-"Ð\9dаиÑ\81Ñ\82ина ли да Ñ\81е изÑ\82Ñ\80ие клонÑ\8aÑ\82 „%s“?"
+"Ð\9dаиÑ\81Ñ\82ина ли иÑ\81каÑ\82е да изÑ\82Ñ\80иеÑ\82е клона „%s“?"
-#: gitk:9866
+#: gitk:9980
#, tcl-format
msgid "Tags and heads: %s"
msgstr "Етикети и върхове: %s"
-#: gitk:9883
+#: gitk:9997
msgid "Filter"
msgstr "Филтриране"
-#: gitk:10179
+#: gitk:10293
msgid ""
"Error reading commit topology information; branch and preceding/following "
"tag information will be incomplete."
"Грешка при прочитането на топологията на подаванията. Информацията за клона "
"и предшестващите/следващите етикети ще е непълна."
-#: gitk:11156
+#: gitk:11270
msgid "Tag"
msgstr "Етикет"
-#: gitk:11160
+#: gitk:11274
msgid "Id"
msgstr "Идентификатор"
-#: gitk:11243
+#: gitk:11357
msgid "Gitk font chooser"
msgstr "Избор на шрифт за Gitk"
-#: gitk:11260
+#: gitk:11374
msgid "B"
msgstr "Ч"
-#: gitk:11263
+#: gitk:11377
msgid "I"
msgstr "К"
-#: gitk:11381
+#: gitk:11495
msgid "Commit list display options"
msgstr "Настройки на списъка с подавания"
-#: gitk:11384
+#: gitk:11498
msgid "Maximum graph width (lines)"
msgstr "Максимална широчина на графа (в редове)"
-#: gitk:11388
+#: gitk:11502
#, no-tcl-format
msgid "Maximum graph width (% of pane)"
msgstr "Максимална широчина на графа (% от панела)"
-#: gitk:11391
+#: gitk:11505
msgid "Show local changes"
msgstr "Показване на локалните промени"
-#: gitk:11394
+#: gitk:11508
msgid "Auto-select SHA1 (length)"
msgstr "Автоматично избиране на SHA1 (дължина)"
-#: gitk:11398
+#: gitk:11512
msgid "Hide remote refs"
msgstr "Скриване на отдалечените указатели"
-#: gitk:11402
+#: gitk:11516
msgid "Diff display options"
msgstr "Настройки на показването на разликите"
-#: gitk:11404
+#: gitk:11518
msgid "Tab spacing"
msgstr "Широчина на табулатора"
-#: gitk:11407
+#: gitk:11521
msgid "Display nearby tags/heads"
msgstr "Извеждане на близките етикети и върхове"
-#: gitk:11410
+#: gitk:11524
msgid "Maximum # tags/heads to show"
msgstr "Максимален брой етикети/върхове за показване"
-#: gitk:11413
+#: gitk:11527
msgid "Limit diffs to listed paths"
msgstr "Разлика само в избраните пътища"
-#: gitk:11416
+#: gitk:11530
msgid "Support per-file encodings"
msgstr "Поддръжка на различни кодирания за всеки файл"
-#: gitk:11422 gitk:11569
+#: gitk:11536 gitk:11683
msgid "External diff tool"
msgstr "Външен инструмент за разлики"
-#: gitk:11423
+#: gitk:11537
msgid "Choose..."
msgstr "Избор…"
-#: gitk:11428
+#: gitk:11542
msgid "General options"
msgstr "Общи настройки"
-#: gitk:11431
+#: gitk:11545
msgid "Use themed widgets"
msgstr "Използване на тема за графичните обекти"
-#: gitk:11433
+#: gitk:11547
msgid "(change requires restart)"
msgstr "(промяната изисква рестартиране на Gitk)"
-#: gitk:11435
+#: gitk:11549
msgid "(currently unavailable)"
msgstr "(в момента недостъпно)"
-#: gitk:11446
+#: gitk:11560
msgid "Colors: press to choose"
msgstr "Цветове: избира се с натискане"
-#: gitk:11449
+#: gitk:11563
msgid "Interface"
msgstr "Интерфейс"
-#: gitk:11450
+#: gitk:11564
msgid "interface"
msgstr "интерфейс"
-#: gitk:11453
+#: gitk:11567
msgid "Background"
msgstr "Фон"
-#: gitk:11454 gitk:11484
+#: gitk:11568 gitk:11598
msgid "background"
msgstr "фон"
-#: gitk:11457
+#: gitk:11571
msgid "Foreground"
msgstr "Знаци"
-#: gitk:11458
+#: gitk:11572
msgid "foreground"
msgstr "знаци"
-#: gitk:11461
+#: gitk:11575
msgid "Diff: old lines"
msgstr "Разлика: стари редове"
-#: gitk:11462
+#: gitk:11576
msgid "diff old lines"
msgstr "разлика, стари редове"
-#: gitk:11466
+#: gitk:11580
msgid "Diff: new lines"
msgstr "Разлика: нови редове"
-#: gitk:11467
+#: gitk:11581
msgid "diff new lines"
msgstr "разлика, нови редове"
-#: gitk:11471
+#: gitk:11585
msgid "Diff: hunk header"
msgstr "Разлика: начало на парче"
-#: gitk:11473
+#: gitk:11587
msgid "diff hunk header"
msgstr "разлика, начало на парче"
-#: gitk:11477
+#: gitk:11591
msgid "Marked line bg"
msgstr "Фон на отбелязан ред"
-#: gitk:11479
+#: gitk:11593
msgid "marked line background"
msgstr "фон на отбелязан ред"
-#: gitk:11483
+#: gitk:11597
msgid "Select bg"
msgstr "Избор на фон"
-#: gitk:11492
+#: gitk:11606
msgid "Fonts: press to choose"
msgstr "Шрифтове: избира се с натискане"
-#: gitk:11494
+#: gitk:11608
msgid "Main font"
msgstr "Основен шрифт"
-#: gitk:11495
+#: gitk:11609
msgid "Diff display font"
msgstr "Шрифт за разликите"
-#: gitk:11496
+#: gitk:11610
msgid "User interface font"
msgstr "Шрифт на интерфейса"
-#: gitk:11518
+#: gitk:11632
msgid "Gitk preferences"
msgstr "Настройки на Gitk"
-#: gitk:11527
+#: gitk:11641
msgid "General"
msgstr "Общи"
-#: gitk:11528
+#: gitk:11642
msgid "Colors"
msgstr "Цветове"
-#: gitk:11529
+#: gitk:11643
msgid "Fonts"
msgstr "Шрифтове"
-#: gitk:11579
+#: gitk:11693
#, tcl-format
msgid "Gitk: choose color for %s"
msgstr "Gitk: избор на цвят на „%s“"
-#: gitk:12092
+#: gitk:12206
msgid ""
"Sorry, gitk cannot run with this version of Tcl/Tk.\n"
" Gitk requires at least Tcl/Tk 8.4."
"Тази версия на Tcl/Tk не се поддържа от Gitk.\n"
" Необходима ви е поне Tcl/Tk 8.4."
-#: gitk:12302
+#: gitk:12416
msgid "Cannot find a git repository here."
msgstr "Тук липсва хранилище на Git."
-#: gitk:12349
+#: gitk:12463
#, tcl-format
msgid "Ambiguous argument '%s': both revision and filename"
msgstr "Нееднозначен аргумент „%s“: има и такава версия, и такъв файл"
-#: gitk:12361
+#: gitk:12475
msgid "Bad arguments to gitk:"
msgstr "Неправилни аргументи на gitk:"
# ======================================================================
# input validation and dispatch
+# Various hash size-related values.
+my $sha1_len = 40;
+my $sha256_extra_len = 24;
+my $sha256_len = $sha1_len + $sha256_extra_len;
+
+# A regex matching $len hex characters. $len may be a range (e.g. 7,64).
+sub oid_nlen_regex {
+ my $len = shift;
+ my $hchr = qr/[0-9a-fA-F]/;
+ return qr/(?:(?:$hchr){$len})/;
+}
+
+# A regex matching two sets of $nlen hex characters, prefixed by the literal
+# string $prefix and with the literal string $infix between them.
+sub oid_nlen_prefix_infix_regex {
+ my $nlen = shift;
+ my $prefix = shift;
+ my $infix = shift;
+
+ my $rx = oid_nlen_regex($nlen);
+
+ return qr/^\Q$prefix\E$rx\Q$infix\E$rx$/;
+}
+
+# A regex matching a valid object ID.
+our $oid_regex;
+{
+ my $x = oid_nlen_regex($sha1_len);
+ my $y = oid_nlen_regex($sha256_extra_len);
+ $oid_regex = qr/(?:$x(?:$y)?)/;
+}
+
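As a rough illustration of the helpers added above (the sample hex values are made up for demonstration, everything else is taken from the definitions just shown): $oid_regex accepts either a full 40-character SHA-1 object name or a full 64-character SHA-256 object name, while oid_nlen_regex() can also build ranged-length patterns such as the abbreviated "7,64" form used further below.

	# Sketch only; assumes the definitions of oid_nlen_regex() and $oid_regex above.
	my $sha1_name   = "0fa3f3a66fb6a137f6ec2c19351ed4d807070ffa";   # 40 hex digits
	my $sha256_name = "0" x 64;                                     # 64 hex digits
	my $not_hex     = "g" x 40;                                     # right length, wrong alphabet

	print "sha1 ok\n"   if $sha1_name   =~ /^$oid_regex$/;          # matches
	print "sha256 ok\n" if $sha256_name =~ /^$oid_regex$/;          # matches
	print "short ok\n"  if "deadbeef"   =~ oid_nlen_regex("7,64");  # 8 digits, within 7..64
	print "no match\n"  unless $not_hex =~ /^$oid_regex$/;          # non-hex is rejected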
# input parameters can be collected from a variety of sources (presently, CGI
# and PATH_INFO), so we define an %input_params hash that collects them all
# together during validation: this allows subsequent uses (e.g. href()) to be
return undef unless defined $input;
# textual hashes are O.K.
- if ($input =~ m/^[0-9a-fA-F]{40}$/) {
+ if ($input =~ m/^$oid_regex$/) {
return 1;
}
# it must be correct pathname
sub format_log_line_html {
my $line = shift;
+ # Potentially abbreviated OID.
+ my $regex = oid_nlen_regex("7,64");
+
$line = esc_html($line, -nbsp=>1);
$line =~ s{
\b
(?<!-) # see strbuf_check_tag_ref(). Tags can't start with -
[A-Za-z0-9.-]+
(?!\.) # refs can't end with ".", see check_refname_format()
- -g[0-9a-fA-F]{7,40}
+ -g$regex
|
# Just a normal looking Git SHA1
- [0-9a-fA-F]{7,40}
+ $regex
)
\b
}{
')</span>';
}
# match <hash>
- if ($line =~ m/^index [0-9a-fA-F]{40},[0-9a-fA-F]{40}/) {
+ if ($line =~ oid_nlen_prefix_infix_regex($sha1_len, "index ", ",") |
+ $line =~ oid_nlen_prefix_infix_regex($sha256_len, "index ", ",")) {
# can match only for combined diff
$line = 'index ';
for (my $i = 0; $i < $diffinfo->{'nparents'}; $i++) {
$line .= '0' x 7;
}
- } elsif ($line =~ m/^index [0-9a-fA-F]{40}..[0-9a-fA-F]{40}/) {
+ } elsif ($line =~ oid_nlen_prefix_infix_regex($sha1_len, "index ", "..") |
+ $line =~ oid_nlen_prefix_infix_regex($sha256_len, "index ", "..")) {
# can match only for ordinary diff
my ($from_link, $to_link);
if ($from->{'href'}) {
}
#'100644 blob 0fa3f3a66fb6a137f6ec2c19351ed4d807070ffa panic.c'
- $line =~ m/^([0-9]+) (.+) ([0-9a-fA-F]{40})\t/;
+ $line =~ m/^([0-9]+) (.+) ($oid_regex)\t/;
if (defined $type && $type ne $2) {
# type doesn't match
return undef;
while (my $line = <$fd>) {
chomp $line;
- if ($line =~ m!^([0-9a-fA-F]{40})\srefs/($type.*)$!) {
+ if ($line =~ m!^($oid_regex)\srefs/($type.*)$!) {
if (defined $refs{$1}) {
push @{$refs{$1}}, $2;
} else {
$tag{'id'} = $tag_id;
while (my $line = <$fd>) {
chomp $line;
- if ($line =~ m/^object ([0-9a-fA-F]{40})$/) {
+ if ($line =~ m/^object ($oid_regex)$/) {
$tag{'object'} = $1;
} elsif ($line =~ m/^type (.+)$/) {
$tag{'type'} = $1;
}
my $header = shift @commit_lines;
- if ($header !~ m/^[0-9a-fA-F]{40}/) {
+ if ($header !~ m/^$oid_regex/) {
return;
}
($co{'id'}, my @parents) = split ' ', $header;
while (my $line = shift @commit_lines) {
last if $line eq "\n";
- if ($line =~ m/^tree ([0-9a-fA-F]{40})$/) {
+ if ($line =~ m/^tree ($oid_regex)$/) {
$co{'tree'} = $1;
- } elsif ((!defined $withparents) && ($line =~ m/^parent ([0-9a-fA-F]{40})$/)) {
+ } elsif ((!defined $withparents) && ($line =~ m/^parent ($oid_regex)$/)) {
push @parents, $1;
} elsif ($line =~ m/^author (.*) ([0-9]+) (.*)$/) {
$co{'author'} = to_utf8($1);
# ':100644 100644 03b218260e99b78c6df0ed378e59ed9205ccc96d 3b93d5e7cc7f7dd4ebed13a5cc1a4ad976fc94d8 M ls-files.c'
# ':100644 100644 7f9281985086971d3877aca27704f2aaf9c448ce bc190ebc71bbd923f2b728e505408f5e54bd073a M rev-tree.c'
- if ($line =~ m/^:([0-7]{6}) ([0-7]{6}) ([0-9a-fA-F]{40}) ([0-9a-fA-F]{40}) (.)([0-9]{0,3})\t(.*)$/) {
+ if ($line =~ m/^:([0-7]{6}) ([0-7]{6}) ($oid_regex) ($oid_regex) (.)([0-9]{0,3})\t(.*)$/) {
$res{'from_mode'} = $1;
$res{'to_mode'} = $2;
$res{'from_id'} = $3;
}
# '::100755 100755 100755 60e79ca1b01bc8b057abe17ddab484699a7f5fdb 94067cc5f73388f33722d52ae02f44692bc07490 94067cc5f73388f33722d52ae02f44692bc07490 MR git-gui/git-gui.sh'
# combined diff (for merge commit)
- elsif ($line =~ s/^(::+)((?:[0-7]{6} )+)((?:[0-9a-fA-F]{40} )+)([a-zA-Z]+)\t(.*)$//) {
+ elsif ($line =~ s/^(::+)((?:[0-7]{6} )+)((?:$oid_regex )+)([a-zA-Z]+)\t(.*)$//) {
$res{'nparents'} = length($1);
$res{'from_mode'} = [ split(' ', $2) ];
$res{'to_mode'} = pop @{$res{'from_mode'}};
$res{'to_file'} = unquote($5);
}
# 'c512b523472485aef4fff9e57b229d9d243c967f'
- elsif ($line =~ m/^([0-9a-fA-F]{40})$/) {
+ elsif ($line =~ m/^($oid_regex)$/) {
$res{'commit'} = $1;
}
if ($opts{'-l'}) {
#'100644 blob 0fa3f3a66fb6a137f6ec2c19351ed4d807070ffa 16717 panic.c'
- $line =~ m/^([0-9]+) (.+) ([0-9a-fA-F]{40}) +(-|[0-9]+)\t(.+)$/s;
+ $line =~ m/^([0-9]+) (.+) ($oid_regex) +(-|[0-9]+)\t(.+)$/s;
$res{'mode'} = $1;
$res{'type'} = $2;
}
} else {
#'100644 blob 0fa3f3a66fb6a137f6ec2c19351ed4d807070ffa panic.c'
- $line =~ m/^([0-9]+) (.+) ([0-9a-fA-F]{40})\t(.+)$/s;
+ $line =~ m/^([0-9]+) (.+) ($oid_regex)\t(.+)$/s;
$res{'mode'} = $1;
$res{'type'} = $2;
sub is_deleted {
my $diffinfo = shift;
- return $diffinfo->{'to_id'} eq ('0' x 40);
+ return $diffinfo->{'to_id'} eq ('0' x 40) || $diffinfo->{'to_id'} eq ('0' x 64);
}
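A minimal sketch of the effect of this change (the hash references below are invented stand-ins for gitweb's per-file diff info): a path now counts as deleted when its post-image object name is the all-zero OID at either hash length, which is why the open-coded 0{40} test further below is replaced by a call to this helper.

	# Illustration only; assumes the is_deleted() definition above.
	print "deleted (sha1)\n"   if is_deleted({ to_id => '0' x 40 });
	print "deleted (sha256)\n" if is_deleted({ to_id => '0' x 64 });
	print "kept\n"         unless is_deleted({ to_id => 'f' x 40 });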
# does patch correspond to [previous] difftree raw line
-class => "list subject"},
chop_and_escape_str($co{'title'}, 50) . "<br/>");
} elsif (defined $set{'to_id'}) {
- next if ($set{'to_id'} =~ m/^0{40}$/);
+ next if is_deleted(\%set);
print $cgi->a({-href => href(action=>"blob", hash_base=>$co{'id'},
hash=>$set{'to_id'}, file_name=>$set{'to_file'}),
# the header: <SHA-1> <src lineno> <dst lineno> [<lines in group>]
# no <lines in group> for subsequent lines in group of lines
my ($full_rev, $orig_lineno, $lineno, $group_size) =
- ($line =~ /^([0-9a-f]{40}) (\d+) (\d+)(?: (\d+))?$/);
+ ($line =~ /^($oid_regex) (\d+) (\d+)(?: (\d+))?$/);
if (!exists $metainfo{$full_rev}) {
$metainfo{$full_rev} = { 'nprevious' => 0 };
}
}
# 'previous' <sha1 of parent commit> <filename at commit>
if (exists $meta->{'previous'} &&
- $meta->{'previous'} =~ /^([a-fA-F0-9]{40}) (.*)$/) {
+ $meta->{'previous'} =~ /^($oid_regex) (.*)$/) {
$meta->{'parent'} = $1;
$meta->{'file_parent'} = unquote($2);
}
} else {
die_error(400, "No file name defined");
}
- } elsif ($hash =~ m/^[0-9a-fA-F]{40}$/) {
+ } elsif ($hash =~ m/^$oid_regex$/) {
# blobs defined by non-textual hash id's can be cached
$expires = "+1d";
}
} else {
die_error(400, "No file name defined");
}
- } elsif ($hash =~ m/^[0-9a-fA-F]{40}$/) {
+ } elsif ($hash =~ m/^$oid_regex$/) {
# blobs defined by non-textual hash id's can be cached
$expires = "+1d";
}
# non-textual hash id's can be cached
my $expires;
- if ($hash =~ m/^[0-9a-fA-F]{40}$/) {
+ if ($hash =~ m/^$oid_regex$/) {
$expires = "+1d";
}
my $refs = git_get_references();
close $fd;
#'100644 blob 0fa3f3a66fb6a137f6ec2c19351ed4d807070ffa panic.c'
- unless ($line && $line =~ m/^([0-9]+) (.+) ([0-9a-fA-F]{40})\t/) {
+ unless ($line && $line =~ m/^([0-9]+) (.+) ($oid_regex)\t/) {
die_error(404, "File or directory for given base does not exist");
}
$type = $2;
or die_error(404, "Blob diff not found");
} elsif (defined $hash &&
- $hash =~ /[0-9a-fA-F]{40}/) {
+ $hash =~ $oid_regex) {
# try to find filename from $hash
# read filtered raw output
@difftree =
# ':100644 100644 03b21826... 3b93d5e7... M ls-files.c'
# $hash == to_id
- grep { /^:[0-7]{6} [0-7]{6} [0-9a-fA-F]{40} $hash/ }
+ grep { /^:[0-7]{6} [0-7]{6} $oid_regex $hash/ }
map { chomp; $_ } <$fd>;
close $fd
or die_error(404, "Reading git-diff-tree failed");
$hash ||= $diffinfo{'to_id'};
# non-textual hash id's can be cached
- if ($hash_base =~ m/^[0-9a-fA-F]{40}$/ &&
- $hash_parent_base =~ m/^[0-9a-fA-F]{40}$/) {
+ if ($hash_base =~ m/^$oid_regex$/ &&
+ $hash_parent_base =~ m/^$oid_regex$/) {
$expires = '+1d';
}
$hash_parent ne '-c' && $hash_parent ne '--cc') {
# commitdiff with two commits given
my $hash_parent_short = $hash_parent;
- if ($hash_parent =~ m/^[0-9a-fA-F]{40}$/) {
+ if ($hash_parent =~ m/^$oid_regex$/) {
$hash_parent_short = substr($hash_parent, 0, 7);
}
$formats_nav .=
# non-textual hash id's can be cached
my $expires;
- if ($hash =~ m/^[0-9a-fA-F]{40}$/) {
+ if ($hash =~ m/^$oid_regex$/) {
$expires = "+1d";
}
int hash_algo_by_name(const char *name);
/* Identical, except based on the format ID. */
int hash_algo_by_id(uint32_t format_id);
+/* Identical, except based on the length. */
+int hash_algo_by_length(int len);
/* Identical, except for a pointer to struct git_hash_algo. */
static inline int hash_algo_by_ptr(const struct git_hash_algo *p)
{
{
const char *cmd_list;
- /*
- * There's no actual repository setup at this point (and even
- * if there is, we don't really care; only global config
- * matters). If we accidentally set up a repository, it's ok
- * too since the caller (git --list-cmds=) should exit shortly
- * anyway.
- */
if (git_config_get_string_const("completion.commands", &cmd_list))
return;
const char *p = strchrnul(cmd_list, ' ');
strbuf_add(&sb, cmd_list, p - cmd_list);
- if (*cmd_list == '-')
- string_list_remove(list, cmd_list + 1, 0);
+ if (sb.buf[0] == '-')
+ string_list_remove(list, sb.buf + 1, 0);
else
string_list_insert(list, sb.buf);
strbuf_release(&sb);
{"GET", "/objects/info/http-alternates$", get_text_file},
{"GET", "/objects/info/packs$", get_info_packs},
{"GET", "/objects/[0-9a-f]{2}/[0-9a-f]{38}$", get_loose_object},
+ {"GET", "/objects/[0-9a-f]{2}/[0-9a-f]{62}$", get_loose_object},
{"GET", "/objects/pack/pack-[0-9a-f]{40}\\.pack$", get_pack_file},
+ {"GET", "/objects/pack/pack-[0-9a-f]{64}\\.pack$", get_pack_file},
{"GET", "/objects/pack/pack-[0-9a-f]{40}\\.idx$", get_idx_file},
+ {"GET", "/objects/pack/pack-[0-9a-f]{64}\\.idx$", get_idx_file},
{"POST", "/git-upload-pack$", service_rpc},
{"POST", "/git-receive-pack$", service_rpc}
char *url;
char *owner;
char *token;
- char tmpfile_suffix[41];
+ char tmpfile_suffix[GIT_MAX_HEXSZ + 1];
time_t start_time;
long timeout;
int refreshing;
return;
}
- fprintf(stderr, "Fetching pack %s\n", sha1_to_hex(target->sha1));
+ fprintf(stderr, "Fetching pack %s\n",
+ hash_to_hex(target->hash));
fprintf(stderr, " which contains %s\n", oid_to_hex(&request->obj->oid));
preq = new_http_pack_request(target, repo->url);
request->dest = strbuf_detach(&buf, NULL);
append_remote_object_url(&buf, repo->url, hex, 0);
- strbuf_add(&buf, request->lock->tmpfile_suffix, 41);
+ strbuf_add(&buf, request->lock->tmpfile_suffix, the_hash_algo->hexsz + 1);
request->url = strbuf_detach(&buf, NULL);
slot = get_active_slot();
static void handle_new_lock_ctx(struct xml_ctx *ctx, int tag_closed)
{
struct remote_lock *lock = (struct remote_lock *)ctx->userData;
- git_SHA_CTX sha_ctx;
- unsigned char lock_token_sha1[20];
+ git_hash_ctx hash_ctx;
+ unsigned char lock_token_hash[GIT_MAX_RAWSZ];
if (tag_closed && ctx->cdata) {
if (!strcmp(ctx->name, DAV_ACTIVELOCK_OWNER)) {
} else if (!strcmp(ctx->name, DAV_ACTIVELOCK_TOKEN)) {
lock->token = xstrdup(ctx->cdata);
- git_SHA1_Init(&sha_ctx);
- git_SHA1_Update(&sha_ctx, lock->token, strlen(lock->token));
- git_SHA1_Final(lock_token_sha1, &sha_ctx);
+ the_hash_algo->init_fn(&hash_ctx);
+ the_hash_algo->update_fn(&hash_ctx, lock->token, strlen(lock->token));
+ the_hash_algo->final_fn(lock_token_hash, &hash_ctx);
lock->tmpfile_suffix[0] = '_';
- memcpy(lock->tmpfile_suffix + 1, sha1_to_hex(lock_token_sha1), 40);
+ memcpy(lock->tmpfile_suffix + 1, hash_to_hex(lock_token_hash), the_hash_algo->hexsz);
}
}
}
/* extract hex from sharded "xx/x{38}" or "xx/x{62}" filename */
static int get_oid_hex_from_objpath(const char *path, struct object_id *oid)
{
- if (strlen(path) != GIT_SHA1_HEXSZ + 1)
+ if (strlen(path) != the_hash_algo->hexsz + 1)
return -1;
if (hex_to_bytes(oid->hash, path, 1))
path += 2;
path++; /* skip '/' */
- return hex_to_bytes(oid->hash + 1, path, GIT_SHA1_RAWSZ - 1);
+ return hex_to_bytes(oid->hash + 1, path, the_hash_algo->rawsz - 1);
}
static void process_ls_object(struct remote_ls_ctx *ls)
return count;
}
-static int update_remote(unsigned char *sha1, struct remote_lock *lock)
+static int update_remote(const struct object_id *oid, struct remote_lock *lock)
{
struct active_request_slot *slot;
struct slot_results results;
dav_headers = get_dav_token_headers(lock, DAV_HEADER_IF);
- strbuf_addf(&out_buffer.buf, "%s\n", sha1_to_hex(sha1));
+ strbuf_addf(&out_buffer.buf, "%s\n", oid_to_hex(oid));
slot = get_active_slot();
slot->results = &results;
run_request_queue();
/* Update the remote branch if all went well */
- if (aborted || !update_remote(ref->new_oid.hash, ref_lock))
+ if (aborted || !update_remote(&ref->new_oid, ref_lock))
rc = 1;
if (!rc)
process_http_object_request(obj_req->req);
obj_req->state = COMPLETE;
+ normalize_curl_result(&obj_req->req->curl_result,
+ obj_req->req->http_code,
+ obj_req->req->errorstr,
+ sizeof(obj_req->req->errorstr));
+
/* Use alternates if necessary */
if (missing_target(obj_req->req)) {
fetch_alternates(walker, alt->base);
char *data;
int i = 0;
+ normalize_curl_result(&slot->curl_result, slot->http_code,
+ curl_errorstr, sizeof(curl_errorstr));
+
if (alt_req->http_specific) {
if (slot->curl_result != CURLE_OK ||
!alt_req->buffer->len) {
if (walker->get_verbosely) {
fprintf(stderr, "Getting pack %s\n",
- sha1_to_hex(target->sha1));
+ hash_to_hex(target->hash));
fprintf(stderr, " which contains %s\n",
- sha1_to_hex(sha1));
+ hash_to_hex(sha1));
}
preq = new_http_pack_request(target, repo->base);
release_object_request(obj_req);
}
-static int fetch_object(struct walker *walker, unsigned char *sha1)
+static int fetch_object(struct walker *walker, unsigned char *hash)
{
- char *hex = sha1_to_hex(sha1);
+ char *hex = hash_to_hex(hash);
int ret = 0;
struct object_request *obj_req = NULL;
struct http_object_request *req;
list_for_each(pos, head) {
obj_req = list_entry(pos, struct object_request, node);
- if (hasheq(obj_req->oid.hash, sha1))
+ if (hasheq(obj_req->oid.hash, hash))
break;
}
if (obj_req == NULL)
req->localfile = -1;
}
- /*
- * we turned off CURLOPT_FAILONERROR to avoid losing a
- * persistent connection and got CURLE_OK.
- */
- if (req->http_code >= 300 && req->curl_result == CURLE_OK &&
- (starts_with(req->url, "http://") ||
- starts_with(req->url, "https://"))) {
- req->curl_result = CURLE_HTTP_RETURNED_ERROR;
- xsnprintf(req->errorstr, sizeof(req->errorstr),
- "HTTP request failed");
- }
+ normalize_curl_result(&req->curl_result, req->http_code,
+ req->errorstr, sizeof(req->errorstr));
if (obj_req->state == ABORTED) {
ret = error("Request for %s aborted", hex);
return ret;
}
-static int fetch(struct walker *walker, unsigned char *sha1)
+static int fetch(struct walker *walker, unsigned char *hash)
{
struct walker_data *data = walker->data;
struct alt_base *altbase = data->alt;
- if (!fetch_object(walker, sha1))
+ if (!fetch_object(walker, hash))
return 0;
while (altbase) {
- if (!http_fetch_pack(walker, altbase, sha1))
+ if (!http_fetch_pack(walker, altbase, hash))
return 0;
fetch_alternates(walker, data->alt->base);
altbase = altbase->next;
}
- return error("Unable to find %s under %s", sha1_to_hex(sha1),
+ return error("Unable to find %s under %s", hash_to_hex(hash),
data->alt->base);
}
return strbuf_detach(&buf, NULL);
}
-static int handle_curl_result(struct slot_results *results)
+void normalize_curl_result(CURLcode *result, long http_code,
+ char *errorstr, size_t errorlen)
{
/*
* If we see a failing http code with CURLE_OK, we have turned off
* Likewise, if we see a redirect (30x code), that means we turned off
* redirect-following, and we should treat the result as an error.
*/
- if (results->curl_result == CURLE_OK &&
- results->http_code >= 300) {
- results->curl_result = CURLE_HTTP_RETURNED_ERROR;
+ if (*result == CURLE_OK && http_code >= 300) {
+ *result = CURLE_HTTP_RETURNED_ERROR;
/*
* Normally curl will already have put the "reason phrase"
* from the server into curl_errorstr; unfortunately without
* FAILONERROR it is lost, so we can give only the numeric
* status code.
*/
- xsnprintf(curl_errorstr, sizeof(curl_errorstr),
+ xsnprintf(errorstr, errorlen,
"The requested URL returned error: %ld",
- results->http_code);
+ http_code);
}
+}
+
+static int handle_curl_result(struct slot_results *results)
+{
+ normalize_curl_result(&results->curl_result, results->http_code,
+ curl_errorstr, sizeof(curl_errorstr));
if (results->curl_result == CURLE_OK) {
credential_approve(&http_auth);
url = quote_ref_url(base, ref->name);
if (http_get_strbuf(url, &buffer, &options) == HTTP_OK) {
strbuf_rtrim(&buffer);
- if (buffer.len == 40)
+ if (buffer.len == the_hash_algo->hexsz)
ret = get_oid_hex(buffer.buf, &ref->old_oid);
else if (starts_with(buffer.buf, "ref: ")) {
ref->symref = xstrdup(buffer.buf + 5);
}
/* Helpers for fetching packs */
-static char *fetch_pack_index(unsigned char *sha1, const char *base_url)
+static char *fetch_pack_index(unsigned char *hash, const char *base_url)
{
char *url, *tmp;
struct strbuf buf = STRBUF_INIT;
if (http_is_verbose)
- fprintf(stderr, "Getting index for pack %s\n", sha1_to_hex(sha1));
+ fprintf(stderr, "Getting index for pack %s\n", hash_to_hex(hash));
end_url_with_slash(&buf, base_url);
- strbuf_addf(&buf, "objects/pack/pack-%s.idx", sha1_to_hex(sha1));
+ strbuf_addf(&buf, "objects/pack/pack-%s.idx", hash_to_hex(hash));
url = strbuf_detach(&buf, NULL);
- strbuf_addf(&buf, "%s.temp", sha1_pack_index_name(sha1));
+ strbuf_addf(&buf, "%s.temp", sha1_pack_index_name(hash));
tmp = strbuf_detach(&buf, NULL);
if (http_get_file(url, tmp, NULL) != HTTP_OK) {
int http_get_info_packs(const char *base_url, struct packed_git **packs_head)
{
struct http_get_options options = {0};
- int ret = 0, i = 0;
- char *url, *data;
+ int ret = 0;
+ char *url;
+ const char *data;
struct strbuf buf = STRBUF_INIT;
- unsigned char hash[GIT_MAX_RAWSZ];
- const unsigned hexsz = the_hash_algo->hexsz;
+ struct object_id oid;
end_url_with_slash(&buf, base_url);
strbuf_addstr(&buf, "objects/info/packs");
goto cleanup;
data = buf.buf;
- while (i < buf.len) {
- switch (data[i]) {
- case 'P':
- i++;
- if (i + hexsz + 12 <= buf.len &&
- starts_with(data + i, " pack-") &&
- starts_with(data + i + hexsz + 6, ".pack\n")) {
- get_sha1_hex(data + i + 6, hash);
- fetch_and_setup_pack_index(packs_head, hash,
- base_url);
- i += hexsz + 11;
- break;
- }
- default:
- while (i < buf.len && data[i] != '\n')
- i++;
+ while (*data) {
+ if (skip_prefix(data, "P pack-", &data) &&
+ !parse_oid_hex(data, &oid, &data) &&
+ skip_prefix(data, ".pack", &data) &&
+ (*data == '\n' || *data == '\0')) {
+ fetch_and_setup_pack_index(packs_head, oid.hash, base_url);
+ } else {
+ data = strchrnul(data, '\n');
}
- i++;
+ if (*data)
+ data++; /* skip past newline */
}
cleanup:
return -1;
}
- unlink(sha1_pack_index_name(p->sha1));
+ unlink(sha1_pack_index_name(p->hash));
- if (finalize_object_file(preq->tmpfile.buf, sha1_pack_name(p->sha1))
- || finalize_object_file(tmp_idx, sha1_pack_index_name(p->sha1))) {
+ if (finalize_object_file(preq->tmpfile.buf, sha1_pack_name(p->hash))
+ || finalize_object_file(tmp_idx, sha1_pack_index_name(p->hash))) {
free(tmp_idx);
return -1;
}
end_url_with_slash(&buf, base_url);
strbuf_addf(&buf, "objects/pack/pack-%s.pack",
- sha1_to_hex(target->sha1));
+ hash_to_hex(target->hash));
preq->url = strbuf_detach(&buf, NULL);
- strbuf_addf(&preq->tmpfile, "%s.temp", sha1_pack_name(target->sha1));
+ strbuf_addf(&preq->tmpfile, "%s.temp", sha1_pack_name(target->hash));
preq->packfile = fopen(preq->tmpfile.buf, "a");
if (!preq->packfile) {
error("Unable to open local file %s for pack",
if (http_is_verbose)
fprintf(stderr,
"Resuming fetch of pack %s at byte %"PRIuMAX"\n",
- sha1_to_hex(target->sha1), (uintmax_t)prev_posn);
+ hash_to_hex(target->hash),
+ (uintmax_t)prev_posn);
http_opt_request_remainder(preq->slot->curl, prev_posn);
}
freq->stream.next_out = expn;
freq->stream.avail_out = sizeof(expn);
freq->zret = git_inflate(&freq->stream, Z_SYNC_FLUSH);
- git_SHA1_Update(&freq->c, expn,
- sizeof(expn) - freq->stream.avail_out);
+ the_hash_algo->update_fn(&freq->c, expn,
+ sizeof(expn) - freq->stream.avail_out);
} while (freq->stream.avail_in && freq->zret == Z_OK);
return size;
}
git_inflate_init(&freq->stream);
- git_SHA1_Init(&freq->c);
+ the_hash_algo->init_fn(&freq->c);
freq->url = get_remote_object_url(base_url, hex, 0);
if (prev_read == -1) {
memset(&freq->stream, 0, sizeof(freq->stream));
git_inflate_init(&freq->stream);
- git_SHA1_Init(&freq->c);
+ the_hash_algo->init_fn(&freq->c);
if (prev_posn>0) {
prev_posn = 0;
lseek(freq->localfile, 0, SEEK_SET);
}
git_inflate_end(&freq->stream);
- git_SHA1_Final(freq->real_oid.hash, &freq->c);
+ the_hash_algo->final_fn(freq->real_oid.hash, &freq->c);
if (freq->zret != Z_STREAM_END) {
unlink_or_warn(freq->tmpfile.buf);
return -1;
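The hunks above all follow the same pattern: the hard-coded SHA-1 primitives (git_SHA1_Init/Update/Final) are swapped for the function pointers of the currently active hash algorithm. A minimal sketch of that vtable pattern, assuming the usual cache.h declarations; the helper name and its buf/len arguments are illustrative, not part of the patch:

    /* Illustrative only -- not part of the patch. */
    static void hash_buffer(const void *buf, size_t len, unsigned char *digest)
    {
            git_hash_ctx ctx;

            the_hash_algo->init_fn(&ctx);
            the_hash_algo->update_fn(&ctx, buf, len);
            the_hash_algo->final_fn(digest, &ctx); /* writes the_hash_algo->rawsz bytes */
    }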
#define missing_target(a) missing__target((a)->http_code, (a)->curl_result)
+/*
+ * Normalize curl results to handle CURLOPT_FAILONERROR (or lack thereof). Failing
+ * http codes have their "result" converted to CURLE_HTTP_RETURNED_ERROR, and
+ * an appropriate string placed in the errorstr buffer (pass curl_errorstr if
+ * you don't have a custom buffer).
+ */
+void normalize_curl_result(CURLcode *result, long http_code, char *errorstr,
+ size_t errorlen);
+
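Since the comment above only describes the behaviour, here is a minimal sketch of what a caller observes (the status code and buffer are illustrative; the implementation is in the http.c hunk earlier in this patch):

    CURLcode result = CURLE_OK;   /* what curl reports when FAILONERROR is off */
    char errbuf[256];

    normalize_curl_result(&result, 404, errbuf, sizeof(errbuf));
    /* result is now CURLE_HTTP_RETURNED_ERROR and errbuf contains
     * "The requested URL returned error: 404" */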
/* Helpers for modifying and creating URLs */
extern void append_remote_object_url(struct strbuf *buf, const char *url,
const char *hex,
long http_code;
struct object_id oid;
struct object_id real_oid;
- git_SHA_CTX c;
+ git_hash_ctx c;
git_zstream stream;
int zret;
int rename;
static struct strbuf git_default_name = STRBUF_INIT;
static struct strbuf git_default_email = STRBUF_INIT;
static struct strbuf git_default_date = STRBUF_INIT;
+static struct strbuf git_author_name = STRBUF_INIT;
+static struct strbuf git_author_email = STRBUF_INIT;
+static struct strbuf git_committer_name = STRBUF_INIT;
+static struct strbuf git_committer_email = STRBUF_INIT;
static int default_email_is_bogus;
static int default_name_is_bogus;
"\n");
const char *fmt_ident(const char *name, const char *email,
- const char *date_str, int flag)
+ enum want_ident whose_ident, const char *date_str, int flag)
{
static struct strbuf ident = STRBUF_INIT;
int strict = (flag & IDENT_STRICT);
int want_date = !(flag & IDENT_NO_DATE);
int want_name = !(flag & IDENT_NO_NAME);
+ if (!email) {
+ if (whose_ident == WANT_AUTHOR_IDENT && git_author_email.len)
+ email = git_author_email.buf;
+ else if (whose_ident == WANT_COMMITTER_IDENT && git_committer_email.len)
+ email = git_committer_email.buf;
+ }
if (!email) {
if (strict && ident_use_config_only
&& !(ident_config_given & IDENT_MAIL_GIVEN)) {
if (want_name) {
int using_default = 0;
+ if (!name) {
+ if (whose_ident == WANT_AUTHOR_IDENT && git_author_name.len)
+ name = git_author_name.buf;
+ else if (whose_ident == WANT_COMMITTER_IDENT &&
+ git_committer_name.len)
+ name = git_committer_name.buf;
+ }
if (!name) {
if (strict && ident_use_config_only
&& !(ident_config_given & IDENT_NAME_GIVEN)) {
return ident.buf;
}
-const char *fmt_name(const char *name, const char *email)
+const char *fmt_name(enum want_ident whose_ident)
{
- return fmt_ident(name, email, NULL, IDENT_STRICT | IDENT_NO_DATE);
+ char *name = NULL;
+ char *email = NULL;
+
+ switch (whose_ident) {
+ case WANT_BLANK_IDENT:
+ break;
+ case WANT_AUTHOR_IDENT:
+ name = getenv("GIT_AUTHOR_NAME");
+ email = getenv("GIT_AUTHOR_EMAIL");
+ break;
+ case WANT_COMMITTER_IDENT:
+ name = getenv("GIT_COMMITTER_NAME");
+ email = getenv("GIT_COMMITTER_EMAIL");
+ break;
+ }
+ return fmt_ident(name, email, whose_ident, NULL,
+ IDENT_STRICT | IDENT_NO_DATE);
}
const char *git_author_info(int flag)
author_ident_explicitly_given |= IDENT_MAIL_GIVEN;
return fmt_ident(getenv("GIT_AUTHOR_NAME"),
getenv("GIT_AUTHOR_EMAIL"),
+ WANT_AUTHOR_IDENT,
getenv("GIT_AUTHOR_DATE"),
flag);
}
committer_ident_explicitly_given |= IDENT_MAIL_GIVEN;
return fmt_ident(getenv("GIT_COMMITTER_NAME"),
getenv("GIT_COMMITTER_EMAIL"),
+ WANT_COMMITTER_IDENT,
getenv("GIT_COMMITTER_DATE"),
flag);
}
return ident_is_sufficient(author_ident_explicitly_given);
}
-int git_ident_config(const char *var, const char *value, void *data)
+static int set_ident(const char *var, const char *value)
{
- if (!strcmp(var, "user.useconfigonly")) {
- ident_use_config_only = git_config_bool(var, value);
+ if (!strcmp(var, "author.name")) {
+ if (!value)
+ return config_error_nonbool(var);
+ strbuf_reset(&git_author_name);
+ strbuf_addstr(&git_author_name, value);
+ author_ident_explicitly_given |= IDENT_NAME_GIVEN;
+ ident_config_given |= IDENT_NAME_GIVEN;
+ return 0;
+ }
+
+ if (!strcmp(var, "author.email")) {
+ if (!value)
+ return config_error_nonbool(var);
+ strbuf_reset(&git_author_email);
+ strbuf_addstr(&git_author_email, value);
+ author_ident_explicitly_given |= IDENT_MAIL_GIVEN;
+ ident_config_given |= IDENT_MAIL_GIVEN;
+ return 0;
+ }
+
+ if (!strcmp(var, "committer.name")) {
+ if (!value)
+ return config_error_nonbool(var);
+ strbuf_reset(&git_committer_name);
+ strbuf_addstr(&git_committer_name, value);
+ committer_ident_explicitly_given |= IDENT_NAME_GIVEN;
+ ident_config_given |= IDENT_NAME_GIVEN;
+ return 0;
+ }
+
+ if (!strcmp(var, "committer.email")) {
+ if (!value)
+ return config_error_nonbool(var);
+ strbuf_reset(&git_committer_email);
+ strbuf_addstr(&git_committer_email, value);
+ committer_ident_explicitly_given |= IDENT_MAIL_GIVEN;
+ ident_config_given |= IDENT_MAIL_GIVEN;
return 0;
}
return 0;
}
+int git_ident_config(const char *var, const char *value, void *data)
+{
+ if (!strcmp(var, "user.useconfigonly")) {
+ ident_use_config_only = git_config_bool(var, value);
+ return 0;
+ }
+
+ return set_ident(var, value);
+}
+
+static void set_env_if(const char *key, const char *value, int *given, int bit)
+{
+ if ((*given & bit) || getenv(key))
+ return; /* nothing to do */
+ setenv(key, value, 0);
+ *given |= bit;
+}
+
+void prepare_fallback_ident(const char *name, const char *email)
+{
+ set_env_if("GIT_AUTHOR_NAME", name,
+ &author_ident_explicitly_given, IDENT_NAME_GIVEN);
+ set_env_if("GIT_AUTHOR_EMAIL", email,
+ &author_ident_explicitly_given, IDENT_MAIL_GIVEN);
+ set_env_if("GIT_COMMITTER_NAME", name,
+ &committer_ident_explicitly_given, IDENT_NAME_GIVEN);
+ set_env_if("GIT_COMMITTER_EMAIL", email,
+ &committer_ident_explicitly_given, IDENT_MAIL_GIVEN);
+}
+
static int buf_cmp(const char *a_begin, const char *a_end,
const char *b_begin, const char *b_end)
{
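The ident.c changes above let Git take the author and committer identity from configuration as well as from the environment. A minimal sketch of the four new keys handled by set_ident(), with placeholder values:

    [author]
            name = A U Thor
            email = author@example.com
    [committer]
            name = C O Mitter
            email = committer@example.com

Environment variables such as GIT_AUTHOR_NAME still win; as the fmt_ident() hunk shows, the configured values are only consulted when no explicit name or email was passed in.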
KHASH_INIT(sha1_pos, const unsigned char *, int, 1, sha1hash, __kh_oid_cmp)
typedef kh_sha1_pos_t khash_sha1_pos;
+static inline unsigned int oid_hash(struct object_id oid)
+{
+ return sha1hash(oid.hash);
+}
+
+static inline int oid_equal(struct object_id a, struct object_id b)
+{
+ return oideq(&a, &b);
+}
+
+KHASH_INIT(oid, struct object_id, int, 0, oid_hash, oid_equal)
+
+KHASH_INIT(oid_map, struct object_id, void *, 1, oid_hash, oid_equal)
+typedef kh_oid_map_t khash_oid_map;
+
+KHASH_INIT(oid_pos, struct object_id, int, 1, oid_hash, oid_equal)
+typedef kh_oid_pos_t khash_oid_pos;
+
#endif /* __AC_KHASH_H */
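These new khash flavours replace the raw-SHA-1 keyed tables that pack-bitmap.c uses later in these changes. A minimal sketch of the map flavour, assuming an initialized struct object_id oid and some payload pointer (both illustrative):

    kh_oid_map_t *map = kh_init_oid_map();
    int added;
    khiter_t pos = kh_put_oid_map(map, oid, &added); /* added == 0: key was already present */

    kh_value(map, pos) = payload;

    pos = kh_get_oid_map(map, oid);
    if (pos < kh_end(map))
            payload = kh_value(map, pos);
    kh_destroy_oid_map(map);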
int line_log_print(struct rev_info *rev, struct commit *commit)
{
- struct line_log_data *range = lookup_line_range(rev, commit);
show_log(rev);
- dump_diff_hacky(rev, range);
+ if (!(rev->diffopt.output_format & DIFF_FORMAT_NO_OUTPUT)) {
+ struct line_log_data *range = lookup_line_range(rev, commit);
+ dump_diff_hacky(rev, range);
+ }
return 1;
}
filter_options->sparse_path_value = strdup(v0);
return 0;
}
+ /*
+ * Please update _git_fetch() in git-completion.bash when you
+ * add new filters
+ */
if (errbuf)
strbuf_addf(errbuf, "invalid filter-spec '%s'", arg);
*/
if (ctx.need_8bit_cte >= 0 && opt->add_signoff)
ctx.need_8bit_cte =
- has_non_ascii(fmt_name(getenv("GIT_COMMITTER_NAME"),
- getenv("GIT_COMMITTER_EMAIL")));
+ has_non_ascii(fmt_name(WANT_COMMITTER_IDENT));
ctx.date_mode = opt->date_mode;
ctx.date_mode_explicit = opt->date_mode_explicit;
ctx.abbrev = opt->diffopt.abbrev;
struct commit *commit;
int contains_another;
- char merged_revision[42];
+ char merged_revision[GIT_MAX_HEXSZ + 2];
const char *rev_args[] = { "rev-list", "--merges", "--ancestry-path",
"--all", merged_revision, NULL };
struct rev_info revs;
static int handle_rename_via_dir(struct merge_options *o,
struct diff_filepair *pair,
- const char *rename_branch,
- const char *other_branch)
+ const char *rename_branch)
{
/*
* Handle file adds that need to be renamed due to directory rename
remove_hashmap_entries(dir_re_merge, &remove_from_merge);
}
-static struct hashmap *get_directory_renames(struct diff_queue_struct *pairs,
- struct tree *tree)
+static struct hashmap *get_directory_renames(struct diff_queue_struct *pairs)
{
struct hashmap *dir_renames;
struct hashmap_iter iter;
struct tree *o_tree,
struct tree *a_tree,
struct tree *b_tree,
- struct string_list *entries,
- int *clean)
+ struct string_list *entries)
{
struct string_list_item *item;
int stage = (tree == a_tree ? 2 : 3);
apply_directory_rename_modifications(o, pair, new_path,
re, tree, o_tree,
a_tree, b_tree,
- entries,
- clean_merge);
+ entries);
}
hashmap_iter_init(&collisions, &iter);
merge_pairs = get_diffpairs(o, common, merge);
if (o->detect_directory_renames) {
- dir_re_head = get_directory_renames(head_pairs, head);
- dir_re_merge = get_directory_renames(merge_pairs, merge);
+ dir_re_head = get_directory_renames(head_pairs);
+ dir_re_merge = get_directory_renames(merge_pairs);
handle_directory_level_conflicts(o,
dir_re_head, head,
clean_merge = 1;
if (handle_rename_via_dir(o,
conflict_info->pair1,
- conflict_info->branch1,
- conflict_info->branch2))
+ conflict_info->branch1))
clean_merge = -1;
break;
case RENAME_ADD:
return -1;
o->merge_detect_rename = 1;
}
+ /*
+ * Please update $__git_merge_strategy_options in
+ * git-completion.bash when you add new options
+ */
else
return -1;
return 0;
--- /dev/null
+diff_cmd () {
+ "$merge_tool_path" mergetool "$LOCAL" "$REMOTE" -o "$MERGED"
+}
+
+merge_cmd () {
+ if $base_present
+ then
+ "$merge_tool_path" mergetool "$BASE" "$LOCAL" "$REMOTE" -o "$MERGED"
+ else
+ "$merge_tool_path" mergetool "$LOCAL" "$REMOTE" -o "$MERGED"
+ fi
+}
#include "sha1-lookup.h"
#include "midx.h"
#include "progress.h"
+#include "trace2.h"
#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
#define MIDX_VERSION 1
midx_map = xmmap(NULL, midx_size, PROT_READ, MAP_PRIVATE, fd, 0);
- FLEX_ALLOC_MEM(m, object_dir, object_dir, strlen(object_dir));
+ FLEX_ALLOC_STR(m, object_dir, object_dir);
m->fd = fd;
m->data = midx_map;
m->data_len = midx_size;
m->pack_names[i]);
}
+ trace2_data_intmax("midx", the_repository, "load/num_packs", m->num_packs);
+ trace2_data_intmax("midx", the_repository, "load/num_objects", m->num_objects);
+
return m;
cleanup_fail:
return nth_midxed_pack_entry(m, e, pos);
}
-int midx_contains_pack(struct multi_pack_index *m, const char *idx_name)
+/* Match "foo.idx" against either "foo.pack" _or_ "foo.idx". */
+static int cmp_idx_or_pack_name(const char *idx_or_pack_name,
+ const char *idx_name)
+{
+ /* Skip past any initial matching prefix. */
+ while (*idx_name && *idx_name == *idx_or_pack_name) {
+ idx_name++;
+ idx_or_pack_name++;
+ }
+
+ /*
+ * If we didn't match completely, we may have matched "pack-1234." and
+ * be left with "idx" and "pack" respectively, which is also OK. We do
+ * not have to check for "idx" and "idx", because that would have been
+ * a complete match (and in that case these strcmps will be false, but
+	 * we'll correctly return 0 from the final strcmp() below).
+ *
+ * Technically this matches "fooidx" and "foopack", but we'd never have
+ * such names in the first place.
+ */
+ if (!strcmp(idx_name, "idx") && !strcmp(idx_or_pack_name, "pack"))
+ return 0;
+
+ /*
+ * This not only checks for a complete match, but also orders based on
+ * the first non-identical character, which means our ordering will
+ * match a raw strcmp(). That makes it OK to use this to binary search
+ * a naively-sorted list.
+ */
+ return strcmp(idx_or_pack_name, idx_name);
+}
+
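To make the comparison rules above concrete, a few illustrative calls (the file names are made up):

    cmp_idx_or_pack_name("pack-1234.idx",  "pack-1234.idx");  /* 0: complete match */
    cmp_idx_or_pack_name("pack-1234.pack", "pack-1234.idx");  /* 0: ".pack" matches the ".idx" entry */
    cmp_idx_or_pack_name("pack-5678.idx",  "pack-1234.idx");  /* nonzero, ordered like strcmp() */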
+int midx_contains_pack(struct multi_pack_index *m, const char *idx_or_pack_name)
{
uint32_t first = 0, last = m->num_packs;
int cmp;
current = m->pack_names[mid];
- cmp = strcmp(idx_name, current);
+ cmp = cmp_idx_or_pack_name(idx_or_pack_name, current);
if (!cmp)
return 1;
if (cmp > 0) {
va_end(ap);
}
+struct pair_pos_vs_id
+{
+ uint32_t pos;
+ uint32_t pack_int_id;
+};
+
+static int compare_pair_pos_vs_id(const void *_a, const void *_b)
+{
+ struct pair_pos_vs_id *a = (struct pair_pos_vs_id *)_a;
+ struct pair_pos_vs_id *b = (struct pair_pos_vs_id *)_b;
+
+ return b->pack_int_id - a->pack_int_id;
+}
+
+/*
+ * Limit calls to display_progress() for performance reasons.
+ * The interval here was arbitrarily chosen.
+ */
+#define SPARSE_PROGRESS_INTERVAL (1 << 12)
+#define midx_display_sparse_progress(progress, n) \
+ do { \
+ uint64_t _n = (n); \
+ if ((_n & (SPARSE_PROGRESS_INTERVAL - 1)) == 0) \
+ display_progress(progress, _n); \
+ } while (0)
+
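With an interval of 1 << 12, the progress meter is therefore only touched on every 4096th object, e.g.:

    midx_display_sparse_progress(progress, 4095); /* no update */
    midx_display_sparse_progress(progress, 4096); /* updates the meter */
    midx_display_sparse_progress(progress, 4097); /* no update */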
int verify_midx_file(const char *object_dir)
{
+ struct pair_pos_vs_id *pairs = NULL;
uint32_t i;
struct progress *progress;
struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
if (!m)
return 0;
+ progress = start_progress(_("Looking for referenced packfiles"),
+ m->num_packs);
for (i = 0; i < m->num_packs; i++) {
if (prepare_midx_pack(m, i))
midx_report("failed to load pack in position %d", i);
+
+ display_progress(progress, i + 1);
}
+ stop_progress(&progress);
for (i = 0; i < 255; i++) {
uint32_t oid_fanout1 = ntohl(m->chunk_oid_fanout[i]);
i, oid_fanout1, oid_fanout2, i + 1);
}
+ progress = start_sparse_progress(_("Verifying OID order in MIDX"),
+ m->num_objects - 1);
for (i = 0; i < m->num_objects - 1; i++) {
struct object_id oid1, oid2;
if (oidcmp(&oid1, &oid2) >= 0)
midx_report(_("oid lookup out of order: oid[%d] = %s >= %s = oid[%d]"),
i, oid_to_hex(&oid1), oid_to_hex(&oid2), i + 1);
+
+ midx_display_sparse_progress(progress, i + 1);
}
+ stop_progress(&progress);
- progress = start_progress(_("Verifying object offsets"), m->num_objects);
+ /*
+ * Create an array mapping each object to its packfile id. Sort it
+ * to group the objects by packfile. Use this permutation to visit
+ * each of the objects and only require 1 packfile to be open at a
+ * time.
+ */
+ ALLOC_ARRAY(pairs, m->num_objects);
+ for (i = 0; i < m->num_objects; i++) {
+ pairs[i].pos = i;
+ pairs[i].pack_int_id = nth_midxed_pack_int_id(m, i);
+ }
+
+ progress = start_sparse_progress(_("Sorting objects by packfile"),
+ m->num_objects);
+ display_progress(progress, 0); /* TODO: Measure QSORT() progress */
+ QSORT(pairs, m->num_objects, compare_pair_pos_vs_id);
+ stop_progress(&progress);
+
+ progress = start_sparse_progress(_("Verifying object offsets"), m->num_objects);
for (i = 0; i < m->num_objects; i++) {
struct object_id oid;
struct pack_entry e;
off_t m_offset, p_offset;
- nth_midxed_object_oid(&oid, m, i);
+ if (i > 0 && pairs[i-1].pack_int_id != pairs[i].pack_int_id &&
+ m->packs[pairs[i-1].pack_int_id])
+ {
+ close_pack_fd(m->packs[pairs[i-1].pack_int_id]);
+ close_pack_index(m->packs[pairs[i-1].pack_int_id]);
+ }
+
+ nth_midxed_object_oid(&oid, m, pairs[i].pos);
+
if (!fill_midx_entry(&oid, &e, m)) {
midx_report(_("failed to load pack entry for oid[%d] = %s"),
- i, oid_to_hex(&oid));
+ pairs[i].pos, oid_to_hex(&oid));
continue;
}
if (m_offset != p_offset)
midx_report(_("incorrect object offset for oid[%d] = %s: %"PRIx64" != %"PRIx64),
- i, oid_to_hex(&oid), m_offset, p_offset);
+ pairs[i].pos, oid_to_hex(&oid), m_offset, p_offset);
- display_progress(progress, i + 1);
+ midx_display_sparse_progress(progress, i + 1);
}
stop_progress(&progress);
+ free(pairs);
+
return verify_midx_error;
}
struct multi_pack_index *m,
uint32_t n);
int fill_midx_entry(const struct object_id *oid, struct pack_entry *e, struct multi_pack_index *m);
-int midx_contains_pack(struct multi_pack_index *m, const char *idx_name);
+int midx_contains_pack(struct multi_pack_index *m, const char *idx_or_pack_name);
int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, int local);
int write_midx_file(const char *object_dir);
static int path_to_oid(const char *path, struct object_id *oid)
{
- char hex_oid[GIT_SHA1_HEXSZ];
+ char hex_oid[GIT_MAX_HEXSZ];
int i = 0;
- while (*path && i < GIT_SHA1_HEXSZ) {
+ while (*path && i < the_hash_algo->hexsz) {
if (*path != '/')
hex_oid[i++] = *path;
path++;
}
- if (*path || i != GIT_SHA1_HEXSZ)
+ if (*path || i != the_hash_algo->hexsz)
return -1;
return get_oid_hex(hex_oid, oid);
}
#define GET_NIBBLE(n, sha1) ((((sha1)[(n) >> 1]) >> ((~(n) & 0x01) << 2)) & 0x0f)
-#define KEY_INDEX (GIT_SHA1_RAWSZ - 1)
-#define FANOUT_PATH_SEPARATORS ((GIT_SHA1_HEXSZ / 2) - 1)
+#define KEY_INDEX (the_hash_algo->rawsz - 1)
+#define FANOUT_PATH_SEPARATORS (the_hash_algo->rawsz - 1)
+#define FANOUT_PATH_SEPARATORS_MAX ((GIT_MAX_HEXSZ / 2) - 1)
#define SUBTREE_SHA1_PREFIXCMP(key_sha1, subtree_sha1) \
(memcmp(key_sha1, subtree_sha1, subtree_sha1[KEY_INDEX]))
struct leaf_node *entry)
{
struct leaf_node *l;
- struct int_node *parent_stack[GIT_SHA1_RAWSZ];
+ struct int_node *parent_stack[GIT_MAX_RAWSZ];
unsigned char i, j;
void **p = note_tree_search(t, &tree, &n, entry->key_oid.hash);
void *buf;
struct tree_desc desc;
struct name_entry entry;
+ const unsigned hashsz = the_hash_algo->rawsz;
buf = fill_tree_descriptor(&desc, &subtree->val_oid);
if (!buf)
oid_to_hex(&subtree->val_oid));
prefix_len = subtree->key_oid.hash[KEY_INDEX];
- if (prefix_len >= GIT_SHA1_RAWSZ)
+ if (prefix_len >= hashsz)
BUG("prefix_len (%"PRIuMAX") is out of range", (uintmax_t)prefix_len);
if (prefix_len * 2 < n)
BUG("prefix_len (%"PRIuMAX") is too small", (uintmax_t)prefix_len);
struct leaf_node *l;
size_t path_len = strlen(entry.path);
- if (path_len == 2 * (GIT_SHA1_RAWSZ - prefix_len)) {
+ if (path_len == 2 * (hashsz - prefix_len)) {
/* This is potentially the remainder of the SHA-1 */
if (!S_ISREG(entry.mode))
goto handle_non_note;
if (hex_to_bytes(object_oid.hash + prefix_len, entry.path,
- GIT_SHA1_RAWSZ - prefix_len))
+ hashsz - prefix_len))
goto handle_non_note; /* entry.path is not a SHA1 */
type = PTR_TYPE_NOTE;
* except for the last byte, where we write
* the length:
*/
- memset(object_oid.hash + len, 0, GIT_SHA1_RAWSZ - len - 1);
+ memset(object_oid.hash + len, 0, hashsz - len - 1);
object_oid.hash[KEY_INDEX] = (unsigned char)len;
type = PTR_TYPE_SUBTREE;
return fanout + 1;
}
-/* hex SHA1 + 19 * '/' + NUL */
-#define FANOUT_PATH_MAX GIT_SHA1_HEXSZ + FANOUT_PATH_SEPARATORS + 1
+/* hex oid + '/' between each pair of hex digits + NUL */
+#define FANOUT_PATH_MAX GIT_MAX_HEXSZ + FANOUT_PATH_SEPARATORS_MAX + 1
-static void construct_path_with_fanout(const unsigned char *sha1,
+static void construct_path_with_fanout(const unsigned char *hash,
unsigned char fanout, char *path)
{
unsigned int i = 0, j = 0;
- const char *hex_sha1 = sha1_to_hex(sha1);
- assert(fanout < GIT_SHA1_RAWSZ);
+ const char *hex_hash = hash_to_hex(hash);
+ assert(fanout < the_hash_algo->rawsz);
while (fanout) {
- path[i++] = hex_sha1[j++];
- path[i++] = hex_sha1[j++];
+ path[i++] = hex_hash[j++];
+ path[i++] = hex_hash[j++];
path[i++] = '/';
fanout--;
}
- xsnprintf(path + i, FANOUT_PATH_MAX - i, "%s", hex_sha1 + j);
+ xsnprintf(path + i, FANOUT_PATH_MAX - i, "%s", hex_hash + j);
}
static int for_each_note_helper(struct notes_tree *t, struct int_node *tree,
static void write_tree_entry(struct strbuf *buf, unsigned int mode,
const char *path, unsigned int path_len, const
- unsigned char *sha1)
+ unsigned char *hash)
{
strbuf_addf(buf, "%o %.*s%c", mode, path_len, path, '\0');
- strbuf_add(buf, sha1, GIT_SHA1_RAWSZ);
+ strbuf_add(buf, hash, the_hash_algo->rawsz);
}
static void tree_write_stack_init_subtree(struct tree_write_stack *tws,
n = (struct tree_write_stack *)
xmalloc(sizeof(struct tree_write_stack));
n->next = NULL;
- strbuf_init(&n->buf, 256 * (32 + GIT_SHA1_HEXSZ)); /* assume 256 entries per tree */
+ strbuf_init(&n->buf, 256 * (32 + the_hash_algo->hexsz)); /* assume 256 entries per tree */
n->path[0] = n->path[1] = '\0';
tws->next = n;
tws->path[0] = path[0];
note_path[note_path_len] = '\0';
mode = 040000;
}
- assert(note_path_len <= GIT_SHA1_HEXSZ + FANOUT_PATH_SEPARATORS);
+ assert(note_path_len <= GIT_MAX_HEXSZ + FANOUT_PATH_SEPARATORS);
/* Weave non-note entries into note entries */
return write_each_non_note_until(note_path, d) ||
/* Prepare for traversal of current notes tree */
root.next = NULL; /* last forward entry in list is grounded */
- strbuf_init(&root.buf, 256 * (32 + GIT_SHA1_HEXSZ)); /* assume 256 entries */
+ strbuf_init(&root.buf, 256 * (32 + the_hash_algo->hexsz)); /* assume 256 entries */
root.path[0] = root.path[1] = '\0';
cb_data.root = &root;
cb_data.next_non_note = t->first_non_note;
while (l) {
if (flags & NOTES_PRUNE_VERBOSE)
- printf("%s\n", sha1_to_hex(l->sha1));
+ printf("%s\n", hash_to_hex(l->sha1));
if (!(flags & NOTES_PRUNE_DRYRUN))
remove_note(t, l->sha1);
l = l->next;
freshened:1,
do_not_close:1,
pack_promisor:1;
- unsigned char sha1[20];
+ unsigned char hash[GIT_MAX_RAWSZ];
struct revindex_entry *revindex;
/* something like ".git/objects/pack/xxxxx.pack" */
char pack_name[FLEX_ARRAY]; /* more */
#define OBJECT_INFO_QUICK 8
/* Do not check loose object */
#define OBJECT_INFO_IGNORE_LOOSE 16
+/*
+ * Do not attempt to fetch the object if missing (even if fetch_is_missing is
+ * nonzero). This is meant for bulk prefetching of missing blobs in a partial
+ * clone. Implies OBJECT_INFO_QUICK.
+ */
+#define OBJECT_INFO_FOR_PREFETCH (32 + OBJECT_INFO_QUICK)
int oid_object_info_extended(struct repository *r,
const struct object_id *,
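A minimal sketch of how a bulk-prefetch pass might use the new flag; the call shape follows the oid_object_info_extended() declaration above, while the collection step (an oid_array named to_fetch) is illustrative:

    /* Illustrative only: probe for the object without triggering a lazy fetch. */
    if (oid_object_info_extended(the_repository, &oid, NULL,
                                 OBJECT_INFO_FOR_PREFETCH) < 0)
            oid_array_append(&to_fetch, &oid); /* fetch all missing objects in one batch later */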
* table overhead.
*/
-static inline unsigned int oid_hash(struct object_id oid)
-{
- return sha1hash(oid.hash);
-}
-
-static inline int oid_equal(struct object_id a, struct object_id b)
-{
- return oideq(&a, &b);
-}
-
-KHASH_INIT(oid, struct object_id, int, 0, oid_hash, oid_equal)
-
/**
* A single oidset; should be zero-initialized (or use OIDSET_INIT).
*/
seen_objects_nr = 0;
}
-static uint32_t find_object_pos(const unsigned char *sha1)
+static uint32_t find_object_pos(const unsigned char *hash)
{
- struct object_entry *entry = packlist_find(writer.to_pack, sha1, NULL);
+ struct object_entry *entry = packlist_find(writer.to_pack, hash, NULL);
if (!entry) {
die("Failed to write bitmap index. Packfile doesn't have full closure "
- "(object %s is missing)", sha1_to_hex(sha1));
+ "(object %s is missing)", hash_to_hex(hash));
}
return oe_in_pack_pos(writer.to_pack, entry);
header.entry_count = htonl(writer.selected_nr);
hashcpy(header.checksum, writer.pack_checksum);
- hashwrite(f, &header, sizeof(header));
+ hashwrite(f, &header, sizeof(header) - GIT_MAX_RAWSZ + the_hash_algo->rawsz);
dump_bitmap(f, writer.commits);
dump_bitmap(f, writer.trees);
dump_bitmap(f, writer.blobs);
* commit.
*/
struct stored_bitmap {
- unsigned char sha1[20];
+ struct object_id oid;
struct ewah_bitmap *root;
struct stored_bitmap *xor;
int flags;
struct ewah_bitmap *blobs;
struct ewah_bitmap *tags;
- /* Map from SHA1 -> `stored_bitmap` for all the bitmapped commits */
- khash_sha1 *bitmaps;
+ /* Map from object ID -> `stored_bitmap` for all the bitmapped commits */
+ kh_oid_map_t *bitmaps;
/* Number of bitmapped commits */
uint32_t entry_count;
struct object **objects;
uint32_t *hashes;
uint32_t count, alloc;
- khash_sha1_pos *positions;
+ kh_oid_pos_t *positions;
} ext_index;
/* Bitmap result of the last performed walk */
{
struct bitmap_disk_header *header = (void *)index->map;
- if (index->map_size < sizeof(*header) + 20)
+ if (index->map_size < sizeof(*header) + the_hash_algo->rawsz)
return error("Corrupted bitmap index (missing header data)");
if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0)
"(Git requires BITMAP_OPT_FULL_DAG)");
if (flags & BITMAP_OPT_HASH_CACHE) {
- unsigned char *end = index->map + index->map_size - 20;
+ unsigned char *end = index->map + index->map_size - the_hash_algo->rawsz;
index->hashes = ((uint32_t *)end) - index->pack->num_objects;
}
}
index->entry_count = ntohl(header->entry_count);
- index->map_pos += sizeof(*header);
+ index->map_pos += sizeof(*header) - GIT_MAX_RAWSZ + the_hash_algo->rawsz;
return 0;
}
static struct stored_bitmap *store_bitmap(struct bitmap_index *index,
struct ewah_bitmap *root,
- const unsigned char *sha1,
+ const unsigned char *hash,
struct stored_bitmap *xor_with,
int flags)
{
stored->root = root;
stored->xor = xor_with;
stored->flags = flags;
- hashcpy(stored->sha1, sha1);
+ oidread(&stored->oid, hash);
- hash_pos = kh_put_sha1(index->bitmaps, stored->sha1, &ret);
+ hash_pos = kh_put_oid_map(index->bitmaps, stored->oid, &ret);
/* a 0 return code means the insertion succeeded with no changes,
* because the SHA1 already existed on the map. this is bad, there
* shouldn't be duplicated commits in the index */
if (ret == 0) {
- error("Duplicate entry in bitmap index: %s", sha1_to_hex(sha1));
+ error("Duplicate entry in bitmap index: %s", hash_to_hex(hash));
return NULL;
}
{
assert(bitmap_git->map);
- bitmap_git->bitmaps = kh_init_sha1();
- bitmap_git->ext_index.positions = kh_init_sha1_pos();
- load_pack_revindex(bitmap_git->pack);
+ bitmap_git->bitmaps = kh_init_oid_map();
+ bitmap_git->ext_index.positions = kh_init_oid_pos();
+ if (load_pack_revindex(bitmap_git->pack))
+ goto failed;
if (!(bitmap_git->commits = read_bitmap_1(bitmap_git)) ||
!(bitmap_git->trees = read_bitmap_1(bitmap_git)) ||
};
static inline int bitmap_position_extended(struct bitmap_index *bitmap_git,
- const unsigned char *sha1)
+ const struct object_id *oid)
{
- khash_sha1_pos *positions = bitmap_git->ext_index.positions;
- khiter_t pos = kh_get_sha1_pos(positions, sha1);
+ khash_oid_pos *positions = bitmap_git->ext_index.positions;
+ khiter_t pos = kh_get_oid_pos(positions, *oid);
if (pos < kh_end(positions)) {
int bitmap_pos = kh_value(positions, pos);
}
static inline int bitmap_position_packfile(struct bitmap_index *bitmap_git,
- const unsigned char *sha1)
+ const struct object_id *oid)
{
- off_t offset = find_pack_entry_one(sha1, bitmap_git->pack);
+ off_t offset = find_pack_entry_one(oid->hash, bitmap_git->pack);
if (!offset)
return -1;
}
static int bitmap_position(struct bitmap_index *bitmap_git,
- const unsigned char *sha1)
+ const struct object_id *oid)
{
- int pos = bitmap_position_packfile(bitmap_git, sha1);
- return (pos >= 0) ? pos : bitmap_position_extended(bitmap_git, sha1);
+ int pos = bitmap_position_packfile(bitmap_git, oid);
+ return (pos >= 0) ? pos : bitmap_position_extended(bitmap_git, oid);
}
static int ext_index_add_object(struct bitmap_index *bitmap_git,
int hash_ret;
int bitmap_pos;
- hash_pos = kh_put_sha1_pos(eindex->positions, object->oid.hash, &hash_ret);
+ hash_pos = kh_put_oid_pos(eindex->positions, object->oid, &hash_ret);
if (hash_ret > 0) {
if (eindex->count >= eindex->alloc) {
eindex->alloc = (eindex->alloc + 16) * 3 / 2;
struct bitmap_show_data *data = data_;
int bitmap_pos;
- bitmap_pos = bitmap_position(data->bitmap_git, object->oid.hash);
+ bitmap_pos = bitmap_position(data->bitmap_git, &object->oid);
if (bitmap_pos < 0)
bitmap_pos = ext_index_add_object(data->bitmap_git, object,
static int add_to_include_set(struct bitmap_index *bitmap_git,
struct include_data *data,
- const unsigned char *sha1,
+ const struct object_id *oid,
int bitmap_pos)
{
khiter_t hash_pos;
if (bitmap_get(data->base, bitmap_pos))
return 0;
- hash_pos = kh_get_sha1(bitmap_git->bitmaps, sha1);
+ hash_pos = kh_get_oid_map(bitmap_git->bitmaps, *oid);
if (hash_pos < kh_end(bitmap_git->bitmaps)) {
struct stored_bitmap *st = kh_value(bitmap_git->bitmaps, hash_pos);
bitmap_or_ewah(data->base, lookup_stored_bitmap(st));
struct include_data *data = _data;
int bitmap_pos;
- bitmap_pos = bitmap_position(data->bitmap_git, commit->object.oid.hash);
+ bitmap_pos = bitmap_position(data->bitmap_git, &commit->object.oid);
if (bitmap_pos < 0)
bitmap_pos = ext_index_add_object(data->bitmap_git,
(struct object *)commit,
NULL);
- if (!add_to_include_set(data->bitmap_git, data, commit->object.oid.hash,
+ if (!add_to_include_set(data->bitmap_git, data, &commit->object.oid,
bitmap_pos)) {
struct commit_list *parent = commit->parents;
roots = roots->next;
if (object->type == OBJ_COMMIT) {
- khiter_t pos = kh_get_sha1(bitmap_git->bitmaps, object->oid.hash);
+ khiter_t pos = kh_get_oid_map(bitmap_git->bitmaps, object->oid);
if (pos < kh_end(bitmap_git->bitmaps)) {
struct stored_bitmap *st = kh_value(bitmap_git->bitmaps, pos);
int pos;
roots = roots->next;
- pos = bitmap_position(bitmap_git, object->oid.hash);
+ pos = bitmap_position(bitmap_git, &object->oid);
if (pos < 0 || base == NULL || !bitmap_get(base, pos)) {
object->flags &= ~UNINTERESTING;
fprintf(stderr, "Failed to reuse at %d (%016llx)\n",
reuse_objects, result->words[i]);
- fprintf(stderr, " %s\n", sha1_to_hex(sha1));
+ fprintf(stderr, " %s\n", hash_to_hex(sha1));
}
#endif
struct bitmap_test_data *tdata = data;
int bitmap_pos;
- bitmap_pos = bitmap_position(tdata->bitmap_git, object->oid.hash);
+ bitmap_pos = bitmap_position(tdata->bitmap_git, &object->oid);
if (bitmap_pos < 0)
die("Object not in bitmap: %s\n", oid_to_hex(&object->oid));
int bitmap_pos;
bitmap_pos = bitmap_position(tdata->bitmap_git,
- commit->object.oid.hash);
+ &commit->object.oid);
if (bitmap_pos < 0)
die("Object not in bitmap: %s\n", oid_to_hex(&commit->object.oid));
bitmap_git->version, bitmap_git->entry_count);
root = revs->pending.objects[0].item;
- pos = kh_get_sha1(bitmap_git->bitmaps, root->oid.hash);
+ pos = kh_get_oid_map(bitmap_git->bitmaps, root->oid);
if (pos < kh_end(bitmap_git->bitmaps)) {
struct stored_bitmap *st = kh_value(bitmap_git->bitmaps, pos);
lookup_stored_bitmap(stored),
rebuild)) {
hash_pos = kh_put_sha1(reused_bitmaps,
- stored->sha1,
+ stored->oid.hash,
&hash_ret);
kh_value(reused_bitmaps, hash_pos) =
bitmap_to_ewah(rebuild);
ewah_pool_free(b->trees);
ewah_pool_free(b->blobs);
ewah_pool_free(b->tags);
- kh_destroy_sha1(b->bitmaps);
+ kh_destroy_oid_map(b->bitmaps);
free(b->ext_index.objects);
free(b->ext_index.hashes);
bitmap_free(b->result);
free(b);
}
-int bitmap_has_sha1_in_uninteresting(struct bitmap_index *bitmap_git,
- const unsigned char *sha1)
+int bitmap_has_oid_in_uninteresting(struct bitmap_index *bitmap_git,
+ const struct object_id *oid)
{
int pos;
if (!bitmap_git->haves)
return 0; /* walk had no "haves" */
- pos = bitmap_position_packfile(bitmap_git, sha1);
+ pos = bitmap_position_packfile(bitmap_git, oid);
if (pos < 0)
return 0;
uint16_t version;
uint16_t options;
uint32_t entry_count;
- unsigned char checksum[20];
+ unsigned char checksum[GIT_MAX_RAWSZ];
};
static const char BITMAP_IDX_SIGNATURE[] = {'B', 'I', 'T', 'M'};
* queried to see if a particular object was reachable from any of the
* objects flagged as UNINTERESTING.
*/
-int bitmap_has_sha1_in_uninteresting(struct bitmap_index *, const unsigned char *sha1);
+int bitmap_has_oid_in_uninteresting(struct bitmap_index *, const struct object_id *oid);
void bitmap_writer_show_progress(int show);
void bitmap_writer_set_checksum(unsigned char *sha1);
* this fall back code, just stay simple and fall back to using
* in_pack[] array.
*/
-void oe_map_new_pack(struct packing_data *pack,
- struct packed_git *p)
+void oe_map_new_pack(struct packing_data *pack)
{
uint32_t i;
return pack->in_pack[e - pack->objects];
}
-void oe_map_new_pack(struct packing_data *pack,
- struct packed_git *p);
+void oe_map_new_pack(struct packing_data *pack);
+
static inline void oe_set_in_pack(struct packing_data *pack,
struct object_entry *e,
struct packed_git *p)
{
if (!p->index)
- oe_map_new_pack(pack, p);
+ oe_map_new_pack(pack);
if (pack->in_pack_by_idx)
e->in_pack_idx = p->index;
else
#include "cache.h"
#include "pack-revindex.h"
#include "object-store.h"
+#include "packfile.h"
/*
* Pack index for existing packs give us easy access to the offsets into
sort_revindex(p->revindex, num_ent, p->pack_size);
}
-void load_pack_revindex(struct packed_git *p)
+int load_pack_revindex(struct packed_git *p)
{
- if (!p->revindex)
+ if (!p->revindex) {
+ if (open_pack_index(p))
+ return -1;
create_pack_revindex(p);
+ }
+ return 0;
}
int find_revindex_position(struct packed_git *p, off_t ofs)
{
int pos;
- load_pack_revindex(p);
+ if (load_pack_revindex(p))
+ return NULL;
+
pos = find_revindex_position(p, ofs);
if (pos < 0)
unsigned int nr;
};
-void load_pack_revindex(struct packed_git *p);
+int load_pack_revindex(struct packed_git *p);
int find_revindex_position(struct packed_git *p, off_t ofs);
struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs);
struct packed_git *p = alloc_packed_git(alloc);
memcpy(p->pack_name, path, alloc); /* includes NUL */
- hashcpy(p->sha1, sha1);
+ hashcpy(p->hash, sha1);
if (check_packed_git_idx(idx_path, p)) {
free(p);
return NULL;
}
}
-static int close_pack_fd(struct packed_git *p)
+int close_pack_fd(struct packed_git *p)
{
if (p->pack_fd < 0)
return 0;
#endif
}
+const char *pack_basename(struct packed_git *p)
+{
+ const char *ret = strrchr(p->pack_name, '/');
+ if (ret)
+ ret = ret + 1; /* skip past slash */
+ else
+ ret = p->pack_name; /* we only have a base */
+ return ret;
+}
+
/*
* Do not call this directly as this leaks p->pack_fd on error return;
* call open_packed_git() instead.
if (!p->index_data) {
struct multi_pack_index *m;
- const char *pack_name = strrchr(p->pack_name, '/');
+ const char *pack_name = pack_basename(p);
for (m = the_repository->objects->multi_pack_index;
m; m = m->next) {
p->pack_local = local;
p->mtime = st.st_mtime;
if (path_len < the_hash_algo->hexsz ||
- get_sha1_hex(path + path_len - the_hash_algo->hexsz, p->sha1))
- hashclr(p->sha1);
+ get_sha1_hex(path + path_len - the_hash_algo->hexsz, p->hash))
+ hashclr(p->hash);
return p;
}
uint32_t i;
int r = 0;
- if (flags & FOR_EACH_OBJECT_PACK_ORDER)
- load_pack_revindex(p);
+ if (flags & FOR_EACH_OBJECT_PACK_ORDER) {
+ if (load_pack_revindex(p))
+ return -1;
+ }
for (i = 0; i < p->num_objects; i++) {
uint32_t pos;
*
* Example: odb_pack_name(out, sha1, "idx") => ".git/objects/pack/pack-1234..idx"
*/
-extern char *odb_pack_name(struct strbuf *buf, const unsigned char *sha1, const char *ext);
+char *odb_pack_name(struct strbuf *buf, const unsigned char *sha1, const char *ext);
/*
* Return the name of the (local) packfile with the specified sha1 in
* its name. The return value is a pointer to memory that is
* overwritten each time this function is called.
*/
-extern char *sha1_pack_name(const unsigned char *sha1);
+char *sha1_pack_name(const unsigned char *sha1);
/*
* Return the name of the (local) pack index file with the specified
* sha1 in its name. The return value is a pointer to memory that is
* overwritten each time this function is called.
*/
-extern char *sha1_pack_index_name(const unsigned char *sha1);
+char *sha1_pack_index_name(const unsigned char *sha1);
-extern struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_path);
+/*
+ * Return the basename of the packfile, omitting any containing directory
+ * (e.g., "pack-1234abcd[...].pack").
+ */
+const char *pack_basename(struct packed_git *p);
+
+struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_path);
typedef void each_file_in_pack_dir_fn(const char *full_path, size_t full_path_len,
const char *file_pach, void *data);
#define PACKDIR_FILE_GARBAGE 4
extern void (*report_garbage)(unsigned seen_bits, const char *path);
-extern void reprepare_packed_git(struct repository *r);
-extern void install_packed_git(struct repository *r, struct packed_git *pack);
+void reprepare_packed_git(struct repository *r);
+void install_packed_git(struct repository *r, struct packed_git *pack);
struct packed_git *get_packed_git(struct repository *r);
struct list_head *get_packed_git_mru(struct repository *r);
*/
unsigned long approximate_object_count(void);
-extern struct packed_git *find_sha1_pack(const unsigned char *sha1,
- struct packed_git *packs);
+struct packed_git *find_sha1_pack(const unsigned char *sha1,
+ struct packed_git *packs);
-extern void pack_report(void);
+void pack_report(void);
/*
* mmap the index file for the specified packfile (if it is not
* already mmapped). Return 0 on success.
*/
-extern int open_pack_index(struct packed_git *);
+int open_pack_index(struct packed_git *);
/*
* munmap the index file for the specified packfile (if it is
* currently mmapped).
*/
-extern void close_pack_index(struct packed_git *);
+void close_pack_index(struct packed_git *);
+
+int close_pack_fd(struct packed_git *p);
-extern uint32_t get_pack_fanout(struct packed_git *p, uint32_t value);
+uint32_t get_pack_fanout(struct packed_git *p, uint32_t value);
-extern unsigned char *use_pack(struct packed_git *, struct pack_window **, off_t, unsigned long *);
-extern void close_pack_windows(struct packed_git *);
-extern void close_pack(struct packed_git *);
-extern void close_all_packs(struct raw_object_store *o);
-extern void unuse_pack(struct pack_window **);
-extern void clear_delta_base_cache(void);
-extern struct packed_git *add_packed_git(const char *path, size_t path_len, int local);
+unsigned char *use_pack(struct packed_git *, struct pack_window **, off_t, unsigned long *);
+void close_pack_windows(struct packed_git *);
+void close_pack(struct packed_git *);
+void close_all_packs(struct raw_object_store *o);
+void unuse_pack(struct pack_window **);
+void clear_delta_base_cache(void);
+struct packed_git *add_packed_git(const char *path, size_t path_len, int local);
/*
* Make sure that a pointer access into an mmap'd index file is within bounds,
* (like the 64-bit extended offset table), as we compare the size to the
* fixed-length parts when we open the file.
*/
-extern void check_pack_index_ptr(const struct packed_git *p, const void *ptr);
+void check_pack_index_ptr(const struct packed_git *p, const void *ptr);
/*
* Perform binary search on a pack-index for a given oid. Packfile is expected to
* at the SHA-1 within the mmapped index. Return NULL if there is an
* error.
*/
-extern const unsigned char *nth_packed_object_sha1(struct packed_git *, uint32_t n);
+const unsigned char *nth_packed_object_sha1(struct packed_git *, uint32_t n);
/*
* Like nth_packed_object_sha1, but write the data into the object specified by
 * the first argument. Returns the first argument on success, and NULL on
* error.
*/
-extern const struct object_id *nth_packed_object_oid(struct object_id *, struct packed_git *, uint32_t n);
+const struct object_id *nth_packed_object_oid(struct object_id *, struct packed_git *, uint32_t n);
/*
* Return the offset of the nth object within the specified packfile.
* The index must already be opened.
*/
-extern off_t nth_packed_object_offset(const struct packed_git *, uint32_t n);
+off_t nth_packed_object_offset(const struct packed_git *, uint32_t n);
/*
* If the object named sha1 is present in the specified packfile,
* return its offset within the packfile; otherwise, return 0.
*/
-extern off_t find_pack_entry_one(const unsigned char *sha1, struct packed_git *);
+off_t find_pack_entry_one(const unsigned char *sha1, struct packed_git *);
-extern int is_pack_valid(struct packed_git *);
-extern void *unpack_entry(struct repository *r, struct packed_git *, off_t, enum object_type *, unsigned long *);
-extern unsigned long unpack_object_header_buffer(const unsigned char *buf, unsigned long len, enum object_type *type, unsigned long *sizep);
-extern unsigned long get_size_from_delta(struct packed_git *, struct pack_window **, off_t);
-extern int unpack_object_header(struct packed_git *, struct pack_window **, off_t *, unsigned long *);
+int is_pack_valid(struct packed_git *);
+void *unpack_entry(struct repository *r, struct packed_git *, off_t, enum object_type *, unsigned long *);
+unsigned long unpack_object_header_buffer(const unsigned char *buf, unsigned long len, enum object_type *type, unsigned long *sizep);
+unsigned long get_size_from_delta(struct packed_git *, struct pack_window **, off_t);
+int unpack_object_header(struct packed_git *, struct pack_window **, off_t *, unsigned long *);
-extern void release_pack_memory(size_t);
+void release_pack_memory(size_t);
/* global flag to enable extra checks when accessing packed objects */
extern int do_check_packed_object_crc;
-extern int packed_object_info(struct repository *r,
- struct packed_git *pack,
- off_t offset, struct object_info *);
+int packed_object_info(struct repository *r,
+ struct packed_git *pack,
+ off_t offset, struct object_info *);
-extern void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1);
-extern const struct packed_git *has_packed_and_bad(struct repository *r, const unsigned char *sha1);
+void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1);
+const struct packed_git *has_packed_and_bad(struct repository *r, const unsigned char *sha1);
/*
* Iff a pack file in the given repository contains the object named by sha1,
* return true and store its location to e.
*/
-extern int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e);
+int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e);
-extern int has_object_pack(const struct object_id *oid);
+int has_object_pack(const struct object_id *oid);
-extern int has_pack_index(const unsigned char *sha1);
+int has_pack_index(const unsigned char *sha1);
/*
* Return 1 if an object in a promisor packfile is or refers to the given
* object, 0 otherwise.
*/
-extern int is_promisor_object(const struct object_id *oid);
+int is_promisor_object(const struct object_id *oid);
/*
* Expose a function for fuzz testing.
* have a convenient entry-point for fuzz testing. For real uses, you should
* probably use open_pack_index() or parse_pack_index() instead.
*/
-extern int load_idx(const char *path, const unsigned int hashsz, void *idx_map,
- size_t idx_size, struct packed_git *p);
+int load_idx(const char *path, const unsigned int hashsz, void *idx_map,
+ size_t idx_size, struct packed_git *p);
#endif
argv_array_push(&pager_process->args, pager);
pager_process->use_shell = 1;
setup_pager_env(&pager_process->env_array);
+ pager_process->trace2_child_class = "pager";
}
void setup_pager(void)
opt->long_name);
if (v && v < MINIMUM_ABBREV)
v = MINIMUM_ABBREV;
- else if (v > 40)
- v = 40;
+ else if (v > the_hash_algo->hexsz)
+ v = the_hash_algo->hexsz;
}
*(int *)(opt->value) = v;
return 0;
* "-h" output even if it's not being handled directly by
* parse_options().
*/
-int parse_opt_unknown_cb(const struct option *opt, const char *arg, int unset)
+enum parse_opt_result parse_opt_unknown_cb(struct parse_opt_ctx_t *ctx,
+ const struct option *opt,
+ const char *arg, int unset)
{
- return -2;
+ BUG_ON_OPT_ARG(arg);
+ return PARSE_OPT_UNKNOWN;
}
/**
#include "color.h"
#include "utf8.h"
+static int disallow_abbreviated_options;
+
#define OPT_SHORT 1
#define OPT_UNSET 2
return error("BUG: switch '%c' %s", opt->short_name, reason);
}
-static int get_arg(struct parse_opt_ctx_t *p, const struct option *opt,
- int flags, const char **arg)
+static enum parse_opt_result get_arg(struct parse_opt_ctx_t *p,
+ const struct option *opt,
+ int flags, const char **arg)
{
if (p->opt) {
*arg = p->opt;
*file = prefix_filename(prefix, *file);
}
-static int opt_command_mode_error(const struct option *opt,
- const struct option *all_opts,
- int flags)
+static enum parse_opt_result opt_command_mode_error(
+ const struct option *opt,
+ const struct option *all_opts,
+ int flags)
{
const struct option *that;
struct strbuf that_name = STRBUF_INIT;
error(_("%s is incompatible with %s"),
optname(opt, flags), that_name.buf);
strbuf_release(&that_name);
- return -1;
+ return PARSE_OPT_ERROR;
}
return error(_("%s : incompatible with something else"),
optname(opt, flags));
}
-static int get_value(struct parse_opt_ctx_t *p,
- const struct option *opt,
- const struct option *all_opts,
- int flags)
+static enum parse_opt_result get_value(struct parse_opt_ctx_t *p,
+ const struct option *opt,
+ const struct option *all_opts,
+ int flags)
{
const char *s, *arg;
const int unset = flags & OPT_UNSET;
switch (opt->type) {
case OPTION_LOWLEVEL_CALLBACK:
- return (*(parse_opt_ll_cb *)opt->callback)(p, opt, unset);
+ return opt->ll_callback(p, opt, NULL, unset);
case OPTION_BIT:
if (unset)
*(int *)opt->value &= ~opt->defval;
return 0;
+ case OPTION_BITOP:
+ if (unset)
+ BUG("BITOP can't have unset form");
+ *(int *)opt->value &= ~opt->extra;
+ *(int *)opt->value |= opt->defval;
+ return 0;
+
case OPTION_COUNTUP:
if (*(int *)opt->value < 0)
*(int *)opt->value = 0;
return err;
case OPTION_CALLBACK:
+ {
+ const char *p_arg = NULL;
+ int p_unset;
+
if (unset)
- return (*opt->callback)(opt, NULL, 1) ? (-1) : 0;
- if (opt->flags & PARSE_OPT_NOARG)
- return (*opt->callback)(opt, NULL, 0) ? (-1) : 0;
- if (opt->flags & PARSE_OPT_OPTARG && !p->opt)
- return (*opt->callback)(opt, NULL, 0) ? (-1) : 0;
- if (get_arg(p, opt, flags, &arg))
+ p_unset = 1;
+ else if (opt->flags & PARSE_OPT_NOARG)
+ p_unset = 0;
+ else if (opt->flags & PARSE_OPT_OPTARG && !p->opt)
+ p_unset = 0;
+ else if (get_arg(p, opt, flags, &arg))
return -1;
- return (*opt->callback)(opt, arg, 0) ? (-1) : 0;
-
+ else {
+ p_unset = 0;
+ p_arg = arg;
+ }
+ if (opt->callback)
+ return (*opt->callback)(opt, p_arg, p_unset) ? (-1) : 0;
+ else
+ return (*opt->ll_callback)(p, opt, p_arg, p_unset);
+ }
case OPTION_INTEGER:
if (unset) {
*(int *)opt->value = 0;
}
}
-static int parse_short_opt(struct parse_opt_ctx_t *p, const struct option *options)
+static enum parse_opt_result parse_short_opt(struct parse_opt_ctx_t *p,
+ const struct option *options)
{
const struct option *all_opts = options;
const struct option *numopt = NULL;
len++;
arg = xmemdupz(p->opt, len);
p->opt = p->opt[len] ? p->opt + len : NULL;
- rc = (*numopt->callback)(numopt, arg, 0) ? (-1) : 0;
+ if (numopt->callback)
+ rc = (*numopt->callback)(numopt, arg, 0) ? (-1) : 0;
+ else
+ rc = (*numopt->ll_callback)(p, numopt, arg, 0);
free(arg);
return rc;
}
- return -2;
+ return PARSE_OPT_UNKNOWN;
}
-static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg,
- const struct option *options)
+static enum parse_opt_result parse_long_opt(
+ struct parse_opt_ctx_t *p, const char *arg,
+ const struct option *options)
{
const struct option *all_opts = options;
const char *arg_end = strchrnul(arg, '=');
optname(options, flags));
if (*rest)
continue;
+ if (options->value)
+ *(int *)options->value = options->defval;
p->out[p->cpidx++] = arg - 2;
- return 0;
+ return PARSE_OPT_DONE;
}
if (!rest) {
/* abbreviated? */
- if (!strncmp(long_name, arg, arg_end - arg)) {
+ if (!(p->flags & PARSE_OPT_KEEP_UNKNOWN) &&
+ !strncmp(long_name, arg, arg_end - arg)) {
is_abbreviated:
if (abbrev_option) {
/*
return get_value(p, options, all_opts, flags ^ opt_flags);
}
+ if (disallow_abbreviated_options && (ambiguous_option || abbrev_option))
+ die("disallowed abbreviated or ambiguous option '%.*s'",
+ (int)(arg_end - arg), arg);
+
if (ambiguous_option) {
error(_("ambiguous option: %s "
"(could be --%s%s or --%s%s)"),
ambiguous_option->long_name,
(abbrev_flags & OPT_UNSET) ? "no-" : "",
abbrev_option->long_name);
- return -3;
+ return PARSE_OPT_HELP;
}
if (abbrev_option)
return get_value(p, abbrev_option, all_opts, abbrev_flags);
- return -2;
+ return PARSE_OPT_UNKNOWN;
}
static int parse_nodash_opt(struct parse_opt_ctx_t *p, const char *arg,
if ((opts->flags & PARSE_OPT_OPTARG) ||
!(opts->flags & PARSE_OPT_NOARG))
err |= optbug(opts, "should not accept an argument");
+ break;
+ case OPTION_CALLBACK:
+ if (!opts->callback && !opts->ll_callback)
+ BUG("OPTION_CALLBACK needs one callback");
+ if (opts->callback && opts->ll_callback)
+ BUG("OPTION_CALLBACK can't have two callbacks");
+ break;
+ case OPTION_LOWLEVEL_CALLBACK:
+ if (!opts->ll_callback)
+ BUG("OPTION_LOWLEVEL_CALLBACK needs a callback");
+ if (opts->callback)
+ BUG("OPTION_LOWLEVEL_CALLBACK needs no high level callback");
+ break;
default:
; /* ok. (usually accepts an argument) */
}
const struct option *options, int flags)
{
memset(ctx, 0, sizeof(*ctx));
- ctx->argc = ctx->total = argc - 1;
- ctx->argv = argv + 1;
- ctx->out = argv;
+ ctx->argc = argc;
+ ctx->argv = argv;
+ if (!(flags & PARSE_OPT_ONE_SHOT)) {
+ ctx->argc--;
+ ctx->argv++;
+ }
+ ctx->total = ctx->argc;
+ ctx->out = argv;
ctx->prefix = prefix;
ctx->cpidx = ((flags & PARSE_OPT_KEEP_ARGV0) != 0);
ctx->flags = flags;
if ((flags & PARSE_OPT_KEEP_UNKNOWN) &&
- (flags & PARSE_OPT_STOP_AT_NON_OPTION))
+ (flags & PARSE_OPT_STOP_AT_NON_OPTION) &&
+ !(flags & PARSE_OPT_ONE_SHOT))
BUG("STOP_AT_NON_OPTION and KEEP_UNKNOWN don't go together");
+ if ((flags & PARSE_OPT_ONE_SHOT) &&
+ (flags & PARSE_OPT_KEEP_ARGV0))
+ BUG("Can't keep argv0 if you don't have it");
parse_options_check(options);
}
}
}
-static int show_gitcomp(struct parse_opt_ctx_t *ctx,
- const struct option *opts)
+static int show_gitcomp(const struct option *opts)
{
const struct option *original_opts = opts;
int nr_noopts = 0;
for (; ctx->argc; ctx->argc--, ctx->argv++) {
const char *arg = ctx->argv[0];
+ if (ctx->flags & PARSE_OPT_ONE_SHOT &&
+ ctx->argc != ctx->total)
+ break;
+
if (*arg != '-' || !arg[1]) {
if (parse_nodash_opt(ctx, arg, options) == 0)
continue;
/* lone --git-completion-helper is asked by git-completion.bash */
if (ctx->total == 1 && !strcmp(arg + 1, "-git-completion-helper"))
- return show_gitcomp(ctx, options);
+ return show_gitcomp(options);
if (arg[1] != '-') {
ctx->opt = arg + 1;
switch (parse_short_opt(ctx, options)) {
- case -1:
+ case PARSE_OPT_ERROR:
return PARSE_OPT_ERROR;
- case -2:
+ case PARSE_OPT_UNKNOWN:
if (ctx->opt)
check_typos(arg + 1, options);
if (internal_help && *ctx->opt == 'h')
goto show_usage;
goto unknown;
+ case PARSE_OPT_NON_OPTION:
+ case PARSE_OPT_HELP:
+ case PARSE_OPT_COMPLETE:
+ BUG("parse_short_opt() cannot return these");
+ case PARSE_OPT_DONE:
+ break;
}
if (ctx->opt)
check_typos(arg + 1, options);
while (ctx->opt) {
switch (parse_short_opt(ctx, options)) {
- case -1:
+ case PARSE_OPT_ERROR:
return PARSE_OPT_ERROR;
- case -2:
+ case PARSE_OPT_UNKNOWN:
if (internal_help && *ctx->opt == 'h')
goto show_usage;
ctx->argv[0] = xstrdup(ctx->opt - 1);
*(char *)ctx->argv[0] = '-';
goto unknown;
+ case PARSE_OPT_NON_OPTION:
+ case PARSE_OPT_COMPLETE:
+ case PARSE_OPT_HELP:
+ BUG("parse_short_opt() cannot return these");
+ case PARSE_OPT_DONE:
+ break;
}
}
continue;
if (internal_help && !strcmp(arg + 2, "help"))
goto show_usage;
switch (parse_long_opt(ctx, arg + 2, options)) {
- case -1:
+ case PARSE_OPT_ERROR:
return PARSE_OPT_ERROR;
- case -2:
+ case PARSE_OPT_UNKNOWN:
goto unknown;
- case -3:
+ case PARSE_OPT_HELP:
goto show_usage;
+ case PARSE_OPT_NON_OPTION:
+ case PARSE_OPT_COMPLETE:
+ BUG("parse_long_opt() cannot return these");
+ case PARSE_OPT_DONE:
+ break;
}
continue;
unknown:
+ if (ctx->flags & PARSE_OPT_ONE_SHOT)
+ break;
if (!(ctx->flags & PARSE_OPT_KEEP_UNKNOWN))
return PARSE_OPT_UNKNOWN;
ctx->out[ctx->cpidx++] = ctx->argv[0];
int parse_options_end(struct parse_opt_ctx_t *ctx)
{
+ if (ctx->flags & PARSE_OPT_ONE_SHOT)
+ return ctx->total - ctx->argc;
+
MOVE_ARRAY(ctx->out + ctx->cpidx, ctx->argv, ctx->argc);
ctx->out[ctx->cpidx + ctx->argc] = NULL;
return ctx->cpidx + ctx->argc;
{
struct parse_opt_ctx_t ctx;
+ disallow_abbreviated_options =
+ git_env_bool("GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS", 0);
+
parse_options_start(&ctx, argc, argv, prefix, options, flags);
switch (parse_options_step(&ctx, options, usagestr)) {
case PARSE_OPT_HELP:
/* options with no arguments */
OPTION_BIT,
OPTION_NEGBIT,
+ OPTION_BITOP,
OPTION_COUNTUP,
OPTION_SET_INT,
OPTION_CMDMODE,
PARSE_OPT_STOP_AT_NON_OPTION = 2,
PARSE_OPT_KEEP_ARGV0 = 4,
PARSE_OPT_KEEP_UNKNOWN = 8,
- PARSE_OPT_NO_INTERNAL_HELP = 16
+ PARSE_OPT_NO_INTERNAL_HELP = 16,
+ PARSE_OPT_ONE_SHOT = 32
};
enum parse_opt_option_flags {
typedef int parse_opt_cb(const struct option *, const char *arg, int unset);
struct parse_opt_ctx_t;
-typedef int parse_opt_ll_cb(struct parse_opt_ctx_t *ctx,
- const struct option *opt, int unset);
+typedef enum parse_opt_result parse_opt_ll_cb(struct parse_opt_ctx_t *ctx,
+ const struct option *opt,
+ const char *arg, int unset);
/*
* `type`::
* the option takes optional argument.
*
* `callback`::
- * pointer to the callback to use for OPTION_CALLBACK or
- * OPTION_LOWLEVEL_CALLBACK.
+ * pointer to the callback to use for OPTION_CALLBACK
*
* `defval`::
* default value to fill (*->value) with for PARSE_OPT_OPTARG.
* OPTION_{BIT,SET_INT} store the {mask,integer} to put in the value when met.
* CALLBACKS can use it like they want.
+ *
+ * `ll_callback`::
+ * pointer to the callback to use for OPTION_LOWLEVEL_CALLBACK
+ *
*/
struct option {
enum parse_opt_type type;
int flags;
parse_opt_cb *callback;
intptr_t defval;
+ parse_opt_ll_cb *ll_callback;
+ intptr_t extra;
};
#define OPT_BIT_F(s, l, v, h, b, f) { OPTION_BIT, (s), (l), (v), NULL, (h), \
#define OPT_SET_INT_F(s, l, v, h, i, f) { OPTION_SET_INT, (s), (l), (v), NULL, \
(h), PARSE_OPT_NOARG | (f), NULL, (i) }
#define OPT_BOOL_F(s, l, v, h, f) OPT_SET_INT_F(s, l, v, h, 1, f)
+#define OPT_CALLBACK_F(s, l, v, a, h, f, cb) \
+ { OPTION_CALLBACK, (s), (l), (v), (a), (h), (f), (cb) }
+#define OPT_STRING_F(s, l, v, a, h, f) { OPTION_STRING, (s), (l), (v), (a), (h), (f) }
+#define OPT_INTEGER_F(s, l, v, h, f) { OPTION_INTEGER, (s), (l), (v), N_("n"), (h), (f) }
#define OPT_END() { OPTION_END }
-#define OPT_ARGUMENT(l, h) { OPTION_ARGUMENT, 0, (l), NULL, NULL, \
- (h), PARSE_OPT_NOARG}
+#define OPT_ARGUMENT(l, v, h) { OPTION_ARGUMENT, 0, (l), (v), NULL, \
+ (h), PARSE_OPT_NOARG, NULL, 1 }
#define OPT_GROUP(h) { OPTION_GROUP, 0, NULL, NULL, NULL, (h) }
#define OPT_BIT(s, l, v, h, b) OPT_BIT_F(s, l, v, h, b, 0)
+#define OPT_BITOP(s, l, v, h, set, clear) { OPTION_BITOP, (s), (l), (v), NULL, (h), \
+ PARSE_OPT_NOARG|PARSE_OPT_NONEG, NULL, \
+ (set), NULL, (clear) }
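
A hedged usage sketch for the new OPT_BITOP macro: a single option that sets some bits and clears others in one flags word. The flag names and the option itself are made up for illustration.

static int flags;

#define FLAG_QUIET   (1 << 0)	/* hypothetical */
#define FLAG_VERBOSE (1 << 1)	/* hypothetical */

static struct option opts[] = {
	/* --quiet sets FLAG_QUIET and clears FLAG_VERBOSE in one option */
	OPT_BITOP('q', "quiet", &flags, N_("be quiet"),
		  FLAG_QUIET, FLAG_VERBOSE),
	OPT_END()
};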
#define OPT_NEGBIT(s, l, v, h, b) { OPTION_NEGBIT, (s), (l), (v), NULL, \
(h), PARSE_OPT_NOARG, NULL, (b) }
#define OPT_COUNTUP(s, l, v, h) OPT_COUNTUP_F(s, l, v, h, 0)
(h), PARSE_OPT_NOARG | PARSE_OPT_HIDDEN, NULL, 1}
#define OPT_CMDMODE(s, l, v, h, i) { OPTION_CMDMODE, (s), (l), (v), NULL, \
(h), PARSE_OPT_NOARG|PARSE_OPT_NONEG, NULL, (i) }
-#define OPT_INTEGER(s, l, v, h) { OPTION_INTEGER, (s), (l), (v), N_("n"), (h) }
+#define OPT_INTEGER(s, l, v, h) OPT_INTEGER_F(s, l, v, h, 0)
#define OPT_MAGNITUDE(s, l, v, h) { OPTION_MAGNITUDE, (s), (l), (v), \
N_("n"), (h), PARSE_OPT_NONEG }
-#define OPT_STRING(s, l, v, a, h) { OPTION_STRING, (s), (l), (v), (a), (h) }
+#define OPT_STRING(s, l, v, a, h) OPT_STRING_F(s, l, v, a, h, 0)
#define OPT_STRING_LIST(s, l, v, a, h) \
{ OPTION_CALLBACK, (s), (l), (v), (a), \
(h), 0, &parse_opt_string_list }
#define OPT_EXPIRY_DATE(s, l, v, h) \
{ OPTION_CALLBACK, (s), (l), (v), N_("expiry-date"),(h), 0, \
parse_opt_expiry_date_cb }
-#define OPT_CALLBACK(s, l, v, a, h, f) \
- { OPTION_CALLBACK, (s), (l), (v), (a), (h), 0, (f) }
+#define OPT_CALLBACK(s, l, v, a, h, f) OPT_CALLBACK_F(s, l, v, a, h, 0, f)
#define OPT_NUMBER_CALLBACK(v, h, f) \
{ OPTION_NUMBER, 0, NULL, (v), NULL, (h), \
PARSE_OPT_NOARG | PARSE_OPT_NONEG, (f) }
N_("no-op (backward compatibility)"), \
PARSE_OPT_HIDDEN | PARSE_OPT_NOARG, parse_opt_noop_cb }
-/* parse_options() will filter out the processed options and leave the
- * non-option arguments in argv[]. usagestr strings should be marked
- * for translation with N_().
+/*
+ * parse_options() will filter out the processed options and leave the
+ * non-option arguments in argv[]. argv[0] is assumed to be the program
+ * name and is skipped.
+ *
+ * usagestr strings should be marked for translation with N_().
+ *
* Returns the number of arguments left in argv[].
+ *
+ * In one-shot mode, argv0 is not a program name, argv[] is left
+ * untouched and parse_options() returns the number of options
+ * processed.
*/
-extern int parse_options(int argc, const char **argv, const char *prefix,
- const struct option *options,
- const char * const usagestr[], int flags);
+int parse_options(int argc, const char **argv, const char *prefix,
+ const struct option *options,
+ const char * const usagestr[], int flags);
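
A rough sketch of one-shot use, under the assumption described in the comment above: the caller hands parse_options() an argv slice that does not start with a program name, and reads back how many elements were consumed. The option and helper are hypothetical.

static int depth;
static const char * const one_shot_usage[] = {
	N_("sketch [--depth <n>]"), NULL
};
static struct option one_shot_opts[] = {
	OPT_INTEGER(0, "depth", &depth, N_("depth")),
	OPT_END()
};

/* argv points directly at e.g. { "--depth", "3", "rest", ... } */
static int eat_leading_options(int argc, const char **argv)
{
	/* returns how many argv[] elements were consumed; argv[] is untouched */
	return parse_options(argc, argv, NULL, one_shot_opts, one_shot_usage,
			     PARSE_OPT_ONE_SHOT |
			     PARSE_OPT_NO_INTERNAL_HELP);
}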
-extern NORETURN void usage_with_options(const char * const *usagestr,
- const struct option *options);
+NORETURN void usage_with_options(const char * const *usagestr,
+ const struct option *options);
-extern NORETURN void usage_msg_opt(const char *msg,
- const char * const *usagestr,
- const struct option *options);
+NORETURN void usage_msg_opt(const char *msg,
+ const char * const *usagestr,
+ const struct option *options);
-extern int optbug(const struct option *opt, const char *reason);
+int optbug(const struct option *opt, const char *reason);
const char *optname(const struct option *opt, int flags);
/*
BUG("option callback does not expect an argument"); \
} while (0)
+/*
+ * Similar to the assertions above, but checks that "arg" is always non-NULL.
+ * This assertion also implies BUG_ON_OPT_NEG(), letting you declare both
+ * assertions in a single line.
+ */
+#define BUG_ON_OPT_NEG_NOARG(unset, arg) do { \
+ BUG_ON_OPT_NEG(unset); \
+	if (!(arg)) \
+ BUG("option callback expects an argument"); \
+} while (0)
+
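
A small sketch of a conventional callback guarding itself with the new macro; the option it serves is hypothetical and assumed to be declared with PARSE_OPT_NONEG and a mandatory argument.

static int opt_set_label(const struct option *opt, const char *arg, int unset)
{
	/* this callback never expects --no-label or a missing argument */
	BUG_ON_OPT_NEG_NOARG(unset, arg);
	*(const char **)opt->value = arg;
	return 0;
}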
/*----- incremental advanced APIs -----*/
-enum {
- PARSE_OPT_COMPLETE = -2,
- PARSE_OPT_HELP = -1,
- PARSE_OPT_DONE,
+enum parse_opt_result {
+ PARSE_OPT_COMPLETE = -3,
+ PARSE_OPT_HELP = -2,
+ PARSE_OPT_ERROR = -1, /* must be the same as error() */
+ PARSE_OPT_DONE = 0, /* fixed so that "return 0" works */
PARSE_OPT_NON_OPTION,
- PARSE_OPT_ERROR,
PARSE_OPT_UNKNOWN
};
const char *prefix;
};
-extern void parse_options_start(struct parse_opt_ctx_t *ctx,
- int argc, const char **argv, const char *prefix,
- const struct option *options, int flags);
+void parse_options_start(struct parse_opt_ctx_t *ctx,
+ int argc, const char **argv, const char *prefix,
+ const struct option *options, int flags);
-extern int parse_options_step(struct parse_opt_ctx_t *ctx,
- const struct option *options,
- const char * const usagestr[]);
+int parse_options_step(struct parse_opt_ctx_t *ctx,
+ const struct option *options,
+ const char * const usagestr[]);
-extern int parse_options_end(struct parse_opt_ctx_t *ctx);
+int parse_options_end(struct parse_opt_ctx_t *ctx);
-extern struct option *parse_options_concat(struct option *a, struct option *b);
+struct option *parse_options_concat(struct option *a, struct option *b);
/*----- some often used options -----*/
-extern int parse_opt_abbrev_cb(const struct option *, const char *, int);
-extern int parse_opt_expiry_date_cb(const struct option *, const char *, int);
-extern int parse_opt_color_flag_cb(const struct option *, const char *, int);
-extern int parse_opt_verbosity_cb(const struct option *, const char *, int);
-extern int parse_opt_object_name(const struct option *, const char *, int);
-extern int parse_opt_commits(const struct option *, const char *, int);
-extern int parse_opt_tertiary(const struct option *, const char *, int);
-extern int parse_opt_string_list(const struct option *, const char *, int);
-extern int parse_opt_noop_cb(const struct option *, const char *, int);
-extern int parse_opt_unknown_cb(const struct option *, const char *, int);
-extern int parse_opt_passthru(const struct option *, const char *, int);
-extern int parse_opt_passthru_argv(const struct option *, const char *, int);
+int parse_opt_abbrev_cb(const struct option *, const char *, int);
+int parse_opt_expiry_date_cb(const struct option *, const char *, int);
+int parse_opt_color_flag_cb(const struct option *, const char *, int);
+int parse_opt_verbosity_cb(const struct option *, const char *, int);
+int parse_opt_object_name(const struct option *, const char *, int);
+int parse_opt_commits(const struct option *, const char *, int);
+int parse_opt_tertiary(const struct option *, const char *, int);
+int parse_opt_string_list(const struct option *, const char *, int);
+int parse_opt_noop_cb(const struct option *, const char *, int);
+int parse_opt_unknown_cb(struct parse_opt_ctx_t *ctx, const struct option *, const char *, int);
+int parse_opt_passthru(const struct option *, const char *, int);
+int parse_opt_passthru_argv(const struct option *, const char *, int);
#define OPT__VERBOSE(var, h) OPT_COUNTUP('v', "verbose", (var), (h))
#define OPT__QUIET(var, h) OPT_COUNTUP('q', "quiet", (var), (h))
{ 1, 1, 0, "logs" },
{ 1, 1, 1, "logs/HEAD" },
{ 0, 1, 1, "logs/refs/bisect" },
+ { 0, 1, 1, "logs/refs/rewritten" },
+ { 0, 1, 1, "logs/refs/worktree" },
{ 0, 1, 0, "lost-found" },
{ 0, 1, 0, "objects" },
{ 0, 1, 0, "refs" },
{ 0, 1, 1, "refs/bisect" },
+ { 0, 1, 1, "refs/rewritten" },
{ 0, 1, 1, "refs/worktree" },
{ 0, 1, 0, "remotes" },
{ 0, 1, 0, "worktrees" },
return -1;
}
- if ($description !~ /^[0-9a-fA-F]{40} \S+ (\d+)$/) {
+ if ($description !~ /^[0-9a-fA-F]{40}(?:[0-9a-fA-F]{24})? \S+ (\d+)$/) {
carp "Unexpected result returned from git cat-file";
return -1;
}
void packet_flush(int fd)
{
packet_trace("0000", 4, 1);
- write_or_die(fd, "0000", 4);
+ if (write_in_full(fd, "0000", 4) < 0)
+ die_errno(_("unable to write flush packet"));
}
void packet_delim(int fd)
{
packet_trace("0001", 4, 1);
- write_or_die(fd, "0001", 4);
+ if (write_in_full(fd, "0001", 4) < 0)
+ die_errno(_("unable to write delim packet"));
}
int packet_flush_gently(int fd)
strbuf_add(buf, "0001", 4);
}
-static void set_packet_header(char *buf, const int size)
+void set_packet_header(char *buf, const int size)
{
static char hexchar[] = "0123456789abcdef";
void packet_write_fmt(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
void packet_buf_flush(struct strbuf *buf);
void packet_buf_delim(struct strbuf *buf);
+void set_packet_header(char *buf, int size);
void packet_write(int fd_out, const char *buf, size_t size);
void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len);
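
A minimal sketch of the framing that the now-public set_packet_header() produces: the 4-byte hex header counts itself plus the payload, so a 6-byte payload gets the header "000a".

char frame[4 + 6];

set_packet_header(frame, sizeof(frame)); /* writes "000a" into frame[0..3] */
memcpy(frame + 4, "hello\n", 6);         /* frame now holds one pkt-line */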
{ "fuller", CMIT_FMT_FULLER, 0, 8 },
{ "full", CMIT_FMT_FULL, 0, 8 },
{ "oneline", CMIT_FMT_ONELINE, 1, 0 }
+ /*
+ * Please update $__git_log_pretty_formats in
+ * git-completion.bash when you add new formats.
+ */
};
commit_formats_len = ARRAY_SIZE(builtin_formats);
builtin_formats_len = commit_formats_len;
return !(isalnum(ch) || ch == '!' || ch == '*' || ch == '+' || ch == '-' || ch == '/');
}
-static int needs_rfc2047_encoding(const char *line, int len,
- enum rfc2047_type type)
+static int needs_rfc2047_encoding(const char *line, int len)
{
int i;
}
strbuf_addstr(sb, "From: ");
- if (needs_rfc2047_encoding(namebuf, namelen, RFC2047_ADDRESS)) {
+ if (needs_rfc2047_encoding(namebuf, namelen)) {
add_rfc2047(sb, namebuf, namelen,
encoding, RFC2047_ADDRESS);
max_length = 76; /* per rfc2047 */
return rest - placeholder;
}
-static size_t parse_padding_placeholder(struct strbuf *sb,
- const char *placeholder,
+static size_t parse_padding_placeholder(const char *placeholder,
struct format_commit_context *c)
{
const char *ch = placeholder;
return 0;
}
-static int match_placeholder_arg(const char *to_parse, const char *candidate,
- const char **end)
+static int match_placeholder_arg_value(const char *to_parse, const char *candidate,
+ const char **end, const char **valuestart,
+ size_t *valuelen)
{
const char *p;
if (!(skip_prefix(to_parse, candidate, &p)))
return 0;
+ if (valuestart) {
+ if (*p == '=') {
+ *valuestart = p + 1;
+ *valuelen = strcspn(*valuestart, ",)");
+ p = *valuestart + *valuelen;
+ } else {
+ if (*p != ',' && *p != ')')
+ return 0;
+ *valuestart = NULL;
+ *valuelen = 0;
+ }
+ }
if (*p == ',') {
*end = p + 1;
return 1;
return 0;
}
+static int match_placeholder_bool_arg(const char *to_parse, const char *candidate,
+ const char **end, int *val)
+{
+ const char *argval;
+ char *strval;
+ size_t arglen;
+ int v;
+
+ if (!match_placeholder_arg_value(to_parse, candidate, end, &argval, &arglen))
+ return 0;
+
+ if (!argval) {
+ *val = 1;
+ return 1;
+ }
+
+ strval = xstrndup(argval, arglen);
+ v = git_parse_maybe_bool(strval);
+ free(strval);
+
+ if (v == -1)
+ return 0;
+
+ *val = v;
+
+ return 1;
+}
+
+static int format_trailer_match_cb(const struct strbuf *key, void *ud)
+{
+ const struct string_list *list = ud;
+ const struct string_list_item *item;
+
+ for_each_string_list_item (item, list) {
+ if (key->len == (uintptr_t)item->util &&
+ !strncasecmp(item->string, key->buf, key->len))
+ return 1;
+ }
+ return 0;
+}
+
static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
const char *placeholder,
void *context)
const char *msg = c->message;
struct commit_list *p;
const char *arg;
- int ch;
+ size_t res;
char **slot;
/* these are independent of the commit */
+ res = strbuf_expand_literal_cb(sb, placeholder, NULL);
+ if (res)
+ return res;
+
switch (placeholder[0]) {
case 'C':
if (starts_with(placeholder + 1, "(auto)")) {
*/
return ret;
}
- case 'n': /* newline */
- strbuf_addch(sb, '\n');
- return 1;
- case 'x':
- /* %x00 == NUL, %x0a == LF, etc. */
- ch = hex2chr(placeholder + 1);
- if (ch < 0)
- return 0;
- strbuf_addch(sb, ch);
- return 3;
case 'w':
if (placeholder[1] == '(') {
unsigned long width = 0, indent1 = 0, indent2 = 0;
case '<':
case '>':
- return parse_padding_placeholder(sb, placeholder, c);
+ return parse_padding_placeholder(placeholder, c);
}
/* these depend on the commit */
if (skip_prefix(placeholder, "(trailers", &arg)) {
struct process_trailer_options opts = PROCESS_TRAILER_OPTIONS_INIT;
+ struct string_list filter_list = STRING_LIST_INIT_NODUP;
+ struct strbuf sepbuf = STRBUF_INIT;
+ size_t ret = 0;
opts.no_divider = 1;
if (*arg == ':') {
arg++;
for (;;) {
- if (match_placeholder_arg(arg, "only", &arg))
+ const char *argval;
+ size_t arglen;
+
+ if (match_placeholder_arg_value(arg, "key", &arg, &argval, &arglen)) {
+ uintptr_t len = arglen;
+
+ if (!argval)
+ goto trailer_out;
+
+ if (len && argval[len - 1] == ':')
+ len--;
+ string_list_append(&filter_list, argval)->util = (char *)len;
+
+ opts.filter = format_trailer_match_cb;
+ opts.filter_data = &filter_list;
opts.only_trailers = 1;
- else if (match_placeholder_arg(arg, "unfold", &arg))
- opts.unfold = 1;
- else
+ } else if (match_placeholder_arg_value(arg, "separator", &arg, &argval, &arglen)) {
+ char *fmt;
+
+ strbuf_reset(&sepbuf);
+ fmt = xstrndup(argval, arglen);
+ strbuf_expand(&sepbuf, fmt, strbuf_expand_literal_cb, NULL);
+ free(fmt);
+ opts.separator = &sepbuf;
+ } else if (!match_placeholder_bool_arg(arg, "only", &arg, &opts.only_trailers) &&
+ !match_placeholder_bool_arg(arg, "unfold", &arg, &opts.unfold) &&
+ !match_placeholder_bool_arg(arg, "valueonly", &arg, &opts.value_only))
break;
}
}
if (*arg == ')') {
format_trailers_from_commit(sb, msg + c->subject_off, &opts);
- return arg - placeholder + 1;
+ ret = arg - placeholder + 1;
}
+ trailer_out:
+ string_list_clear(&filter_list, 0);
+ strbuf_release(&sepbuf);
+ return ret;
}
return 0; /* unknown placeholder */
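
A hedged sketch of what these parsing changes enable at the pretty-format level. format_commit_message() is the existing helper for expanding a format against a commit; the commit is assumed to have been looked up elsewhere, and the placeholder spelling follows the parsing code above (key filtering, valueonly, and a %x-expanded separator).

static void show_signoffs(struct commit *commit, struct strbuf *out)
{
	struct pretty_print_context ctx = {0};

	/* keys can be filtered, values emitted alone, separators expanded */
	format_commit_message(commit,
			      "%(trailers:key=Signed-off-by,valueonly,separator=%x2C )",
			      out, &ctx);
}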
if (pp->print_email_subject) {
if (pp->rev)
fmt_output_email_subject(sb, pp->rev);
- if (needs_rfc2047_encoding(title.buf, title.len, RFC2047_SUBJECT))
+ if (needs_rfc2047_encoding(title.buf, title.len))
add_rfc2047(sb, title.buf, title.len,
encoding, RFC2047_SUBJECT);
else
* published by the Free Software Foundation.
*/
-#include "git-compat-util.h"
+#include "cache.h"
#include "gettext.h"
#include "progress.h"
#include "strbuf.h"
#include "trace.h"
+#include "utf8.h"
#define TP_IDX_MAX 8
uint64_t total;
unsigned last_percent;
unsigned delay;
+ unsigned sparse;
struct throughput *throughput;
uint64_t start_ns;
+ struct strbuf counters_sb;
+ int title_len;
+ int split;
};
static volatile sig_atomic_t progress_update;
return tpgrp < 0 || tpgrp == getpgid(0);
}
-static int display(struct progress *progress, uint64_t n, const char *done)
+static void display(struct progress *progress, uint64_t n, const char *done)
{
- const char *eol, *tp;
+ const char *tp;
+ struct strbuf *counters_sb = &progress->counters_sb;
+ int show_update = 0;
+ int last_count_len = counters_sb->len;
if (progress->delay && (!progress_update || --progress->delay))
- return 0;
+ return;
progress->last_value = n;
tp = (progress->throughput) ? progress->throughput->display.buf : "";
- eol = done ? done : " \r";
if (progress->total) {
unsigned percent = n * 100 / progress->total;
if (percent != progress->last_percent || progress_update) {
progress->last_percent = percent;
- if (is_foreground_fd(fileno(stderr)) || done) {
- fprintf(stderr, "%s: %3u%% (%"PRIuMAX"/%"PRIuMAX")%s%s",
- progress->title, percent,
- (uintmax_t)n, (uintmax_t)progress->total,
- tp, eol);
- fflush(stderr);
- }
- progress_update = 0;
- return 1;
+
+ strbuf_reset(counters_sb);
+ strbuf_addf(counters_sb,
+ "%3u%% (%"PRIuMAX"/%"PRIuMAX")%s", percent,
+ (uintmax_t)n, (uintmax_t)progress->total,
+ tp);
+ show_update = 1;
}
} else if (progress_update) {
+ strbuf_reset(counters_sb);
+ strbuf_addf(counters_sb, "%"PRIuMAX"%s", (uintmax_t)n, tp);
+ show_update = 1;
+ }
+
+ if (show_update) {
if (is_foreground_fd(fileno(stderr)) || done) {
- fprintf(stderr, "%s: %"PRIuMAX"%s%s",
- progress->title, (uintmax_t)n, tp, eol);
+ const char *eol = done ? done : "\r";
+ size_t clear_len = counters_sb->len < last_count_len ?
+ last_count_len - counters_sb->len + 1 :
+ 0;
+ size_t progress_line_len = progress->title_len +
+ counters_sb->len + 2;
+ int cols = term_columns();
+
+ if (progress->split) {
+ fprintf(stderr, " %s%*s", counters_sb->buf,
+ (int) clear_len, eol);
+ } else if (!done && cols < progress_line_len) {
+ clear_len = progress->title_len + 1 < cols ?
+ cols - progress->title_len : 0;
+ fprintf(stderr, "%s:%*s\n %s%s",
+ progress->title, (int) clear_len, "",
+ counters_sb->buf, eol);
+ progress->split = 1;
+ } else {
+ fprintf(stderr, "%s: %s%*s", progress->title,
+ counters_sb->buf, (int) clear_len, eol);
+ }
fflush(stderr);
}
progress_update = 0;
- return 1;
}
-
- return 0;
}
static void throughput_string(struct strbuf *buf, uint64_t total,
display(progress, progress->last_value, NULL);
}
-int display_progress(struct progress *progress, uint64_t n)
+void display_progress(struct progress *progress, uint64_t n)
{
- return progress ? display(progress, n, NULL) : 0;
+ if (progress)
+ display(progress, n, NULL);
}
static struct progress *start_progress_delay(const char *title, uint64_t total,
- unsigned delay)
+ unsigned delay, unsigned sparse)
{
struct progress *progress = malloc(sizeof(*progress));
if (!progress) {
progress->last_value = -1;
progress->last_percent = -1;
progress->delay = delay;
+ progress->sparse = sparse;
progress->throughput = NULL;
progress->start_ns = getnanotime();
+ strbuf_init(&progress->counters_sb, 0);
+ progress->title_len = utf8_strwidth(title);
+ progress->split = 0;
set_progress_signal();
return progress;
}
struct progress *start_delayed_progress(const char *title, uint64_t total)
{
- return start_progress_delay(title, total, 2);
+ return start_progress_delay(title, total, 2, 0);
}
struct progress *start_progress(const char *title, uint64_t total)
{
- return start_progress_delay(title, total, 0);
+ return start_progress_delay(title, total, 0, 0);
+}
+
+/*
+ * Here "sparse" means that the caller might use some sampling criteria to
+ * decide when to call display_progress() rather than calling it for every
+ * integer value in [0 .. total). In particular, the caller might not call
+ * display_progress() for the last value in the range.
+ *
+ * When "sparse" is set, stop_progress() will automatically force the done
+ * message to show 100%.
+ */
+struct progress *start_sparse_progress(const char *title, uint64_t total)
+{
+ return start_progress_delay(title, total, 0, 1);
+}
+
+struct progress *start_delayed_sparse_progress(const char *title,
+ uint64_t total)
+{
+ return start_progress_delay(title, total, 2, 1);
+}
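
A small usage sketch for the sparse variants, with a made-up counter and sampling interval; stop_progress() is what guarantees the final 100% line when the caller's sampling skips the last value.

static void scan_objects(uint64_t nr)
{
	struct progress *p = start_sparse_progress(_("Scanning objects"), nr);
	uint64_t i;

	for (i = 0; i < nr; i++) {
		/* ... expensive per-object work ... */
		if (!(i % 4096))
			display_progress(p, i); /* sampled; may never reach nr */
	}
	stop_progress(&p); /* sparse: forces the final 100% "done" line */
}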
+
+static void finish_if_sparse(struct progress *progress)
+{
+ if (progress &&
+ progress->sparse &&
+ progress->last_value != progress->total)
+ display_progress(progress, progress->total);
}
void stop_progress(struct progress **p_progress)
{
+ finish_if_sparse(*p_progress);
+
stop_progress_msg(p_progress, _("done"));
}
free(buf);
}
clear_progress_signal();
+ strbuf_release(&progress->counters_sb);
if (progress->throughput)
strbuf_release(&progress->throughput->display);
free(progress->throughput);
struct progress;
void display_throughput(struct progress *progress, uint64_t total);
-int display_progress(struct progress *progress, uint64_t n);
+void display_progress(struct progress *progress, uint64_t n);
struct progress *start_progress(const char *title, uint64_t total);
+struct progress *start_sparse_progress(const char *title, uint64_t total);
struct progress *start_delayed_progress(const char *title, uint64_t total);
+struct progress *start_delayed_sparse_progress(const char *title,
+ uint64_t total);
void stop_progress(struct progress **progress);
void stop_progress_msg(struct progress **progress, const char *msg);
enum protocol_version get_protocol_version_config(void)
{
const char *value;
+ enum protocol_version retval = protocol_v0;
+ const char *git_test_k = "GIT_TEST_PROTOCOL_VERSION";
+ const char *git_test_v = getenv(git_test_k);
+
if (!git_config_get_string_const("protocol.version", &value)) {
enum protocol_version version = parse_protocol_version(value);
die("unknown value for config 'protocol.version': %s",
value);
- return version;
+ retval = version;
+ }
+
+ if (git_test_v && *git_test_v) {
+ enum protocol_version env = parse_protocol_version(git_test_v);
+
+ if (env == protocol_unknown_version)
+ die("unknown value for %s: %s", git_test_k, git_test_v);
+ if (retval < env)
+ retval = env;
}
- return protocol_v0;
+ return retval;
}
enum protocol_version determine_protocol_version_server(void)
#include "packfile.h"
#include "worktree.h"
#include "object-store.h"
+#include "pack-bitmap.h"
struct connectivity_progress {
struct progress *progress;
FOR_EACH_OBJECT_LOCAL_ONLY);
}
+static void *lookup_object_by_type(struct repository *r,
+ const struct object_id *oid,
+ enum object_type type)
+{
+ switch (type) {
+ case OBJ_COMMIT:
+ return lookup_commit(r, oid);
+ case OBJ_TREE:
+ return lookup_tree(r, oid);
+ case OBJ_TAG:
+ return lookup_tag(r, oid);
+ case OBJ_BLOB:
+ return lookup_blob(r, oid);
+ default:
+ die("BUG: unknown object type %d", type);
+ }
+}
+
+static int mark_object_seen(const struct object_id *oid,
+ enum object_type type,
+ int exclude,
+ uint32_t name_hash,
+ struct packed_git *found_pack,
+ off_t found_offset)
+{
+ struct object *obj = lookup_object_by_type(the_repository, oid, type);
+ if (!obj)
+ die("unable to create object '%s'", oid_to_hex(oid));
+
+ obj->flags |= SEEN;
+ return 0;
+}
+
void mark_reachable_objects(struct rev_info *revs, int mark_reflog,
timestamp_t mark_recent, struct progress *progress)
{
struct connectivity_progress cp;
+ struct bitmap_index *bitmap_git;
/*
* Set up revision parsing, and mark us as being interested
cp.progress = progress;
cp.count = 0;
+ bitmap_git = prepare_bitmap_walk(revs);
+ if (bitmap_git) {
+ traverse_bitmap_commit_list(bitmap_git, mark_object_seen);
+ free_bitmap_index(bitmap_git);
+ return;
+ }
+
/*
* Set up the revision walk - this will move all commits
* from the pending list to the commit walking list.
#include "commit.h"
#include "blob.h"
#include "resolve-undo.h"
+#include "run-command.h"
#include "strbuf.h"
#include "varint.h"
#include "split-index.h"
* CE_REMOVE is set in ce_flags. This is much more effective than
* calling remove_index_entry_at() for each entry to be removed.
*/
-void remove_marked_cache_entries(struct index_state *istate)
+void remove_marked_cache_entries(struct index_state *istate, int invalidate)
{
struct cache_entry **ce_array = istate->cache;
unsigned int i, j;
for (i = j = 0; i < istate->cache_nr; i++) {
if (ce_array[i]->ce_flags & CE_REMOVE) {
+ if (invalidate) {
+ cache_tree_invalidate_path(istate,
+ ce_array[i]->name);
+ untracked_cache_remove_from_index(istate,
+ ce_array[i]->name);
+ }
remove_name_hash(istate, ce_array[i]);
save_or_free_index_entry(istate, ce_array[i]);
}
uint32_t uid;
uint32_t gid;
uint32_t size;
- unsigned char sha1[20];
- uint16_t flags;
- char name[FLEX_ARRAY]; /* more */
-};
-
-/*
- * This struct is used when CE_EXTENDED bit is 1
- * The struct must match ondisk_cache_entry exactly from
- * ctime till flags
- */
-struct ondisk_cache_entry_extended {
- struct cache_time ctime;
- struct cache_time mtime;
- uint32_t dev;
- uint32_t ino;
- uint32_t mode;
- uint32_t uid;
- uint32_t gid;
- uint32_t size;
- unsigned char sha1[20];
- uint16_t flags;
- uint16_t flags2;
- char name[FLEX_ARRAY]; /* more */
+ /*
+ * unsigned char hash[hashsz];
+ * uint16_t flags;
+ * if (flags & CE_EXTENDED)
+ * uint16_t flags2;
+ */
+ unsigned char data[GIT_MAX_RAWSZ + 2 * sizeof(uint16_t)];
+ char name[FLEX_ARRAY];
};
/* These are only used for v3 or lower */
#define align_padding_size(size, len) ((size + (len) + 8) & ~7) - (size + len)
-#define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,name) + (len) + 8) & ~7)
+#define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,data) + (len) + 8) & ~7)
#define ondisk_cache_entry_size(len) align_flex_name(ondisk_cache_entry,len)
-#define ondisk_cache_entry_extended_size(len) align_flex_name(ondisk_cache_entry_extended,len)
-#define ondisk_ce_size(ce) (((ce)->ce_flags & CE_EXTENDED) ? \
- ondisk_cache_entry_extended_size(ce_namelen(ce)) : \
- ondisk_cache_entry_size(ce_namelen(ce)))
+#define ondisk_data_size(flags, len) (the_hash_algo->rawsz + \
+ ((flags & CE_EXTENDED) ? 2 : 1) * sizeof(uint16_t) + len)
+#define ondisk_data_size_max(len) (ondisk_data_size(CE_EXTENDED, len))
+#define ondisk_ce_size(ce) (ondisk_cache_entry_size(ondisk_data_size((ce)->ce_flags, ce_namelen(ce))))
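
A worked example of the new size arithmetic, assuming SHA-1 (rawsz = 20), a non-extended entry, and no compiler padding before the data member of ondisk_cache_entry (its fixed fields add up to 40 bytes).

/*
 * Non-extended entry with an 8-byte name, SHA-1:
 *
 *   ondisk_data_size(flags, 8)  = 20 + 1 * sizeof(uint16_t) + 8 = 30
 *   offsetof(ondisk_cache_entry, data)                          = 40
 *   ondisk_ce_size(ce)          = (40 + 30 + 8) & ~7            = 72
 *
 * i.e. 70 bytes of entry data followed by NUL padding up to the next
 * multiple of 8 -- the same layout as before, but now derived from the
 * run-time hash size instead of a second struct definition.
 */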
/* Allow fsck to force verification of the index checksum. */
int verify_index_checksum;
struct cache_entry *ce;
size_t len;
const char *name;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ const uint16_t *flagsp = (const uint16_t *)(ondisk->data + hashsz);
unsigned int flags;
size_t copy_len = 0;
/*
int expand_name_field = version == 4;
/* On-disk flags are just 16 bits */
- flags = get_be16(&ondisk->flags);
+ flags = get_be16(flagsp);
len = flags & CE_NAMEMASK;
if (flags & CE_EXTENDED) {
- struct ondisk_cache_entry_extended *ondisk2;
int extended_flags;
- ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
- extended_flags = get_be16(&ondisk2->flags2) << 16;
+ extended_flags = get_be16(flagsp + 1) << 16;
/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
if (extended_flags & ~CE_EXTENDED_FLAGS)
die(_("unknown index entry format 0x%08x"), extended_flags);
flags |= extended_flags;
- name = ondisk2->name;
+ name = (const char *)(flagsp + 2);
}
else
- name = ondisk->name;
+ name = (const char *)(flagsp + 1);
if (expand_name_field) {
const unsigned char *cp = (const unsigned char *)name;
ce->ce_flags = flags & ~CE_NAMEMASK;
ce->ce_namelen = len;
ce->index = 0;
- hashcpy(ce->oid.hash, ondisk->sha1);
+ hashcpy(ce->oid.hash, ondisk->data);
+ memcpy(ce->name, name, len);
+ ce->name[len] = '\0';
if (expand_name_field) {
if (copy_len)
load_index_extensions(&p);
}
munmap((void *)mmap, mmap_size);
+
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_data_intmax("index", the_repository, "read/version",
+ istate->version);
+ trace2_data_intmax("index", the_repository, "read/cache_nr",
+ istate->cache_nr);
+
return istate->cache_nr;
unmap:
if (istate->initialized)
return istate->cache_nr;
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_region_enter_printf("index", "do_read_index", the_repository,
+ "%s", path);
trace_performance_enter();
ret = do_read_index(istate, path, 0);
trace_performance_leave("read cache %s", path);
+ trace2_region_leave_printf("index", "do_read_index", the_repository,
+ "%s", path);
split_index = istate->split_index;
if (!split_index || is_null_oid(&split_index->base_oid)) {
base_oid_hex = oid_to_hex(&split_index->base_oid);
base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
+ trace2_region_enter_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path);
ret = do_read_index(split_index->base, base_path, 1);
+ trace2_region_leave_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path);
if (!oideq(&split_index->base_oid, &split_index->base->oid))
die(_("broken index, expect %s in %s, got %s"),
base_oid_hex, base_path,
struct cache_entry *ce)
{
short flags;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ uint16_t *flagsp = (uint16_t *)(ondisk->data + hashsz);
ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);
ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);
ondisk->uid = htonl(ce->ce_stat_data.sd_uid);
ondisk->gid = htonl(ce->ce_stat_data.sd_gid);
ondisk->size = htonl(ce->ce_stat_data.sd_size);
- hashcpy(ondisk->sha1, ce->oid.hash);
+ hashcpy(ondisk->data, ce->oid.hash);
flags = ce->ce_flags & ~CE_NAMEMASK;
flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce));
- ondisk->flags = htons(flags);
+ flagsp[0] = htons(flags);
if (ce->ce_flags & CE_EXTENDED) {
- struct ondisk_cache_entry_extended *ondisk2;
- ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
- ondisk2->flags2 = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
+ flagsp[1] = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
}
}
stripped_name = 1;
}
- if (ce->ce_flags & CE_EXTENDED)
- size = offsetof(struct ondisk_cache_entry_extended, name);
- else
- size = offsetof(struct ondisk_cache_entry, name);
+ size = offsetof(struct ondisk_cache_entry,data) + ondisk_data_size(ce->ce_flags, 0);
if (!previous_name) {
int len = ce_namelen(ce);
struct cache_entry **cache = istate->cache;
int entries = istate->cache_nr;
struct stat st;
- struct ondisk_cache_entry_extended ondisk;
+ struct ondisk_cache_entry ondisk;
struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
int drop_cache_tree = istate->drop_cache_tree;
off_t offset;
return -1;
}
- if (!strip_extensions && istate->split_index) {
+ if (!strip_extensions && istate->split_index &&
+ !is_null_oid(&istate->split_index->base_oid)) {
struct strbuf sb = STRBUF_INIT;
err = write_link_extension(&sb, istate) < 0 ||
istate->timestamp.sec = (unsigned int)st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
+
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_data_intmax("index", the_repository, "write/version",
+ istate->version);
+ trace2_data_intmax("index", the_repository, "write/cache_nr",
+ istate->cache_nr);
+
return 0;
}
static int do_write_locked_index(struct index_state *istate, struct lock_file *lock,
unsigned flags)
{
- int ret = do_write_index(istate, lock->tempfile, 0);
+ int ret;
+
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_region_enter_printf("index", "do_write_index", the_repository,
+ "%s", lock->tempfile->filename.buf);
+ ret = do_write_index(istate, lock->tempfile, 0);
+ trace2_region_leave_printf("index", "do_write_index", the_repository,
+ "%s", lock->tempfile->filename.buf);
+
if (ret)
return ret;
if (flags & COMMIT_LOCK)
- return commit_locked_index(lock);
- return close_lock_file_gently(lock);
+ ret = commit_locked_index(lock);
+ else
+ ret = close_lock_file_gently(lock);
+
+ run_hook_le(NULL, "post-index-change",
+ istate->updated_workdir ? "1" : "0",
+ istate->updated_skipworktree ? "1" : "0", NULL);
+ istate->updated_workdir = 0;
+ istate->updated_skipworktree = 0;
+
+ return ret;
}
static int write_split_index(struct index_state *istate,
int ret;
move_cache_to_base_index(istate);
+
+ trace2_region_enter_printf("index", "shared/do_write_index",
+ the_repository, "%s", (*temp)->filename.buf);
ret = do_write_index(si->base, *temp, 1);
+	trace2_region_leave_printf("index", "shared/do_write_index",
+				   the_repository, "%s", (*temp)->filename.buf);
+
if (ret)
return ret;
ret = adjust_shared_perm(get_tempfile_path(*temp));
ret = write_split_index(istate, lock, flags);
/* Freshen the shared index only if the split-index was written */
- if (!ret && !new_shared_index) {
+ if (!ret && !new_shared_index && !is_null_oid(&si->base_oid)) {
const char *shared_index = git_path("sharedindex.%s",
oid_to_hex(&si->base_oid));
freshen_shared_index(shared_index, 1);
#include "cache.h"
#include "commit.h"
-#include "rebase-interactive.h"
#include "sequencer.h"
+#include "rebase-interactive.h"
#include "strbuf.h"
+#include "commit-slab.h"
+#include "config.h"
+
+enum missing_commit_check_level {
+ MISSING_COMMIT_CHECK_IGNORE = 0,
+ MISSING_COMMIT_CHECK_WARN,
+ MISSING_COMMIT_CHECK_ERROR
+};
+
+static enum missing_commit_check_level get_missing_commit_check_level(void)
+{
+ const char *value;
+
+ if (git_config_get_value("rebase.missingcommitscheck", &value) ||
+ !strcasecmp("ignore", value))
+ return MISSING_COMMIT_CHECK_IGNORE;
+ if (!strcasecmp("warn", value))
+ return MISSING_COMMIT_CHECK_WARN;
+ if (!strcasecmp("error", value))
+ return MISSING_COMMIT_CHECK_ERROR;
+ warning(_("unrecognized setting %s for option "
+ "rebase.missingCommitsCheck. Ignoring."), value);
+ return MISSING_COMMIT_CHECK_IGNORE;
+}
-void append_todo_help(unsigned edit_todo, unsigned keep_empty,
+void append_todo_help(unsigned keep_empty, int command_count,
+ const char *shortrevisions, const char *shortonto,
struct strbuf *buf)
{
const char *msg = _("\nCommands:\n"
". specified). Use -c <commit> to reword the commit message.\n"
"\n"
"These lines can be re-ordered; they are executed from top to bottom.\n");
+ unsigned edit_todo = !(shortrevisions && shortonto);
+
+ if (!edit_todo) {
+ strbuf_addch(buf, '\n');
+ strbuf_commented_addf(buf, Q_("Rebase %s onto %s (%d command)",
+ "Rebase %s onto %s (%d commands)",
+ command_count),
+ shortrevisions, shortonto, command_count);
+ }
strbuf_add_commented_lines(buf, msg, strlen(msg));
}
}
-int edit_todo_list(struct repository *r, unsigned flags)
+int edit_todo_list(struct repository *r, struct todo_list *todo_list,
+ struct todo_list *new_todo, const char *shortrevisions,
+ const char *shortonto, unsigned flags)
{
- struct strbuf buf = STRBUF_INIT;
const char *todo_file = rebase_path_todo();
+ unsigned initial = shortrevisions && shortonto;
- if (strbuf_read_file(&buf, todo_file, 0) < 0)
- return error_errno(_("could not read '%s'."), todo_file);
+ /* If the user is editing the todo list, we first try to parse
+ * it. If there is an error, we do not return, because the user
+ * might want to fix it in the first place. */
+ if (!initial)
+ todo_list_parse_insn_buffer(r, todo_list->buf.buf, todo_list);
- strbuf_stripspace(&buf, 1);
- if (write_message(buf.buf, buf.len, todo_file, 0)) {
- strbuf_release(&buf);
- return -1;
- }
+ if (todo_list_write_to_file(r, todo_list, todo_file, shortrevisions, shortonto,
+ -1, flags | TODO_LIST_SHORTEN_IDS | TODO_LIST_APPEND_TODO_HELP))
+ return error_errno(_("could not write '%s'"), todo_file);
+
+ if (initial && copy_file(rebase_path_todo_backup(), todo_file, 0666))
+ return error(_("could not copy '%s' to '%s'."), todo_file,
+ rebase_path_todo_backup());
- strbuf_release(&buf);
+ if (launch_sequence_editor(todo_file, &new_todo->buf, NULL))
+ return -2;
- transform_todos(r, flags | TODO_LIST_SHORTEN_IDS);
+ strbuf_stripspace(&new_todo->buf, 1);
+ if (initial && new_todo->buf.len == 0)
+ return -3;
- if (strbuf_read_file(&buf, todo_file, 0) < 0)
- return error_errno(_("could not read '%s'."), todo_file);
+ /* For the initial edit, the todo list gets parsed in
+ * complete_action(). */
+ if (!initial)
+ return todo_list_parse_insn_buffer(r, new_todo->buf.buf, new_todo);
- append_todo_help(1, 0, &buf);
- if (write_message(buf.buf, buf.len, todo_file, 0)) {
- strbuf_release(&buf);
- return -1;
+ return 0;
+}
+
+define_commit_slab(commit_seen, unsigned char);
+/*
+ * Check if the user dropped some commits by mistake; the behaviour is
+ * determined by rebase.missingCommitsCheck. Also check if there is an
+ * unrecognized command or a bad SHA-1 in a command.
+ */
+int todo_list_check(struct todo_list *old_todo, struct todo_list *new_todo)
+{
+ enum missing_commit_check_level check_level = get_missing_commit_check_level();
+ struct strbuf missing = STRBUF_INIT;
+ int res = 0, i;
+ struct commit_seen commit_seen;
+
+ init_commit_seen(&commit_seen);
+
+ if (check_level == MISSING_COMMIT_CHECK_IGNORE)
+ goto leave_check;
+
+ /* Mark the commits in git-rebase-todo as seen */
+ for (i = 0; i < new_todo->nr; i++) {
+ struct commit *commit = new_todo->items[i].commit;
+ if (commit)
+ *commit_seen_at(&commit_seen, commit) = 1;
}
- strbuf_release(&buf);
+ /* Find commits in git-rebase-todo.backup yet unseen */
+ for (i = old_todo->nr - 1; i >= 0; i--) {
+ struct todo_item *item = old_todo->items + i;
+ struct commit *commit = item->commit;
+ if (commit && !*commit_seen_at(&commit_seen, commit)) {
+ strbuf_addf(&missing, " - %s %.*s\n",
+ find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV),
+ item->arg_len,
+ todo_item_get_arg(old_todo, item));
+ *commit_seen_at(&commit_seen, commit) = 1;
+ }
+ }
- if (launch_sequence_editor(todo_file, NULL, NULL))
- return -1;
+ /* Warn about missing commits */
+ if (!missing.len)
+ goto leave_check;
- transform_todos(r, flags & ~(TODO_LIST_SHORTEN_IDS));
+ if (check_level == MISSING_COMMIT_CHECK_ERROR)
+ res = 1;
- return 0;
+ fprintf(stderr,
+ _("Warning: some commits may have been dropped accidentally.\n"
+ "Dropped commits (newer to older):\n"));
+
+ /* Make the list user-friendly and display */
+ fputs(missing.buf, stderr);
+ strbuf_release(&missing);
+
+ fprintf(stderr, _("To avoid this message, use \"drop\" to "
+ "explicitly remove a commit.\n\n"
+ "Use 'git config rebase.missingCommitsCheck' to change "
+ "the level of warnings.\n"
+ "The possible behaviours are: ignore, warn, error.\n\n"));
+
+leave_check:
+ clear_commit_seen(&commit_seen);
+ return res;
}
struct strbuf;
struct repository;
+struct todo_list;
-void append_todo_help(unsigned edit_todo, unsigned keep_empty,
+void append_todo_help(unsigned keep_empty, int command_count,
+ const char *shortrevisions, const char *shortonto,
struct strbuf *buf);
-int edit_todo_list(struct repository *r, unsigned flags);
+int edit_todo_list(struct repository *r, struct todo_list *todo_list,
+ struct todo_list *new_todo, const char *shortrevisions,
+ const char *shortonto, unsigned flags);
+int todo_list_check(struct todo_list *old_todo, struct todo_list *new_todo);
#endif
{ "if", SOURCE_NONE, FIELD_STR, if_atom_parser },
{ "then", SOURCE_NONE },
{ "else", SOURCE_NONE },
+ /*
+ * Please update $__git_ref_fieldlist in git-completion.bash
+ * when you add new atoms
+ */
};
#define REF_FORMATTING_STATE_INIT { 0, NULL }
}
/* See grab_values */
-static void grab_tag_values(struct atom_value *val, int deref, struct object *obj, void *buf, unsigned long sz)
+static void grab_tag_values(struct atom_value *val, int deref, struct object *obj)
{
int i;
struct tag *tag = (struct tag *) obj;
}
/* See grab_values */
-static void grab_commit_values(struct atom_value *val, int deref, struct object *obj, void *buf, unsigned long sz)
+static void grab_commit_values(struct atom_value *val, int deref, struct object *obj)
{
int i;
struct commit *commit = (struct commit *) obj;
}
}
-static const char *find_wholine(const char *who, int wholen, const char *buf, unsigned long sz)
+static const char *find_wholine(const char *who, int wholen, const char *buf)
{
const char *eol;
while (*buf) {
}
/* See grab_values */
-static void grab_person(const char *who, struct atom_value *val, int deref, struct object *obj, void *buf, unsigned long sz)
+static void grab_person(const char *who, struct atom_value *val, int deref, void *buf)
{
int i;
int wholen = strlen(who);
!starts_with(name + wholen, "date"))
continue;
if (!wholine)
- wholine = find_wholine(who, wholen, buf, sz);
+ wholine = find_wholine(who, wholen, buf);
if (!wholine)
return; /* no point looking for it */
if (name[wholen] == 0)
if (strcmp(who, "tagger") && strcmp(who, "committer"))
return; /* "author" for commit object is not wanted */
if (!wholine)
- wholine = find_wholine(who, wholen, buf, sz);
+ wholine = find_wholine(who, wholen, buf);
if (!wholine)
return;
for (i = 0; i < used_atom_cnt; i++) {
}
}
-static void find_subpos(const char *buf, unsigned long sz,
+static void find_subpos(const char *buf,
const char **sub, unsigned long *sublen,
const char **body, unsigned long *bodylen,
unsigned long *nonsiglen,
}
/* See grab_values */
-static void grab_sub_body_contents(struct atom_value *val, int deref, struct object *obj, void *buf, unsigned long sz)
+static void grab_sub_body_contents(struct atom_value *val, int deref, void *buf)
{
int i;
const char *subpos = NULL, *bodypos = NULL, *sigpos = NULL;
!starts_with(name, "contents"))
continue;
if (!subpos)
- find_subpos(buf, sz,
+ find_subpos(buf,
&subpos, &sublen,
&bodypos, &bodylen, &nonsiglen,
&sigpos, &siglen);
* pointed at by the ref itself; otherwise it is the object the
* ref (which is a tag) refers to.
*/
-static void grab_values(struct atom_value *val, int deref, struct object *obj, void *buf, unsigned long sz)
+static void grab_values(struct atom_value *val, int deref, struct object *obj, void *buf)
{
switch (obj->type) {
case OBJ_TAG:
- grab_tag_values(val, deref, obj, buf, sz);
- grab_sub_body_contents(val, deref, obj, buf, sz);
- grab_person("tagger", val, deref, obj, buf, sz);
+ grab_tag_values(val, deref, obj);
+ grab_sub_body_contents(val, deref, buf);
+ grab_person("tagger", val, deref, buf);
break;
case OBJ_COMMIT:
- grab_commit_values(val, deref, obj, buf, sz);
- grab_sub_body_contents(val, deref, obj, buf, sz);
- grab_person("author", val, deref, obj, buf, sz);
- grab_person("committer", val, deref, obj, buf, sz);
+ grab_commit_values(val, deref, obj);
+ grab_sub_body_contents(val, deref, buf);
+ grab_person("author", val, deref, buf);
+ grab_person("committer", val, deref, buf);
break;
case OBJ_TREE:
/* grab_tree_values(val, deref, obj, buf, sz); */
return strbuf_addf_ret(err, -1, _("parse_object_buffer failed on %s for %s"),
oid_to_hex(&oi->oid), ref->refname);
}
- grab_values(ref->value, deref, *obj, oi->content, oi->size);
+ grab_values(ref->value, deref, *obj, oi->content);
}
grab_common_values(ref->value, deref, oi);
int parse_opt_ref_sorting(const struct option *opt, const char *arg, int unset)
{
- if (!arg) /* should --no-sort void the list ? */
- return -1;
+ /*
+ * NEEDSWORK: We should probably clear the list in this case, but we've
+ * already munged the global used_atoms list, which would need to be
+ * undone.
+ */
+ BUG_ON_OPT_NEG(unset);
+
parse_ref_sorting(opt->value, arg);
return 0;
}
#define OPT_MERGED(f, h) _OPT_MERGED_NO_MERGED("merged", f, h)
#define OPT_NO_MERGED(f, h) _OPT_MERGED_NO_MERGED("no-merged", f, h)
+#define OPT_REF_SORT(var) \
+ OPT_CALLBACK_F(0, "sort", (var), \
+ N_("key"), N_("field name to sort on"), \
+ PARSE_OPT_NONEG, parse_opt_ref_sorting)
+
/*
* API for filtering a set of refs. Based on the type of refs the user
* has requested, we iterate through those refs and apply filters
}
}
+/*
+ * Manually add refs/bisect, refs/rewritten and refs/worktree, which, being
+ * per-worktree, might not appear in the directory listing for
+ * refs/ in the main repo.
+ */
+static void add_per_worktree_entries_to_dir(struct ref_dir *dir, const char *dirname)
+{
+ const char *prefixes[] = { "refs/bisect/", "refs/worktree/", "refs/rewritten/" };
+ int ip;
+
+ if (strcmp(dirname, "refs/"))
+ return;
+
+ for (ip = 0; ip < ARRAY_SIZE(prefixes); ip++) {
+ const char *prefix = prefixes[ip];
+ int prefix_len = strlen(prefix);
+ struct ref_entry *child_entry;
+ int pos;
+
+ pos = search_ref_dir(dir, prefix, prefix_len);
+ if (pos >= 0)
+ continue;
+ child_entry = create_dir_entry(dir->cache, prefix, prefix_len, 1);
+ add_entry_to_dir(dir, child_entry);
+ }
+}
+
/*
* Read the loose references from the namespace dirname into dir
* (without recursing). dirname must end with '/'. dir must be the
strbuf_release(&path);
closedir(d);
- /*
- * Manually add refs/bisect and refs/worktree, which, being
- * per-worktree, might not appear in the directory listing for
- * refs/ in the main repo.
- */
- if (!strcmp(dirname, "refs/")) {
- int pos = search_ref_dir(dir, "refs/bisect/", 12);
-
- if (pos < 0) {
- struct ref_entry *child_entry = create_dir_entry(
- dir->cache, "refs/bisect/", 12, 1);
- add_entry_to_dir(dir, child_entry);
- }
-
- pos = search_ref_dir(dir, "refs/worktree/", 11);
-
- if (pos < 0) {
- struct ref_entry *child_entry = create_dir_entry(
- dir->cache, "refs/worktree/", 11, 1);
- add_entry_to_dir(dir, child_entry);
- }
- }
+ add_per_worktree_entries_to_dir(dir, dirname);
}
static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs)
* Note that the new update will itself be subject to splitting when
* the iteration gets to it.
*/
-static int split_symref_update(struct files_ref_store *refs,
- struct ref_update *update,
+static int split_symref_update(struct ref_update *update,
const char *referent,
struct ref_transaction *transaction,
struct string_list *affected_refnames,
* of processing the split-off update, so we
* don't have to do it here.
*/
- ret = split_symref_update(refs, update,
+ ret = split_symref_update(update,
referent.buf, transaction,
affected_refnames, err);
if (ret)
if (is_packed_transaction_needed(refs->packed_ref_store,
packed_transaction)) {
ret = ref_transaction_prepare(packed_transaction, err);
+ /*
+ * A failure during the prepare step will abort
+ * itself, but not free. Do that now, and disconnect
+ * from the files_transaction so it does not try to
+ * abort us when we hit the cleanup code below.
+ */
+ if (ret) {
+ ref_transaction_free(packed_transaction);
+ backend_data->packed_transaction = NULL;
+ }
} else {
/*
* We can skip rewriting the `packed-refs`
* file. But we do need to leave it locked, so
* that somebody else doesn't pack a reference
* that we are trying to delete.
+ *
+ * We need to disconnect our transaction from
+ * backend_data, since the abort (whether successful or
+ * not) will free it.
*/
+ backend_data->packed_transaction = NULL;
if (ref_transaction_abort(packed_transaction, err)) {
ret = TRANSACTION_GENERIC_ERROR;
goto cleanup;
}
- backend_data->packed_transaction = NULL;
}
}
/* LHS */
if (!*item->src)
; /* empty is ok; it means "HEAD" */
- else if (llen == GIT_SHA1_HEXSZ && !get_oid_hex(item->src, &unused))
+ else if (llen == the_hash_algo->hexsz && !get_oid_hex(item->src, &unused))
item->exact_sha1 = 1; /* ok */
else if (!check_refname_format(item->src, flags))
; /* valid looking ref is ok */
#include "send-pack.h"
#include "protocol.h"
#include "quote.h"
+#include "transport.h"
static struct remote *remote;
/* always ends with a trailing slash */
else {
struct strbuf unquoted = STRBUF_INIT;
if (unquote_c_style(&unquoted, value, NULL) < 0)
- die("invalid quoting in push-option value");
+ die(_("invalid quoting in push-option value: '%s'"), value);
string_list_append_nodup(&options.push_options,
strbuf_detach(&unquoted, NULL));
}
if (data[i] == '\t')
mid = &data[i];
if (data[i] == '\n') {
- if (mid - start != 40)
- die("%sinfo/refs not valid: is this a git repository?",
- url.buf);
+ if (mid - start != the_hash_algo->hexsz)
+ die(_("%sinfo/refs not valid: is this a git repository?"),
+ transport_anonymize_url(url.buf));
data[i] = 0;
ref_name = mid + 1;
ref = alloc_ref(ref_name);
PACKET_READ_CHOMP_NEWLINE |
PACKET_READ_DIE_ON_ERR_PACKET);
if (packet_reader_read(&reader) != PACKET_READ_NORMAL)
- die("invalid server response; expected service, got flush packet");
+ die(_("invalid server response; expected service, got flush packet"));
if (skip_prefix(reader.line, "# service=", &p) && !strcmp(p, service)) {
/*
d->proto_git = 1;
} else {
- die("invalid server response; got '%s'", reader.line);
+ die(_("invalid server response; got '%s'"), reader.line);
}
}
break;
case HTTP_MISSING_TARGET:
show_http_message(&type, &charset, &buffer);
- die("repository '%s' not found", url.buf);
+ die(_("repository '%s' not found"),
+ transport_anonymize_url(url.buf));
case HTTP_NOAUTH:
show_http_message(&type, &charset, &buffer);
- die("Authentication failed for '%s'", url.buf);
+ die(_("Authentication failed for '%s'"),
+ transport_anonymize_url(url.buf));
default:
show_http_message(&type, &charset, &buffer);
- die("unable to access '%s': %s", url.buf, curl_errorstr);
+ die(_("unable to access '%s': %s"),
+ transport_anonymize_url(url.buf), curl_errorstr);
}
- if (options.verbosity && !starts_with(refs_url.buf, url.buf))
- warning(_("redirecting to %s"), url.buf);
+ if (options.verbosity && !starts_with(refs_url.buf, url.buf)) {
+ char *u = transport_anonymize_url(url.buf);
+ warning(_("redirecting to %s"), u);
+ free(u);
+ }
last= xcalloc(1, sizeof(*last_discovery));
last->service = xstrdup(service);
struct rpc_state {
const char *service_name;
- const char **argv;
- struct strbuf *stdin_preamble;
char *service_url;
char *hdr_content_type;
char *hdr_accept;
int in;
int out;
int any_written;
- struct strbuf result;
unsigned gzip_request : 1;
unsigned initial_buffer : 1;
+
+ /*
+ * Whenever a pkt-line is read into buf, append the 4 characters
+ * denoting its length before appending the payload.
+ */
+ unsigned write_line_lengths : 1;
+
+ /*
+ * Used by rpc_out; initialize to 0. This is true if a flush has been
+ * read, but the corresponding line length (if write_line_lengths is
+ * true) and EOF have not been sent to libcurl. Since each flush marks
+ * the end of a request, each flush must be completely sent before any
+ * further reading occurs.
+ */
+ unsigned flush_read_but_not_sent : 1;
};
+/*
+ * Appends the result of reading from rpc->out to the string represented by
+ * rpc->buf and rpc->len if there is enough space. Returns 1 if there was
+ * enough space, 0 otherwise.
+ *
+ * If rpc->write_line_lengths is true, appends the line length as a 4-byte
+ * hexadecimal string before appending the result described above.
+ *
+ * Writes the total number of bytes appended into appended.
+ */
+static int rpc_read_from_out(struct rpc_state *rpc, int options,
+ size_t *appended,
+ enum packet_read_status *status) {
+ size_t left;
+ char *buf;
+ int pktlen_raw;
+
+ if (rpc->write_line_lengths) {
+ left = rpc->alloc - rpc->len - 4;
+ buf = rpc->buf + rpc->len + 4;
+ } else {
+ left = rpc->alloc - rpc->len;
+ buf = rpc->buf + rpc->len;
+ }
+
+ if (left < LARGE_PACKET_MAX)
+ return 0;
+
+ *status = packet_read_with_status(rpc->out, NULL, NULL, buf,
+ left, &pktlen_raw, options);
+ if (*status != PACKET_READ_EOF) {
+ *appended = pktlen_raw + (rpc->write_line_lengths ? 4 : 0);
+ rpc->len += *appended;
+ }
+
+ if (rpc->write_line_lengths) {
+ switch (*status) {
+ case PACKET_READ_EOF:
+ if (!(options & PACKET_READ_GENTLE_ON_EOF))
+ die(_("shouldn't have EOF when not gentle on EOF"));
+ break;
+ case PACKET_READ_NORMAL:
+ set_packet_header(buf - 4, *appended);
+ break;
+ case PACKET_READ_DELIM:
+ memcpy(buf - 4, "0001", 4);
+ break;
+ case PACKET_READ_FLUSH:
+ memcpy(buf - 4, "0000", 4);
+ break;
+ }
+ }
+
+ return 1;
+}
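
A small illustration (not code from the patch) of what write_line_lengths preserves: the header is re-derived from the payload length that packet_read_with_status() stripped, so the proxied stream keeps its original pkt-line framing.

/*
 * With write_line_lengths set:
 *   pkt read from rpc->out:  "0009done\n"  (header stripped by the reader)
 *   payload appended:        "done\n"      (pktlen_raw = 5, appended = 9)
 *   header re-written:       set_packet_header(buf - 4, 9)  =>  "0009"
 * so the bytes handed to libcurl are byte-for-byte the original pkt-line.
 */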
+
static size_t rpc_out(void *ptr, size_t eltsize,
size_t nmemb, void *buffer_)
{
size_t max = eltsize * nmemb;
struct rpc_state *rpc = buffer_;
size_t avail = rpc->len - rpc->pos;
+ enum packet_read_status status;
if (!avail) {
rpc->initial_buffer = 0;
- avail = packet_read(rpc->out, NULL, NULL, rpc->buf, rpc->alloc, 0);
- if (!avail)
- return 0;
+ rpc->len = 0;
rpc->pos = 0;
- rpc->len = avail;
+ if (!rpc->flush_read_but_not_sent) {
+ if (!rpc_read_from_out(rpc, 0, &avail, &status))
+ BUG("The entire rpc->buf should be larger than LARGE_PACKET_MAX");
+ if (status == PACKET_READ_FLUSH)
+ rpc->flush_read_but_not_sent = 1;
+ }
+ /*
+ * If flush_read_but_not_sent is true, we have already read one
+ * full request but have not fully sent it + EOF, which is why
+ * we need to refrain from reading.
+ */
+ }
+ if (rpc->flush_read_but_not_sent) {
+ if (!avail) {
+ /*
+ * The line length either does not need to be sent at
+ * all or has already been completely sent. Now we can
+ * return 0, indicating EOF, meaning that the flush has
+ * been fully sent.
+ */
+ rpc->flush_read_but_not_sent = 0;
+ return 0;
+ }
+ /*
+		 * If avail is non-zero, the line length for the flush still
+ * hasn't been fully sent. Proceed with sending the line
+ * length.
+ */
}
if (max < avail)
rpc->pos = 0;
return CURLIOE_OK;
}
- error("unable to rewind rpc post data - try increasing http.postBuffer");
+ error(_("unable to rewind rpc post data - try increasing http.postBuffer"));
return CURLIOE_FAILRESTART;
default:
strbuf_addstr(&msg, curl_errorstr);
}
}
- error("RPC failed; %s", msg.buf);
+ error(_("RPC failed; %s"), msg.buf);
strbuf_release(&msg);
}
{
uintmax_t size = len;
if (size > maximum_signed_value_of_type(curl_off_t))
- die("cannot handle pushes this big");
+ die(_("cannot handle pushes this big"));
return (curl_off_t)size;
}
-static int post_rpc(struct rpc_state *rpc)
+/*
+ * If flush_received is true, do not attempt to read any more; just use what's
+ * in rpc->buf.
+ */
+static int post_rpc(struct rpc_state *rpc, int flush_received)
{
struct active_request_slot *slot;
struct curl_slist *headers = http_copy_default_headers();
* allocated buffer space we can use HTTP/1.0 and avoid the
* chunked encoding mess.
*/
- while (1) {
- size_t left = rpc->alloc - rpc->len;
- char *buf = rpc->buf + rpc->len;
- int n;
-
- if (left < LARGE_PACKET_MAX) {
- large_request = 1;
- use_gzip = 0;
- break;
+ if (!flush_received) {
+ while (1) {
+ size_t n;
+ enum packet_read_status status;
+
+ if (!rpc_read_from_out(rpc, 0, &n, &status)) {
+ large_request = 1;
+ use_gzip = 0;
+ break;
+ }
+ if (status == PACKET_READ_FLUSH)
+ break;
}
-
- n = packet_read(rpc->out, NULL, NULL, buf, left, 0);
- if (!n)
- break;
- rpc->len += n;
}
if (large_request) {
ret = git_deflate(&stream, Z_FINISH);
if (ret != Z_STREAM_END)
- die("cannot deflate request; zlib deflate error %d", ret);
+ die(_("cannot deflate request; zlib deflate error %d"), ret);
ret = git_deflate_end_gently(&stream);
if (ret != Z_OK)
- die("cannot deflate request; zlib end error %d", ret);
+ die(_("cannot deflate request; zlib end error %d"), ret);
gzip_size = stream.total_out;
return err;
}
-static int rpc_service(struct rpc_state *rpc, struct discovery *heads)
+static int rpc_service(struct rpc_state *rpc, struct discovery *heads,
+ const char **client_argv, const struct strbuf *preamble,
+ struct strbuf *rpc_result)
{
const char *svc = rpc->service_name;
struct strbuf buf = STRBUF_INIT;
- struct strbuf *preamble = rpc->stdin_preamble;
struct child_process client = CHILD_PROCESS_INIT;
int err = 0;
client.in = -1;
client.out = -1;
client.git_cmd = 1;
- client.argv = rpc->argv;
+ client.argv = client_argv;
if (start_command(&client))
exit(1);
- if (preamble)
- write_or_die(client.in, preamble->buf, preamble->len);
+ write_or_die(client.in, preamble->buf, preamble->len);
if (heads)
write_or_die(client.in, heads->buf, heads->len);
rpc->buf = xmalloc(rpc->alloc);
rpc->in = client.in;
rpc->out = client.out;
- strbuf_init(&rpc->result, 0);
strbuf_addf(&buf, "%s%s", url.buf, svc);
rpc->service_url = strbuf_detach(&buf, NULL);
break;
rpc->pos = 0;
rpc->len = n;
- err |= post_rpc(rpc);
+ err |= post_rpc(rpc, 0);
}
close(client.in);
client.in = -1;
if (!err) {
- strbuf_read(&rpc->result, client.out, 0);
+ strbuf_read(rpc_result, client.out, 0);
} else {
char buf[4096];
for (;;)
ALLOC_ARRAY(targets, nr_heads);
if (options.depth || options.deepen_since)
- die("dumb http transport does not support shallow capabilities");
+ die(_("dumb http transport does not support shallow capabilities"));
for (i = 0; i < nr_heads; i++)
targets[i] = xstrdup(oid_to_hex(&to_fetch[i]->old_oid));
free(targets[i]);
free(targets);
- return ret ? error("fetch failed.") : 0;
+ return ret ? error(_("fetch failed.")) : 0;
}
static int fetch_git(struct discovery *heads,
struct strbuf preamble = STRBUF_INIT;
int i, err;
struct argv_array args = ARGV_ARRAY_INIT;
+ struct strbuf rpc_result = STRBUF_INIT;
argv_array_pushl(&args, "fetch-pack", "--stateless-rpc",
"--stdin", "--lock-pack", NULL);
for (i = 0; i < nr_heads; i++) {
struct ref *ref = to_fetch[i];
if (!*ref->name)
- die("cannot fetch by sha1 over smart http");
+ die(_("cannot fetch by sha1 over smart http"));
packet_buf_write(&preamble, "%s %s\n",
oid_to_hex(&ref->old_oid), ref->name);
}
memset(&rpc, 0, sizeof(rpc));
rpc.service_name = "git-upload-pack",
- rpc.argv = args.argv;
- rpc.stdin_preamble = &preamble;
rpc.gzip_request = 1;
- err = rpc_service(&rpc, heads);
- if (rpc.result.len)
- write_or_die(1, rpc.result.buf, rpc.result.len);
- strbuf_release(&rpc.result);
+ err = rpc_service(&rpc, heads, args.argv, &preamble, &rpc_result);
+ if (rpc_result.len)
+ write_or_die(1, rpc_result.buf, rpc_result.len);
+ strbuf_release(&rpc_result);
strbuf_release(&preamble);
argv_array_clear(&args);
return err;
const char *name;
struct ref *ref;
struct object_id old_oid;
+ const char *q;
- if (get_oid_hex(p, &old_oid))
- die("protocol error: expected sha/ref, got %s'", p);
- if (p[GIT_SHA1_HEXSZ] == ' ')
- name = p + GIT_SHA1_HEXSZ + 1;
- else if (!p[GIT_SHA1_HEXSZ])
+ if (parse_oid_hex(p, &old_oid, &q))
+		die(_("protocol error: expected sha/ref, got '%s'"), p);
+ if (*q == ' ')
+ name = q + 1;
+ else if (!*q)
name = "";
else
- die("protocol error: expected sha/ref, got %s'", p);
+		die(_("protocol error: expected sha/ref, got '%s'"), p);
ref = alloc_ref(name);
oidcpy(&ref->old_oid, &old_oid);
to_fetch[nr_heads++] = ref;
}
else
- die("http transport does not support %s", buf->buf);
+ die(_("http transport does not support %s"), buf->buf);
strbuf_reset(buf);
if (strbuf_getline_lf(buf, stdin) == EOF)
argv_array_push(&child.args, specs[i]);
if (run_command(&child))
- die("git-http-push failed");
+ die(_("git-http-push failed"));
return 0;
}
struct argv_array args;
struct string_list_item *cas_option;
struct strbuf preamble = STRBUF_INIT;
+ struct strbuf rpc_result = STRBUF_INIT;
argv_array_init(&args);
argv_array_pushl(&args, "send-pack", "--stateless-rpc", "--helper-status",
memset(&rpc, 0, sizeof(rpc));
rpc.service_name = "git-receive-pack",
- rpc.argv = args.argv;
- rpc.stdin_preamble = &preamble;
- err = rpc_service(&rpc, heads);
- if (rpc.result.len)
- write_or_die(1, rpc.result.buf, rpc.result.len);
- strbuf_release(&rpc.result);
+ err = rpc_service(&rpc, heads, args.argv, &preamble, &rpc_result);
+ if (rpc_result.len)
+ write_or_die(1, rpc_result.buf, rpc_result.len);
+ strbuf_release(&rpc_result);
strbuf_release(&preamble);
argv_array_clear(&args);
return err;
specs[nr_spec++] = xstrdup(buf->buf + 5);
}
else
- die("http transport does not support %s", buf->buf);
+ die(_("http transport does not support %s"), buf->buf);
strbuf_reset(buf);
if (strbuf_getline_lf(buf, stdin) == EOF)
free(specs);
}
-/*
- * Used to represent the state of a connection to an HTTP server when
- * communicating using git's wire-protocol version 2.
- */
-struct proxy_state {
- char *service_name;
- char *service_url;
- struct curl_slist *headers;
- struct strbuf request_buffer;
- int in;
- int out;
- struct packet_reader reader;
- size_t pos;
- int seen_flush;
-};
-
-static void proxy_state_init(struct proxy_state *p, const char *service_name,
- enum protocol_version version)
-{
- struct strbuf buf = STRBUF_INIT;
-
- memset(p, 0, sizeof(*p));
- p->service_name = xstrdup(service_name);
-
- p->in = 0;
- p->out = 1;
- strbuf_init(&p->request_buffer, 0);
-
- strbuf_addf(&buf, "%s%s", url.buf, p->service_name);
- p->service_url = strbuf_detach(&buf, NULL);
-
- p->headers = http_copy_default_headers();
-
- strbuf_addf(&buf, "Content-Type: application/x-%s-request", p->service_name);
- p->headers = curl_slist_append(p->headers, buf.buf);
- strbuf_reset(&buf);
-
- strbuf_addf(&buf, "Accept: application/x-%s-result", p->service_name);
- p->headers = curl_slist_append(p->headers, buf.buf);
- strbuf_reset(&buf);
-
- p->headers = curl_slist_append(p->headers, "Transfer-Encoding: chunked");
-
- /* Add the Git-Protocol header */
- if (get_protocol_http_header(version, &buf))
- p->headers = curl_slist_append(p->headers, buf.buf);
-
- packet_reader_init(&p->reader, p->in, NULL, 0,
- PACKET_READ_GENTLE_ON_EOF |
- PACKET_READ_DIE_ON_ERR_PACKET);
-
- strbuf_release(&buf);
-}
-
-static void proxy_state_clear(struct proxy_state *p)
-{
- free(p->service_name);
- free(p->service_url);
- curl_slist_free_all(p->headers);
- strbuf_release(&p->request_buffer);
-}
-
-/*
- * CURLOPT_READFUNCTION callback function.
- * Attempts to copy over a single packet-line at a time into the
- * curl provided buffer.
- */
-static size_t proxy_in(char *buffer, size_t eltsize,
- size_t nmemb, void *userdata)
-{
- size_t max;
- struct proxy_state *p = userdata;
- size_t avail = p->request_buffer.len - p->pos;
-
-
- if (eltsize != 1)
- BUG("curl read callback called with size = %"PRIuMAX" != 1",
- (uintmax_t)eltsize);
- max = nmemb;
-
- if (!avail) {
- if (p->seen_flush) {
- p->seen_flush = 0;
- return 0;
- }
-
- strbuf_reset(&p->request_buffer);
- switch (packet_reader_read(&p->reader)) {
- case PACKET_READ_EOF:
- die("unexpected EOF when reading from parent process");
- case PACKET_READ_NORMAL:
- packet_buf_write_len(&p->request_buffer, p->reader.line,
- p->reader.pktlen);
- break;
- case PACKET_READ_DELIM:
- packet_buf_delim(&p->request_buffer);
- break;
- case PACKET_READ_FLUSH:
- packet_buf_flush(&p->request_buffer);
- p->seen_flush = 1;
- break;
- }
- p->pos = 0;
- avail = p->request_buffer.len;
- }
-
- if (max < avail)
- avail = max;
- memcpy(buffer, p->request_buffer.buf + p->pos, avail);
- p->pos += avail;
- return avail;
-}
-
-static size_t proxy_out(char *buffer, size_t eltsize,
- size_t nmemb, void *userdata)
-{
- size_t size;
- struct proxy_state *p = userdata;
-
- if (eltsize != 1)
- BUG("curl read callback called with size = %"PRIuMAX" != 1",
- (uintmax_t)eltsize);
- size = nmemb;
-
- write_or_die(p->out, buffer, size);
- return size;
-}
-
-/* Issues a request to the HTTP server configured in `p` */
-static int proxy_request(struct proxy_state *p)
-{
- struct active_request_slot *slot;
-
- slot = get_active_slot();
-
- curl_easy_setopt(slot->curl, CURLOPT_ENCODING, "");
- curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 0);
- curl_easy_setopt(slot->curl, CURLOPT_POST, 1);
- curl_easy_setopt(slot->curl, CURLOPT_URL, p->service_url);
- curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, p->headers);
-
- /* Setup function to read request from client */
- curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, proxy_in);
- curl_easy_setopt(slot->curl, CURLOPT_READDATA, p);
-
- /* Setup function to write server response to client */
- curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, proxy_out);
- curl_easy_setopt(slot->curl, CURLOPT_WRITEDATA, p);
-
- if (run_slot(slot, NULL) != HTTP_OK)
- return -1;
-
- return 0;
-}
-
static int stateless_connect(const char *service_name)
{
struct discovery *discover;
- struct proxy_state p;
+ struct rpc_state rpc;
+ struct strbuf buf = STRBUF_INIT;
/*
* Run the info/refs request and see if the server supports protocol
fflush(stdout);
}
- proxy_state_init(&p, service_name, discover->version);
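+	/*
+	 * Set up the rpc state by hand: for a stateless protocol v2
+	 * connection we read pkt-lines from our stdin (fd 0) and relay
+	 * the server's responses to our stdout (fd 1), keeping the
+	 * pkt-line framing intact (write_line_lengths).
+	 */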
+ rpc.service_name = service_name;
+ rpc.service_url = xstrfmt("%s%s", url.buf, rpc.service_name);
+ rpc.hdr_content_type = xstrfmt("Content-Type: application/x-%s-request", rpc.service_name);
+ rpc.hdr_accept = xstrfmt("Accept: application/x-%s-result", rpc.service_name);
+ if (get_protocol_http_header(discover->version, &buf)) {
+ rpc.protocol_header = strbuf_detach(&buf, NULL);
+ } else {
+ rpc.protocol_header = NULL;
+ strbuf_release(&buf);
+ }
+ rpc.buf = xmalloc(http_post_buffer);
+ rpc.alloc = http_post_buffer;
+ rpc.len = 0;
+ rpc.pos = 0;
+ rpc.in = 1;
+ rpc.out = 0;
+ rpc.any_written = 0;
+ rpc.gzip_request = 1;
+ rpc.initial_buffer = 0;
+ rpc.write_line_lengths = 1;
+ rpc.flush_read_but_not_sent = 0;
/*
* Dump the capability listing that we got from the server earlier
* during the info/refs request.
*/
- write_or_die(p.out, discover->buf, discover->len);
+ write_or_die(rpc.in, discover->buf, discover->len);
- /* Peek the next packet line. Until we see EOF keep sending POSTs */
- while (packet_reader_peek(&p.reader) != PACKET_READ_EOF) {
- if (proxy_request(&p)) {
+ /* Until we see EOF keep sending POSTs */
+ while (1) {
+ size_t avail;
+ enum packet_read_status status;
+
+ if (!rpc_read_from_out(&rpc, PACKET_READ_GENTLE_ON_EOF, &avail,
+ &status))
+ BUG("The entire rpc->buf should be larger than LARGE_PACKET_MAX");
+ if (status == PACKET_READ_EOF)
+ break;
+ if (post_rpc(&rpc, status == PACKET_READ_FLUSH))
/* We would have an err here */
break;
- }
+ /* Reset the buffer for next request */
+ rpc.len = 0;
}
- proxy_state_clear(&p);
+ free(rpc.service_url);
+ free(rpc.hdr_content_type);
+ free(rpc.hdr_accept);
+ free(rpc.protocol_header);
+ free(rpc.buf);
+ strbuf_release(&buf);
+
return 0;
}
setup_git_directory_gently(&nongit);
if (argc < 2) {
- error("remote-curl: usage: git remote-curl <remote> [<url>]");
+ error(_("remote-curl: usage: git remote-curl <remote> [<url>]"));
return 1;
}
string_list_init(&options.deepen_not, 1);
string_list_init(&options.push_options, 1);
+ /*
+	 * Just report "remote-curl" here, folding in all the various aliases
+	 * ("git-remote-http", "git-remote-https", etc.), since they are all
+	 * just copies of the same actual executable.
+ */
+ trace2_cmd_name("remote-curl");
+
remote = remote_get(argv[1]);
if (argc > 2) {
if (strbuf_getline_lf(&buf, stdin) == EOF) {
if (ferror(stdin))
- error("remote-curl: error reading command stream from git");
+ error(_("remote-curl: error reading command stream from git"));
return 1;
}
if (buf.len == 0)
break;
if (starts_with(buf.buf, "fetch ")) {
if (nongit)
- die("remote-curl: fetch attempted without a local repo");
+ die(_("remote-curl: fetch attempted without a local repo"));
parse_fetch(&buf);
} else if (!strcmp(buf.buf, "list") || starts_with(buf.buf, "list ")) {
if (!stateless_connect(arg))
break;
} else {
- error("remote-curl: unknown command '%s' from git", buf.buf);
+ error(_("remote-curl: unknown command '%s' from git"), buf.buf);
return 1;
}
strbuf_reset(&buf);
void repo_set_worktree(struct repository *repo, const char *path)
{
repo->worktree = real_pathdup(path, 1);
+
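+	/*
+	 * Let trace2 know about this repository (emits a "def_repo"
+	 * event naming the worktree).
+	 */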
+ trace2_def_repo(repo);
}
static int read_and_verify_repository_format(struct repository_format *format,
const char *gitdir,
const char *worktree)
{
- struct repository_format format;
+ struct repository_format format = REPOSITORY_FORMAT_INIT;
memset(repo, 0, sizeof(*repo));
repo->objects = raw_object_store_new();
if (worktree)
repo_set_worktree(repo, worktree);
+ clear_repository_format(&format);
return 0;
error:
/* Repository's current hash algorithm, as serialized on disk. */
const struct git_hash_algo *hash_algo;
+ /* A unique-id for tracing purposes. */
+ int trace2_repo_id;
+
/* Configurations */
/* Indicate if a repository has a different 'commondir' from 'gitdir' */
commit->object.flags |= TREESAME;
}
-static void commit_list_insert_by_date_cached(struct commit *p, struct commit_list **head,
- struct commit_list *cached_base, struct commit_list **cache)
-{
- struct commit_list *new_entry;
-
- if (cached_base && p->date < cached_base->item->date)
- new_entry = commit_list_insert_by_date(p, &cached_base->next);
- else
- new_entry = commit_list_insert_by_date(p, head);
-
- if (cache && (!*cache || p->date < (*cache)->item->date))
- *cache = new_entry;
-}
-
static int process_parents(struct rev_info *revs, struct commit *commit,
- struct commit_list **list, struct commit_list **cache_ptr)
+ struct commit_list **list, struct prio_queue *queue)
{
struct commit_list *parent = commit->parents;
unsigned left_flag;
- struct commit_list *cached_base = cache_ptr ? *cache_ptr : NULL;
if (commit->object.flags & ADDED)
return 0;
continue;
p->object.flags |= SEEN;
if (list)
- commit_list_insert_by_date_cached(p, list, cached_base, cache_ptr);
+ commit_list_insert_by_date(p, list);
+ if (queue)
+ prio_queue_put(queue, p);
}
return 0;
}
if (!(p->object.flags & SEEN)) {
p->object.flags |= SEEN;
if (list)
- commit_list_insert_by_date_cached(p, list, cached_base, cache_ptr);
+ commit_list_insert_by_date(p, list);
+ if (queue)
+ prio_queue_put(queue, p);
}
if (revs->first_parent_only)
break;
return 0;
}
-static void read_pathspec_from_stdin(struct rev_info *revs, struct strbuf *sb,
+static void read_pathspec_from_stdin(struct strbuf *sb,
struct argv_array *prune)
{
while (strbuf_getline(sb, stdin) != EOF)
die("bad revision '%s'", sb.buf);
}
if (seen_dashdash)
- read_pathspec_from_stdin(revs, &sb, prune);
+ read_pathspec_from_stdin(&sb, prune);
strbuf_release(&sb);
warn_on_object_refname_ambiguity = save_warning;
revs->diff = 1;
revs->dense_combined_merges = 0;
revs->combine_merges = 1;
+ } else if (!strcmp(arg, "--combined-all-paths")) {
+ revs->diff = 1;
+ revs->combined_all_paths = 1;
} else if (!strcmp(arg, "--cc")) {
revs->diff = 1;
revs->dense_combined_merges = 1;
}
if (revs->combine_merges)
revs->ignore_merges = 0;
+ if (revs->combined_all_paths && !revs->combine_merges)
+ die("--combined-all-paths makes no sense without -c or --cc");
+
revs->diffopt.abbrev = revs->abbrev;
if (revs->line_level_traverse) {
if (revs->first_parent_only && revs->bisect)
die(_("--first-parent is incompatible with --bisect"));
+ if (revs->line_level_traverse &&
+ (revs->diffopt.output_format & ~(DIFF_FORMAT_PATCH | DIFF_FORMAT_NO_OUTPUT)))
+ die(_("-L does not yet support diff formats besides -p and -s"));
+
if (revs->expand_tabs_in_log < 0)
revs->expand_tabs_in_log = revs->expand_tabs_in_log_default;
return st;
}
-static int mark_redundant_parents(struct rev_info *revs, struct commit *commit)
+static int mark_redundant_parents(struct commit *commit)
{
struct commit_list *h = reduce_heads(commit->parents);
int i = 0, marked = 0;
return marked;
}
-static int mark_treesame_root_parents(struct rev_info *revs, struct commit *commit)
+static int mark_treesame_root_parents(struct commit *commit)
{
struct commit_list *p;
int marked = 0;
* Detect and simplify both cases.
*/
if (1 < cnt) {
- int marked = mark_redundant_parents(revs, commit);
- marked += mark_treesame_root_parents(revs, commit);
+ int marked = mark_redundant_parents(commit);
+ marked += mark_treesame_root_parents(commit);
if (marked)
marked -= leave_one_treesame_to_parent(revs, commit);
if (marked)
return 0;
}
-static enum rewrite_result rewrite_one(struct rev_info *revs, struct commit **pp)
+static enum rewrite_result rewrite_one_1(struct rev_info *revs,
+ struct commit **pp,
+ struct prio_queue *queue)
{
- struct commit_list *cache = NULL;
-
for (;;) {
struct commit *p = *pp;
if (!revs->limited)
- if (process_parents(revs, p, &revs->commits, &cache) < 0)
+ if (process_parents(revs, p, NULL, queue) < 0)
return rewrite_one_error;
if (p->object.flags & UNINTERESTING)
return rewrite_one_ok;
}
}
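+/*
+ * Splice the commits collected in 'q' into the commit list '*list',
+ * keeping the list sorted by commit date (newest first); the queue is
+ * drained in the process.
+ */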
+static void merge_queue_into_list(struct prio_queue *q, struct commit_list **list)
+{
+ while (q->nr) {
+ struct commit *item = prio_queue_peek(q);
+ struct commit_list *p = *list;
+
+ if (p && p->item->date >= item->date)
+ list = &p->next;
+ else {
+ p = commit_list_insert(item, list);
+ list = &p->next; /* skip newly added item */
+ prio_queue_get(q); /* pop item */
+ }
+ }
+}
+
+static enum rewrite_result rewrite_one(struct rev_info *revs, struct commit **pp)
+{
+ struct prio_queue queue = { compare_commits_by_commit_date };
+ enum rewrite_result ret = rewrite_one_1(revs, pp, &queue);
+ merge_queue_into_list(&queue, &revs->commits);
+ clear_prio_queue(&queue);
+ return ret;
+}
+
int rewrite_parents(struct rev_info *revs, struct commit *commit,
rewrite_parent_fn_t rewrite_parent)
{
verbose_header:1,
ignore_merges:1,
combine_merges:1,
+ combined_all_paths:1,
dense_combined_merges:1,
always_show_header:1;
int sane_execvp(const char *file, char * const argv[])
{
+#ifndef GIT_WINDOWS_NATIVE
+ /*
+	 * execvp() doesn't return, so all we can do is tell trace2
+ * what we are about to do and let it leave a hint in the log
+ * (unless of course the execvp() fails).
+ *
+ * we skip this for Windows because the compat layer already
+ * has to emulate the execvp() call anyway.
+ */
+ int exec_id = trace2_exec(file, (const char **)argv);
+#endif
+
if (!execvp(file, argv))
return 0; /* cannot happen ;-) */
+#ifndef GIT_WINDOWS_NATIVE
+ {
+ int ec = errno;
+ trace2_exec_result(exec_id, ec);
+ errno = ec;
+ }
+#endif
+
/*
* When a command can't be found because one of the directories
* listed in $PATH is unsearchable, execvp reports EACCES, but
cmd->err = fderr[0];
}
+ trace2_child_start(cmd);
trace_run_command(cmd);
fflush(NULL);
#endif
if (cmd->pid < 0) {
+ trace2_child_exit(cmd, -1);
+
if (need_in)
close_pair(fdin);
else if (cmd->in)
int finish_command(struct child_process *cmd)
{
int ret = wait_or_whine(cmd->pid, cmd->argv[0], 0);
+ trace2_child_exit(cmd, ret);
child_process_clear(cmd);
return ret;
}
int finish_command_in_signal(struct child_process *cmd)
{
- return wait_or_whine(cmd->pid, cmd->argv[0], 1);
+ int ret = wait_or_whine(cmd->pid, cmd->argv[0], 1);
+ trace2_child_exit(cmd, ret);
+ return ret;
}
return run_command_v_opt_cd_env(argv, opt, NULL, NULL);
}
+int run_command_v_opt_tr2(const char **argv, int opt, const char *tr2_class)
+{
+ return run_command_v_opt_cd_env_tr2(argv, opt, NULL, NULL, tr2_class);
+}
+
int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env)
+{
+ return run_command_v_opt_cd_env_tr2(argv, opt, dir, env, NULL);
+}
+
+int run_command_v_opt_cd_env_tr2(const char **argv, int opt, const char *dir,
+ const char *const *env, const char *tr2_class)
{
struct child_process cmd = CHILD_PROCESS_INIT;
cmd.argv = argv;
cmd.clean_on_exit = opt & RUN_CLEAN_ON_EXIT ? 1 : 0;
cmd.dir = dir;
cmd.env = env;
+ cmd.trace2_child_class = tr2_class;
return run_command(&cmd);
}
hook.env = env;
hook.no_stdin = 1;
hook.stdout_to_stderr = 1;
+ hook.trace2_hook_name = name;
return run_command(&hook);
}
pp_cleanup(&pp);
return 0;
}
+
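+/*
+ * Like run_processes_parallel(), but wrap the whole run in a trace2
+ * region under <tr2_category>/<tr2_label>, recording the effective
+ * number of parallel jobs.
+ */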
+int run_processes_parallel_tr2(int n, get_next_task_fn get_next_task,
+ start_failure_fn start_failure,
+ task_finished_fn task_finished, void *pp_cb,
+ const char *tr2_category, const char *tr2_label)
+{
+ int result;
+
+ trace2_region_enter_printf(tr2_category, tr2_label, NULL, "max:%d",
+ ((n < 1) ? online_cpus() : n));
+
+ result = run_processes_parallel(n, get_next_task, start_failure,
+ task_finished, pp_cb);
+
+ trace2_region_leave(tr2_category, tr2_label, NULL);
+
+ return result;
+}
struct argv_array args;
struct argv_array env_array;
pid_t pid;
+
+ int trace2_child_id;
+ uint64_t trace2_child_us_start;
+ const char *trace2_child_class;
+ const char *trace2_hook_name;
+
/*
* Using .in, .out, .err:
* - Specify 0 for no redirections (child inherits stdin, stdout,
#define RUN_USING_SHELL 16
#define RUN_CLEAN_ON_EXIT 32
int run_command_v_opt(const char **argv, int opt);
-
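+/*
+ * Like run_command_v_opt(), but also set the trace2 child class
+ * (see child_process.trace2_child_class) used when logging the
+ * spawned process.
+ */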
+int run_command_v_opt_tr2(const char **argv, int opt, const char *tr2_class);
/*
* env (the environment) is to be formatted like environ: "VAR=VALUE".
* To unset an environment variable use just "VAR".
*/
int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env);
+int run_command_v_opt_cd_env_tr2(const char **argv, int opt, const char *dir,
+ const char *const *env, const char *tr2_class);
/**
* Execute the given command, sending "in" to its stdin, and capturing its
start_failure_fn,
task_finished_fn,
void *pp_cb);
+int run_processes_parallel_tr2(int n, get_next_task_fn, start_failure_fn,
+ task_finished_fn, void *pp_cb,
+ const char *tr2_category, const char *tr2_label);
#endif
* file and written to the tail of 'done'.
*/
GIT_PATH_FUNC(rebase_path_todo, "rebase-merge/git-rebase-todo")
-static GIT_PATH_FUNC(rebase_path_todo_backup,
- "rebase-merge/git-rebase-todo.backup")
+GIT_PATH_FUNC(rebase_path_todo_backup, "rebase-merge/git-rebase-todo.backup")
/*
* The rebase command lines that have already been processed. A line
}
}
-int write_message(const void *buf, size_t len, const char *filename,
- int append_eol)
+static int write_message(const void *buf, size_t len, const char *filename,
+ int append_eol)
{
struct lock_file msg_file = LOCK_INIT;
}
strbuf_reset(&out);
- strbuf_addstr(&out, fmt_ident(name, email, date, 0));
+ strbuf_addstr(&out, fmt_ident(name, email, WANT_AUTHOR_IDENT, date, 0));
strbuf_swap(buf, &out);
strbuf_release(&out);
free(name);
proc.argv = argv;
proc.in = -1;
proc.stdout_to_stderr = 1;
+ proc.trace2_hook_name = "post-rewrite";
code = start_command(&proc);
if (code)
return 1;
}
-/*
- * Note that ordering matters in this enum. Not only must it match the mapping
- * below, it is also divided into several sections that matter. When adding
- * new commands, make sure you add it in the right section.
- */
-enum todo_command {
- /* commands that handle commits */
- TODO_PICK = 0,
- TODO_REVERT,
- TODO_EDIT,
- TODO_REWORD,
- TODO_FIXUP,
- TODO_SQUASH,
- /* commands that do something else than handling a single commit */
- TODO_EXEC,
- TODO_BREAK,
- TODO_LABEL,
- TODO_RESET,
- TODO_MERGE,
- /* commands that do nothing but are counted for reporting progress */
- TODO_NOOP,
- TODO_DROP,
- /* comments (not counted for reporting progress) */
- TODO_COMMENT
-};
-
static struct {
char c;
const char *str;
TODO_EDIT_MERGE_MSG = 1
};
-struct todo_item {
- enum todo_command command;
- struct commit *commit;
- unsigned int flags;
- const char *arg;
- int arg_len;
- size_t offset_in_buf;
-};
-
-struct todo_list {
- struct strbuf buf;
- struct todo_item *items;
- int nr, alloc, current;
- int done_nr, total_nr;
- struct stat_data stat;
-};
-
-#define TODO_LIST_INIT { STRBUF_INIT }
-
-static void todo_list_release(struct todo_list *todo_list)
+void todo_list_release(struct todo_list *todo_list)
{
strbuf_release(&todo_list->buf);
FREE_AND_NULL(todo_list->items);
return todo_list->items + todo_list->nr++;
}
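+/* Return a pointer to the argument of 'item' within the todo_list buffer. */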
+const char *todo_item_get_arg(struct todo_list *todo_list,
+ struct todo_item *item)
+{
+ return todo_list->buf.buf + item->arg_offset;
+}
+
static int parse_insn_line(struct repository *r, struct todo_item *item,
- const char *bol, char *eol)
+ const char *buf, const char *bol, char *eol)
{
struct object_id commit_oid;
char *end_of_object_name;
if (bol == eol || *bol == '\r' || *bol == comment_line_char) {
item->command = TODO_COMMENT;
item->commit = NULL;
- item->arg = bol;
+ item->arg_offset = bol - buf;
item->arg_len = eol - bol;
return 0;
}
return error(_("%s does not accept arguments: '%s'"),
command_to_string(item->command), bol);
item->commit = NULL;
- item->arg = bol;
+ item->arg_offset = bol - buf;
item->arg_len = eol - bol;
return 0;
}
if (item->command == TODO_EXEC || item->command == TODO_LABEL ||
item->command == TODO_RESET) {
item->commit = NULL;
- item->arg = bol;
+ item->arg_offset = bol - buf;
item->arg_len = (int)(eol - bol);
return 0;
}
} else {
item->flags |= TODO_EDIT_MERGE_MSG;
item->commit = NULL;
- item->arg = bol;
+ item->arg_offset = bol - buf;
item->arg_len = (int)(eol - bol);
return 0;
}
	status = get_oid(bol, &commit_oid);
+	if (status < 0)
+		error(_("could not parse '%.*s'"),
+		      (int)(end_of_object_name - bol), bol);
	*end_of_object_name = saved;
-	item->arg = end_of_object_name + strspn(end_of_object_name, " \t");
-	item->arg_len = (int)(eol - item->arg);
+	bol = end_of_object_name + strspn(end_of_object_name, " \t");
+	item->arg_offset = bol - buf;
+	item->arg_len = (int)(eol - bol);
	if (status < 0)
-		return -1;
+		return status;
item->commit = lookup_commit_reference(r, &commit_oid);
return !item->commit;
}
-static int parse_insn_buffer(struct repository *r, char *buf,
- struct todo_list *todo_list)
+int todo_list_parse_insn_buffer(struct repository *r, char *buf,
+ struct todo_list *todo_list)
{
struct todo_item *item;
char *p = buf, *next_p;
int i, res = 0, fixup_okay = file_exists(rebase_path_done());
+ todo_list->current = todo_list->nr = 0;
+
for (i = 1; *p; i++, p = next_p) {
char *eol = strchrnul(p, '\n');
item = append_new_todo(todo_list);
item->offset_in_buf = p - todo_list->buf.buf;
- if (parse_insn_line(r, item, p, eol)) {
+ if (parse_insn_line(r, item, buf, p, eol)) {
res = error(_("invalid line %d: %.*s"),
i, (int)(eol - p), p);
- item->command = TODO_NOOP;
+ item->command = TODO_COMMENT + 1;
+ item->arg_offset = p - buf;
+ item->arg_len = (int)(eol - p);
+ item->commit = NULL;
}
if (fixup_okay)
return error(_("could not stat '%s'"), todo_file);
fill_stat_data(&todo_list->stat, &st);
- res = parse_insn_buffer(r, todo_list->buf.buf, todo_list);
+ res = todo_list_parse_insn_buffer(r, todo_list->buf.buf, todo_list);
if (res) {
if (is_rebase_i(opts))
return error(_("please fix this using "
FILE *f = fopen_or_warn(rebase_path_msgtotal(), "w");
if (strbuf_read_file(&done.buf, rebase_path_done(), 0) > 0 &&
- !parse_insn_buffer(r, done.buf.buf, &done))
+ !todo_list_parse_insn_buffer(r, done.buf.buf, &done))
todo_list->done_nr = count_commands(&done);
else
todo_list->done_nr = 0;
opts->no_commit = git_config_bool_or_int(key, value, &error_flag);
else if (!strcmp(key, "options.edit"))
opts->edit = git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.allow-empty"))
+ opts->allow_empty =
+ git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.allow-empty-message"))
+ opts->allow_empty_message =
+ git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.keep-redundant-commits"))
+ opts->keep_redundant_commits =
+ git_config_bool_or_int(key, value, &error_flag);
else if (!strcmp(key, "options.signoff"))
opts->signoff = git_config_bool_or_int(key, value, &error_flag);
else if (!strcmp(key, "options.record-origin"))
item->command = command;
item->commit = commit;
- item->arg = NULL;
+ item->arg_offset = 0;
item->arg_len = 0;
item->offset_in_buf = todo_list->buf.len;
subject_len = find_commit_subject(commit_buffer, &subject);
int res = 0;
if (opts->no_commit)
- res |= git_config_set_in_file_gently(opts_file, "options.no-commit", "true");
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.no-commit", "true");
if (opts->edit)
- res |= git_config_set_in_file_gently(opts_file, "options.edit", "true");
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.edit", "true");
+ if (opts->allow_empty)
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.allow-empty", "true");
+ if (opts->allow_empty_message)
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.allow-empty-message", "true");
+ if (opts->keep_redundant_commits)
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.keep-redundant-commits", "true");
if (opts->signoff)
- res |= git_config_set_in_file_gently(opts_file, "options.signoff", "true");
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.signoff", "true");
if (opts->record_origin)
- res |= git_config_set_in_file_gently(opts_file, "options.record-origin", "true");
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.record-origin", "true");
if (opts->allow_ff)
- res |= git_config_set_in_file_gently(opts_file, "options.allow-ff", "true");
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.allow-ff", "true");
if (opts->mainline) {
struct strbuf buf = STRBUF_INIT;
strbuf_addf(&buf, "%d", opts->mainline);
- res |= git_config_set_in_file_gently(opts_file, "options.mainline", buf.buf);
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.mainline", buf.buf);
strbuf_release(&buf);
}
if (opts->strategy)
- res |= git_config_set_in_file_gently(opts_file, "options.strategy", opts->strategy);
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.strategy", opts->strategy);
if (opts->gpg_sign)
- res |= git_config_set_in_file_gently(opts_file, "options.gpg-sign", opts->gpg_sign);
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.gpg-sign", opts->gpg_sign);
if (opts->xopts) {
int i;
for (i = 0; i < opts->xopts_nr; i++)
res |= git_config_set_multivar_in_file_gently(opts_file,
- "options.strategy-option",
- opts->xopts[i], "^$", 0);
+ "options.strategy-option",
+ opts->xopts[i], "^$", 0);
}
if (opts->allow_rerere_auto)
- res |= git_config_set_in_file_gently(opts_file, "options.allow-rerere-auto",
- opts->allow_rerere_auto == RERERE_AUTOUPDATE ?
- "true" : "false");
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.allow-rerere-auto",
+ opts->allow_rerere_auto == RERERE_AUTOUPDATE ?
+ "true" : "false");
return res;
}
while (todo_list->current < todo_list->nr) {
struct todo_item *item = todo_list->items + todo_list->current;
+ const char *arg = todo_item_get_arg(todo_list, item);
+
if (save_todo(todo_list, opts))
return -1;
if (is_rebase_i(opts)) {
fprintf(stderr,
_("Stopped at %s... %.*s\n"),
short_commit_name(commit),
- item->arg_len, item->arg);
+ item->arg_len, arg);
return error_with_patch(r, commit,
- item->arg, item->arg_len, opts, res,
- !res);
+ arg, item->arg_len, opts, res, !res);
}
if (is_rebase_i(opts) && !res)
record_in_rewritten(&item->commit->object.oid,
if (res == 1)
intend_to_amend();
return error_failed_squash(r, item->commit, opts,
- item->arg_len, item->arg);
+ item->arg_len, arg);
} else if (res && is_rebase_i(opts) && item->commit) {
int to_amend = 0;
struct object_id oid;
to_amend = 1;
return res | error_with_patch(r, item->commit,
- item->arg, item->arg_len, opts,
+ arg, item->arg_len, opts,
res, to_amend);
}
} else if (item->command == TODO_EXEC) {
- char *end_of_arg = (char *)(item->arg + item->arg_len);
+ char *end_of_arg = (char *)(arg + item->arg_len);
int saved = *end_of_arg;
struct stat st;
*end_of_arg = '\0';
- res = do_exec(r, item->arg);
+ res = do_exec(r, arg);
*end_of_arg = saved;
- /* Reread the todo file if it has changed. */
if (res) {
if (opts->reschedule_failed_exec)
reschedule = 1;
res = error_errno(_("could not stat '%s'"),
get_todo_path(opts));
else if (match_stat_data(&todo_list->stat, &st)) {
+ /* Reread the todo file if it has changed. */
todo_list_release(todo_list);
if (read_populate_todo(r, todo_list, opts))
res = -1; /* message was printed */
todo_list->current = -1;
}
} else if (item->command == TODO_LABEL) {
- if ((res = do_label(r, item->arg, item->arg_len)))
+ if ((res = do_label(r, arg, item->arg_len)))
reschedule = 1;
} else if (item->command == TODO_RESET) {
- if ((res = do_reset(r, item->arg, item->arg_len, opts)))
+ if ((res = do_reset(r, arg, item->arg_len, opts)))
reschedule = 1;
} else if (item->command == TODO_MERGE) {
if ((res = do_merge(r, item->commit,
- item->arg, item->arg_len,
+ arg, item->arg_len,
item->flags, opts)) < 0)
reschedule = 1;
else if (item->commit)
if (res > 0)
/* failed with merge conflicts */
return error_with_patch(r, item->commit,
- item->arg,
- item->arg_len, opts,
- res, 0);
+ arg, item->arg_len,
+ opts, res, 0);
} else if (!is_noop(item->command))
return error(_("unknown command %d"), item->command);
if (item->commit)
return error_with_patch(r,
item->commit,
- item->arg,
- item->arg_len, opts,
- res, 0);
+ arg, item->arg_len,
+ opts, res, 0);
}
todo_list->current++;
hook.in = open(rebase_path_rewritten_list(),
O_RDONLY);
hook.stdout_to_stderr = 1;
+ hook.trace2_hook_name = "post-rewrite";
argv_array_push(&hook.args, post_rewrite_hook);
argv_array_push(&hook.args, "rebase");
/* we don't care if this hook failed */
int has_footer;
strbuf_addstr(&sob, sign_off_header);
- strbuf_addstr(&sob, fmt_name(getenv("GIT_COMMITTER_NAME"),
- getenv("GIT_COMMITTER_EMAIL")));
+ strbuf_addstr(&sob, fmt_name(WANT_COMMITTER_IDENT));
strbuf_addch(&sob, '\n');
if (!ignore_footer)
}
static int make_script_with_merges(struct pretty_print_context *pp,
- struct rev_info *revs, FILE *out,
+ struct rev_info *revs, struct strbuf *out,
unsigned flags)
{
int keep_empty = flags & TODO_LIST_KEEP_EMPTY;
* gathering commits not yet shown, reversing the list on the fly,
* then outputting that list (labeling revisions as needed).
*/
- fprintf(out, "%s onto\n", cmd_label);
+ strbuf_addf(out, "%s onto\n", cmd_label);
for (iter = tips; iter; iter = iter->next) {
struct commit_list *list = NULL, *iter2;
entry = oidmap_get(&state.commit2label, &commit->object.oid);
if (entry)
- fprintf(out, "\n%c Branch %s\n", comment_line_char, entry->string);
+ strbuf_addf(out, "\n%c Branch %s\n", comment_line_char, entry->string);
else
- fprintf(out, "\n");
+ strbuf_addch(out, '\n');
while (oidset_contains(&interesting, &commit->object.oid) &&
!oidset_contains(&shown, &commit->object.oid)) {
}
if (!commit)
- fprintf(out, "%s %s\n", cmd_reset,
- rebase_cousins ? "onto" : "[new root]");
+ strbuf_addf(out, "%s %s\n", cmd_reset,
+ rebase_cousins ? "onto" : "[new root]");
else {
const char *to = NULL;
&state);
if (!to || !strcmp(to, "onto"))
- fprintf(out, "%s onto\n", cmd_reset);
+ strbuf_addf(out, "%s onto\n", cmd_reset);
else {
strbuf_reset(&oneline);
pretty_print_commit(pp, commit, &oneline);
- fprintf(out, "%s %s # %s\n",
- cmd_reset, to, oneline.buf);
+ strbuf_addf(out, "%s %s # %s\n",
+ cmd_reset, to, oneline.buf);
}
}
entry = oidmap_get(&commit2todo, oid);
/* only show if not already upstream */
if (entry)
- fprintf(out, "%s\n", entry->string);
+ strbuf_addf(out, "%s\n", entry->string);
entry = oidmap_get(&state.commit2label, oid);
if (entry)
- fprintf(out, "%s %s\n",
- cmd_label, entry->string);
+ strbuf_addf(out, "%s %s\n",
+ cmd_label, entry->string);
oidset_insert(&shown, oid);
}
return 0;
}
-int sequencer_make_script(struct repository *r, FILE *out,
- int argc, const char **argv,
- unsigned flags)
+int sequencer_make_script(struct repository *r, struct strbuf *out, int argc,
+ const char **argv, unsigned flags)
{
char *format = NULL;
struct pretty_print_context pp = {0};
- struct strbuf buf = STRBUF_INIT;
struct rev_info revs;
struct commit *commit;
int keep_empty = flags & TODO_LIST_KEEP_EMPTY;
if (!is_empty && (commit->object.flags & PATCHSAME))
continue;
- strbuf_reset(&buf);
if (!keep_empty && is_empty)
- strbuf_addf(&buf, "%c ", comment_line_char);
- strbuf_addf(&buf, "%s %s ", insn,
+ strbuf_addf(out, "%c ", comment_line_char);
+ strbuf_addf(out, "%s %s ", insn,
oid_to_hex(&commit->object.oid));
- pretty_print_commit(&pp, commit, &buf);
- strbuf_addch(&buf, '\n');
- fputs(buf.buf, out);
+ pretty_print_commit(&pp, commit, out);
+ strbuf_addch(out, '\n');
}
- strbuf_release(&buf);
return 0;
}
* Add commands after pick and (series of) squash/fixup commands
* in the todo list.
*/
-int sequencer_add_exec_commands(struct repository *r,
- const char *commands)
+void todo_list_add_exec_commands(struct todo_list *todo_list,
+ struct string_list *commands)
{
- const char *todo_file = rebase_path_todo();
- struct todo_list todo_list = TODO_LIST_INIT;
- struct strbuf *buf = &todo_list.buf;
- size_t offset = 0, commands_len = strlen(commands);
- int i, insert;
+ struct strbuf *buf = &todo_list->buf;
+ size_t base_offset = buf->len;
+ int i, insert, nr = 0, alloc = 0;
+ struct todo_item *items = NULL, *base_items = NULL;
- if (strbuf_read_file(&todo_list.buf, todo_file, 0) < 0)
- return error(_("could not read '%s'."), todo_file);
+ base_items = xcalloc(commands->nr, sizeof(struct todo_item));
+ for (i = 0; i < commands->nr; i++) {
+ size_t command_len = strlen(commands->items[i].string);
- if (parse_insn_buffer(r, todo_list.buf.buf, &todo_list)) {
- todo_list_release(&todo_list);
- return error(_("unusable todo list: '%s'"), todo_file);
+ strbuf_addstr(buf, commands->items[i].string);
+ strbuf_addch(buf, '\n');
+
+ base_items[i].command = TODO_EXEC;
+ base_items[i].offset_in_buf = base_offset;
+ base_items[i].arg_offset = base_offset + strlen("exec ");
+ base_items[i].arg_len = command_len - strlen("exec ");
+
+ base_offset += command_len + 1;
}
/*
* Insert <commands> after every pick. Here, fixup/squash chains
* are considered part of the pick, so we insert the commands *after*
* those chains if there are any.
+ *
+	 * As we insert the exec commands immediately after rearranging
+ * any fixups and before the user edits the list, a fixup chain
+ * can never contain comments (any comments are empty picks that
+ * have been commented out because the user did not specify
+ * --keep-empty). So, it is safe to insert an exec command
+ * without looking at the command following a comment.
*/
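+	/*
+	 * For example (illustration only), with commands = { "exec make" },
+	 * a list like
+	 *
+	 *   pick aaaaaaa one
+	 *   fixup bbbbbbb fixup! one
+	 *   pick ccccccc two
+	 *
+	 * would become
+	 *
+	 *   pick aaaaaaa one
+	 *   fixup bbbbbbb fixup! one
+	 *   exec make
+	 *   pick ccccccc two
+	 *   exec make
+	 */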
- insert = -1;
- for (i = 0; i < todo_list.nr; i++) {
- enum todo_command command = todo_list.items[i].command;
-
- if (insert >= 0) {
- /* skip fixup/squash chains */
- if (command == TODO_COMMENT)
- continue;
- else if (is_fixup(command)) {
- insert = i + 1;
- continue;
- }
- strbuf_insert(buf,
- todo_list.items[insert].offset_in_buf +
- offset, commands, commands_len);
- offset += commands_len;
- insert = -1;
+ insert = 0;
+ for (i = 0; i < todo_list->nr; i++) {
+ enum todo_command command = todo_list->items[i].command;
+ if (insert && !is_fixup(command)) {
+ ALLOC_GROW(items, nr + commands->nr, alloc);
+ COPY_ARRAY(items + nr, base_items, commands->nr);
+ nr += commands->nr;
+
+ insert = 0;
}
+ ALLOC_GROW(items, nr + 1, alloc);
+ items[nr++] = todo_list->items[i];
+
if (command == TODO_PICK || command == TODO_MERGE)
- insert = i + 1;
+ insert = 1;
}
/* insert or append final <commands> */
- if (insert >= 0 && insert < todo_list.nr)
- strbuf_insert(buf, todo_list.items[insert].offset_in_buf +
- offset, commands, commands_len);
- else if (insert >= 0 || !offset)
- strbuf_add(buf, commands, commands_len);
+ if (insert || nr == todo_list->nr) {
+ ALLOC_GROW(items, nr + commands->nr, alloc);
+ COPY_ARRAY(items + nr, base_items, commands->nr);
+ nr += commands->nr;
+ }
- i = write_message(buf->buf, buf->len, todo_file, 0);
- todo_list_release(&todo_list);
- return i;
+ free(base_items);
+ FREE_AND_NULL(todo_list->items);
+ todo_list->items = items;
+ todo_list->nr = nr;
+ todo_list->alloc = alloc;
}
-int transform_todos(struct repository *r, unsigned flags)
+static void todo_list_to_strbuf(struct repository *r, struct todo_list *todo_list,
+ struct strbuf *buf, int num, unsigned flags)
{
- const char *todo_file = rebase_path_todo();
- struct todo_list todo_list = TODO_LIST_INIT;
- struct strbuf buf = STRBUF_INIT;
struct todo_item *item;
- int i;
+ int i, max = todo_list->nr;
- if (strbuf_read_file(&todo_list.buf, todo_file, 0) < 0)
- return error(_("could not read '%s'."), todo_file);
+ if (num > 0 && num < max)
+ max = num;
- if (parse_insn_buffer(r, todo_list.buf.buf, &todo_list)) {
- todo_list_release(&todo_list);
- return error(_("unusable todo list: '%s'"), todo_file);
- }
-
- for (item = todo_list.items, i = 0; i < todo_list.nr; i++, item++) {
+ for (item = todo_list->items, i = 0; i < max; i++, item++) {
/* if the item is not a command write it and continue */
if (item->command >= TODO_COMMENT) {
- strbuf_addf(&buf, "%.*s\n", item->arg_len, item->arg);
+ strbuf_addf(buf, "%.*s\n", item->arg_len,
+ todo_item_get_arg(todo_list, item));
continue;
}
/* add command to the buffer */
if (flags & TODO_LIST_ABBREVIATE_CMDS)
- strbuf_addch(&buf, command_to_char(item->command));
+ strbuf_addch(buf, command_to_char(item->command));
else
- strbuf_addstr(&buf, command_to_string(item->command));
+ strbuf_addstr(buf, command_to_string(item->command));
/* add commit id */
if (item->commit) {
if (item->command == TODO_MERGE) {
if (item->flags & TODO_EDIT_MERGE_MSG)
- strbuf_addstr(&buf, " -c");
+ strbuf_addstr(buf, " -c");
else
- strbuf_addstr(&buf, " -C");
+ strbuf_addstr(buf, " -C");
}
- strbuf_addf(&buf, " %s", oid);
+ strbuf_addf(buf, " %s", oid);
}
/* add all the rest */
if (!item->arg_len)
- strbuf_addch(&buf, '\n');
+ strbuf_addch(buf, '\n');
else
- strbuf_addf(&buf, " %.*s\n", item->arg_len, item->arg);
+ strbuf_addf(buf, " %.*s\n", item->arg_len,
+ todo_item_get_arg(todo_list, item));
}
-
- i = write_message(buf.buf, buf.len, todo_file, 0);
- todo_list_release(&todo_list);
- return i;
}
-enum missing_commit_check_level get_missing_commit_check_level(void)
+int todo_list_write_to_file(struct repository *r, struct todo_list *todo_list,
+ const char *file, const char *shortrevisions,
+ const char *shortonto, int num, unsigned flags)
{
- const char *value;
+ int res;
+ struct strbuf buf = STRBUF_INIT;
- if (git_config_get_value("rebase.missingcommitscheck", &value) ||
- !strcasecmp("ignore", value))
- return MISSING_COMMIT_CHECK_IGNORE;
- if (!strcasecmp("warn", value))
- return MISSING_COMMIT_CHECK_WARN;
- if (!strcasecmp("error", value))
- return MISSING_COMMIT_CHECK_ERROR;
- warning(_("unrecognized setting %s for option "
- "rebase.missingCommitsCheck. Ignoring."), value);
- return MISSING_COMMIT_CHECK_IGNORE;
-}
+ todo_list_to_strbuf(r, todo_list, &buf, num, flags);
+ if (flags & TODO_LIST_APPEND_TODO_HELP)
+ append_todo_help(flags & TODO_LIST_KEEP_EMPTY, count_commands(todo_list),
+ shortrevisions, shortonto, &buf);
-define_commit_slab(commit_seen, unsigned char);
-/*
- * Check if the user dropped some commits by mistake
- * Behaviour determined by rebase.missingCommitsCheck.
- * Check if there is an unrecognized command or a
- * bad SHA-1 in a command.
- */
-int check_todo_list(struct repository *r)
-{
- enum missing_commit_check_level check_level = get_missing_commit_check_level();
- struct strbuf todo_file = STRBUF_INIT;
- struct todo_list todo_list = TODO_LIST_INIT;
- struct strbuf missing = STRBUF_INIT;
- int advise_to_edit_todo = 0, res = 0, i;
- struct commit_seen commit_seen;
+ res = write_message(buf.buf, buf.len, file, 0);
+ strbuf_release(&buf);
- init_commit_seen(&commit_seen);
+ return res;
+}
- strbuf_addstr(&todo_file, rebase_path_todo());
- if (strbuf_read_file_or_whine(&todo_list.buf, todo_file.buf) < 0) {
- res = -1;
- goto leave_check;
- }
- advise_to_edit_todo = res =
- parse_insn_buffer(r, todo_list.buf.buf, &todo_list);
+static const char edit_todo_list_advice[] =
+N_("You can fix this with 'git rebase --edit-todo' "
+"and then run 'git rebase --continue'.\n"
+"Or you can abort the rebase with 'git rebase"
+" --abort'.\n");
- if (res || check_level == MISSING_COMMIT_CHECK_IGNORE)
- goto leave_check;
+int check_todo_list_from_file(struct repository *r)
+{
+ struct todo_list old_todo = TODO_LIST_INIT, new_todo = TODO_LIST_INIT;
+ int res = 0;
- /* Mark the commits in git-rebase-todo as seen */
- for (i = 0; i < todo_list.nr; i++) {
- struct commit *commit = todo_list.items[i].commit;
- if (commit)
- *commit_seen_at(&commit_seen, commit) = 1;
+ if (strbuf_read_file_or_whine(&new_todo.buf, rebase_path_todo()) < 0) {
+ res = -1;
+ goto out;
}
- todo_list_release(&todo_list);
- strbuf_addstr(&todo_file, ".backup");
- if (strbuf_read_file_or_whine(&todo_list.buf, todo_file.buf) < 0) {
+ if (strbuf_read_file_or_whine(&old_todo.buf, rebase_path_todo_backup()) < 0) {
res = -1;
- goto leave_check;
- }
- strbuf_release(&todo_file);
- res = !!parse_insn_buffer(r, todo_list.buf.buf, &todo_list);
-
- /* Find commits in git-rebase-todo.backup yet unseen */
- for (i = todo_list.nr - 1; i >= 0; i--) {
- struct todo_item *item = todo_list.items + i;
- struct commit *commit = item->commit;
- if (commit && !*commit_seen_at(&commit_seen, commit)) {
- strbuf_addf(&missing, " - %s %.*s\n",
- short_commit_name(commit),
- item->arg_len, item->arg);
- *commit_seen_at(&commit_seen, commit) = 1;
- }
+ goto out;
}
- /* Warn about missing commits */
- if (!missing.len)
- goto leave_check;
-
- if (check_level == MISSING_COMMIT_CHECK_ERROR)
- advise_to_edit_todo = res = 1;
-
- fprintf(stderr,
- _("Warning: some commits may have been dropped accidentally.\n"
- "Dropped commits (newer to older):\n"));
-
- /* Make the list user-friendly and display */
- fputs(missing.buf, stderr);
- strbuf_release(&missing);
-
- fprintf(stderr, _("To avoid this message, use \"drop\" to "
- "explicitly remove a commit.\n\n"
- "Use 'git config rebase.missingCommitsCheck' to change "
- "the level of warnings.\n"
- "The possible behaviours are: ignore, warn, error.\n\n"));
-
-leave_check:
- clear_commit_seen(&commit_seen);
- strbuf_release(&todo_file);
- todo_list_release(&todo_list);
-
- if (advise_to_edit_todo)
- fprintf(stderr,
- _("You can fix this with 'git rebase --edit-todo' "
- "and then run 'git rebase --continue'.\n"
- "Or you can abort the rebase with 'git rebase"
- " --abort'.\n"));
+ res = todo_list_parse_insn_buffer(r, old_todo.buf.buf, &old_todo);
+ if (!res)
+ res = todo_list_parse_insn_buffer(r, new_todo.buf.buf, &new_todo);
+ if (!res)
+ res = todo_list_check(&old_todo, &new_todo);
+ if (res)
+ fprintf(stderr, _(edit_todo_list_advice));
+out:
+ todo_list_release(&old_todo);
+ todo_list_release(&new_todo);
return res;
}
-static int rewrite_file(const char *path, const char *buf, size_t len)
-{
- int rc = 0;
- int fd = open(path, O_WRONLY | O_TRUNC);
- if (fd < 0)
- return error_errno(_("could not open '%s' for writing"), path);
- if (write_in_full(fd, buf, len) < 0)
- rc = error_errno(_("could not write to '%s'"), path);
- if (close(fd) && !rc)
- rc = error_errno(_("could not close '%s'"), path);
- return rc;
-}
-
/* skip picking commits whose parents are unchanged */
-static int skip_unnecessary_picks(struct repository *r, struct object_id *output_oid)
+static int skip_unnecessary_picks(struct repository *r,
+ struct todo_list *todo_list,
+ struct object_id *base_oid)
{
- const char *todo_file = rebase_path_todo();
- struct strbuf buf = STRBUF_INIT;
- struct todo_list todo_list = TODO_LIST_INIT;
struct object_id *parent_oid;
- int fd, i;
-
- if (!read_oneliner(&buf, rebase_path_onto(), 0))
- return error(_("could not read 'onto'"));
- if (get_oid(buf.buf, output_oid)) {
- strbuf_release(&buf);
- return error(_("need a HEAD to fixup"));
- }
- strbuf_release(&buf);
-
- if (strbuf_read_file_or_whine(&todo_list.buf, todo_file) < 0)
- return -1;
- if (parse_insn_buffer(r, todo_list.buf.buf, &todo_list) < 0) {
- todo_list_release(&todo_list);
- return -1;
- }
+ int i;
- for (i = 0; i < todo_list.nr; i++) {
- struct todo_item *item = todo_list.items + i;
+ for (i = 0; i < todo_list->nr; i++) {
+ struct todo_item *item = todo_list->items + i;
if (item->command >= TODO_NOOP)
continue;
if (item->command != TODO_PICK)
break;
if (parse_commit(item->commit)) {
- todo_list_release(&todo_list);
return error(_("could not parse commit '%s'"),
oid_to_hex(&item->commit->object.oid));
}
if (item->commit->parents->next)
break; /* merge commit */
parent_oid = &item->commit->parents->item->object.oid;
- if (!oideq(parent_oid, output_oid))
+ if (!oideq(parent_oid, base_oid))
break;
- oidcpy(output_oid, &item->commit->object.oid);
+ oidcpy(base_oid, &item->commit->object.oid);
}
if (i > 0) {
- int offset = get_item_line_offset(&todo_list, i);
const char *done_path = rebase_path_done();
- fd = open(done_path, O_CREAT | O_WRONLY | O_APPEND, 0666);
- if (fd < 0) {
- error_errno(_("could not open '%s' for writing"),
- done_path);
- todo_list_release(&todo_list);
- return -1;
- }
- if (write_in_full(fd, todo_list.buf.buf, offset) < 0) {
+ if (todo_list_write_to_file(r, todo_list, done_path, NULL, NULL, i, 0)) {
error_errno(_("could not write to '%s'"), done_path);
- todo_list_release(&todo_list);
- close(fd);
return -1;
}
- close(fd);
- if (rewrite_file(rebase_path_todo(), todo_list.buf.buf + offset,
- todo_list.buf.len - offset) < 0) {
- todo_list_release(&todo_list);
- return -1;
- }
+ MOVE_ARRAY(todo_list->items, todo_list->items + i, todo_list->nr - i);
+ todo_list->nr -= i;
+ todo_list->current = 0;
- todo_list.current = i;
- if (is_fixup(peek_command(&todo_list, 0)))
- record_in_rewritten(output_oid, peek_command(&todo_list, 0));
+ if (is_fixup(peek_command(todo_list, 0)))
+ record_in_rewritten(base_oid, peek_command(todo_list, 0));
}
- todo_list_release(&todo_list);
-
return 0;
}
int complete_action(struct repository *r, struct replay_opts *opts, unsigned flags,
const char *shortrevisions, const char *onto_name,
- const char *onto, const char *orig_head, const char *cmd,
- unsigned autosquash)
+ const char *onto, const char *orig_head, struct string_list *commands,
+ unsigned autosquash, struct todo_list *todo_list)
{
const char *shortonto, *todo_file = rebase_path_todo();
- struct todo_list todo_list = TODO_LIST_INIT;
- struct strbuf *buf = &(todo_list.buf);
+ struct todo_list new_todo = TODO_LIST_INIT;
+ struct strbuf *buf = &todo_list->buf;
struct object_id oid;
- struct stat st;
+ int res;
get_oid(onto, &oid);
shortonto = find_unique_abbrev(&oid, DEFAULT_ABBREV);
- if (!lstat(todo_file, &st) && st.st_size == 0 &&
- write_message("noop\n", 5, todo_file, 0))
- return -1;
+ if (buf->len == 0) {
+ struct todo_item *item = append_new_todo(todo_list);
+ item->command = TODO_NOOP;
+ item->commit = NULL;
+ item->arg_len = item->arg_offset = item->flags = item->offset_in_buf = 0;
+ }
- if (autosquash && rearrange_squash(r))
+ if (autosquash && todo_list_rearrange_squash(todo_list))
return -1;
- if (cmd && *cmd)
- sequencer_add_exec_commands(r, cmd);
-
- if (strbuf_read_file(buf, todo_file, 0) < 0)
- return error_errno(_("could not read '%s'."), todo_file);
-
- if (parse_insn_buffer(r, buf->buf, &todo_list)) {
- todo_list_release(&todo_list);
- return error(_("unusable todo list: '%s'"), todo_file);
- }
+ if (commands->nr)
+ todo_list_add_exec_commands(todo_list, commands);
- if (count_commands(&todo_list) == 0) {
+ if (count_commands(todo_list) == 0) {
apply_autostash(opts);
sequencer_remove_state(opts);
- todo_list_release(&todo_list);
return error(_("nothing to do"));
}
- strbuf_addch(buf, '\n');
- strbuf_commented_addf(buf, Q_("Rebase %s onto %s (%d command)",
- "Rebase %s onto %s (%d commands)",
- count_commands(&todo_list)),
- shortrevisions, shortonto, count_commands(&todo_list));
- append_todo_help(0, flags & TODO_LIST_KEEP_EMPTY, buf);
-
- if (write_message(buf->buf, buf->len, todo_file, 0)) {
- todo_list_release(&todo_list);
+ res = edit_todo_list(r, todo_list, &new_todo, shortrevisions,
+ shortonto, flags);
+ if (res == -1)
return -1;
- }
-
- if (copy_file(rebase_path_todo_backup(), todo_file, 0666))
- return error(_("could not copy '%s' to '%s'."), todo_file,
- rebase_path_todo_backup());
-
- if (transform_todos(r, flags | TODO_LIST_SHORTEN_IDS))
- return error(_("could not transform the todo list"));
-
- strbuf_reset(buf);
-
- if (launch_sequence_editor(todo_file, buf, NULL)) {
+ else if (res == -2) {
apply_autostash(opts);
sequencer_remove_state(opts);
- todo_list_release(&todo_list);
return -1;
- }
-
- strbuf_stripspace(buf, 1);
- if (buf->len == 0) {
+ } else if (res == -3) {
apply_autostash(opts);
sequencer_remove_state(opts);
- todo_list_release(&todo_list);
+ todo_list_release(&new_todo);
return error(_("nothing to do"));
}
- todo_list_release(&todo_list);
-
- if (check_todo_list(r)) {
+ if (todo_list_parse_insn_buffer(r, new_todo.buf.buf, &new_todo) ||
+ todo_list_check(todo_list, &new_todo)) {
+ fprintf(stderr, _(edit_todo_list_advice));
checkout_onto(opts, onto_name, onto, orig_head);
+ todo_list_release(&new_todo);
+
return -1;
}
- if (transform_todos(r, flags & ~(TODO_LIST_SHORTEN_IDS)))
- return error(_("could not transform the todo list"));
-
- if (opts->allow_ff && skip_unnecessary_picks(r, &oid))
+ if (opts->allow_ff && skip_unnecessary_picks(r, &new_todo, &oid)) {
+ todo_list_release(&new_todo);
return error(_("could not skip unnecessary pick commands"));
+ }
+
+ if (todo_list_write_to_file(r, &new_todo, todo_file, NULL, NULL, -1,
+ flags & ~(TODO_LIST_SHORTEN_IDS))) {
+ todo_list_release(&new_todo);
+ return error_errno(_("could not write '%s'"), todo_file);
+ }
+
+ todo_list_release(&new_todo);
if (checkout_onto(opts, onto_name, oid_to_hex(&oid), orig_head))
return -1;
* message will have to be retrieved from the commit (as the oneline in the
* script cannot be trusted) in order to normalize the autosquash arrangement.
*/
-int rearrange_squash(struct repository *r)
+int todo_list_rearrange_squash(struct todo_list *todo_list)
{
- const char *todo_file = rebase_path_todo();
- struct todo_list todo_list = TODO_LIST_INIT;
struct hashmap subject2item;
- int res = 0, rearranged = 0, *next, *tail, i;
+ int rearranged = 0, *next, *tail, i, nr = 0, alloc = 0;
char **subjects;
struct commit_todo_item commit_todo;
-
- if (strbuf_read_file_or_whine(&todo_list.buf, todo_file) < 0)
- return -1;
- if (parse_insn_buffer(r, todo_list.buf.buf, &todo_list) < 0) {
- todo_list_release(&todo_list);
- return -1;
- }
+ struct todo_item *items = NULL;
init_commit_todo_item(&commit_todo);
/*
* be moved to appear after the i'th.
*/
hashmap_init(&subject2item, (hashmap_cmp_fn) subject2item_cmp,
- NULL, todo_list.nr);
- ALLOC_ARRAY(next, todo_list.nr);
- ALLOC_ARRAY(tail, todo_list.nr);
- ALLOC_ARRAY(subjects, todo_list.nr);
- for (i = 0; i < todo_list.nr; i++) {
+ NULL, todo_list->nr);
+ ALLOC_ARRAY(next, todo_list->nr);
+ ALLOC_ARRAY(tail, todo_list->nr);
+ ALLOC_ARRAY(subjects, todo_list->nr);
+ for (i = 0; i < todo_list->nr; i++) {
struct strbuf buf = STRBUF_INIT;
- struct todo_item *item = todo_list.items + i;
+ struct todo_item *item = todo_list->items + i;
const char *commit_buffer, *subject, *p;
size_t subject_len;
int i2 = -1;
}
if (is_fixup(item->command)) {
- todo_list_release(&todo_list);
clear_commit_todo_item(&commit_todo);
return error(_("the script was already rearranged."));
}
*commit_todo_item_at(&commit_todo, commit2))
/* found by commit name */
i2 = *commit_todo_item_at(&commit_todo, commit2)
- - todo_list.items;
+ - todo_list->items;
else {
/* copy can be a prefix of the commit subject */
for (i2 = 0; i2 < i; i2++)
}
if (i2 >= 0) {
rearranged = 1;
- todo_list.items[i].command =
+ todo_list->items[i].command =
starts_with(subject, "fixup!") ?
TODO_FIXUP : TODO_SQUASH;
if (next[i2] < 0)
}
if (rearranged) {
- struct strbuf buf = STRBUF_INIT;
-
- for (i = 0; i < todo_list.nr; i++) {
- enum todo_command command = todo_list.items[i].command;
+ for (i = 0; i < todo_list->nr; i++) {
+ enum todo_command command = todo_list->items[i].command;
int cur = i;
/*
continue;
while (cur >= 0) {
- const char *bol =
- get_item_line(&todo_list, cur);
- const char *eol =
- get_item_line(&todo_list, cur + 1);
-
- /* replace 'pick', by 'fixup' or 'squash' */
- command = todo_list.items[cur].command;
- if (is_fixup(command)) {
- strbuf_addstr(&buf,
- todo_command_info[command].str);
- bol += strcspn(bol, " \t");
- }
-
- strbuf_add(&buf, bol, eol - bol);
-
+ ALLOC_GROW(items, nr + 1, alloc);
+ items[nr++] = todo_list->items[cur];
cur = next[cur];
}
}
- res = rewrite_file(todo_file, buf.buf, buf.len);
- strbuf_release(&buf);
+ FREE_AND_NULL(todo_list->items);
+ todo_list->items = items;
+ todo_list->nr = nr;
+ todo_list->alloc = alloc;
}
free(next);
free(tail);
- for (i = 0; i < todo_list.nr; i++)
+ for (i = 0; i < todo_list->nr; i++)
free(subjects[i]);
free(subjects);
hashmap_free(&subject2item, 1);
- todo_list_release(&todo_list);
clear_commit_todo_item(&commit_todo);
- return res;
+
+ return 0;
}
const char *git_path_commit_editmsg(void);
const char *git_path_seq_dir(void);
const char *rebase_path_todo(void);
+const char *rebase_path_todo_backup(void);
#define APPEND_SIGNOFF_DEDUP (1u << 0)
};
#define REPLAY_OPTS_INIT { .action = -1, .current_fixups = STRBUF_INIT }
-enum missing_commit_check_level {
- MISSING_COMMIT_CHECK_IGNORE = 0,
- MISSING_COMMIT_CHECK_WARN,
- MISSING_COMMIT_CHECK_ERROR
+/*
+ * Note that ordering matters in this enum. Not only must it match the mapping
+ * of todo_command_info (in sequencer.c), it is also divided into several
+ * sections that matter. When adding new commands, make sure you add it in the
+ * right section.
+ */
+enum todo_command {
+ /* commands that handle commits */
+ TODO_PICK = 0,
+ TODO_REVERT,
+ TODO_EDIT,
+ TODO_REWORD,
+ TODO_FIXUP,
+ TODO_SQUASH,
+ /* commands that do something else than handling a single commit */
+ TODO_EXEC,
+ TODO_BREAK,
+ TODO_LABEL,
+ TODO_RESET,
+ TODO_MERGE,
+ /* commands that do nothing but are counted for reporting progress */
+ TODO_NOOP,
+ TODO_DROP,
+ /* comments (not counted for reporting progress) */
+ TODO_COMMENT
};
-int write_message(const void *buf, size_t len, const char *filename,
- int append_eol);
+struct todo_item {
+ enum todo_command command;
+ struct commit *commit;
+ unsigned int flags;
+ int arg_len;
+ /* The offset of the command and its argument in the strbuf */
+ size_t offset_in_buf, arg_offset;
+};
+
+struct todo_list {
+ struct strbuf buf;
+ struct todo_item *items;
+ int nr, alloc, current;
+ int done_nr, total_nr;
+ struct stat_data stat;
+};
+
+#define TODO_LIST_INIT { STRBUF_INIT }
+
+int todo_list_parse_insn_buffer(struct repository *r, char *buf,
+ struct todo_list *todo_list);
+int todo_list_write_to_file(struct repository *r, struct todo_list *todo_list,
+ const char *file, const char *shortrevisions,
+ const char *shortonto, int num, unsigned flags);
+void todo_list_release(struct todo_list *todo_list);
+const char *todo_item_get_arg(struct todo_list *todo_list,
+ struct todo_item *item);
/* Call this to setup defaults before parsing command line options */
void sequencer_init_config(struct replay_opts *opts);
* commits should be rebased onto the new base, this flag needs to be passed.
*/
#define TODO_LIST_REBASE_COUSINS (1U << 4)
-int sequencer_make_script(struct repository *repo, FILE *out,
- int argc, const char **argv,
- unsigned flags);
-
-int sequencer_add_exec_commands(struct repository *r, const char *command);
-int transform_todos(struct repository *r, unsigned flags);
-enum missing_commit_check_level get_missing_commit_check_level(void);
-int check_todo_list(struct repository *r);
+#define TODO_LIST_APPEND_TODO_HELP (1U << 5)
+
+int sequencer_make_script(struct repository *r, struct strbuf *out, int argc,
+ const char **argv, unsigned flags);
+
+void todo_list_add_exec_commands(struct todo_list *todo_list,
+ struct string_list *commands);
+int check_todo_list_from_file(struct repository *r);
int complete_action(struct repository *r, struct replay_opts *opts, unsigned flags,
const char *shortrevisions, const char *onto_name,
- const char *onto, const char *orig_head, const char *cmd,
- unsigned autosquash);
-int rearrange_squash(struct repository *r);
+ const char *onto, const char *orig_head, struct string_list *commands,
+ unsigned autosquash, struct todo_list *todo_list);
+int todo_list_rearrange_squash(struct todo_list *todo_list);
/*
* Append a signoff to the commit message in "msgbuf". The ignore_footer
return for_each_ref(add_info_ref, fp);
}
-static int update_info_refs(int force)
+static int update_info_refs(void)
{
char *path = git_pathdup("info/refs");
int ret = update_info_file(path, generate_info_refs);
struct packed_git *p;
int old_num;
int new_num;
- int nr_alloc;
} **info;
static int num_pack;
-static const char *objdir;
-static int objdirlen;
static struct pack_info *find_pack_by_name(const char *name)
{
int i;
for (i = 0; i < num_pack; i++) {
struct packed_git *p = info[i]->p;
- /* skip "/pack/" after ".git/objects" */
- if (!strcmp(p->pack_name + objdirlen + 6, name))
+ if (!strcmp(pack_basename(p), name))
return info[i];
}
return NULL;
/* Returns non-zero when we detect that the info in the
* old file is useless.
*/
-static int parse_pack_def(const char *line, int old_cnt)
+static int parse_pack_def(const char *packname, int old_cnt)
{
- struct pack_info *i = find_pack_by_name(line + 2);
+ struct pack_info *i = find_pack_by_name(packname);
if (i) {
i->old_num = old_cnt;
return 0;
static int read_pack_info_file(const char *infofile)
{
FILE *fp;
- char line[1000];
+ struct strbuf line = STRBUF_INIT;
int old_cnt = 0;
+ int stale = 1;
fp = fopen_or_warn(infofile, "r");
if (!fp)
return 1; /* nonexistent is not an error. */
- while (fgets(line, sizeof(line), fp)) {
- int len = strlen(line);
- if (len && line[len-1] == '\n')
- line[--len] = 0;
+ while (strbuf_getline(&line, fp) != EOF) {
+ const char *arg;
- if (!len)
+ if (!line.len)
continue;
- switch (line[0]) {
- case 'P': /* P name */
- if (parse_pack_def(line, old_cnt++))
+ if (skip_prefix(line.buf, "P ", &arg)) {
+ /* P name */
+ if (parse_pack_def(arg, old_cnt++))
goto out_stale;
- break;
- case 'D': /* we used to emit D but that was misguided. */
- case 'T': /* we used to emit T but nobody uses it. */
+ } else if (line.buf[0] == 'D') {
+ /* we used to emit D but that was misguided. */
goto out_stale;
- default:
- error("unrecognized: %s", line);
- break;
+ } else if (line.buf[0] == 'T') {
+ /* we used to emit T but nobody uses it. */
+ goto out_stale;
+ } else {
+ error("unrecognized: %s", line.buf);
}
}
- fclose(fp);
- return 0;
+ stale = 0;
+
out_stale:
+ strbuf_release(&line);
fclose(fp);
- return 1;
+ return stale;
}
static int compare_info(const void *a_, const void *b_)
int stale;
int i = 0;
- objdir = get_object_directory();
- objdirlen = strlen(objdir);
-
for (p = get_all_packs(the_repository); p; p = p->next) {
/* we ignore things on alternate path since they are
* not available to the pullers in general.
for (i = 0, p = get_all_packs(the_repository); p; p = p->next) {
if (!p->pack_local)
continue;
+ assert(i < num_pack);
info[i] = xcalloc(1, sizeof(struct pack_info));
info[i]->p = p;
info[i]->old_num = -1;
{
int i;
for (i = 0; i < num_pack; i++) {
- if (fprintf(fp, "P %s\n", info[i]->p->pack_name + objdirlen + 6) < 0)
+ if (fprintf(fp, "P %s\n", pack_basename(info[i]->p)) < 0)
return -1;
}
if (fputc('\n', fp) == EOF)
*/
int errs = 0;
- errs = errs | update_info_refs(force);
+ errs = errs | update_info_refs();
errs = errs | update_info_packs(force);
/* remove leftover rev-cache file if there is any */
} else if (strcmp(var, "core.worktree") == 0) {
if (!value)
return config_error_nonbool(var);
+ free(data->work_tree);
data->work_tree = xstrdup(value);
}
return 0;
}
repository_format_precious_objects = candidate->precious_objects;
- repository_format_partial_clone = candidate->partial_clone;
+ repository_format_partial_clone = xstrdup_or_null(candidate->partial_clone);
repository_format_worktree_config = candidate->worktree_config;
string_list_clear(&candidate->unknown_extensions, 0);
}
if (candidate->work_tree) {
free(git_work_tree_cfg);
- git_work_tree_cfg = candidate->work_tree;
+ git_work_tree_cfg = xstrdup(candidate->work_tree);
inside_work_tree = -1;
}
- } else {
- free(candidate->work_tree);
}
return 0;
}
+static void init_repository_format(struct repository_format *format)
+{
+ const struct repository_format fresh = REPOSITORY_FORMAT_INIT;
+
+ memcpy(format, &fresh, sizeof(fresh));
+}
+
int read_repository_format(struct repository_format *format, const char *path)
{
- memset(format, 0, sizeof(*format));
- format->version = -1;
- format->is_bare = -1;
- format->hash_algo = GIT_HASH_SHA1;
- string_list_init(&format->unknown_extensions, 1);
+ clear_repository_format(format);
git_config_from_file(check_repo_format, path, format);
+ if (format->version == -1)
+ clear_repository_format(format);
return format->version;
}
+void clear_repository_format(struct repository_format *format)
+{
+ string_list_clear(&format->unknown_extensions, 0);
+ free(format->work_tree);
+ free(format->partial_clone);
+ init_repository_format(format);
+}
+
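
As a rough sketch only (not from the patch; the config path is a placeholder and the return-value conventions are assumptions based on the callers shown here), the intended lifecycle of the new helpers is: initialize with REPOSITORY_FORMAT_INIT, read, verify, then clear to release the strings the struct now owns:

	static void demo_repo_format(const char *config_path)
	{
		struct repository_format fmt = REPOSITORY_FORMAT_INIT;
		struct strbuf err = STRBUF_INIT;

		if (read_repository_format(&fmt, config_path) >= 0 &&
		    verify_repository_format(&fmt, &err) < 0)
			warning("%s", err.buf);

		strbuf_release(&err);
		clear_repository_format(&fmt);	/* frees work_tree, partial_clone, ... */
	}
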
int verify_repository_format(const struct repository_format *format,
struct strbuf *err)
{
struct strbuf dir = STRBUF_INIT, err = STRBUF_INIT;
size_t gitdir_offset = gitdir->len, cwd_len;
size_t commondir_offset = commondir->len;
- struct repository_format candidate;
+ struct repository_format candidate = REPOSITORY_FORMAT_INIT;
if (strbuf_getcwd(&dir))
return -1;
strbuf_release(&err);
strbuf_setlen(commondir, commondir_offset);
strbuf_setlen(gitdir, gitdir_offset);
+ clear_repository_format(&candidate);
return -1;
}
+ clear_repository_format(&candidate);
return 0;
}
static struct strbuf cwd = STRBUF_INIT;
struct strbuf dir = STRBUF_INIT, gitdir = STRBUF_INIT;
const char *prefix = NULL;
- struct repository_format repo_fmt;
+ struct repository_format repo_fmt = REPOSITORY_FORMAT_INIT;
/*
* We may have read an incomplete configuration before
strbuf_release(&dir);
strbuf_release(&gitdir);
+ clear_repository_format(&repo_fmt);
return prefix;
}
void check_repository_format(void)
{
- struct repository_format repo_fmt;
+ struct repository_format repo_fmt = REPOSITORY_FORMAT_INIT;
check_repository_format_gently(get_git_dir(), &repo_fmt, NULL);
startup_info->have_repository = 1;
+ clear_repository_format(&repo_fmt);
}
/*
*/
#include "git-compat-util.h"
+#include "trace2.h"
/* Substitution of environment variables in shell format strings.
Copyright (C) 2003-2007 Free Software Foundation, Inc.
/* Default values for command line options. */
/* unsigned short int show_variables = 0; */
+ trace2_cmd_name("sh-i18n--envsubst");
+
switch (argc)
{
case 1:
return GIT_HASH_UNKNOWN;
}
+int hash_algo_by_length(int len)
+{
+ int i;
+ for (i = 1; i < GIT_HASH_NALGOS; i++)
+ if (len == hash_algos[i].rawsz)
+ return i;
+ return GIT_HASH_UNKNOWN;
+}
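
A small, hypothetical use of the new lookup (not part of the patch): rejecting a binary object id whose length matches no known algorithm.

	static int algo_for_raw_length(int len)
	{
		int algo = hash_algo_by_length(len);

		if (algo == GIT_HASH_UNKNOWN)
			die("unrecognized raw hash length: %d", len);
		return algo;	/* index into hash_algos[], e.g. GIT_HASH_SHA1 for 20 */
	}
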
/*
* This is meant to hold a *small* number of objects that you would
/* Check if it is a missing object */
if (fetch_if_missing && repository_format_partial_clone &&
- !already_retried && r == the_repository) {
+ !already_retried && r == the_repository &&
+ !(flags & OBJECT_INFO_FOR_PREFETCH)) {
/*
* TODO Investigate having fetch_object() return
* TODO error/success and stopping the music here.
find_short_packed_object(&ds);
status = finish_object_disambiguation(&ds, oid);
+ /*
+ * If we didn't find it, do the usual reprepare() slow-path,
+ * since the object may have recently been added to the repository
+ * or migrated from loose to packed.
+ */
+ if (status == MISSING_OBJECT) {
+ reprepare_packed_git(the_repository);
+ find_short_object_filename(&ds);
+ find_short_packed_object(&ds);
+ status = finish_object_disambiguation(&ds, oid);
+ }
+
if (!quietly && (status == SHORT_NAME_AMBIGUOUS)) {
struct oid_array collect = OID_ARRAY_INIT;
return get_oid_with_context(the_repository, name, 0, oid, &unused);
}
+/*
+ * This returns a non-zero value if the string (built using printf
+ * format and the given arguments) is not a valid object.
+ */
+int get_oidf(struct object_id *oid, const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+ struct strbuf sb = STRBUF_INIT;
+
+ va_start(ap, fmt);
+ strbuf_vaddf(&sb, fmt, ap);
+ va_end(ap);
+
+ ret = get_oid(sb.buf, oid);
+ strbuf_release(&sb);
+
+ return ret;
+}
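
A usage sketch (not in the patch); `rev` and `path` are illustrative parameters and "<rev>:<path>" is the usual object-name notation:

	static void demo_get_oidf(const char *rev, const char *path)
	{
		struct object_id oid;

		if (get_oidf(&oid, "%s:%s", rev, path))
			die("'%s:%s' does not name a valid object", rev, path);
	}
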
/*
* Many callers know that the user meant to name a commit-ish by
-Subproject commit 232357eb2ea0397388254a4b188333a227bf5b10
+Subproject commit 16033998da4b273aebd92c84b1e1b12e4aaf7009
#endif
/*ENDIANNESS SELECTION*/
+#ifndef SHA1DC_FORCE_ALIGNED_ACCESS
#if defined(SHA1DC_FORCE_UNALIGNED_ACCESS) || defined(SHA1DC_ON_INTEL_LIKE_PROCESSOR)
#define SHA1DC_ALLOW_UNALIGNED_ACCESS
-#endif /*UNALIGNMENT DETECTION*/
-
+#endif /*UNALIGNED ACCESS DETECTION*/
+#endif /*FORCE ALIGNED ACCESS*/
#define rotate_right(x,n) (((x)>>(n))|((x)<<(32-(n))))
#define rotate_left(x,n) (((x)<<(n))|((x)>>(32-(n))))
ewah_each_bit(si->replace_bitmap, replace_entry, istate);
ewah_each_bit(si->delete_bitmap, mark_entry_for_delete, istate);
if (si->nr_deletions)
- remove_marked_cache_entries(istate);
+ remove_marked_cache_entries(istate, 0);
for (i = si->nr_replacements; i < si->saved_cache_nr; i++) {
if (!ce_namelen(si->saved_cache[i]))
void remove_split_index(struct index_state *istate)
{
if (istate->split_index) {
- /*
- * When removing the split index, we need to move
- * ownership of the mem_pool associated with the
- * base index to the main index. There may be cache entries
- * allocated from the base's memory pool that are shared with
- * the_index.cache[].
- */
- mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
+ if (istate->split_index->base) {
+ /*
+ * When removing the split index, we need to move
+ * ownership of the mem_pool associated with the
+ * base index to the main index. There may be cache entries
+ * allocated from the base's memory pool that are shared with
+ * the_index.cache[].
+ */
+ mem_pool_combine(istate->ce_mem_pool,
+ istate->split_index->base->ce_mem_pool);
- /*
- * The split index no longer owns the mem_pool backing
- * its cache array. As we are discarding this index,
- * mark the index as having no cache entries, so it
- * will not attempt to clean up the cache entries or
- * validate them.
- */
- if (istate->split_index->base)
+ /*
+ * The split index no longer owns the mem_pool backing
+ * its cache array. As we are discarding this index,
+ * mark the index as having no cache entries, so it
+ * will not attempt to clean up the cache entries or
+ * validate them.
+ */
istate->split_index->base->cache_nr = 0;
+ }
/*
* We can discard the split index because its
strbuf_splice(sb, pos, 0, data, len);
}
+void strbuf_vinsertf(struct strbuf *sb, size_t pos, const char *fmt, va_list ap)
+{
+ int len, len2;
+ char save;
+ va_list cp;
+
+ if (pos > sb->len)
+ die("`pos' is too far after the end of the buffer");
+ va_copy(cp, ap);
+ len = vsnprintf(sb->buf + sb->len, 0, fmt, cp);
+ va_end(cp);
+ if (len < 0)
+ BUG("your vsnprintf is broken (returned %d)", len);
+ if (!len)
+ return; /* nothing to do */
+ if (unsigned_add_overflows(sb->len, len))
+ die("you want to use way too much memory");
+ strbuf_grow(sb, len);
+ memmove(sb->buf + pos + len, sb->buf + pos, sb->len - pos);
+ /* vsnprintf() will append a NUL, overwriting one of our characters */
+ save = sb->buf[pos + len];
+ len2 = vsnprintf(sb->buf + pos, len + 1, fmt, ap);
+ sb->buf[pos + len] = save;
+ if (len2 != len)
+ BUG("your vsnprintf is broken (returns inconsistent lengths)");
+ strbuf_setlen(sb, sb->len + len);
+}
+
+void strbuf_insertf(struct strbuf *sb, size_t pos, const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ strbuf_vinsertf(sb, pos, fmt, ap);
+ va_end(ap);
+}
+
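
For illustration only, assuming the functions defined above: inserting formatted text shifts the existing contents instead of overwriting them.

	static void demo_insertf(void)
	{
		struct strbuf sb = STRBUF_INIT;

		strbuf_addstr(&sb, "world");
		strbuf_insertf(&sb, 0, "hello, %s ", "dear");	/* sb: "hello, dear world" */
		strbuf_release(&sb);
	}
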
void strbuf_remove(struct strbuf *sb, size_t pos, size_t len)
{
strbuf_splice(sb, pos, len, "", 0);
strbuf_setlen(sb, sb->len + sb2->len);
}
+const char *strbuf_join_argv(struct strbuf *buf,
+ int argc, const char **argv, char delim)
+{
+ if (!argc)
+ return buf->buf;
+
+ strbuf_addstr(buf, *argv);
+ while (--argc) {
+ strbuf_addch(buf, delim);
+ strbuf_addstr(buf, *(++argv));
+ }
+
+ return buf->buf;
+}
+
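
A minimal sketch (not from the patch) of the new helper; it returns the buffer's contents, so the result can also be used directly in a message.

	static void demo_join_argv(void)
	{
		struct strbuf buf = STRBUF_INIT;
		const char *args[] = { "git", "rev-list", "--all" };

		strbuf_join_argv(&buf, 3, args, ' ');	/* buf now holds "git rev-list --all" */
		strbuf_release(&buf);
	}
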
void strbuf_addchars(struct strbuf *sb, int c, size_t n)
{
strbuf_grow(sb, n);
}
}
+size_t strbuf_expand_literal_cb(struct strbuf *sb,
+ const char *placeholder,
+ void *context)
+{
+ int ch;
+
+ switch (placeholder[0]) {
+ case 'n': /* newline */
+ strbuf_addch(sb, '\n');
+ return 1;
+ case 'x':
+ /* %x00 == NUL, %x0a == LF, etc. */
+ ch = hex2chr(placeholder + 1);
+ if (ch < 0)
+ return 0;
+ strbuf_addch(sb, ch);
+ return 3;
+ }
+ return 0;
+}
+
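
Assuming the long-standing strbuf_expand() API (format string, callback, context), the new callback can be used on its own to expand only the literal placeholders; a sketch:

	static void demo_expand_literal(void)
	{
		struct strbuf sb = STRBUF_INIT;

		/* "%n" becomes LF and "%x21" becomes '!'; other placeholders are not consumed. */
		strbuf_expand(&sb, "one%ntwo%x21", strbuf_expand_literal_cb, NULL);
		strbuf_release(&sb);	/* sb held "one\ntwo!" */
	}
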
size_t strbuf_expand_dict_cb(struct strbuf *sb, const char *placeholder,
void *context)
{
*/
void strbuf_insert(struct strbuf *sb, size_t pos, const void *, size_t);
+/**
+ * Insert data to the given position of the buffer giving a printf format
+ * string. The contents will be shifted, not overwritten.
+ */
+void strbuf_vinsertf(struct strbuf *sb, size_t pos, const char *fmt,
+ va_list ap);
+
+void strbuf_insertf(struct strbuf *sb, size_t pos, const char *fmt, ...);
+
/**
* Remove given amount of data from a given position of the buffer.
*/
*/
void strbuf_addbuf(struct strbuf *sb, const struct strbuf *sb2);
+/**
+ * Join the arguments into a buffer. `delim` is put between every
+ * two arguments.
+ */
+const char *strbuf_join_argv(struct strbuf *buf, int argc,
+ const char **argv, char delim);
+
/**
* This function can be used to expand a format string containing
* placeholders. To that end, it parses the string and calls the specified
expand_fn_t fn,
void *context);
+/**
+ * Used as callback for `strbuf_expand` to only expand literals
+ * (i.e. %n and %xNN). The context argument is ignored.
+ */
+size_t strbuf_expand_literal_cb(struct strbuf *sb,
+ const char *placeholder,
+ void *context);
+
/**
* Used as callback for `strbuf_expand()`, expects an array of
* struct strbuf_expand_dict_entry as context, i.e. pairs of
process->out = -1;
process->clean_on_exit = 1;
process->clean_on_exit_handler = subprocess_exit_handler;
+ process->trace2_child_class = "subprocess";
err = start_command(process);
if (err) {
default:
if (!strcmp(arg, "on-demand"))
return RECURSE_SUBMODULES_ON_DEMAND;
-
+ /*
+ * Please update $__git_fetch_recurse_submodules in
+ * git-completion.bash when you add new options.
+ */
if (die_on_error)
die("bad %s argument: %s", opt, arg);
else
return RECURSE_SUBMODULES_CHECK;
else if (!strcmp(arg, "only"))
return RECURSE_SUBMODULES_ONLY;
+ /*
+ * Please update $__git_push_recurse_submodules in
+ * git-completion.bash when you add new modes.
+ */
else if (die_on_error)
die("bad %s argument: %s", opt, arg);
else
diffopt->flags.ignore_dirty_submodules = 1;
else if (strcmp(arg, "none"))
die("bad --ignore-submodules argument: %s", arg);
+ /*
+ * Please update _git_status() in git-completion.bash when you
+ * add new options
+ */
}
static int prepare_submodule_summary(struct rev_info *rev, const char *path,
if (start_command(&cp))
die("Could not run 'git rev-list <commits> --not --remotes -n 1' command in submodule %s",
path);
- if (strbuf_read(&buf, cp.out, 41))
+ if (strbuf_read(&buf, cp.out, the_hash_algo->hexsz + 1))
needs_pushing = 1;
finish_command(&cp);
close(cp.out);
struct oid_array *commits;
if (retvalue)
+ /*
+ * NEEDSWORK: This indicates that the overall fetch
+ * failed, even though there may be a subsequent fetch
+ * by commit hash that might work. It may be a good
+ * idea to not indicate failure in this case, and only
+ * indicate failure if the subsequent fetch fails.
+ */
spf->result = 1;
if (!task || !task->sub)
calculate_changed_submodule_paths(r, &spf.changed_submodule_names);
string_list_sort(&spf.changed_submodule_names);
- run_processes_parallel(max_parallel_jobs,
- get_next_submodule,
- fetch_start_failure,
- fetch_finish,
- &spf);
+ run_processes_parallel_tr2(max_parallel_jobs,
+ get_next_submodule,
+ fetch_start_failure,
+ fetch_finish,
+ &spf,
+ "submodule", "parallel/fetch");
argv_array_clear(&spf.args);
out:
variable to "1" or "0", respectively.
--stress::
---stress=<N>::
Run the test script repeatedly in multiple parallel jobs until
one of them fails. Useful for reproducing rare failures in
flaky tests. The number of parallel jobs is, in order of
- precedence: <N>, or the value of the GIT_TEST_STRESS_LOAD
+ precedence: the value of the GIT_TEST_STRESS_LOAD
environment variable, or twice the number of available
processors (as shown by the 'getconf' utility), or 8.
Implies `--verbose -x --immediate` to get the most information
'.stress-<nr>' suffix, and the trash directory of the failed
test job is renamed to end with a '.stress-failed' suffix.
+--stress-jobs=<N>::
+ Override the number of parallel jobs. Implies `--stress`.
+
--stress-limit=<N>::
When combined with --stress run the test script repeatedly
this many times in each of the parallel jobs or until one of
- them fails, whichever comes first.
+ them fails, whichever comes first. Implies `--stress`.
You can also set the GIT_TEST_INSTALLED environment variable to
the bindir of an existing git installation to test that installation.
GIT_TEST_SPLIT_INDEX=<boolean> forces split-index mode on the whole
test suite. Accept any boolean values that are accepted by git-config.
+GIT_TEST_PROTOCOL_VERSION=<n>, when set, overrides the
+'protocol.version' setting to <n> if the configured value is less than <n>.
+
GIT_TEST_FULL_IN_PACK_ARRAY=<boolean> exercises the uncommon
pack-objects code path where there are more than 1024 packs even if
the actual number of packs in repository is below this limit. Accept
GIT_TEST_PRELOAD_INDEX=<boolean> exercises the preload-index code path
by overriding the minimum number of cache entries required per thread.
-GIT_TEST_REBASE_USE_BUILTIN=<boolean>, when false, disables the
-builtin version of git-rebase. See 'rebase.useBuiltin' in
+GIT_TEST_STASH_USE_BUILTIN=<boolean>, when false, disables the
+built-in version of git-stash. See 'stash.useBuiltin' in
git-config(1).
GIT_TEST_INDEX_THREADS=<n> enables exercising the multi-threaded loading
fetch-pack to not request sideband-all (even if the server advertises
sideband-all).
+GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=<boolean>, when true (which is
+the default when running tests), errors out when an abbreviated option
+is used.
+
Naming Tests
------------
...
'
+ - test_atexit <script>
+
+ Prepend <script> to a list of commands to run unconditionally to
+ clean up before the test script exits, e.g. to stop a daemon:
+
+ test_expect_success 'test git daemon' '
+ git daemon &
+ daemon_pid=$! &&
+ test_atexit 'kill $daemon_pid' &&
+ hello world
+ '
+
+ The commands will be executed before the trash directory is removed,
+ i.e. the atexit commands will still be able to access any pidfiles or
+ socket files.
+
+ Note that these commands will be run even when a test script run
+ with '--immediate' fails. Be careful with your atexit commands to
+ minimize any changes to the failed state.
+
- test_write_lines <lines>
Write <lines> on standard output, one line per argument.
test_oid_init or test_oid_cache. Providing an unknown key is an
error.
+ - yes [<string>]
+
+ This is often seen in modern UNIX but some platforms lack it, so
+ the test harness overrides the platform implementation with a
+   more limited one.  Use this only when feeding a handful of lines of
+ output to the downstream---unlike the real version, it generates
+ only up to 99 lines.
+
+
Prerequisites
-------------
}
}
-static void parse_dates(const char **argv, struct timeval *now)
+static void parse_dates(const char **argv)
{
struct strbuf result = STRBUF_INIT;
else if (skip_prefix(*argv, "show:", &x))
show_dates(argv+1, x);
else if (!strcmp(*argv, "parse"))
- parse_dates(argv+1, &now);
+ parse_dates(argv+1);
else if (!strcmp(*argv, "approxidate"))
parse_approxidate(argv+1, &now);
else if (!strcmp(*argv, "timestamp"))
#include "cache.h"
#include "parse-options.h"
#include "string-list.h"
+#include "trace2.h"
static int boolean = 0;
static int integer = 0;
OPT_NOOP_NOARG(0, "obsolete"),
OPT_STRING_LIST(0, "list", &list, "str", "add str to list"),
OPT_GROUP("Magic arguments"),
- OPT_ARGUMENT("quux", "means --quux"),
+ OPT_ARGUMENT("quux", NULL, "means --quux"),
OPT_NUMBER_CALLBACK(&integer, "set integer to NUM",
number_callback),
{ OPTION_COUNTUP, '+', NULL, &boolean, NULL, "same as -b",
int i;
int ret = 0;
+ trace2_cmd_name("_parse_");
+
argc = parse_options(argc, (const char **)argv, prefix, options, usage, 0);
if (length_cb.called) {
#include "git-compat-util.h"
#include "test-tool.h"
+#include "trace2.h"
struct test_cmd {
const char *name;
{ "submodule-config", cmd__submodule_config },
{ "submodule-nested-repo-config", cmd__submodule_nested_repo_config },
{ "subprocess", cmd__subprocess },
+ { "trace2", cmd__trace2 },
{ "urlmatch-normalization", cmd__urlmatch_normalization },
{ "xml-encode", cmd__xml_encode },
{ "wildmatch", cmd__wildmatch },
if (!strcmp(cmds[i].name, argv[1])) {
argv++;
argc--;
+ trace2_cmd_name(cmds[i].name);
+ trace2_cmd_list_config();
return cmds[i].fn(argc, argv);
}
}
int cmd__submodule_config(int argc, const char **argv);
int cmd__submodule_nested_repo_config(int argc, const char **argv);
int cmd__subprocess(int argc, const char **argv);
+int cmd__trace2(int argc, const char **argv);
int cmd__urlmatch_normalization(int argc, const char **argv);
int cmd__xml_encode(int argc, const char **argv);
int cmd__wildmatch(int argc, const char **argv);
--- /dev/null
+#include "test-tool.h"
+#include "cache.h"
+#include "argv-array.h"
+#include "run-command.h"
+#include "exec-cmd.h"
+#include "config.h"
+
+typedef int(fn_unit_test)(int argc, const char **argv);
+
+struct unit_test {
+ fn_unit_test *ut_fn;
+ const char *ut_name;
+ const char *ut_usage;
+};
+
+#define MyOk 0
+#define MyError 1
+
+static int get_i(int *p_value, const char *data)
+{
+ char *endptr;
+
+ if (!data || !*data)
+ return MyError;
+
+ *p_value = strtol(data, &endptr, 10);
+ if (*endptr || errno == ERANGE)
+ return MyError;
+
+ return MyOk;
+}
+
+/*
+ * Cause the process to exit with the requested value via "return".
+ *
+ * Rely on test-tool.c:cmd_main() to call trace2_cmd_exit()
+ * with our result.
+ *
+ * Test harness can confirm:
+ * [] the process-exit value.
+ * [] the "code" field in the "exit" trace2 event.
+ * [] the "code" field in the "atexit" trace2 event.
+ * [] the "name" field in the "cmd_name" trace2 event.
+ * [] "def_param" events for all of the "interesting" pre-defined
+ * config settings.
+ */
+static int ut_001return(int argc, const char **argv)
+{
+ int rc;
+
+ if (get_i(&rc, argv[0]))
+ die("expect <exit_code>");
+
+ return rc;
+}
+
+/*
+ * Cause the process to exit with the requested value via "exit()".
+ *
+ * Test harness can confirm:
+ * [] the "code" field in the "exit" trace2 event.
+ * [] the "code" field in the "atexit" trace2 event.
+ * [] the "name" field in the "cmd_name" trace2 event.
+ * [] "def_param" events for all of the "interesting" pre-defined
+ * config settings.
+ */
+static int ut_002exit(int argc, const char **argv)
+{
+ int rc;
+
+ if (get_i(&rc, argv[0]))
+ die("expect <exit_code>");
+
+ exit(rc);
+}
+
+/*
+ * Send an "error" event with each value in argv. Normally, git only issues
+ * a single "error" event immediately before issuing an "exit" event (such
+ * as in die() or BUG()), but multiple "error" events are allowed.
+ *
+ * Test harness can confirm:
+ * [] a trace2 "error" event for each value in argv.
+ * [] the "name" field in the "cmd_name" trace2 event.
+ * [] (optional) the file:line in the "exit" event refers to this function.
+ */
+static int ut_003error(int argc, const char **argv)
+{
+ int k;
+
+ if (!argv[0] || !*argv[0])
+ die("expect <error_message>");
+
+ for (k = 0; k < argc; k++)
+ error("%s", argv[k]);
+
+ return 0;
+}
+
+/*
+ * Run a child process and wait for it to finish and exit with its return code.
+ * test-tool trace2 004child [<child-command-line>]
+ *
+ * For example:
+ * test-tool trace2 004child git version
+ * test-tool trace2 004child test-tool trace2 001return 0
+ * test-tool trace2 004child test-tool trace2 004child test-tool trace2 004child
+ * test-tool trace2 004child git -c alias.xyz=version xyz
+ *
+ * Test harness can confirm:
+ * [] the "name" field in the "cmd_name" trace2 event.
+ * [] that the outer process has a single component SID (or depth "d0" in
+ * the PERF stream).
+ * [] that "child_start" and "child_exit" events are generated for the child.
+ * [] if the child process is an instrumented executable:
+ * [] that "version", "start", ..., "exit", and "atexit" events are
+ * generated by the child process.
+ * [] that the child process events have a multiple component SID (or
+ * depth "dN+1" in the PERF stream).
+ * [] that the child exit code is propagated to the parent process "exit"
+ *    and "atexit" events.
+ * [] (optional) that the "t_abs" field in the child process "atexit" event
+ * is less than the "t_rel" field in the "child_exit" event of the parent
+ * process.
+ * [] if the child process is like the alias example above,
+ * [] (optional) the child process attempts to run "git-xyz" as a dashed
+ * command.
+ * [] the child process emits an "alias" event with "xyz" => "version"
+ * [] the child process runs "git version" as a child process.
+ * [] the child process has a 3 component SID (or depth "d2" in the PERF
+ * stream).
+ */
+static int ut_004child(int argc, const char **argv)
+{
+ int result;
+
+ /*
+ * Allow empty <child_command_line> so we can do arbitrarily deep
+ * command nesting and let the last one be null.
+ */
+ if (!argc)
+ return 0;
+
+ result = run_command_v_opt(argv, 0);
+ exit(result);
+}
+
+/*
+ * Exec a git command. This may either create a child process (Windows)
+ * or replace the existing process.
+ * test-tool trace2 005exec <git_command_args>
+ *
+ * For example:
+ * test-tool trace2 005exec version
+ *
+ * Test harness can confirm (on Windows):
+ * [] the "name" field in the "cmd_name" trace2 event.
+ * [] that the outer process has a single component SID (or depth "d0" in
+ * the PERF stream).
+ * [] that "exec" and "exec_result" events are generated for the child
+ * process (since the Windows compatibility layer fakes an exec() with
+ * a CreateProcess(), WaitForSingleObject(), and exit()).
+ * [] that the child process has multiple component SID (or depth "dN+1"
+ * in the PERF stream).
+ *
+ * Test harness can confirm (on platforms with a real exec() function):
+ * [] TODO talk about process replacement and how it affects SID.
+ */
+static int ut_005exec(int argc, const char **argv)
+{
+ int result;
+
+ if (!argc)
+ return 0;
+
+ result = execv_git_cmd(argv);
+ return result;
+}
+
+static int ut_006data(int argc, const char **argv)
+{
+ const char *usage_error =
+ "expect <cat0> <k0> <v0> [<cat1> <k1> <v1> [...]]";
+
+ if (argc % 3 != 0)
+ die("%s", usage_error);
+
+ while (argc) {
+ if (!argv[0] || !*argv[0] || !argv[1] || !*argv[1] ||
+ !argv[2] || !*argv[2])
+ die("%s", usage_error);
+
+ trace2_data_string(argv[0], the_repository, argv[1], argv[2]);
+ argv += 3;
+ argc -= 3;
+ }
+
+ return 0;
+}
+
+/*
+ * Usage:
+ * test-tool trace2 <ut_name_1> <ut_usage_1>
+ * test-tool trace2 <ut_name_2> <ut_usage_2>
+ * ...
+ */
+#define USAGE_PREFIX "test-tool trace2"
+
+/* clang-format off */
+static struct unit_test ut_table[] = {
+ { ut_001return, "001return", "<exit_code>" },
+ { ut_002exit, "002exit", "<exit_code>" },
+ { ut_003error, "003error", "<error_message>+" },
+ { ut_004child, "004child", "[<child_command_line>]" },
+ { ut_005exec, "005exec", "<git_command_args>" },
+ { ut_006data, "006data", "[<category> <key> <value>]+" },
+};
+/* clang-format on */
+
+/* clang-format off */
+#define for_each_ut(k, ut_k) \
+ for (k = 0, ut_k = &ut_table[k]; \
+ k < ARRAY_SIZE(ut_table); \
+ k++, ut_k = &ut_table[k])
+/* clang-format on */
+
+static int print_usage(void)
+{
+ int k;
+ struct unit_test *ut_k;
+
+ fprintf(stderr, "usage:\n");
+ for_each_ut (k, ut_k)
+ fprintf(stderr, "\t%s %s %s\n", USAGE_PREFIX, ut_k->ut_name,
+ ut_k->ut_usage);
+
+ return 129;
+}
+
+/*
+ * Issue various trace2 events for testing.
+ *
+ * We assume that these trace2 routines have already been called:
+ * [] trace2_initialize() [common-main.c:main()]
+ * [] trace2_cmd_start() [common-main.c:main()]
+ * [] trace2_cmd_name() [test-tool.c:cmd_main()]
+ * [] trace2_cmd_list_config() [test-tool.c:cmd_main()]
+ * So that:
+ * [] the various trace2 streams are open.
+ * [] the process SID has been created.
+ * [] the "version" event has been generated.
+ * [] the "start" event has been generated.
+ * [] the "cmd_name" event has been generated.
+ * [] this writes various "def_param" events for interesting config values.
+ *
+ * We further assume that if we return (rather than exit()), trace2_cmd_exit()
+ * will be called by test-tool.c:cmd_main().
+ */
+int cmd__trace2(int argc, const char **argv)
+{
+ int k;
+ struct unit_test *ut_k;
+
+ argc--; /* skip over "trace2" arg */
+ argv++;
+
+ if (argc)
+ for_each_ut (k, ut_k)
+ if (!strcmp(argv[0], ut_k->ut_name))
+ return ut_k->ut_fn(argc - 1, argv + 1);
+
+ return print_usage();
+}
test_cmp expect actual
'
-stop_git_daemon
test_done
#
# test_expect_success ...
#
-# stop_git_daemon
# test_done
test_tristate GIT_TEST_GIT_DAEMON
test_set_port LIB_GIT_DAEMON_PORT
GIT_DAEMON_PID=
+GIT_DAEMON_PIDFILE="$PWD"/daemon.pid
GIT_DAEMON_DOCUMENT_ROOT_PATH="$PWD"/repo
GIT_DAEMON_HOST_PORT=127.0.0.1:$LIB_GIT_DAEMON_PORT
GIT_DAEMON_URL=git://$GIT_DAEMON_HOST_PORT
+registered_stop_git_daemon_atexit_handler=
start_git_daemon() {
if test -n "$GIT_DAEMON_PID"
then
mkdir -p "$GIT_DAEMON_DOCUMENT_ROOT_PATH"
- trap 'code=$?; stop_git_daemon; (exit $code); die' EXIT
+ # One of the test scripts stops and then re-starts 'git daemon'.
+ # Don't register and then run the same atexit handlers several times.
+ if test -z "$registered_stop_git_daemon_atexit_handler"
+ then
+ test_atexit 'stop_git_daemon'
+ registered_stop_git_daemon_atexit_handler=AlreadyDone
+ fi
say >&3 "Starting git daemon ..."
mkfifo git_daemon_output
${LIB_GIT_DAEMON_COMMAND:-git daemon} \
--listen=127.0.0.1 --port="$LIB_GIT_DAEMON_PORT" \
- --reuseaddr --verbose \
+ --reuseaddr --verbose --pid-file="$GIT_DAEMON_PIDFILE" \
--base-path="$GIT_DAEMON_DOCUMENT_ROOT_PATH" \
"$@" "$GIT_DAEMON_DOCUMENT_ROOT_PATH" \
>&3 2>git_daemon_output &
then
kill "$GIT_DAEMON_PID"
wait "$GIT_DAEMON_PID"
- trap 'die' EXIT
+ unset GIT_DAEMON_PID
test_skip_or_die $GIT_TEST_GIT_DAEMON \
"git daemon failed to start"
fi
return
fi
- trap 'die' EXIT
-
# kill git-daemon child of git
say >&3 "Stopping git daemon ..."
kill "$GIT_DAEMON_PID"
then
error "git daemon exited with status: $ret"
fi
+ kill "$(cat "$GIT_DAEMON_PIDFILE")" 2>/dev/null
GIT_DAEMON_PID=
- rm -f git_daemon_output
+ rm -f git_daemon_output "$GIT_DAEMON_PIDFILE"
}
# A stripped-down version of a netcat client, that connects to a "host:port"
echo "$path"
}
-# On Solaris the 'date +%s' function is not supported and therefore we
-# need this replacement.
-# Attention: This function is not safe again against time offset updates
-# at runtime (e.g. via NTP). The 'clock_gettime(CLOCK_MONOTONIC)'
-# function could fix that but it is not in Python until 3.3.
-time_in_seconds () {
- (cd / && "$PYTHON_PATH" -c 'import time; print(int(time.time()))')
-}
-
test_set_port P4DPORT
P4PORT=localhost:$P4DPORT
git="$TRASH_DIRECTORY/git"
pidfile="$TRASH_DIRECTORY/p4d.pid"
-# Sometimes "prove" seems to hang on exit because p4d is still running
-cleanup () {
- if test -f "$pidfile"
- then
- kill -9 $(cat "$pidfile") 2>/dev/null && exit 255
- fi
+stop_p4d_and_watchdog () {
+ kill -9 $p4d_pid $watchdog_pid
}
-trap cleanup EXIT
# git p4 submit generates a temp file, which will
# not get cleaned up if the submission fails. Don't
TMPDIR="$TRASH_DIRECTORY"
export TMPDIR
+registered_stop_p4d_atexit_handler=
start_p4d () {
+ # One of the test scripts stops and then re-starts p4d.
+ # Don't register and then run the same atexit handlers several times.
+ if test -z "$registered_stop_p4d_atexit_handler"
+ then
+ test_atexit 'stop_p4d_and_watchdog'
+ registered_stop_p4d_atexit_handler=AlreadyDone
+ fi
+
mkdir -p "$db" "$cli" "$git" &&
rm -f "$pidfile" &&
(
echo $! >"$pidfile"
}
) &&
+ p4d_pid=$(cat "$pidfile")
# This gives p4d a long time to start up, as it can be
# quite slow depending on the machine. Set this environment
# an automated test setup. If the p4d process dies, that
# will be caught with the "kill -0" check below.
i=${P4D_START_PATIENCE:-300}
- pid=$(cat "$pidfile")
- timeout=$(($(time_in_seconds) + $P4D_TIMEOUT))
+ nr_tries_left=$P4D_TIMEOUT
while true
do
- if test $(time_in_seconds) -gt $timeout
+ if test $nr_tries_left -eq 0
then
- kill -9 $pid
+ kill -9 $p4d_pid
exit 1
fi
sleep 1
- done &
+ nr_tries_left=$(($nr_tries_left - 1))
+ done 2>/dev/null 4>&2 &
watchdog_pid=$!
ready=
break
fi
# fail if p4d died
- kill -0 $pid 2>/dev/null || break
+ kill -0 $p4d_pid 2>/dev/null || break
echo waiting for p4d to start
sleep 1
i=$(( $i - 1 ))
}
retry_until_success () {
- timeout=$(($(time_in_seconds) + $RETRY_TIMEOUT))
- until "$@" 2>/dev/null || test $(time_in_seconds) -gt $timeout
- do
- sleep 1
- done
-}
-
-retry_until_fail () {
- timeout=$(($(time_in_seconds) + $RETRY_TIMEOUT))
- until ! "$@" 2>/dev/null || test $(time_in_seconds) -gt $timeout
+ nr_tries_left=$RETRY_TIMEOUT
+ until "$@" 2>/dev/null || test $nr_tries_left -eq 0
do
sleep 1
+ nr_tries_left=$(($nr_tries_left - 1))
done
}
-kill_p4d () {
- pid=$(cat "$pidfile")
- retry_until_fail kill $pid
- retry_until_fail kill -9 $pid
- # complain if it would not die
- test_must_fail kill $pid >/dev/null 2>&1 &&
- rm -rf "$db" "$cli" "$pidfile" &&
- retry_until_fail kill -9 $watchdog_pid
+stop_and_cleanup_p4d () {
+ kill -9 $p4d_pid $watchdog_pid
+ wait $p4d_pid
+ rm -rf "$db" "$cli" "$pidfile"
}
cleanup_git () {
LIB_HTTPD_SVN="$loc"
start_httpd
;;
- *)
- stop_httpd () {
- : noop
- }
- ;;
esac
}
#
# test_expect_success ...
#
-# stop_httpd
# test_done
#
# Can be configured using the following variables.
start_httpd() {
prepare_httpd >&3 2>&4
- trap 'code=$?; stop_httpd; (exit $code); die' EXIT
+ test_atexit stop_httpd
"$LIB_HTTPD_PATH" -d "$HTTPD_ROOT_PATH" \
-f "$TEST_PATH/apache.conf" $HTTPD_PARA \
>&3 2>&4
if test $? -ne 0
then
- trap 'die' EXIT
cat "$HTTPD_ROOT_PATH"/error.log >&4 2>/dev/null
test_skip_or_die $GIT_TEST_HTTPD "web server setup failed"
fi
}
stop_httpd() {
- trap 'die' EXIT
-
"$LIB_HTTPD_PATH" -d "$HTTPD_ROOT_PATH" \
-f "$TEST_PATH/apache.conf" $HTTPD_PARA -k stop
}
git revert HEAD &&
git checkout -b invalid_sub1 add_sub1 &&
- git update-index --cacheinfo 160000 0123456789012345678901234567890123456789 sub1 &&
+ git update-index --cacheinfo 160000 $(test_oid numeric) sub1 &&
git commit -m "Invalid sub1 commit" &&
git checkout -b valid_sub1 &&
git revert HEAD &&
# the submodule repo if it doesn't exist and configures the most problematic
# settings for diff.ignoreSubmodules.
prolog () {
+ test_oid_init &&
(test -d submodule_update_repo || create_lib_submodule_repo) &&
test_config_global diff.ignoreSubmodules all &&
test_config diff.ignoreSubmodules all
git rev-list --all --objects >/dev/null
'
+test_perf 'rev-list --parents' '
+ git rev-list --parents HEAD >/dev/null
+'
+
+test_expect_success 'create dummy file' '
+ echo unlikely-to-already-be-there >dummy &&
+ git add dummy &&
+ git commit -m dummy
+'
+
+test_perf 'rev-list -- dummy' '
+ git rev-list HEAD -- dummy
+'
+
+test_perf 'rev-list --parents -- dummy' '
+ git rev-list --parents HEAD -- dummy
+'
+
test_expect_success 'create new unreferenced commit' '
commit=$(git commit-tree HEAD^{tree} -p HEAD) &&
test_export commit
--- /dev/null
+#!/bin/sh
+
+test_description='performance tests of prune'
+. ./perf-lib.sh
+
+test_perf_default_repo
+
+test_expect_success 'remove reachable loose objects' '
+ git repack -ad
+'
+
+test_expect_success 'remove unreachable loose objects' '
+ git prune
+'
+
+test_expect_success 'confirm there are no loose objects' '
+ git count-objects | grep ^0
+'
+
+test_perf 'prune with no objects' '
+ git prune
+'
+
+test_expect_success 'repack with bitmaps' '
+ git repack -adb
+'
+
+# We have to create the object in each trial run, since otherwise
+# runs after the first see no object and just skip the traversal entirely!
+test_perf 'prune with bitmaps' '
+ echo "probably not present in repo" | git hash-object -w --stdin &&
+ git prune
+'
+
+test_done
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/ .
-# do the --tee work early; it otherwise confuses our careful
-# GIT_BUILD_DIR mangling
-case "$GIT_TEST_TEE_STARTED, $* " in
-done,*)
- # do not redirect again
- ;;
-*' --tee '*|*' --va'*)
- mkdir -p test-results
- BASE=test-results/$(basename "$0" .sh)
- (GIT_TEST_TEE_STARTED=done ${SHELL-sh} "$0" "$@" 2>&1;
- echo $? > $BASE.exit) | tee $BASE.out
- test "$(cat $BASE.exit)" = 0
- exit
- ;;
-esac
-
+# These variables must be set before the inclusion of test-lib.sh below,
+# because it will change our working directory.
TEST_DIRECTORY=$(pwd)/..
TEST_OUTPUT_DIRECTORY=$(pwd)
-if test -z "$GIT_TEST_INSTALLED"; then
- perf_results_prefix=
-else
- perf_results_prefix=$(printf "%s" "${GIT_TEST_INSTALLED%/bin-wrappers}" | tr -c "[a-zA-Z0-9]" "[_*]")"."
- # make the tested dir absolute
- GIT_TEST_INSTALLED=$(cd "$GIT_TEST_INSTALLED" && pwd)
-fi
+ABSOLUTE_GIT_TEST_INSTALLED=$(
+ test -n "$GIT_TEST_INSTALLED" && cd "$GIT_TEST_INSTALLED" && pwd)
TEST_NO_CREATE_REPO=t
TEST_NO_MALLOC_CHECK=t
. ../test-lib.sh
+if test -z "$GIT_TEST_INSTALLED"; then
+ perf_results_prefix=
+else
+ perf_results_prefix=$(printf "%s" "${GIT_TEST_INSTALLED%/bin-wrappers}" | tr -c "[a-zA-Z0-9]" "[_*]")"."
+ GIT_TEST_INSTALLED=$ABSOLUTE_GIT_TEST_INSTALLED
+fi
+
# Variables from test-lib that are normally internal to the tests; we
# need to export them for test_perf subshells
export TEST_DIRECTORY TRASH_DIRECTORY GIT_BUILD_DIR GIT_TEST_CMP
EOF
"
+test_expect_success 'test_atexit is run' "
+ test_must_fail run_sub_test_lib_test \
+ atexit-cleanup 'Run atexit commands' -i <<-\\EOF &&
+ test_expect_success 'tests clean up even after a failure' '
+ > ../../clean-atexit &&
+ test_atexit rm ../../clean-atexit &&
+ > ../../also-clean-atexit &&
+ test_atexit rm ../../also-clean-atexit &&
+ > ../../dont-clean-atexit &&
+ (exit 1)
+ '
+ test_done
+ EOF
+ test_path_is_file dont-clean-atexit &&
+ test_path_is_missing clean-atexit &&
+ test_path_is_missing also-clean-atexit
+"
+
test_expect_success 'test_oid setup' '
test_oid_init
'
sed -n \
-e "/^GIT_PREFIX=/d" \
-e "/^GIT_TEXTDOMAINDIR=/d" \
+ -e "/^GIT_TR2_PARENT/d" \
-e "/^GIT_/s/=.*//p" |
sort
EOF
)
'
+test_expect_success MINGW 'core.hidedotfiles = false' '
+ git config --global core.hidedotfiles false &&
+ rm -rf newdir &&
+ mkdir newdir &&
+ (
+ sane_unset GIT_DIR GIT_WORK_TREE GIT_CONFIG &&
+ git -C newdir init
+ ) &&
+ ! is_hidden newdir/.git
+'
+
test_expect_success MINGW 'redirect std handles' '
GIT_REDIRECT_STDOUT=output.txt git rev-parse --git-dir &&
test .git = "$(cat output.txt)" &&
EOF
test_expect_success 'unambiguously abbreviated option' '
+ GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=false \
test-tool parse-options --int 2 --boolean --no-bo >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
'
test_expect_success 'unambiguously abbreviated option with "="' '
+ GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=false \
test-tool parse-options --expect="integer: 2" --int=2
'
test_expect_success 'ambiguously abbreviated option' '
- test_expect_code 129 test-tool parse-options --strin 123
+ test_expect_code 129 env GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=false \
+ test-tool parse-options --strin 123
'
test_expect_success 'non ambiguous option (after two options it abbreviates)' '
+ GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=false \
test-tool parse-options --expect="string: 123" --st 123
'
EOF
test_expect_success 'negation of OPT_NONEG flags is not ambiguous' '
+ GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=false \
test-tool parse-options --no-ambig >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
test-tool parse-options --expect="verbose: 0" -v -v -v --no-verbose
'
+test_expect_success 'GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS works' '
+ GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=false \
+ test-tool parse-options --ye &&
+ test_must_fail env GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=true \
+ test-tool parse-options --ye
+'
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='test trace2 facility (normal target)'
+. ./test-lib.sh
+
+# Add t/helper directory to PATH so that we can use a relative
+# path to run nested instances of test-tool.exe (see 004child).
+# This helps with HEREDOC comparisons later.
+TTDIR="$GIT_BUILD_DIR/t/helper/" && export TTDIR
+PATH="$TTDIR:$PATH" && export PATH
+
+# Warning: use of 'test_cmp' may run test-tool.exe and/or git.exe
+# Warning: to do the actual diff/comparison, so the HEREDOCs here
+# Warning: only cover our actual calls to test-tool and/or git.
+# Warning: So you may see extra lines in artifact files when
+# Warning: interactively debugging.
+
+# Turn off any inherited trace2 settings for this test.
+unset GIT_TR2 GIT_TR2_PERF GIT_TR2_EVENT
+unset GIT_TR2_BRIEF
+unset GIT_TR2_CONFIG_PARAMS
+
+V=$(git version | sed -e 's/^git version //') && export V
+
+# There are multiple trace2 targets: normal, perf, and event.
+# Trace2 events will/can be written to each active target (subject
+# to whatever filtering that target decides to do).
+# This script tests the normal target in isolation.
+#
+# Defer setting GIT_TR2 until the actual command line we want to test
+# because hidden git and test-tool commands run by the test harness
+# can contaminate our output.
+
+# Enable "brief" feature which turns off "<clock> <file>:<line> " prefix.
+GIT_TR2_BRIEF=1 && export GIT_TR2_BRIEF
+
+# Basic tests of the trace2 normal stream. Since this stream is used
+# primarily with printf-style debugging/tracing, we do limited testing
+# here.
+#
+# We do confirm the following API features:
+# [] the 'version <v>' event
+# [] the 'start <argv>' event
+# [] the 'cmd_name <name>' event
+# [] the 'exit <time> code:<code>' event
+# [] the 'atexit <time> code:<code>' event
+#
+# Fields of the form _FIELD_ are tokens that have been replaced (such
+# as the elapsed time).
+
+# Verb 001return
+#
+# Implicit return from cmd_<verb> function propagates <code>.
+
+test_expect_success 'normal stream, return code 0' '
+ test_when_finished "rm trace.normal actual expect" &&
+ GIT_TR2="$(pwd)/trace.normal" test-tool trace2 001return 0 &&
+ perl "$TEST_DIRECTORY/t0210/scrub_normal.perl" <trace.normal >actual &&
+ cat >expect <<-EOF &&
+ version $V
+ start _EXE_ trace2 001return 0
+ cmd_name trace2 (trace2)
+ exit elapsed:_TIME_ code:0
+ atexit elapsed:_TIME_ code:0
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'normal stream, return code 1' '
+ test_when_finished "rm trace.normal actual expect" &&
+ test_must_fail env GIT_TR2="$(pwd)/trace.normal" test-tool trace2 001return 1 &&
+ perl "$TEST_DIRECTORY/t0210/scrub_normal.perl" <trace.normal >actual &&
+ cat >expect <<-EOF &&
+ version $V
+ start _EXE_ trace2 001return 1
+ cmd_name trace2 (trace2)
+ exit elapsed:_TIME_ code:1
+ atexit elapsed:_TIME_ code:1
+ EOF
+ test_cmp expect actual
+'
+
+# Verb 002exit
+#
+# Explicit exit(code) from within cmd_<verb> propagates <code>.
+
+test_expect_success 'normal stream, exit code 0' '
+ test_when_finished "rm trace.normal actual expect" &&
+ GIT_TR2="$(pwd)/trace.normal" test-tool trace2 002exit 0 &&
+ perl "$TEST_DIRECTORY/t0210/scrub_normal.perl" <trace.normal >actual &&
+ cat >expect <<-EOF &&
+ version $V
+ start _EXE_ trace2 002exit 0
+ cmd_name trace2 (trace2)
+ exit elapsed:_TIME_ code:0
+ atexit elapsed:_TIME_ code:0
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'normal stream, exit code 1' '
+ test_when_finished "rm trace.normal actual expect" &&
+ test_must_fail env GIT_TR2="$(pwd)/trace.normal" test-tool trace2 002exit 1 &&
+ perl "$TEST_DIRECTORY/t0210/scrub_normal.perl" <trace.normal >actual &&
+ cat >expect <<-EOF &&
+ version $V
+ start _EXE_ trace2 002exit 1
+ cmd_name trace2 (trace2)
+ exit elapsed:_TIME_ code:1
+ atexit elapsed:_TIME_ code:1
+ EOF
+ test_cmp expect actual
+'
+
+# Verb 003error
+#
+# To the above, add multiple 'error <msg>' events
+
+test_expect_success 'normal stream, error event' '
+ test_when_finished "rm trace.normal actual expect" &&
+ GIT_TR2="$(pwd)/trace.normal" test-tool trace2 003error "hello world" "this is a test" &&
+ perl "$TEST_DIRECTORY/t0210/scrub_normal.perl" <trace.normal >actual &&
+ cat >expect <<-EOF &&
+ version $V
+ start _EXE_ trace2 003error '\''hello world'\'' '\''this is a test'\''
+ cmd_name trace2 (trace2)
+ error hello world
+ error this is a test
+ exit elapsed:_TIME_ code:0
+ atexit elapsed:_TIME_ code:0
+ EOF
+ test_cmp expect actual
+'
+
+test_done
--- /dev/null
+#!/usr/bin/perl
+#
+# Scrub the variable fields from the normal trace2 output to
+# make testing easier.
+
+use strict;
+use warnings;
+
+my $float = '[0-9]*\.[0-9]+([eE][-+]?[0-9]+)?';
+
+# This code assumes that the trace2 data was written with bare
+# turned on (which omits the "<clock> <file>:<line>" prefix).
+
+while (<>) {
+ # Various messages include an elapsed time in the middle
+ # of the message. Replace the time with a placeholder to
+ # simplify our HEREDOC in the test script.
+ s/elapsed:$float/elapsed:_TIME_/g;
+
+ my $line = $_;
+
+ # we expect:
+ # start <argv0> [<argv1> [<argv2> [...]]]
+ #
+ # where argv0 might be a relative or absolute path, with
+ # or without quotes, and platform dependent. Replace argv0
+ # with a token for HEREDOC matching in the test script.
+
+ if ($line =~ m/^start/) {
+ $line =~ /^start\s+(.*)/;
+ my $argv = $1;
+ $argv =~ m/(\'[^\']*\'|[^ ]+)\s+(.*)/;
+ my $argv_0 = $1;
+ my $argv_rest = $2;
+
+ print "start _EXE_ $argv_rest\n";
+ }
+ elsif ($line =~ m/^cmd_path/) {
+ # Likewise, the 'cmd_path' message breaks out argv[0].
+ #
+ # This line is only emitted when RUNTIME_PREFIX is defined,
+ # so just omit it for testing purposes.
+ # print "cmd_path _EXE_\n";
+ }
+ else {
+ print "$line";
+ }
+}
--- /dev/null
+#!/bin/sh
+
+test_description='test trace2 facility (perf target)'
+. ./test-lib.sh
+
+# Add t/helper directory to PATH so that we can use a relative
+# path to run nested instances of test-tool.exe (see 004child).
+# This helps with HEREDOC comparisons later.
+TTDIR="$GIT_BUILD_DIR/t/helper/" && export TTDIR
+PATH="$TTDIR:$PATH" && export PATH
+
+# Warning: use of 'test_cmp' may run test-tool.exe and/or git.exe
+# Warning: to do the actual diff/comparison, so the HEREDOCs here
+# Warning: only cover our actual calls to test-tool and/or git.
+# Warning: So you may see extra lines in artifact files when
+# Warning: interactively debugging.
+
+# Turn off any inherited trace2 settings for this test.
+unset GIT_TR2 GIT_TR2_PERF GIT_TR2_EVENT
+unset GIT_TR2_PERF_BRIEF
+unset GIT_TR2_CONFIG_PARAMS
+
+V=$(git version | sed -e 's/^git version //') && export V
+
+# There are multiple trace2 targets: normal, perf, and event.
+# Trace2 events will/can be written to each active target (subject
+# to whatever filtering that target decides to do).
+# Test each target independently.
+#
+# Defer setting GIT_TR2_PERF until the actual command we want to
+# test because hidden git and test-tool commands in the test
+# harness can contaminate our output.
+
+# Enable "brief" feature which turns off the prefix:
+# "<clock> <file>:<line> | <nr_parents> | "
+GIT_TR2_PERF_BRIEF=1 && export GIT_TR2_PERF_BRIEF
+
+# Repeat some of the t0210 tests using the perf target stream instead of
+# the normal stream.
+#
+# Tokens here of the form _FIELD_ have been replaced in the observed output.
+
+# Verb 001return
+#
+# Implicit return from cmd_<verb> function propagates <code>.
+
+test_expect_success 'perf stream, return code 0' '
+ test_when_finished "rm trace.perf actual expect" &&
+ GIT_TR2_PERF="$(pwd)/trace.perf" test-tool trace2 001return 0 &&
+ perl "$TEST_DIRECTORY/t0211/scrub_perf.perl" <trace.perf >actual &&
+ cat >expect <<-EOF &&
+ d0|main|version|||||$V
+ d0|main|start|||||_EXE_ trace2 001return 0
+ d0|main|cmd_name|||||trace2 (trace2)
+ d0|main|exit||_T_ABS_|||code:0
+ d0|main|atexit||_T_ABS_|||code:0
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'perf stream, return code 1' '
+ test_when_finished "rm trace.perf actual expect" &&
+ test_must_fail env GIT_TR2_PERF="$(pwd)/trace.perf" test-tool trace2 001return 1 &&
+ perl "$TEST_DIRECTORY/t0211/scrub_perf.perl" <trace.perf >actual &&
+ cat >expect <<-EOF &&
+ d0|main|version|||||$V
+ d0|main|start|||||_EXE_ trace2 001return 1
+ d0|main|cmd_name|||||trace2 (trace2)
+ d0|main|exit||_T_ABS_|||code:1
+ d0|main|atexit||_T_ABS_|||code:1
+ EOF
+ test_cmp expect actual
+'
+
+# Verb 003error
+#
+# To the above, add multiple 'error <msg>' events
+
+test_expect_success 'perf stream, error event' '
+ test_when_finished "rm trace.perf actual expect" &&
+ GIT_TR2_PERF="$(pwd)/trace.perf" test-tool trace2 003error "hello world" "this is a test" &&
+ perl "$TEST_DIRECTORY/t0211/scrub_perf.perl" <trace.perf >actual &&
+ cat >expect <<-EOF &&
+ d0|main|version|||||$V
+ d0|main|start|||||_EXE_ trace2 003error '\''hello world'\'' '\''this is a test'\''
+ d0|main|cmd_name|||||trace2 (trace2)
+ d0|main|error|||||hello world
+ d0|main|error|||||this is a test
+ d0|main|exit||_T_ABS_|||code:0
+ d0|main|atexit||_T_ABS_|||code:0
+ EOF
+ test_cmp expect actual
+'
+
+# Verb 004child
+#
+# Test nested spawning of child processes.
+#
+# Conceptually, this looks like:
+# P1: TT trace2 004child
+# P2: |--- TT trace2 004child
+# P3: |--- TT trace2 001return 0
+#
+# Which should generate events:
+# P1: version
+# P1: start
+# P1: cmd_name
+# P1: child_start
+# P2: version
+# P2: start
+# P2: cmd_name
+# P2: child_start
+# P3: version
+# P3: start
+# P3: cmd_name
+# P3: exit
+# P3: atexit
+# P2: child_exit
+# P2: exit
+# P2: atexit
+# P1: child_exit
+# P1: exit
+# P1: atexit
+
+test_expect_success 'perf stream, child processes' '
+ test_when_finished "rm trace.perf actual expect" &&
+ GIT_TR2_PERF="$(pwd)/trace.perf" test-tool trace2 004child test-tool trace2 004child test-tool trace2 001return 0 &&
+ perl "$TEST_DIRECTORY/t0211/scrub_perf.perl" <trace.perf >actual &&
+ cat >expect <<-EOF &&
+ d0|main|version|||||$V
+ d0|main|start|||||_EXE_ trace2 004child test-tool trace2 004child test-tool trace2 001return 0
+ d0|main|cmd_name|||||trace2 (trace2)
+ d0|main|child_start||_T_ABS_|||[ch0] class:? argv: test-tool trace2 004child test-tool trace2 001return 0
+ d1|main|version|||||$V
+ d1|main|start|||||_EXE_ trace2 004child test-tool trace2 001return 0
+ d1|main|cmd_name|||||trace2 (trace2/trace2)
+ d1|main|child_start||_T_ABS_|||[ch0] class:? argv: test-tool trace2 001return 0
+ d2|main|version|||||$V
+ d2|main|start|||||_EXE_ trace2 001return 0
+ d2|main|cmd_name|||||trace2 (trace2/trace2/trace2)
+ d2|main|exit||_T_ABS_|||code:0
+ d2|main|atexit||_T_ABS_|||code:0
+ d1|main|child_exit||_T_ABS_|_T_REL_||[ch0] pid:_PID_ code:0
+ d1|main|exit||_T_ABS_|||code:0
+ d1|main|atexit||_T_ABS_|||code:0
+ d0|main|child_exit||_T_ABS_|_T_REL_||[ch0] pid:_PID_ code:0
+ d0|main|exit||_T_ABS_|||code:0
+ d0|main|atexit||_T_ABS_|||code:0
+ EOF
+ test_cmp expect actual
+'
+
+test_done
--- /dev/null
+#!/usr/bin/perl
+#
+# Scrub the variable fields from the perf trace2 output to
+# make testing easier.
+
+use strict;
+use warnings;
+
+my $qpath = '\'[^\']*\'|[^ ]*';
+
+my $col_depth=0;
+my $col_thread=1;
+my $col_event=2;
+my $col_repo=3;
+my $col_t_abs=4;
+my $col_t_rel=5;
+my $col_category=6;
+my $col_rest=7;
+
+# This code assumes that the trace2 data was written with bare
+# turned on (which omits the "<clock> <file>:<line> | <parents>"
+# prefix).
+
+while (<>) {
+ my @tokens = split /\|/;
+
+ foreach my $col (@tokens) { $col =~ s/^\s+|\s+$//g; }
+
+ if ($tokens[$col_event] =~ m/^start/) {
+ # The 'start' message lists the contents of argv in $col_rest.
+ # On some platforms (Windows), argv[0] is *sometimes* a canonical
+ # absolute path to the EXE rather than the value passed in the
+ # shell script. Replace it with a placeholder to simplify our
+ # HEREDOC in the test script.
+ my $argv0;
+ my $argvRest;
+ $tokens[$col_rest] =~ s/^($qpath)\W*(.*)/_EXE_ $2/;
+ }
+ elsif ($tokens[$col_event] =~ m/cmd_path/) {
+ # Likewise, the 'cmd_path' message breaks out argv[0].
+ #
+ # This line is only emitted when RUNTIME_PREFIX is defined,
+ # so just omit it for testing purposes.
+ # $tokens[$col_rest] = "_EXE_";
+ goto SKIP_LINE;
+ }
+ elsif ($tokens[$col_event] =~ m/child_exit/) {
+ $tokens[$col_rest] =~ s/ pid:\d* / pid:_PID_ /;
+ }
+ elsif ($tokens[$col_event] =~ m/data/) {
+ if ($tokens[$col_category] =~ m/process/) {
+ # 'data' and 'data_json' events containing 'process'
+ # category data are assumed to be platform-specific
+ # and highly variable. Just omit them.
+ goto SKIP_LINE;
+ }
+ }
+
+ # t_abs and t_rel are either blank or a float. Replace the float
+ # with a constant for matching the HEREDOC in the test script.
+ if ($tokens[$col_t_abs] =~ m/\d/) {
+ $tokens[$col_t_abs] = "_T_ABS_";
+ }
+ if ($tokens[$col_t_rel] =~ m/\d/) {
+ $tokens[$col_t_rel] = "_T_REL_";
+ }
+
+ my $out;
+
+ $out = join('|', @tokens);
+ print "$out\n";
+
+ SKIP_LINE:
+}
+
+
--- /dev/null
+#!/bin/sh
+
+test_description='test trace2 facility'
+. ./test-lib.sh
+
+perl -MJSON::PP -e 0 >/dev/null 2>&1 && test_set_prereq JSON_PP
+
+# Add t/helper directory to PATH so that we can use a relative
+# path to run nested instances of test-tool.exe (see 004child).
+# This helps with HEREDOC comparisons later.
+TTDIR="$GIT_BUILD_DIR/t/helper/" && export TTDIR
+PATH="$TTDIR:$PATH" && export PATH
+
+# Warning: use of 'test_cmp' may run test-tool.exe and/or git.exe
+# Warning: to do the actual diff/comparison, so the HEREDOCs here
+# Warning: only cover our actual calls to test-tool and/or git.
+# Warning: So you may see extra lines in artifact files when
+# Warning: interactively debugging.
+
+# Turn off any inherited trace2 settings for this test.
+unset GIT_TR2 GIT_TR2_PERF GIT_TR2_EVENT
+unset GIT_TR2_BARE
+unset GIT_TR2_CONFIG_PARAMS
+
+V=$(git version | sed -e 's/^git version //') && export V
+
+# There are multiple trace2 targets: normal, perf, and event.
+# Trace2 events will/can be written to each active target (subject
+# to whatever filtering that target decides to do).
+# Test each target independently.
+#
+# Defer setting GIT_TR2_PERF until the actual command we want to
+# test because hidden git and test-tool commands in the test
+# harness can contaminate our output.
+
+# We don't bother repeating the 001return and 002exit tests, since they
+# have coverage in the normal and perf targets.
+
+# Verb 003error
+#
+# To the above, add multiple 'error <msg>' events
+
+test_expect_success JSON_PP 'event stream, error event' '
+ test_when_finished "rm trace.event actual expect" &&
+ GIT_TR2_EVENT="$(pwd)/trace.event" test-tool trace2 003error "hello world" "this is a test" &&
+ perl "$TEST_DIRECTORY/t0212/parse_events.perl" <trace.event >actual &&
+ sed -e "s/^|//" >expect <<-EOF &&
+ |VAR1 = {
+ | "_SID0_":{
+ | "argv":[
+ | "_EXE_",
+ | "trace2",
+ | "003error",
+ | "hello world",
+ | "this is a test"
+ | ],
+ | "errors":[
+ | "%s",
+ | "%s"
+ | ],
+ | "exit_code":0,
+ | "hierarchy":"trace2",
+ | "name":"trace2",
+ | "version":"$V"
+ | }
+ |};
+ EOF
+ test_cmp expect actual
+'
+
+# Verb 004child
+#
+# Test nested spawning of child processes.
+#
+# Conceptually, this looks like:
+# P1: TT trace2 004child
+# P2: |--- TT trace2 004child
+# P3: |--- TT trace2 001return 0
+
+test_expect_success JSON_PP 'event stream, return code 0' '
+ test_when_finished "rm trace.event actual expect" &&
+ GIT_TR2_EVENT="$(pwd)/trace.event" test-tool trace2 004child test-tool trace2 004child test-tool trace2 001return 0 &&
+ perl "$TEST_DIRECTORY/t0212/parse_events.perl" <trace.event >actual &&
+ sed -e "s/^|//" >expect <<-EOF &&
+ |VAR1 = {
+ | "_SID0_":{
+ | "argv":[
+ | "_EXE_",
+ | "trace2",
+ | "004child",
+ | "test-tool",
+ | "trace2",
+ | "004child",
+ | "test-tool",
+ | "trace2",
+ | "001return",
+ | "0"
+ | ],
+ | "child":{
+ | "0":{
+ | "child_argv":[
+ | "_EXE_",
+ | "trace2",
+ | "004child",
+ | "test-tool",
+ | "trace2",
+ | "001return",
+ | "0"
+ | ],
+ | "child_class":"?",
+ | "child_code":0,
+ | "use_shell":0
+ | }
+ | },
+ | "exit_code":0,
+ | "hierarchy":"trace2",
+ | "name":"trace2",
+ | "version":"$V"
+ | },
+ | "_SID0_/_SID1_":{
+ | "argv":[
+ | "_EXE_",
+ | "trace2",
+ | "004child",
+ | "test-tool",
+ | "trace2",
+ | "001return",
+ | "0"
+ | ],
+ | "child":{
+ | "0":{
+ | "child_argv":[
+ | "_EXE_",
+ | "trace2",
+ | "001return",
+ | "0"
+ | ],
+ | "child_class":"?",
+ | "child_code":0,
+ | "use_shell":0
+ | }
+ | },
+ | "exit_code":0,
+ | "hierarchy":"trace2/trace2",
+ | "name":"trace2",
+ | "version":"$V"
+ | },
+ | "_SID0_/_SID1_/_SID2_":{
+ | "argv":[
+ | "_EXE_",
+ | "trace2",
+ | "001return",
+ | "0"
+ | ],
+ | "exit_code":0,
+ | "hierarchy":"trace2/trace2/trace2",
+ | "name":"trace2",
+ | "version":"$V"
+ | }
+ |};
+ EOF
+ test_cmp expect actual
+'
+
+# Test listing of all "interesting" config settings.
+
+test_expect_success JSON_PP 'event stream, list config' '
+ test_when_finished "rm trace.event actual expect" &&
+ git config --local t0212.abc 1 &&
+ git config --local t0212.def "hello world" &&
+ GIT_TR2_EVENT="$(pwd)/trace.event" GIT_TR2_CONFIG_PARAMS="t0212.*" test-tool trace2 001return 0 &&
+ perl "$TEST_DIRECTORY/t0212/parse_events.perl" <trace.event >actual &&
+ sed -e "s/^|//" >expect <<-EOF &&
+ |VAR1 = {
+ | "_SID0_":{
+ | "argv":[
+ | "_EXE_",
+ | "trace2",
+ | "001return",
+ | "0"
+ | ],
+ | "exit_code":0,
+ | "hierarchy":"trace2",
+ | "name":"trace2",
+ | "params":[
+ | {
+ | "param":"t0212.abc",
+ | "value":"1"
+ | },
+ | {
+ | "param":"t0212.def",
+ | "value":"hello world"
+ | }
+ | ],
+ | "version":"$V"
+ | }
+ |};
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success JSON_PP 'basic trace2_data' '
+ test_when_finished "rm trace.event actual expect" &&
+ GIT_TR2_EVENT="$(pwd)/trace.event" test-tool trace2 006data test_category k1 v1 test_category k2 v2 &&
+ perl "$TEST_DIRECTORY/t0212/parse_events.perl" <trace.event >actual &&
+ sed -e "s/^|//" >expect <<-EOF &&
+ |VAR1 = {
+ | "_SID0_":{
+ | "argv":[
+ | "_EXE_",
+ | "trace2",
+ | "006data",
+ | "test_category",
+ | "k1",
+ | "v1",
+ | "test_category",
+ | "k2",
+ | "v2"
+ | ],
+ | "data":{
+ | "test_category":{
+ | "k1":"v1",
+ | "k2":"v2"
+ | }
+ | },
+ | "exit_code":0,
+ | "hierarchy":"trace2",
+ | "name":"trace2",
+ | "version":"$V"
+ | }
+ |};
+ EOF
+ test_cmp expect actual
+'
+
+test_done
--- /dev/null
+#!/usr/bin/perl
+#
+# Parse event stream and convert individual events into a summary
+# record for the process.
+#
+# Git.exe generates one or more "event" records for each API method,
+# such as "start <argv>" and "exit <code>", during the life of the git
+# process. Additionally, the input may contain interleaved events
+# from multiple concurrent git processes and/or multiple threads from
+# within a git process.
+#
+# Accumulate events for each process (based on its unique SID) in a
+# dictionary and emit process summary records.
+#
+# Convert some of the variable fields (such as elapsed time) into
+# placeholders (or omit them) to make HEREDOC comparisons easier in
+# the test scripts.
+#
+# We may also omit fields not (currently) useful for testing purposes.
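+#
+# For illustration only (field order and exact values vary), each input line
+# is one JSON object describing a single event, e.g.:
+#   {"event":"version", "sid":"1539706952458276-8652", "evt":"1", "exe":"2.21.0"}
+# and the output is one summary record per SID, in the form the test script
+# HEREDOCs expect.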
+
+use strict;
+use warnings;
+use JSON::PP;
+use Data::Dumper;
+use Getopt::Long;
+
+# The version of the trace2 event target format that we understand.
+# This is reported in the 'version' event in the 'evt' field.
+# It comes from the GIT_TR2_EVENT_VERSION macro in trace2/tr2_tgt_event.c
+my $evt_version = '1';
+
+my $show_children = 1;
+my $show_exec = 1;
+my $show_threads = 1;
+
+# A hack to generate test HEREDOC data for pasting into the test script.
+# Usage:
+# cd "t/trash directory.t0212-trace2-event"
+# $TT trace ... >trace.event
+# VV=$(../../git.exe version | sed -e 's/^git version //')
+# perl ../t0212/parse_events.perl --HEREDOC --VERSION=$VV <trace.event >heredoc
+# Then paste heredoc into your new test.
+
+my $gen_heredoc = 0;
+my $gen_version = '';
+
+GetOptions("children!" => \$show_children,
+ "exec!" => \$show_exec,
+ "threads!" => \$show_threads,
+ "HEREDOC!" => \$gen_heredoc,
+ "VERSION=s" => \$gen_version )
+ or die("Error in command line arguments\n");
+
+
+# SIDs contains timestamps and PIDs of the process and its parents.
+# This makes it difficult to match up in a HEREDOC in the test script.
+# Build a map from actual SIDs to predictable constant values and yet
+# keep the parent/child relationships. For example:
+# {..., "sid":"1539706952458276-8652", ...}
+# {..., "sid":"1539706952458276-8652/1539706952649493-15452", ...}
+# becomes:
+# {..., "sid":"_SID1_", ...}
+# {..., "sid":"_SID1_/_SID2_", ...}
+my $sid_map;
+my $sid_count = 0;
+
+my $processes;
+
+while (<>) {
+ my $line = decode_json( $_ );
+
+ my $sid = "";
+ my $sid_sep = "";
+
+ my $raw_sid = $line->{'sid'};
+ my @raw_sid_parts = split /\//, $raw_sid;
+ foreach my $raw_sid_k (@raw_sid_parts) {
+ if (!exists $sid_map->{$raw_sid_k}) {
+ $sid_map->{$raw_sid_k} = '_SID' . $sid_count . '_';
+ $sid_count++;
+ }
+ $sid = $sid . $sid_sep . $sid_map->{$raw_sid_k};
+ $sid_sep = '/';
+ }
+
+ my $event = $line->{'event'};
+
+ if ($event eq 'version') {
+ $processes->{$sid}->{'version'} = $line->{'exe'};
+ if ($gen_heredoc == 1 && $gen_version eq $line->{'exe'}) {
+ # If we are generating data FOR the test script, replace
+ # the reported git.exe version with a reference to an
+ # environment variable. When our output is pasted into
+ # the test script, it will then be expanded in future
+ # test runs to the THEN current version of git.exe.
+ # We assume that the test script uses env var $V.
+ $processes->{$sid}->{'version'} = "\$V";
+ }
+ }
+
+ elsif ($event eq 'start') {
+ $processes->{$sid}->{'argv'} = $line->{'argv'};
+ $processes->{$sid}->{'argv'}[0] = "_EXE_";
+ }
+
+ elsif ($event eq 'exit') {
+ $processes->{$sid}->{'exit_code'} = $line->{'code'};
+ }
+
+ elsif ($event eq 'atexit') {
+ $processes->{$sid}->{'exit_code'} = $line->{'code'};
+ }
+
+ elsif ($event eq 'error') {
+ # For HEREDOC purposes, use the error message format string if
+ # available, rather than the formatted message (which probably
+ # has an absolute pathname).
+ if (exists $line->{'fmt'}) {
+ push( @{$processes->{$sid}->{'errors'}}, $line->{'fmt'} );
+ }
+ elsif (exists $line->{'msg'}) {
+ push( @{$processes->{$sid}->{'errors'}}, $line->{'msg'} );
+ }
+ }
+
+ elsif ($event eq 'cmd_path') {
+ ## $processes->{$sid}->{'path'} = $line->{'path'};
+ #
+ # Like in the 'start' event, we need to replace the value of
+ # argv[0] with a token for HEREDOC purposes. However, the
+ # event is only emitted when RUNTIME_PREFIX is defined, so
+ # just omit it for testing purposes.
+ # $processes->{$sid}->{'path'} = "_EXE_";
+ }
+
+ elsif ($event eq 'cmd_name') {
+ $processes->{$sid}->{'name'} = $line->{'name'};
+ $processes->{$sid}->{'hierarchy'} = $line->{'hierarchy'};
+ }
+
+ elsif ($event eq 'alias') {
+ $processes->{$sid}->{'alias'}->{'key'} = $line->{'alias'};
+ $processes->{$sid}->{'alias'}->{'argv'} = $line->{'argv'};
+ }
+
+ elsif ($event eq 'def_param') {
+ my $kv;
+ $kv->{'param'} = $line->{'param'};
+ $kv->{'value'} = $line->{'value'};
+ push( @{$processes->{$sid}->{'params'}}, $kv );
+ }
+
+ elsif ($event eq 'child_start') {
+ if ($show_children == 1) {
+ $processes->{$sid}->{'child'}->{$line->{'child_id'}}->{'child_class'} = $line->{'child_class'};
+ $processes->{$sid}->{'child'}->{$line->{'child_id'}}->{'child_argv'} = $line->{'argv'};
+ $processes->{$sid}->{'child'}->{$line->{'child_id'}}->{'child_argv'}[0] = "_EXE_";
+ $processes->{$sid}->{'child'}->{$line->{'child_id'}}->{'use_shell'} = $line->{'use_shell'} ? 1 : 0;
+ }
+ }
+
+ elsif ($event eq 'child_exit') {
+ if ($show_children == 1) {
+ $processes->{$sid}->{'child'}->{$line->{'child_id'}}->{'child_code'} = $line->{'code'};
+ }
+ }
+
+ # TODO decide what information we want to test from thread events.
+
+ elsif ($event eq 'thread_start') {
+ if ($show_threads == 1) {
+ }
+ }
+
+ elsif ($event eq 'thread_exit') {
+ if ($show_threads == 1) {
+ }
+ }
+
+ # TODO decide what information we want to test from exec events.
+
+ elsif ($event eq 'exec') {
+ if ($show_exec == 1) {
+ }
+ }
+
+ elsif ($event eq 'exec_result') {
+ if ($show_exec == 1) {
+ }
+ }
+
+ elsif ($event eq 'def_param') {
+ # Accumulate parameter key/value pairs by key rather than in an array
+ # so that we get overwrite (last one wins) effects.
+ $processes->{$sid}->{'params'}->{$line->{'param'}} = $line->{'value'};
+ }
+
+ elsif ($event eq 'def_repo') {
+ # $processes->{$sid}->{'repos'}->{$line->{'repo'}} = $line->{'worktree'};
+ $processes->{$sid}->{'repos'}->{$line->{'repo'}} = "_WORKTREE_";
+ }
+
+ # A series of potentially nested and threaded region and data events
+# is fundamentally incompatible with the type of summary record we
+ # are building in this script. Since they are intended for
+ # perf-trace-like analysis rather than a result summary, we ignore
+ # most of them here.
+
+ # elsif ($event eq 'region_enter') {
+ # }
+ # elsif ($event eq 'region_leave') {
+ # }
+
+ elsif ($event eq 'data') {
+ my $cat = $line->{'category'};
+ if ($cat eq 'test_category') {
+
+ my $key = $line->{'key'};
+ my $value = $line->{'value'};
+ $processes->{$sid}->{'data'}->{$cat}->{$key} = $value;
+ }
+ }
+
+ # This trace2 target does not emit 'printf' events.
+ #
+ # elsif ($event eq 'printf') {
+ # }
+}
+
+# Dump the resulting hash into something that we can compare against
+# in the test script. These options make Dumper output look a little
+# bit like JSON. Also convert variable references of the form "$VAR*"
+# so that the matching HEREDOC doesn't need to escape it.
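+#
+# (For example, where Dumper would normally emit "$VAR1 = {", the output
+# here says "VAR1 = {", matching the HEREDOCs in the test script.)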
+
+$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 1;
+$Data::Dumper::Purity = 1;
+$Data::Dumper::Pair = ':';
+
+my $out = Dumper($processes);
+$out =~ s/'/"/g;
+$out =~ s/\$VAR/VAR/g;
+
+# Finally, if we're running this script to generate (manually confirmed)
+# data to add to the test script, guard the indentation.
+
+if ($gen_heredoc == 1) {
+ $out =~ s/^/\t\|/gms;
+}
+
+print $out;
}
# don't leave a stale daemon running
-trap 'code=$?; git credential-cache exit; (exit $code); die' EXIT
+test_atexit 'git credential-cache exit'
# test that the daemon works with no special setup
helper_test cache
helper_test_timeout cache --timeout=1
-# we can't rely on our "trap" above working after test_done,
-# as test_done will delete the trash directory containing
-# our socket, leaving us with no way to access the daemon.
-git credential-cache exit
-
test_done
git verify-pack --verbose "$IDX" | grep "$HASH"
'
-stop_httpd
-
test_done
)
'
+test_expect_success 'internal tree objects are not "missing"' '
+ git init missing-empty &&
+ (
+ cd missing-empty &&
+ empty_tree=$(git hash-object -t tree /dev/null) &&
+ commit=$(echo foo | git commit-tree $empty_tree) &&
+ git rev-list --objects $commit
+ )
+'
+
test_done
)
'
+test_expect_success 'conditional include with /**/' '
+ REPO=foo/bar/repo &&
+ git init $REPO &&
+ cat >>$REPO/.git/config <<-\EOF &&
+ [includeIf "gitdir:**/foo/**/bar/**"]
+ path=bar7
+ EOF
+ echo "[test]seven=7" >$REPO/.git/bar7 &&
+ echo 7 >expect &&
+ git -C $REPO config test.seven >actual &&
+ test_cmp expect actual
+'
+
test_expect_success SYMLINKS 'conditional include, set up symlinked $HOME' '
mkdir real-home &&
ln -s real-home home &&
test_cmp unchanged actual
'
+test_expect_success 'delete fails cleanly if packed-refs.new write fails' '
+ # Setup and expectations are similar to the test above.
+ prefix=refs/failed-packed-refs &&
+ git update-ref $prefix/foo $C &&
+ git pack-refs --all &&
+ git update-ref $prefix/foo $D &&
+ git for-each-ref $prefix >unchanged &&
+ # This should not happen in practice, but it is an easy way to get a
+ # reliable error (we open with create_tempfile(), which uses O_EXCL).
+ : >.git/packed-refs.new &&
+ test_when_finished "rm -f .git/packed-refs.new" &&
+ test_must_fail git update-ref -d $prefix/foo &&
+ git for-each-ref $prefix >actual &&
+ test_cmp unchanged actual
+'
+
test_done
'
test_expect_success 'gc.reflogexpire=never' '
+ test_config gc.reflogexpire never &&
+ test_config gc.reflogexpireunreachable never &&
+
+ git reflog expire --verbose --all >output &&
+ test_line_count = 9 output &&
- git config gc.reflogexpire never &&
- git config gc.reflogexpireunreachable never &&
- git reflog expire --verbose --all &&
git reflog refs/heads/master >output &&
test_line_count = 4 output
'
test_expect_success 'gc.reflogexpire=false' '
+ test_config gc.reflogexpire false &&
+ test_config gc.reflogexpireunreachable false &&
- git config gc.reflogexpire false &&
- git config gc.reflogexpireunreachable false &&
git reflog expire --verbose --all &&
git reflog refs/heads/master >output &&
- test_line_count = 4 output &&
+ test_line_count = 4 output
+
+'
- git config --unset gc.reflogexpire &&
- git config --unset gc.reflogexpireunreachable
+test_expect_success 'git reflog expire unknown reference' '
+ test_config gc.reflogexpire never &&
+ test_config gc.reflogexpireunreachable never &&
+ test_must_fail git reflog expire master@{123} 2>stderr &&
+ test_i18ngrep "points nowhere" stderr &&
+ test_must_fail git reflog expire does-not-exist 2>stderr &&
+ test_i18ngrep "points nowhere" stderr
'
test_expect_success 'checkout should not delete log for packed ref' '
test_cmp expected actual.wt2
'
+test_expect_success 'for-each-ref from main repo' '
+ mkdir fer1 &&
+ git -C fer1 init repo &&
+ test_commit -C fer1/repo initial &&
+ git -C fer1/repo worktree add ../second &&
+ git -C fer1/repo update-ref refs/bisect/main HEAD &&
+ git -C fer1/repo update-ref refs/rewritten/main HEAD &&
+ git -C fer1/repo update-ref refs/worktree/main HEAD &&
+ git -C fer1/repo for-each-ref --format="%(refname)" | grep main >actual &&
+ cat >expected <<-\EOF &&
+ refs/bisect/main
+ refs/rewritten/main
+ refs/worktree/main
+ EOF
+ test_cmp expected actual
+'
+
+test_expect_success 'for-each-ref from linked repo' '
+ mkdir fer2 &&
+ git -C fer2 init repo &&
+ test_commit -C fer2/repo initial &&
+ git -C fer2/repo worktree add ../second &&
+ git -C fer2/second update-ref refs/bisect/second HEAD &&
+ git -C fer2/second update-ref refs/rewritten/second HEAD &&
+ git -C fer2/second update-ref refs/worktree/second HEAD &&
+ git -C fer2/second for-each-ref --format="%(refname)" | grep second >actual &&
+ cat >expected <<-\EOF &&
+ refs/bisect/second
+ refs/heads/second
+ refs/rewritten/second
+ refs/worktree/second
+ EOF
+ test_cmp expected actual
+'
+
test_done
# for each type, we have one version which is referenced by another object
# (and so while unreachable, not dangling), and another variant which really is
# dangling.
-test_expect_success 'fsck notices dangling objects' '
+test_expect_success 'create dangling-object repository' '
git init dangling &&
(
cd dangling &&
commit=$(git commit-tree $tree) &&
dcommit=$(git commit-tree -p $commit $tree) &&
- cat >expect <<-EOF &&
+ cat >expect <<-EOF
dangling blob $dblob
dangling commit $dcommit
dangling tree $dtree
EOF
+ )
+'
+test_expect_success 'fsck notices dangling objects' '
+ (
+ cd dangling &&
git fsck >actual &&
# the output order is non-deterministic, as it comes from a hash
sort <actual >actual.sorted &&
)
'
+test_expect_success 'fsck --connectivity-only notices dangling objects' '
+ (
+ cd dangling &&
+ git fsck --connectivity-only >actual &&
+ # the output order is non-deterministic, as it comes from a hash
+ sort <actual >actual.sorted &&
+ test_i18ncmp expect actual.sorted
+ )
+'
+
test_expect_success 'fsck $name notices bogus $name' '
test_must_fail git fsck bogus &&
test_must_fail git fsck $ZERO_OID
test_line_count = 0 cache-tree.out
'
+test_expect_success 'do not refresh null base index' '
+ test_create_repo merge &&
+ (
+ cd merge &&
+ test_commit initial &&
+ git checkout -b side-branch &&
+ test_commit extra &&
+ git checkout master &&
+ git update-index --split-index &&
+ test_commit more &&
+ # must not write a new sharedindex, or we will not catch the problem
+ git -c splitIndex.maxPercentChange=100 merge --no-edit side-branch 2>err &&
+ # i.e. do not expect warnings like
+ # could not freshen shared index .../sharedindex.00000...
+ test_must_be_empty err
+ )
+'
+
test_done
test_cmp both.txt.conflicted.cleaned both.txt.cleaned
'
+test_expect_success 'force checkout a conflict file creates stage zero entry' '
+ git init co-force &&
+ (
+ cd co-force &&
+ echo a >a &&
+ git add a &&
+ git commit -ama &&
+ A_OBJ=$(git rev-parse :a) &&
+ git branch topic &&
+ echo b >a &&
+ git commit -amb &&
+ B_OBJ=$(git rev-parse :a) &&
+ git checkout topic &&
+ echo c >a &&
+ C_OBJ=$(git hash-object a) &&
+ git checkout -m master &&
+ test_cmp_rev :1:a $A_OBJ &&
+ test_cmp_rev :2:a $B_OBJ &&
+ test_cmp_rev :3:a $C_OBJ &&
+ git checkout -f topic &&
+ test_cmp_rev :0:a $A_OBJ
+ )
+'
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='checkout --no-overlay <tree-ish> -- <pathspec>'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ git commit --allow-empty -m "initial"
+'
+
+test_expect_success 'checkout --no-overlay deletes files not in <tree-ish>' '
+ >file &&
+ mkdir dir &&
+ >dir/file1 &&
+ git add file dir/file1 &&
+ git checkout --no-overlay HEAD -- file &&
+ test_path_is_missing file &&
+ test_path_is_file dir/file1
+'
+
+test_expect_success 'checkout --no-overlay removing last file from directory' '
+ git checkout --no-overlay HEAD -- dir/file1 &&
+ test_path_is_missing dir
+'
+
+test_expect_success 'checkout -p --overlay is disallowed' '
+ test_must_fail git checkout -p --overlay HEAD 2>actual &&
+ test_i18ngrep "fatal: -p and --overlay are mutually exclusive" actual
+'
+
+test_expect_success '--no-overlay --theirs with D/F conflict deletes file' '
+ test_commit file1 file1 &&
+ test_commit file2 file2 &&
+ git rm --cached file1 &&
+ echo 1234 >file1 &&
+ F1=$(git rev-parse HEAD:file1) &&
+ F2=$(git rev-parse HEAD:file2) &&
+ {
+ echo "100644 $F1 1 file1" &&
+ echo "100644 $F2 2 file1"
+ } | git update-index --index-info &&
+ test_path_is_file file1 &&
+ git checkout --theirs --no-overlay -- file1 &&
+ test_path_is_missing file1
+'
+
+test_done
+++ /dev/null
-#!/bin/sh
-
-test_description='test git worktree add'
-
-. ./test-lib.sh
-
-. "$TEST_DIRECTORY"/lib-rebase.sh
-
-test_expect_success 'setup' '
- test_commit init
-'
-
-test_expect_success '"add" an existing worktree' '
- mkdir -p existing/subtree &&
- test_must_fail git worktree add --detach existing master
-'
-
-test_expect_success '"add" an existing empty worktree' '
- mkdir existing_empty &&
- git worktree add --detach existing_empty master
-'
-
-test_expect_success '"add" using shorthand - fails when no previous branch' '
- test_must_fail git worktree add existing_short -
-'
-
-test_expect_success '"add" using - shorthand' '
- git checkout -b newbranch &&
- echo hello >myworld &&
- git add myworld &&
- git commit -m myworld &&
- git checkout master &&
- git worktree add short-hand - &&
- echo refs/heads/newbranch >expect &&
- git -C short-hand rev-parse --symbolic-full-name HEAD >actual &&
- test_cmp expect actual
-'
-
-test_expect_success '"add" refuses to checkout locked branch' '
- test_must_fail git worktree add zere master &&
- ! test -d zere &&
- ! test -d .git/worktrees/zere
-'
-
-test_expect_success 'checking out paths not complaining about linked checkouts' '
- (
- cd existing_empty &&
- echo dirty >>init.t &&
- git checkout master -- init.t
- )
-'
-
-test_expect_success '"add" worktree' '
- git rev-parse HEAD >expect &&
- git worktree add --detach here master &&
- (
- cd here &&
- test_cmp ../init.t init.t &&
- test_must_fail git symbolic-ref HEAD &&
- git rev-parse HEAD >actual &&
- test_cmp ../expect actual &&
- git fsck
- )
-'
-
-test_expect_success '"add" worktree with lock' '
- git rev-parse HEAD >expect &&
- git worktree add --detach --lock here-with-lock master &&
- test -f .git/worktrees/here-with-lock/locked
-'
-
-test_expect_success '"add" worktree from a subdir' '
- (
- mkdir sub &&
- cd sub &&
- git worktree add --detach here master &&
- cd here &&
- test_cmp ../../init.t init.t
- )
-'
-
-test_expect_success '"add" from a linked checkout' '
- (
- cd here &&
- git worktree add --detach nested-here master &&
- cd nested-here &&
- git fsck
- )
-'
-
-test_expect_success '"add" worktree creating new branch' '
- git worktree add -b newmaster there master &&
- (
- cd there &&
- test_cmp ../init.t init.t &&
- git symbolic-ref HEAD >actual &&
- echo refs/heads/newmaster >expect &&
- test_cmp expect actual &&
- git fsck
- )
-'
-
-test_expect_success 'die the same branch is already checked out' '
- (
- cd here &&
- test_must_fail git checkout newmaster
- )
-'
-
-test_expect_success SYMLINKS 'die the same branch is already checked out (symlink)' '
- head=$(git -C there rev-parse --git-path HEAD) &&
- ref=$(git -C there symbolic-ref HEAD) &&
- rm "$head" &&
- ln -s "$ref" "$head" &&
- test_must_fail git -C here checkout newmaster
-'
-
-test_expect_success 'not die the same branch is already checked out' '
- (
- cd here &&
- git worktree add --force anothernewmaster newmaster
- )
-'
-
-test_expect_success 'not die on re-checking out current branch' '
- (
- cd there &&
- git checkout newmaster
- )
-'
-
-test_expect_success '"add" from a bare repo' '
- (
- git clone --bare . bare &&
- cd bare &&
- git worktree add -b bare-master ../there2 master
- )
-'
-
-test_expect_success 'checkout from a bare repo without "add"' '
- (
- cd bare &&
- test_must_fail git checkout master
- )
-'
-
-test_expect_success '"add" default branch of a bare repo' '
- (
- git clone --bare . bare2 &&
- cd bare2 &&
- git worktree add ../there3 master
- )
-'
-
-test_expect_success 'checkout with grafts' '
- test_when_finished rm .git/info/grafts &&
- test_commit abc &&
- SHA1=$(git rev-parse HEAD) &&
- test_commit def &&
- test_commit xyz &&
- echo "$(git rev-parse HEAD) $SHA1" >.git/info/grafts &&
- cat >expected <<-\EOF &&
- xyz
- abc
- EOF
- git log --format=%s -2 >actual &&
- test_cmp expected actual &&
- git worktree add --detach grafted master &&
- git --git-dir=grafted/.git log --format=%s -2 >actual &&
- test_cmp expected actual
-'
-
-test_expect_success '"add" from relative HEAD' '
- test_commit a &&
- test_commit b &&
- test_commit c &&
- git rev-parse HEAD~1 >expected &&
- git worktree add relhead HEAD~1 &&
- git -C relhead rev-parse HEAD >actual &&
- test_cmp expected actual
-'
-
-test_expect_success '"add -b" with <branch> omitted' '
- git worktree add -b burble flornk &&
- test_cmp_rev HEAD burble
-'
-
-test_expect_success '"add --detach" with <branch> omitted' '
- git worktree add --detach fishhook &&
- git rev-parse HEAD >expected &&
- git -C fishhook rev-parse HEAD >actual &&
- test_cmp expected actual &&
- test_must_fail git -C fishhook symbolic-ref HEAD
-'
-
-test_expect_success '"add" with <branch> omitted' '
- git worktree add wiffle/bat &&
- test_cmp_rev HEAD bat
-'
-
-test_expect_success '"add" checks out existing branch of dwimd name' '
- git branch dwim HEAD~1 &&
- git worktree add dwim &&
- test_cmp_rev HEAD~1 dwim &&
- (
- cd dwim &&
- test_cmp_rev HEAD dwim
- )
-'
-
-test_expect_success '"add <path>" dwim fails with checked out branch' '
- git checkout -b test-branch &&
- test_must_fail git worktree add test-branch &&
- test_path_is_missing test-branch
-'
-
-test_expect_success '"add --force" with existing dwimd name doesnt die' '
- git checkout test-branch &&
- git worktree add --force test-branch
-'
-
-test_expect_success '"add" no auto-vivify with --detach and <branch> omitted' '
- git worktree add --detach mish/mash &&
- test_must_fail git rev-parse mash -- &&
- test_must_fail git -C mish/mash symbolic-ref HEAD
-'
-
-test_expect_success '"add" -b/-B mutually exclusive' '
- test_must_fail git worktree add -b poodle -B poodle bamboo master
-'
-
-test_expect_success '"add" -b/--detach mutually exclusive' '
- test_must_fail git worktree add -b poodle --detach bamboo master
-'
-
-test_expect_success '"add" -B/--detach mutually exclusive' '
- test_must_fail git worktree add -B poodle --detach bamboo master
-'
-
-test_expect_success '"add -B" fails if the branch is checked out' '
- git rev-parse newmaster >before &&
- test_must_fail git worktree add -B newmaster bamboo master &&
- git rev-parse newmaster >after &&
- test_cmp before after
-'
-
-test_expect_success 'add -B' '
- git worktree add -B poodle bamboo2 master^ &&
- git -C bamboo2 symbolic-ref HEAD >actual &&
- echo refs/heads/poodle >expected &&
- test_cmp expected actual &&
- test_cmp_rev master^ poodle
-'
-
-test_expect_success 'add --quiet' '
- git worktree add --quiet another-worktree master 2>actual &&
- test_must_be_empty actual
-'
-
-test_expect_success 'local clone from linked checkout' '
- git clone --local here here-clone &&
- ( cd here-clone && git fsck )
-'
-
-test_expect_success 'local clone --shared from linked checkout' '
- git -C bare worktree add --detach ../baretree &&
- git clone --local --shared baretree bare-clone &&
- grep /bare/ bare-clone/.git/objects/info/alternates
-'
-
-test_expect_success '"add" worktree with --no-checkout' '
- git worktree add --no-checkout -b swamp swamp &&
- ! test -e swamp/init.t &&
- git -C swamp reset --hard &&
- test_cmp init.t swamp/init.t
-'
-
-test_expect_success '"add" worktree with --checkout' '
- git worktree add --checkout -b swmap2 swamp2 &&
- test_cmp init.t swamp2/init.t
-'
-
-test_expect_success 'put a worktree under rebase' '
- git worktree add under-rebase &&
- (
- cd under-rebase &&
- set_fake_editor &&
- FAKE_LINES="edit 1" git rebase -i HEAD^ &&
- git worktree list | grep "under-rebase.*detached HEAD"
- )
-'
-
-test_expect_success 'add a worktree, checking out a rebased branch' '
- test_must_fail git worktree add new-rebase under-rebase &&
- ! test -d new-rebase
-'
-
-test_expect_success 'checking out a rebased branch from another worktree' '
- git worktree add new-place &&
- test_must_fail git -C new-place checkout under-rebase
-'
-
-test_expect_success 'not allow to delete a branch under rebase' '
- (
- cd under-rebase &&
- test_must_fail git branch -D under-rebase
- )
-'
-
-test_expect_success 'rename a branch under rebase not allowed' '
- test_must_fail git branch -M under-rebase rebase-with-new-name
-'
-
-test_expect_success 'check out from current worktree branch ok' '
- (
- cd under-rebase &&
- git checkout under-rebase &&
- git checkout - &&
- git rebase --abort
- )
-'
-
-test_expect_success 'checkout a branch under bisect' '
- git worktree add under-bisect &&
- (
- cd under-bisect &&
- git bisect start &&
- git bisect bad &&
- git bisect good HEAD~2 &&
- git worktree list | grep "under-bisect.*detached HEAD" &&
- test_must_fail git worktree add new-bisect under-bisect &&
- ! test -d new-bisect
- )
-'
-
-test_expect_success 'rename a branch under bisect not allowed' '
- test_must_fail git branch -M under-bisect bisect-with-new-name
-'
-# Is branch "refs/heads/$1" set to pull from "$2/$3"?
-test_branch_upstream () {
- printf "%s\n" "$2" "refs/heads/$3" >expect.upstream &&
- {
- git config "branch.$1.remote" &&
- git config "branch.$1.merge"
- } >actual.upstream &&
- test_cmp expect.upstream actual.upstream
-}
-
-test_expect_success '--track sets up tracking' '
- test_when_finished rm -rf track &&
- git worktree add --track -b track track master &&
- test_branch_upstream track . master
-'
-
-# setup remote repository $1 and repository $2 with $1 set up as
-# remote. The remote has two branches, master and foo.
-setup_remote_repo () {
- git init $1 &&
- (
- cd $1 &&
- test_commit $1_master &&
- git checkout -b foo &&
- test_commit upstream_foo
- ) &&
- git init $2 &&
- (
- cd $2 &&
- test_commit $2_master &&
- git remote add $1 ../$1 &&
- git config remote.$1.fetch \
- "refs/heads/*:refs/remotes/$1/*" &&
- git fetch --all
- )
-}
-
-test_expect_success '--no-track avoids setting up tracking' '
- test_when_finished rm -rf repo_upstream repo_local foo &&
- setup_remote_repo repo_upstream repo_local &&
- (
- cd repo_local &&
- git worktree add --no-track -b foo ../foo repo_upstream/foo
- ) &&
- (
- cd foo &&
- test_must_fail git config "branch.foo.remote" &&
- test_must_fail git config "branch.foo.merge" &&
- test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
- )
-'
-
-test_expect_success '"add" <path> <non-existent-branch> fails' '
- test_must_fail git worktree add foo non-existent
-'
-
-test_expect_success '"add" <path> <branch> dwims' '
- test_when_finished rm -rf repo_upstream repo_dwim foo &&
- setup_remote_repo repo_upstream repo_dwim &&
- git init repo_dwim &&
- (
- cd repo_dwim &&
- git worktree add ../foo foo
- ) &&
- (
- cd foo &&
- test_branch_upstream foo repo_upstream foo &&
- test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
- )
-'
-
-test_expect_success '"add" <path> <branch> dwims with checkout.defaultRemote' '
- test_when_finished rm -rf repo_upstream repo_dwim foo &&
- setup_remote_repo repo_upstream repo_dwim &&
- git init repo_dwim &&
- (
- cd repo_dwim &&
- git remote add repo_upstream2 ../repo_upstream &&
- git fetch repo_upstream2 &&
- test_must_fail git worktree add ../foo foo &&
- git -c checkout.defaultRemote=repo_upstream worktree add ../foo foo &&
- git status -uno --porcelain >status.actual &&
- test_must_be_empty status.actual
- ) &&
- (
- cd foo &&
- test_branch_upstream foo repo_upstream foo &&
- test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
- )
-'
-
-test_expect_success 'git worktree add does not match remote' '
- test_when_finished rm -rf repo_a repo_b foo &&
- setup_remote_repo repo_a repo_b &&
- (
- cd repo_b &&
- git worktree add ../foo
- ) &&
- (
- cd foo &&
- test_must_fail git config "branch.foo.remote" &&
- test_must_fail git config "branch.foo.merge" &&
- ! test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
- )
-'
-
-test_expect_success 'git worktree add --guess-remote sets up tracking' '
- test_when_finished rm -rf repo_a repo_b foo &&
- setup_remote_repo repo_a repo_b &&
- (
- cd repo_b &&
- git worktree add --guess-remote ../foo
- ) &&
- (
- cd foo &&
- test_branch_upstream foo repo_a foo &&
- test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
- )
-'
-
-test_expect_success 'git worktree add with worktree.guessRemote sets up tracking' '
- test_when_finished rm -rf repo_a repo_b foo &&
- setup_remote_repo repo_a repo_b &&
- (
- cd repo_b &&
- git config worktree.guessRemote true &&
- git worktree add ../foo
- ) &&
- (
- cd foo &&
- test_branch_upstream foo repo_a foo &&
- test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
- )
-'
-
-test_expect_success 'git worktree --no-guess-remote option overrides config' '
- test_when_finished rm -rf repo_a repo_b foo &&
- setup_remote_repo repo_a repo_b &&
- (
- cd repo_b &&
- git config worktree.guessRemote true &&
- git worktree add --no-guess-remote ../foo
- ) &&
- (
- cd foo &&
- test_must_fail git config "branch.foo.remote" &&
- test_must_fail git config "branch.foo.merge" &&
- ! test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
- )
-'
-
-post_checkout_hook () {
- gitdir=${1:-.git}
- test_when_finished "rm -f $gitdir/hooks/post-checkout" &&
- mkdir -p $gitdir/hooks &&
- write_script $gitdir/hooks/post-checkout <<-\EOF
- {
- echo $*
- git rev-parse --git-dir --show-toplevel
- } >hook.actual
- EOF
-}
-
-test_expect_success '"add" invokes post-checkout hook (branch)' '
- post_checkout_hook &&
- {
- echo $ZERO_OID $(git rev-parse HEAD) 1 &&
- echo $(pwd)/.git/worktrees/gumby &&
- echo $(pwd)/gumby
- } >hook.expect &&
- git worktree add gumby &&
- test_cmp hook.expect gumby/hook.actual
-'
-
-test_expect_success '"add" invokes post-checkout hook (detached)' '
- post_checkout_hook &&
- {
- echo $ZERO_OID $(git rev-parse HEAD) 1 &&
- echo $(pwd)/.git/worktrees/grumpy &&
- echo $(pwd)/grumpy
- } >hook.expect &&
- git worktree add --detach grumpy &&
- test_cmp hook.expect grumpy/hook.actual
-'
-
-test_expect_success '"add --no-checkout" suppresses post-checkout hook' '
- post_checkout_hook &&
- rm -f hook.actual &&
- git worktree add --no-checkout gloopy &&
- test_path_is_missing gloopy/hook.actual
-'
-
-test_expect_success '"add" in other worktree invokes post-checkout hook' '
- post_checkout_hook &&
- {
- echo $ZERO_OID $(git rev-parse HEAD) 1 &&
- echo $(pwd)/.git/worktrees/guppy &&
- echo $(pwd)/guppy
- } >hook.expect &&
- git -C gloopy worktree add --detach ../guppy &&
- test_cmp hook.expect guppy/hook.actual
-'
-
-test_expect_success '"add" in bare repo invokes post-checkout hook' '
- rm -rf bare &&
- git clone --bare . bare &&
- {
- echo $ZERO_OID $(git --git-dir=bare rev-parse HEAD) 1 &&
- echo $(pwd)/bare/worktrees/goozy &&
- echo $(pwd)/goozy
- } >hook.expect &&
- post_checkout_hook bare &&
- git -C bare worktree add --detach ../goozy &&
- test_cmp hook.expect goozy/hook.actual
-'
-
-test_expect_success '"add" an existing but missing worktree' '
- git worktree add --detach pneu &&
- test_must_fail git worktree add --detach pneu &&
- rm -fr pneu &&
- test_must_fail git worktree add --detach pneu &&
- git worktree add --force --detach pneu
-'
-
-test_expect_success '"add" an existing locked but missing worktree' '
- git worktree add --detach gnoo &&
- git worktree lock gnoo &&
- test_when_finished "git worktree unlock gnoo || :" &&
- rm -fr gnoo &&
- test_must_fail git worktree add --detach gnoo &&
- test_must_fail git worktree add --force --detach gnoo &&
- git worktree add --force --force --detach gnoo
-'
-
-test_done
+++ /dev/null
-#!/bin/sh
-
-test_description='prune $GIT_DIR/worktrees'
-
-. ./test-lib.sh
-
-test_expect_success initialize '
- git commit --allow-empty -m init
-'
-
-test_expect_success 'worktree prune on normal repo' '
- git worktree prune &&
- test_must_fail git worktree prune abc
-'
-
-test_expect_success 'prune files inside $GIT_DIR/worktrees' '
- mkdir .git/worktrees &&
- : >.git/worktrees/abc &&
- git worktree prune --verbose >actual &&
- cat >expect <<EOF &&
-Removing worktrees/abc: not a valid directory
-EOF
- test_i18ncmp expect actual &&
- ! test -f .git/worktrees/abc &&
- ! test -d .git/worktrees
-'
-
-test_expect_success 'prune directories without gitdir' '
- mkdir -p .git/worktrees/def/abc &&
- : >.git/worktrees/def/def &&
- cat >expect <<EOF &&
-Removing worktrees/def: gitdir file does not exist
-EOF
- git worktree prune --verbose >actual &&
- test_i18ncmp expect actual &&
- ! test -d .git/worktrees/def &&
- ! test -d .git/worktrees
-'
-
-test_expect_success SANITY 'prune directories with unreadable gitdir' '
- mkdir -p .git/worktrees/def/abc &&
- : >.git/worktrees/def/def &&
- : >.git/worktrees/def/gitdir &&
- chmod u-r .git/worktrees/def/gitdir &&
- git worktree prune --verbose >actual &&
- test_i18ngrep "Removing worktrees/def: unable to read gitdir file" actual &&
- ! test -d .git/worktrees/def &&
- ! test -d .git/worktrees
-'
-
-test_expect_success 'prune directories with invalid gitdir' '
- mkdir -p .git/worktrees/def/abc &&
- : >.git/worktrees/def/def &&
- : >.git/worktrees/def/gitdir &&
- git worktree prune --verbose >actual &&
- test_i18ngrep "Removing worktrees/def: invalid gitdir file" actual &&
- ! test -d .git/worktrees/def &&
- ! test -d .git/worktrees
-'
-
-test_expect_success 'prune directories with gitdir pointing to nowhere' '
- mkdir -p .git/worktrees/def/abc &&
- : >.git/worktrees/def/def &&
- echo "$(pwd)"/nowhere >.git/worktrees/def/gitdir &&
- git worktree prune --verbose >actual &&
- test_i18ngrep "Removing worktrees/def: gitdir file points to non-existent location" actual &&
- ! test -d .git/worktrees/def &&
- ! test -d .git/worktrees
-'
-
-test_expect_success 'not prune locked checkout' '
- test_when_finished rm -r .git/worktrees &&
- mkdir -p .git/worktrees/ghi &&
- : >.git/worktrees/ghi/locked &&
- git worktree prune &&
- test -d .git/worktrees/ghi
-'
-
-test_expect_success 'not prune recent checkouts' '
- test_when_finished rm -r .git/worktrees &&
- git worktree add jlm HEAD &&
- test -d .git/worktrees/jlm &&
- rm -rf jlm &&
- git worktree prune --verbose --expire=2.days.ago &&
- test -d .git/worktrees/jlm
-'
-
-test_expect_success 'not prune proper checkouts' '
- test_when_finished rm -r .git/worktrees &&
- git worktree add --detach "$PWD/nop" master &&
- git worktree prune &&
- test -d .git/worktrees/nop
-'
-
-test_done
+++ /dev/null
-#!/bin/sh
-
-test_description='test git worktree list'
-
-. ./test-lib.sh
-
-test_expect_success 'setup' '
- test_commit init
-'
-
-test_expect_success 'rev-parse --git-common-dir on main worktree' '
- git rev-parse --git-common-dir >actual &&
- echo .git >expected &&
- test_cmp expected actual &&
- mkdir sub &&
- git -C sub rev-parse --git-common-dir >actual2 &&
- echo ../.git >expected2 &&
- test_cmp expected2 actual2
-'
-
-test_expect_success 'rev-parse --git-path objects linked worktree' '
- echo "$(git rev-parse --show-toplevel)/.git/objects" >expect &&
- test_when_finished "rm -rf linked-tree actual expect && git worktree prune" &&
- git worktree add --detach linked-tree master &&
- git -C linked-tree rev-parse --git-path objects >actual &&
- test_cmp expect actual
-'
-
-test_expect_success '"list" all worktrees from main' '
- echo "$(git rev-parse --show-toplevel) $(git rev-parse --short HEAD) [$(git symbolic-ref --short HEAD)]" >expect &&
- test_when_finished "rm -rf here out actual expect && git worktree prune" &&
- git worktree add --detach here master &&
- echo "$(git -C here rev-parse --show-toplevel) $(git rev-parse --short HEAD) (detached HEAD)" >>expect &&
- git worktree list >out &&
- sed "s/ */ /g" <out >actual &&
- test_cmp expect actual
-'
-
-test_expect_success '"list" all worktrees from linked' '
- echo "$(git rev-parse --show-toplevel) $(git rev-parse --short HEAD) [$(git symbolic-ref --short HEAD)]" >expect &&
- test_when_finished "rm -rf here out actual expect && git worktree prune" &&
- git worktree add --detach here master &&
- echo "$(git -C here rev-parse --show-toplevel) $(git rev-parse --short HEAD) (detached HEAD)" >>expect &&
- git -C here worktree list >out &&
- sed "s/ */ /g" <out >actual &&
- test_cmp expect actual
-'
-
-test_expect_success '"list" all worktrees --porcelain' '
- echo "worktree $(git rev-parse --show-toplevel)" >expect &&
- echo "HEAD $(git rev-parse HEAD)" >>expect &&
- echo "branch $(git symbolic-ref HEAD)" >>expect &&
- echo >>expect &&
- test_when_finished "rm -rf here actual expect && git worktree prune" &&
- git worktree add --detach here master &&
- echo "worktree $(git -C here rev-parse --show-toplevel)" >>expect &&
- echo "HEAD $(git rev-parse HEAD)" >>expect &&
- echo "detached" >>expect &&
- echo >>expect &&
- git worktree list --porcelain >actual &&
- test_cmp expect actual
-'
-
-test_expect_success 'bare repo setup' '
- git init --bare bare1 &&
- echo "data" >file1 &&
- git add file1 &&
- git commit -m"File1: add data" &&
- git push bare1 master &&
- git reset --hard HEAD^
-'
-
-test_expect_success '"list" all worktrees from bare main' '
- test_when_finished "rm -rf there out actual expect && git -C bare1 worktree prune" &&
- git -C bare1 worktree add --detach ../there master &&
- echo "$(pwd)/bare1 (bare)" >expect &&
- echo "$(git -C there rev-parse --show-toplevel) $(git -C there rev-parse --short HEAD) (detached HEAD)" >>expect &&
- git -C bare1 worktree list >out &&
- sed "s/ */ /g" <out >actual &&
- test_cmp expect actual
-'
-
-test_expect_success '"list" all worktrees --porcelain from bare main' '
- test_when_finished "rm -rf there actual expect && git -C bare1 worktree prune" &&
- git -C bare1 worktree add --detach ../there master &&
- echo "worktree $(pwd)/bare1" >expect &&
- echo "bare" >>expect &&
- echo >>expect &&
- echo "worktree $(git -C there rev-parse --show-toplevel)" >>expect &&
- echo "HEAD $(git -C there rev-parse HEAD)" >>expect &&
- echo "detached" >>expect &&
- echo >>expect &&
- git -C bare1 worktree list --porcelain >actual &&
- test_cmp expect actual
-'
-
-test_expect_success '"list" all worktrees from linked with a bare main' '
- test_when_finished "rm -rf there out actual expect && git -C bare1 worktree prune" &&
- git -C bare1 worktree add --detach ../there master &&
- echo "$(pwd)/bare1 (bare)" >expect &&
- echo "$(git -C there rev-parse --show-toplevel) $(git -C there rev-parse --short HEAD) (detached HEAD)" >>expect &&
- git -C there worktree list >out &&
- sed "s/ */ /g" <out >actual &&
- test_cmp expect actual
-'
-
-test_expect_success 'bare repo cleanup' '
- rm -rf bare1
-'
-
-test_expect_success 'broken main worktree still at the top' '
- git init broken-main &&
- (
- cd broken-main &&
- test_commit new &&
- git worktree add linked &&
- cat >expected <<-EOF &&
- worktree $(pwd)
- HEAD $ZERO_OID
-
- EOF
- cd linked &&
- echo "worktree $(pwd)" >expected &&
- echo "ref: .broken" >../.git/HEAD &&
- git worktree list --porcelain >out &&
- head -n 3 out >actual &&
- test_cmp ../expected actual &&
- git worktree list >out &&
- head -n 1 out >actual.2 &&
- grep -F "(error)" actual.2
- )
-'
-
-test_expect_success 'linked worktrees are sorted' '
- mkdir sorted &&
- git init sorted/main &&
- (
- cd sorted/main &&
- test_tick &&
- test_commit new &&
- git worktree add ../first &&
- git worktree add ../second &&
- git worktree list --porcelain >out &&
- grep ^worktree out >actual
- ) &&
- cat >expected <<-EOF &&
- worktree $(pwd)/sorted/main
- worktree $(pwd)/sorted/first
- worktree $(pwd)/sorted/second
- EOF
- test_cmp expected sorted/main/actual
-'
-
-test_done
+++ /dev/null
-#!/bin/sh
-
-test_description='test git worktree move, remove, lock and unlock'
-
-. ./test-lib.sh
-
-test_expect_success 'setup' '
- test_commit init &&
- git worktree add source &&
- git worktree list --porcelain >out &&
- grep "^worktree" out >actual &&
- cat <<-EOF >expected &&
- worktree $(pwd)
- worktree $(pwd)/source
- EOF
- test_cmp expected actual
-'
-
-test_expect_success 'lock main worktree' '
- test_must_fail git worktree lock .
-'
-
-test_expect_success 'lock linked worktree' '
- git worktree lock --reason hahaha source &&
- echo hahaha >expected &&
- test_cmp expected .git/worktrees/source/locked
-'
-
-test_expect_success 'lock linked worktree from another worktree' '
- rm .git/worktrees/source/locked &&
- git worktree add elsewhere &&
- git -C elsewhere worktree lock --reason hahaha ../source &&
- echo hahaha >expected &&
- test_cmp expected .git/worktrees/source/locked
-'
-
-test_expect_success 'lock worktree twice' '
- test_must_fail git worktree lock source &&
- echo hahaha >expected &&
- test_cmp expected .git/worktrees/source/locked
-'
-
-test_expect_success 'lock worktree twice (from the locked worktree)' '
- test_must_fail git -C source worktree lock . &&
- echo hahaha >expected &&
- test_cmp expected .git/worktrees/source/locked
-'
-
-test_expect_success 'unlock main worktree' '
- test_must_fail git worktree unlock .
-'
-
-test_expect_success 'unlock linked worktree' '
- git worktree unlock source &&
- test_path_is_missing .git/worktrees/source/locked
-'
-
-test_expect_success 'unlock worktree twice' '
- test_must_fail git worktree unlock source &&
- test_path_is_missing .git/worktrees/source/locked
-'
-
-test_expect_success 'move non-worktree' '
- mkdir abc &&
- test_must_fail git worktree move abc def
-'
-
-test_expect_success 'move locked worktree' '
- git worktree lock source &&
- test_when_finished "git worktree unlock source" &&
- test_must_fail git worktree move source destination
-'
-
-test_expect_success 'move worktree' '
- git worktree move source destination &&
- test_path_is_missing source &&
- git worktree list --porcelain >out &&
- grep "^worktree.*/destination$" out &&
- ! grep "^worktree.*/source$" out &&
- git -C destination log --format=%s >actual2 &&
- echo init >expected2 &&
- test_cmp expected2 actual2
-'
-
-test_expect_success 'move main worktree' '
- test_must_fail git worktree move . def
-'
-
-test_expect_success 'move worktree to another dir' '
- mkdir some-dir &&
- git worktree move destination some-dir &&
- test_when_finished "git worktree move some-dir/destination destination" &&
- test_path_is_missing destination &&
- git worktree list --porcelain >out &&
- grep "^worktree.*/some-dir/destination$" out &&
- git -C some-dir/destination log --format=%s >actual2 &&
- echo init >expected2 &&
- test_cmp expected2 actual2
-'
-
-test_expect_success 'move locked worktree (force)' '
- test_when_finished "
- git worktree unlock flump || :
- git worktree remove flump || :
- git worktree unlock ploof || :
- git worktree remove ploof || :
- " &&
- git worktree add --detach flump &&
- git worktree lock flump &&
- test_must_fail git worktree move flump ploof" &&
- test_must_fail git worktree move --force flump ploof" &&
- git worktree move --force --force flump ploof
-'
-
-test_expect_success 'move a repo with uninitialized submodule' '
- git init withsub &&
- (
- cd withsub &&
- test_commit initial &&
- git submodule add "$PWD"/.git sub &&
- git commit -m withsub &&
- git worktree add second HEAD &&
- git worktree move second third
- )
-'
-
-test_expect_success 'not move a repo with initialized submodule' '
- (
- cd withsub &&
- git -C third submodule update &&
- test_must_fail git worktree move third forth
- )
-'
-
-test_expect_success 'remove main worktree' '
- test_must_fail git worktree remove .
-'
-
-test_expect_success 'remove locked worktree' '
- git worktree lock destination &&
- test_when_finished "git worktree unlock destination" &&
- test_must_fail git worktree remove destination
-'
-
-test_expect_success 'remove worktree with dirty tracked file' '
- echo dirty >>destination/init.t &&
- test_when_finished "git -C destination checkout init.t" &&
- test_must_fail git worktree remove destination
-'
-
-test_expect_success 'remove worktree with untracked file' '
- : >destination/untracked &&
- test_must_fail git worktree remove destination
-'
-
-test_expect_success 'force remove worktree with untracked file' '
- git worktree remove --force destination &&
- test_path_is_missing destination
-'
-
-test_expect_success 'remove missing worktree' '
- git worktree add to-be-gone &&
- test -d .git/worktrees/to-be-gone &&
- mv to-be-gone gone &&
- git worktree remove to-be-gone &&
- test_path_is_missing .git/worktrees/to-be-gone
-'
-
-test_expect_success 'NOT remove missing-but-locked worktree' '
- git worktree add gone-but-locked &&
- git worktree lock gone-but-locked &&
- test -d .git/worktrees/gone-but-locked &&
- mv gone-but-locked really-gone-now &&
- test_must_fail git worktree remove gone-but-locked &&
- test_path_is_dir .git/worktrees/gone-but-locked
-'
-
-test_expect_success 'proper error when worktree not found' '
- for i in noodle noodle/bork
- do
- test_must_fail git worktree lock $i 2>err &&
- test_i18ngrep "not a working tree" err || return 1
- done
-'
-
-test_expect_success 'remove locked worktree (force)' '
- git worktree add --detach gumby &&
- test_when_finished "git worktree remove gumby || :" &&
- git worktree lock gumby &&
- test_when_finished "git worktree unlock gumby || :" &&
- test_must_fail git worktree remove gumby &&
- test_must_fail git worktree remove --force gumby &&
- git worktree remove --force --force gumby
-'
-
-test_expect_success 'remove cleans up .git/worktrees when empty' '
- git init moog &&
- (
- cd moog &&
- test_commit bim &&
- git worktree add --detach goom &&
- test_path_exists .git/worktrees &&
- git worktree remove goom &&
- test_path_is_missing .git/worktrees
- )
-'
-
-test_expect_success 'remove a repo with uninitialized submodule' '
- (
- cd withsub &&
- git worktree add to-remove HEAD &&
- git worktree remove to-remove
- )
-'
-
-test_expect_success 'not remove a repo with initialized submodule' '
- (
- cd withsub &&
- git worktree add to-remove HEAD &&
- git -C to-remove submodule update &&
- test_must_fail git worktree remove to-remove
- )
-'
-
-test_done
+++ /dev/null
-#!/bin/sh
-
-test_description="config file in multi worktree"
-
-. ./test-lib.sh
-
-test_expect_success 'setup' '
- test_commit start
-'
-
-test_expect_success 'config --worktree in single worktree' '
- git config --worktree foo.bar true &&
- test_cmp_config true foo.bar
-'
-
-test_expect_success 'add worktrees' '
- git worktree add wt1 &&
- git worktree add wt2
-'
-
-test_expect_success 'config --worktree without extension' '
- test_must_fail git config --worktree foo.bar false
-'
-
-test_expect_success 'enable worktreeConfig extension' '
- git config extensions.worktreeConfig true &&
- test_cmp_config true extensions.worktreeConfig
-'
-
-test_expect_success 'config is shared as before' '
- git config this.is shared &&
- test_cmp_config shared this.is &&
- test_cmp_config -C wt1 shared this.is &&
- test_cmp_config -C wt2 shared this.is
-'
-
-test_expect_success 'config is shared (set from another worktree)' '
- git -C wt1 config that.is also-shared &&
- test_cmp_config also-shared that.is &&
- test_cmp_config -C wt1 also-shared that.is &&
- test_cmp_config -C wt2 also-shared that.is
-'
-
-test_expect_success 'config private to main worktree' '
- git config --worktree this.is for-main &&
- test_cmp_config for-main this.is &&
- test_cmp_config -C wt1 shared this.is &&
- test_cmp_config -C wt2 shared this.is
-'
-
-test_expect_success 'config private to linked worktree' '
- git -C wt1 config --worktree this.is for-wt1 &&
- test_cmp_config for-main this.is &&
- test_cmp_config -C wt1 for-wt1 this.is &&
- test_cmp_config -C wt2 shared this.is
-'
-
-test_expect_success 'core.bare no longer for main only' '
- test_config core.bare true &&
- test "$(git rev-parse --is-bare-repository)" = true &&
- test "$(git -C wt1 rev-parse --is-bare-repository)" = true &&
- test "$(git -C wt2 rev-parse --is-bare-repository)" = true
-'
-
-test_expect_success 'per-worktree core.bare is picked up' '
- git -C wt1 config --worktree core.bare true &&
- test "$(git rev-parse --is-bare-repository)" = false &&
- test "$(git -C wt1 rev-parse --is-bare-repository)" = true &&
- test "$(git -C wt2 rev-parse --is-bare-repository)" = false
-'
-
-test_expect_success 'config.worktree no longer read without extension' '
- git config --unset extensions.worktreeConfig &&
- test_cmp_config shared this.is &&
- test_cmp_config -C wt1 shared this.is &&
- test_cmp_config -C wt2 shared this.is
-'
-
-test_done
--- /dev/null
+#!/bin/sh
+
+test_description='test git worktree add'
+
+. ./test-lib.sh
+
+. "$TEST_DIRECTORY"/lib-rebase.sh
+
+test_expect_success 'setup' '
+ test_commit init
+'
+
+test_expect_success '"add" an existing worktree' '
+ mkdir -p existing/subtree &&
+ test_must_fail git worktree add --detach existing master
+'
+
+test_expect_success '"add" an existing empty worktree' '
+ mkdir existing_empty &&
+ git worktree add --detach existing_empty master
+'
+
+test_expect_success '"add" using shorthand - fails when no previous branch' '
+ test_must_fail git worktree add existing_short -
+'
+
+test_expect_success '"add" using - shorthand' '
+ git checkout -b newbranch &&
+ echo hello >myworld &&
+ git add myworld &&
+ git commit -m myworld &&
+ git checkout master &&
+ git worktree add short-hand - &&
+ echo refs/heads/newbranch >expect &&
+ git -C short-hand rev-parse --symbolic-full-name HEAD >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '"add" refuses to checkout locked branch' '
+ test_must_fail git worktree add zere master &&
+ ! test -d zere &&
+ ! test -d .git/worktrees/zere
+'
+
+test_expect_success 'checking out paths not complaining about linked checkouts' '
+ (
+ cd existing_empty &&
+ echo dirty >>init.t &&
+ git checkout master -- init.t
+ )
+'
+
+test_expect_success '"add" worktree' '
+ git rev-parse HEAD >expect &&
+ git worktree add --detach here master &&
+ (
+ cd here &&
+ test_cmp ../init.t init.t &&
+ test_must_fail git symbolic-ref HEAD &&
+ git rev-parse HEAD >actual &&
+ test_cmp ../expect actual &&
+ git fsck
+ )
+'
+
+test_expect_success '"add" worktree with lock' '
+ git rev-parse HEAD >expect &&
+ git worktree add --detach --lock here-with-lock master &&
+ test -f .git/worktrees/here-with-lock/locked
+'
+
+test_expect_success '"add" worktree from a subdir' '
+ (
+ mkdir sub &&
+ cd sub &&
+ git worktree add --detach here master &&
+ cd here &&
+ test_cmp ../../init.t init.t
+ )
+'
+
+test_expect_success '"add" from a linked checkout' '
+ (
+ cd here &&
+ git worktree add --detach nested-here master &&
+ cd nested-here &&
+ git fsck
+ )
+'
+
+test_expect_success '"add" worktree creating new branch' '
+ git worktree add -b newmaster there master &&
+ (
+ cd there &&
+ test_cmp ../init.t init.t &&
+ git symbolic-ref HEAD >actual &&
+ echo refs/heads/newmaster >expect &&
+ test_cmp expect actual &&
+ git fsck
+ )
+'
+
+test_expect_success 'die the same branch is already checked out' '
+ (
+ cd here &&
+ test_must_fail git checkout newmaster
+ )
+'
+
+test_expect_success SYMLINKS 'die the same branch is already checked out (symlink)' '
+ head=$(git -C there rev-parse --git-path HEAD) &&
+ ref=$(git -C there symbolic-ref HEAD) &&
+ rm "$head" &&
+ ln -s "$ref" "$head" &&
+ test_must_fail git -C here checkout newmaster
+'
+
+test_expect_success 'not die the same branch is already checked out' '
+ (
+ cd here &&
+ git worktree add --force anothernewmaster newmaster
+ )
+'
+
+test_expect_success 'not die on re-checking out current branch' '
+ (
+ cd there &&
+ git checkout newmaster
+ )
+'
+
+test_expect_success '"add" from a bare repo' '
+ (
+ git clone --bare . bare &&
+ cd bare &&
+ git worktree add -b bare-master ../there2 master
+ )
+'
+
+test_expect_success 'checkout from a bare repo without "add"' '
+ (
+ cd bare &&
+ test_must_fail git checkout master
+ )
+'
+
+test_expect_success '"add" default branch of a bare repo' '
+ (
+ git clone --bare . bare2 &&
+ cd bare2 &&
+ git worktree add ../there3 master
+ )
+'
+
+test_expect_success 'checkout with grafts' '
+ test_when_finished rm .git/info/grafts &&
+ test_commit abc &&
+ SHA1=$(git rev-parse HEAD) &&
+ test_commit def &&
+ test_commit xyz &&
+ echo "$(git rev-parse HEAD) $SHA1" >.git/info/grafts &&
+ cat >expected <<-\EOF &&
+ xyz
+ abc
+ EOF
+ git log --format=%s -2 >actual &&
+ test_cmp expected actual &&
+ git worktree add --detach grafted master &&
+ git --git-dir=grafted/.git log --format=%s -2 >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success '"add" from relative HEAD' '
+ test_commit a &&
+ test_commit b &&
+ test_commit c &&
+ git rev-parse HEAD~1 >expected &&
+ git worktree add relhead HEAD~1 &&
+ git -C relhead rev-parse HEAD >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success '"add -b" with <branch> omitted' '
+ git worktree add -b burble flornk &&
+ test_cmp_rev HEAD burble
+'
+
+test_expect_success '"add --detach" with <branch> omitted' '
+ git worktree add --detach fishhook &&
+ git rev-parse HEAD >expected &&
+ git -C fishhook rev-parse HEAD >actual &&
+ test_cmp expected actual &&
+ test_must_fail git -C fishhook symbolic-ref HEAD
+'
+
+test_expect_success '"add" with <branch> omitted' '
+ git worktree add wiffle/bat &&
+ test_cmp_rev HEAD bat
+'
+
+test_expect_success '"add" checks out existing branch of dwimd name' '
+ git branch dwim HEAD~1 &&
+ git worktree add dwim &&
+ test_cmp_rev HEAD~1 dwim &&
+ (
+ cd dwim &&
+ test_cmp_rev HEAD dwim
+ )
+'
+
+test_expect_success '"add <path>" dwim fails with checked out branch' '
+ git checkout -b test-branch &&
+ test_must_fail git worktree add test-branch &&
+ test_path_is_missing test-branch
+'
+
+test_expect_success '"add --force" with existing dwimd name doesnt die' '
+ git checkout test-branch &&
+ git worktree add --force test-branch
+'
+
+test_expect_success '"add" no auto-vivify with --detach and <branch> omitted' '
+ git worktree add --detach mish/mash &&
+ test_must_fail git rev-parse mash -- &&
+ test_must_fail git -C mish/mash symbolic-ref HEAD
+'
+
+test_expect_success '"add" -b/-B mutually exclusive' '
+ test_must_fail git worktree add -b poodle -B poodle bamboo master
+'
+
+test_expect_success '"add" -b/--detach mutually exclusive' '
+ test_must_fail git worktree add -b poodle --detach bamboo master
+'
+
+test_expect_success '"add" -B/--detach mutually exclusive' '
+ test_must_fail git worktree add -B poodle --detach bamboo master
+'
+
+test_expect_success '"add -B" fails if the branch is checked out' '
+ git rev-parse newmaster >before &&
+ test_must_fail git worktree add -B newmaster bamboo master &&
+ git rev-parse newmaster >after &&
+ test_cmp before after
+'
+
+test_expect_success 'add -B' '
+ git worktree add -B poodle bamboo2 master^ &&
+ git -C bamboo2 symbolic-ref HEAD >actual &&
+ echo refs/heads/poodle >expected &&
+ test_cmp expected actual &&
+ test_cmp_rev master^ poodle
+'
+
+test_expect_success 'add --quiet' '
+ git worktree add --quiet another-worktree master 2>actual &&
+ test_must_be_empty actual
+'
+
+test_expect_success 'local clone from linked checkout' '
+ git clone --local here here-clone &&
+ ( cd here-clone && git fsck )
+'
+
+test_expect_success 'local clone --shared from linked checkout' '
+ git -C bare worktree add --detach ../baretree &&
+ git clone --local --shared baretree bare-clone &&
+ grep /bare/ bare-clone/.git/objects/info/alternates
+'
+
+test_expect_success '"add" worktree with --no-checkout' '
+ git worktree add --no-checkout -b swamp swamp &&
+ ! test -e swamp/init.t &&
+ git -C swamp reset --hard &&
+ test_cmp init.t swamp/init.t
+'
+
+test_expect_success '"add" worktree with --checkout' '
+ git worktree add --checkout -b swmap2 swamp2 &&
+ test_cmp init.t swamp2/init.t
+'
+
+test_expect_success 'put a worktree under rebase' '
+ git worktree add under-rebase &&
+ (
+ cd under-rebase &&
+ set_fake_editor &&
+ FAKE_LINES="edit 1" git rebase -i HEAD^ &&
+ git worktree list | grep "under-rebase.*detached HEAD"
+ )
+'
+
+test_expect_success 'add a worktree, checking out a rebased branch' '
+ test_must_fail git worktree add new-rebase under-rebase &&
+ ! test -d new-rebase
+'
+
+test_expect_success 'checking out a rebased branch from another worktree' '
+ git worktree add new-place &&
+ test_must_fail git -C new-place checkout under-rebase
+'
+
+test_expect_success 'delete a branch under rebase not allowed' '
+ (
+ cd under-rebase &&
+ test_must_fail git branch -D under-rebase
+ )
+'
+
+test_expect_success 'rename a branch under rebase not allowed' '
+ test_must_fail git branch -M under-rebase rebase-with-new-name
+'
+
+test_expect_success 'check out from current worktree branch ok' '
+ (
+ cd under-rebase &&
+ git checkout under-rebase &&
+ git checkout - &&
+ git rebase --abort
+ )
+'
+
+test_expect_success 'checkout a branch under bisect' '
+ git worktree add under-bisect &&
+ (
+ cd under-bisect &&
+ git bisect start &&
+ git bisect bad &&
+ git bisect good HEAD~2 &&
+ git worktree list | grep "under-bisect.*detached HEAD" &&
+ test_must_fail git worktree add new-bisect under-bisect &&
+ ! test -d new-bisect
+ )
+'
+
+test_expect_success 'rename a branch under bisect not allowed' '
+ test_must_fail git branch -M under-bisect bisect-with-new-name
+'
+# Is branch "refs/heads/$1" set to pull from "$2/$3"?
+test_branch_upstream () {
+ printf "%s\n" "$2" "refs/heads/$3" >expect.upstream &&
+ {
+ git config "branch.$1.remote" &&
+ git config "branch.$1.merge"
+ } >actual.upstream &&
+ test_cmp expect.upstream actual.upstream
+}
+
+test_expect_success '--track sets up tracking' '
+ test_when_finished rm -rf track &&
+ git worktree add --track -b track track master &&
+ test_branch_upstream track . master
+'
+
+# setup remote repository $1 and repository $2 with $1 set up as
+# remote. The remote has two branches, master and foo.
+setup_remote_repo () {
+ git init $1 &&
+ (
+ cd $1 &&
+ test_commit $1_master &&
+ git checkout -b foo &&
+ test_commit upstream_foo
+ ) &&
+ git init $2 &&
+ (
+ cd $2 &&
+ test_commit $2_master &&
+ git remote add $1 ../$1 &&
+ git config remote.$1.fetch \
+ "refs/heads/*:refs/remotes/$1/*" &&
+ git fetch --all
+ )
+}
+
+test_expect_success '--no-track avoids setting up tracking' '
+ test_when_finished rm -rf repo_upstream repo_local foo &&
+ setup_remote_repo repo_upstream repo_local &&
+ (
+ cd repo_local &&
+ git worktree add --no-track -b foo ../foo repo_upstream/foo
+ ) &&
+ (
+ cd foo &&
+ test_must_fail git config "branch.foo.remote" &&
+ test_must_fail git config "branch.foo.merge" &&
+ test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
+ )
+'
+
+test_expect_success '"add" <path> <non-existent-branch> fails' '
+ test_must_fail git worktree add foo non-existent
+'
+
+test_expect_success '"add" <path> <branch> dwims' '
+ test_when_finished rm -rf repo_upstream repo_dwim foo &&
+ setup_remote_repo repo_upstream repo_dwim &&
+ git init repo_dwim &&
+ (
+ cd repo_dwim &&
+ git worktree add ../foo foo
+ ) &&
+ (
+ cd foo &&
+ test_branch_upstream foo repo_upstream foo &&
+ test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
+ )
+'
+
+test_expect_success '"add" <path> <branch> dwims with checkout.defaultRemote' '
+ test_when_finished rm -rf repo_upstream repo_dwim foo &&
+ setup_remote_repo repo_upstream repo_dwim &&
+ git init repo_dwim &&
+ (
+ cd repo_dwim &&
+ git remote add repo_upstream2 ../repo_upstream &&
+ git fetch repo_upstream2 &&
+ test_must_fail git worktree add ../foo foo &&
+ git -c checkout.defaultRemote=repo_upstream worktree add ../foo foo &&
+ git status -uno --porcelain >status.actual &&
+ test_must_be_empty status.actual
+ ) &&
+ (
+ cd foo &&
+ test_branch_upstream foo repo_upstream foo &&
+ test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
+ )
+'
+
+test_expect_success 'git worktree add does not match remote' '
+ test_when_finished rm -rf repo_a repo_b foo &&
+ setup_remote_repo repo_a repo_b &&
+ (
+ cd repo_b &&
+ git worktree add ../foo
+ ) &&
+ (
+ cd foo &&
+ test_must_fail git config "branch.foo.remote" &&
+ test_must_fail git config "branch.foo.merge" &&
+ ! test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
+ )
+'
+
+test_expect_success 'git worktree add --guess-remote sets up tracking' '
+ test_when_finished rm -rf repo_a repo_b foo &&
+ setup_remote_repo repo_a repo_b &&
+ (
+ cd repo_b &&
+ git worktree add --guess-remote ../foo
+ ) &&
+ (
+ cd foo &&
+ test_branch_upstream foo repo_a foo &&
+ test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
+ )
+'
+
+test_expect_success 'git worktree add with worktree.guessRemote sets up tracking' '
+ test_when_finished rm -rf repo_a repo_b foo &&
+ setup_remote_repo repo_a repo_b &&
+ (
+ cd repo_b &&
+ git config worktree.guessRemote true &&
+ git worktree add ../foo
+ ) &&
+ (
+ cd foo &&
+ test_branch_upstream foo repo_a foo &&
+ test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
+ )
+'
+
+test_expect_success 'git worktree --no-guess-remote option overrides config' '
+ test_when_finished rm -rf repo_a repo_b foo &&
+ setup_remote_repo repo_a repo_b &&
+ (
+ cd repo_b &&
+ git config worktree.guessRemote true &&
+ git worktree add --no-guess-remote ../foo
+ ) &&
+ (
+ cd foo &&
+ test_must_fail git config "branch.foo.remote" &&
+ test_must_fail git config "branch.foo.merge" &&
+ ! test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
+ )
+'
+
+post_checkout_hook () {
+ gitdir=${1:-.git}
+ test_when_finished "rm -f $gitdir/hooks/post-checkout" &&
+ mkdir -p $gitdir/hooks &&
+ write_script $gitdir/hooks/post-checkout <<-\EOF
+ {
+ echo $*
+ git rev-parse --git-dir --show-toplevel
+ } >hook.actual
+ EOF
+}
+
+test_expect_success '"add" invokes post-checkout hook (branch)' '
+ post_checkout_hook &&
+ {
+ echo $ZERO_OID $(git rev-parse HEAD) 1 &&
+ echo $(pwd)/.git/worktrees/gumby &&
+ echo $(pwd)/gumby
+ } >hook.expect &&
+ git worktree add gumby &&
+ test_cmp hook.expect gumby/hook.actual
+'
+
+test_expect_success '"add" invokes post-checkout hook (detached)' '
+ post_checkout_hook &&
+ {
+ echo $ZERO_OID $(git rev-parse HEAD) 1 &&
+ echo $(pwd)/.git/worktrees/grumpy &&
+ echo $(pwd)/grumpy
+ } >hook.expect &&
+ git worktree add --detach grumpy &&
+ test_cmp hook.expect grumpy/hook.actual
+'
+
+test_expect_success '"add --no-checkout" suppresses post-checkout hook' '
+ post_checkout_hook &&
+ rm -f hook.actual &&
+ git worktree add --no-checkout gloopy &&
+ test_path_is_missing gloopy/hook.actual
+'
+
+test_expect_success '"add" in other worktree invokes post-checkout hook' '
+ post_checkout_hook &&
+ {
+ echo $ZERO_OID $(git rev-parse HEAD) 1 &&
+ echo $(pwd)/.git/worktrees/guppy &&
+ echo $(pwd)/guppy
+ } >hook.expect &&
+ git -C gloopy worktree add --detach ../guppy &&
+ test_cmp hook.expect guppy/hook.actual
+'
+
+test_expect_success '"add" in bare repo invokes post-checkout hook' '
+ rm -rf bare &&
+ git clone --bare . bare &&
+ {
+ echo $ZERO_OID $(git --git-dir=bare rev-parse HEAD) 1 &&
+ echo $(pwd)/bare/worktrees/goozy &&
+ echo $(pwd)/goozy
+ } >hook.expect &&
+ post_checkout_hook bare &&
+ git -C bare worktree add --detach ../goozy &&
+ test_cmp hook.expect goozy/hook.actual
+'
+
+test_expect_success '"add" an existing but missing worktree' '
+ git worktree add --detach pneu &&
+ test_must_fail git worktree add --detach pneu &&
+ rm -fr pneu &&
+ test_must_fail git worktree add --detach pneu &&
+ git worktree add --force --detach pneu
+'
+
+test_expect_success '"add" an existing locked but missing worktree' '
+ git worktree add --detach gnoo &&
+ git worktree lock gnoo &&
+ test_when_finished "git worktree unlock gnoo || :" &&
+ rm -fr gnoo &&
+ test_must_fail git worktree add --detach gnoo &&
+ test_must_fail git worktree add --force --detach gnoo &&
+ git worktree add --force --force --detach gnoo
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description='prune $GIT_DIR/worktrees'
+
+. ./test-lib.sh
+
+test_expect_success initialize '
+ git commit --allow-empty -m init
+'
+
+test_expect_success 'worktree prune on normal repo' '
+ git worktree prune &&
+ test_must_fail git worktree prune abc
+'
+
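+# The following tests plant fake or broken entries directly under
+# $GIT_DIR/worktrees and check that "git worktree prune" removes them
+# with the expected diagnostics.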
+test_expect_success 'prune files inside $GIT_DIR/worktrees' '
+ mkdir .git/worktrees &&
+ : >.git/worktrees/abc &&
+ git worktree prune --verbose >actual &&
+ cat >expect <<EOF &&
+Removing worktrees/abc: not a valid directory
+EOF
+ test_i18ncmp expect actual &&
+ ! test -f .git/worktrees/abc &&
+ ! test -d .git/worktrees
+'
+
+test_expect_success 'prune directories without gitdir' '
+ mkdir -p .git/worktrees/def/abc &&
+ : >.git/worktrees/def/def &&
+ cat >expect <<EOF &&
+Removing worktrees/def: gitdir file does not exist
+EOF
+ git worktree prune --verbose >actual &&
+ test_i18ncmp expect actual &&
+ ! test -d .git/worktrees/def &&
+ ! test -d .git/worktrees
+'
+
+test_expect_success SANITY 'prune directories with unreadable gitdir' '
+ mkdir -p .git/worktrees/def/abc &&
+ : >.git/worktrees/def/def &&
+ : >.git/worktrees/def/gitdir &&
+ chmod u-r .git/worktrees/def/gitdir &&
+ git worktree prune --verbose >actual &&
+ test_i18ngrep "Removing worktrees/def: unable to read gitdir file" actual &&
+ ! test -d .git/worktrees/def &&
+ ! test -d .git/worktrees
+'
+
+test_expect_success 'prune directories with invalid gitdir' '
+ mkdir -p .git/worktrees/def/abc &&
+ : >.git/worktrees/def/def &&
+ : >.git/worktrees/def/gitdir &&
+ git worktree prune --verbose >actual &&
+ test_i18ngrep "Removing worktrees/def: invalid gitdir file" actual &&
+ ! test -d .git/worktrees/def &&
+ ! test -d .git/worktrees
+'
+
+test_expect_success 'prune directories with gitdir pointing to nowhere' '
+ mkdir -p .git/worktrees/def/abc &&
+ : >.git/worktrees/def/def &&
+ echo "$(pwd)"/nowhere >.git/worktrees/def/gitdir &&
+ git worktree prune --verbose >actual &&
+ test_i18ngrep "Removing worktrees/def: gitdir file points to non-existent location" actual &&
+ ! test -d .git/worktrees/def &&
+ ! test -d .git/worktrees
+'
+
+test_expect_success 'not prune locked checkout' '
+ test_when_finished rm -r .git/worktrees &&
+ mkdir -p .git/worktrees/ghi &&
+ : >.git/worktrees/ghi/locked &&
+ git worktree prune &&
+ test -d .git/worktrees/ghi
+'
+
+test_expect_success 'not prune recent checkouts' '
+ test_when_finished rm -r .git/worktrees &&
+ git worktree add jlm HEAD &&
+ test -d .git/worktrees/jlm &&
+ rm -rf jlm &&
+ git worktree prune --verbose --expire=2.days.ago &&
+ test -d .git/worktrees/jlm
+'
+
+test_expect_success 'not prune proper checkouts' '
+ test_when_finished rm -r .git/worktrees &&
+ git worktree add --detach "$PWD/nop" master &&
+ git worktree prune &&
+ test -d .git/worktrees/nop
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description='test git worktree list'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_commit init
+'
+
+test_expect_success 'rev-parse --git-common-dir on main worktree' '
+ git rev-parse --git-common-dir >actual &&
+ echo .git >expected &&
+ test_cmp expected actual &&
+ mkdir sub &&
+ git -C sub rev-parse --git-common-dir >actual2 &&
+ echo ../.git >expected2 &&
+ test_cmp expected2 actual2
+'
+
+test_expect_success 'rev-parse --git-path objects linked worktree' '
+ echo "$(git rev-parse --show-toplevel)/.git/objects" >expect &&
+ test_when_finished "rm -rf linked-tree actual expect && git worktree prune" &&
+ git worktree add --detach linked-tree master &&
+ git -C linked-tree rev-parse --git-path objects >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '"list" all worktrees from main' '
+ echo "$(git rev-parse --show-toplevel) $(git rev-parse --short HEAD) [$(git symbolic-ref --short HEAD)]" >expect &&
+ test_when_finished "rm -rf here out actual expect && git worktree prune" &&
+ git worktree add --detach here master &&
+ echo "$(git -C here rev-parse --show-toplevel) $(git rev-parse --short HEAD) (detached HEAD)" >>expect &&
+ git worktree list >out &&
+ sed "s/  */ /g" <out >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '"list" all worktrees from linked' '
+ echo "$(git rev-parse --show-toplevel) $(git rev-parse --short HEAD) [$(git symbolic-ref --short HEAD)]" >expect &&
+ test_when_finished "rm -rf here out actual expect && git worktree prune" &&
+ git worktree add --detach here master &&
+ echo "$(git -C here rev-parse --show-toplevel) $(git rev-parse --short HEAD) (detached HEAD)" >>expect &&
+ git -C here worktree list >out &&
+ sed "s/  */ /g" <out >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '"list" all worktrees --porcelain' '
+ echo "worktree $(git rev-parse --show-toplevel)" >expect &&
+ echo "HEAD $(git rev-parse HEAD)" >>expect &&
+ echo "branch $(git symbolic-ref HEAD)" >>expect &&
+ echo >>expect &&
+ test_when_finished "rm -rf here actual expect && git worktree prune" &&
+ git worktree add --detach here master &&
+ echo "worktree $(git -C here rev-parse --show-toplevel)" >>expect &&
+ echo "HEAD $(git rev-parse HEAD)" >>expect &&
+ echo "detached" >>expect &&
+ echo >>expect &&
+ git worktree list --porcelain >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'bare repo setup' '
+ git init --bare bare1 &&
+ echo "data" >file1 &&
+ git add file1 &&
+ git commit -m"File1: add data" &&
+ git push bare1 master &&
+ git reset --hard HEAD^
+'
+
+test_expect_success '"list" all worktrees from bare main' '
+ test_when_finished "rm -rf there out actual expect && git -C bare1 worktree prune" &&
+ git -C bare1 worktree add --detach ../there master &&
+ echo "$(pwd)/bare1 (bare)" >expect &&
+ echo "$(git -C there rev-parse --show-toplevel) $(git -C there rev-parse --short HEAD) (detached HEAD)" >>expect &&
+ git -C bare1 worktree list >out &&
+ sed "s/  */ /g" <out >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '"list" all worktrees --porcelain from bare main' '
+ test_when_finished "rm -rf there actual expect && git -C bare1 worktree prune" &&
+ git -C bare1 worktree add --detach ../there master &&
+ echo "worktree $(pwd)/bare1" >expect &&
+ echo "bare" >>expect &&
+ echo >>expect &&
+ echo "worktree $(git -C there rev-parse --show-toplevel)" >>expect &&
+ echo "HEAD $(git -C there rev-parse HEAD)" >>expect &&
+ echo "detached" >>expect &&
+ echo >>expect &&
+ git -C bare1 worktree list --porcelain >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '"list" all worktrees from linked with a bare main' '
+ test_when_finished "rm -rf there out actual expect && git -C bare1 worktree prune" &&
+ git -C bare1 worktree add --detach ../there master &&
+ echo "$(pwd)/bare1 (bare)" >expect &&
+ echo "$(git -C there rev-parse --show-toplevel) $(git -C there rev-parse --short HEAD) (detached HEAD)" >>expect &&
+ git -C there worktree list >out &&
+ sed "s/  */ /g" <out >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'bare repo cleanup' '
+ rm -rf bare1
+'
+
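+# "git worktree list" must keep the main worktree first even when its HEAD
+# has been broken; the listing falls back to a null OID in porcelain mode
+# and an "(error)" marker in the human-readable output.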
+test_expect_success 'broken main worktree still at the top' '
+ git init broken-main &&
+ (
+ cd broken-main &&
+ test_commit new &&
+ git worktree add linked &&
+ cat >expected <<-EOF &&
+ worktree $(pwd)
+ HEAD $ZERO_OID
+
+ EOF
+ cd linked &&
+ echo "worktree $(pwd)" >expected &&
+ echo "ref: .broken" >../.git/HEAD &&
+ git worktree list --porcelain >out &&
+ head -n 3 out >actual &&
+ test_cmp ../expected actual &&
+ git worktree list >out &&
+ head -n 1 out >actual.2 &&
+ grep -F "(error)" actual.2
+ )
+'
+
+test_expect_success 'linked worktrees are sorted' '
+ mkdir sorted &&
+ git init sorted/main &&
+ (
+ cd sorted/main &&
+ test_tick &&
+ test_commit new &&
+ git worktree add ../first &&
+ git worktree add ../second &&
+ git worktree list --porcelain >out &&
+ grep ^worktree out >actual
+ ) &&
+ cat >expected <<-EOF &&
+ worktree $(pwd)/sorted/main
+ worktree $(pwd)/sorted/first
+ worktree $(pwd)/sorted/second
+ EOF
+ test_cmp expected sorted/main/actual
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description='test git worktree move, remove, lock and unlock'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_commit init &&
+ git worktree add source &&
+ git worktree list --porcelain >out &&
+ grep "^worktree" out >actual &&
+ cat <<-EOF >expected &&
+ worktree $(pwd)
+ worktree $(pwd)/source
+ EOF
+ test_cmp expected actual
+'
+
+test_expect_success 'lock main worktree' '
+ test_must_fail git worktree lock .
+'
+
+test_expect_success 'lock linked worktree' '
+ git worktree lock --reason hahaha source &&
+ echo hahaha >expected &&
+ test_cmp expected .git/worktrees/source/locked
+'
+
+test_expect_success 'lock linked worktree from another worktree' '
+ rm .git/worktrees/source/locked &&
+ git worktree add elsewhere &&
+ git -C elsewhere worktree lock --reason hahaha ../source &&
+ echo hahaha >expected &&
+ test_cmp expected .git/worktrees/source/locked
+'
+
+test_expect_success 'lock worktree twice' '
+ test_must_fail git worktree lock source &&
+ echo hahaha >expected &&
+ test_cmp expected .git/worktrees/source/locked
+'
+
+test_expect_success 'lock worktree twice (from the locked worktree)' '
+ test_must_fail git -C source worktree lock . &&
+ echo hahaha >expected &&
+ test_cmp expected .git/worktrees/source/locked
+'
+
+test_expect_success 'unlock main worktree' '
+ test_must_fail git worktree unlock .
+'
+
+test_expect_success 'unlock linked worktree' '
+ git worktree unlock source &&
+ test_path_is_missing .git/worktrees/source/locked
+'
+
+test_expect_success 'unlock worktree twice' '
+ test_must_fail git worktree unlock source &&
+ test_path_is_missing .git/worktrees/source/locked
+'
+
+test_expect_success 'move non-worktree' '
+ mkdir abc &&
+ test_must_fail git worktree move abc def
+'
+
+test_expect_success 'move locked worktree' '
+ git worktree lock source &&
+ test_when_finished "git worktree unlock source" &&
+ test_must_fail git worktree move source destination
+'
+
+test_expect_success 'move worktree' '
+ git worktree move source destination &&
+ test_path_is_missing source &&
+ git worktree list --porcelain >out &&
+ grep "^worktree.*/destination$" out &&
+ ! grep "^worktree.*/source$" out &&
+ git -C destination log --format=%s >actual2 &&
+ echo init >expected2 &&
+ test_cmp expected2 actual2
+'
+
+test_expect_success 'move main worktree' '
+ test_must_fail git worktree move . def
+'
+
+test_expect_success 'move worktree to another dir' '
+ mkdir some-dir &&
+ git worktree move destination some-dir &&
+ test_when_finished "git worktree move some-dir/destination destination" &&
+ test_path_is_missing destination &&
+ git worktree list --porcelain >out &&
+ grep "^worktree.*/some-dir/destination$" out &&
+ git -C some-dir/destination log --format=%s >actual2 &&
+ echo init >expected2 &&
+ test_cmp expected2 actual2
+'
+
+test_expect_success 'move locked worktree (force)' '
+ test_when_finished "
+ git worktree unlock flump || :
+ git worktree remove flump || :
+ git worktree unlock ploof || :
+ git worktree remove ploof || :
+ " &&
+ git worktree add --detach flump &&
+ git worktree lock flump &&
+ test_must_fail git worktree move flump ploof &&
+ test_must_fail git worktree move --force flump ploof &&
+ git worktree move --force --force flump ploof
+'
+
+test_expect_success 'move a repo with uninitialized submodule' '
+ git init withsub &&
+ (
+ cd withsub &&
+ test_commit initial &&
+ git submodule add "$PWD"/.git sub &&
+ git commit -m withsub &&
+ git worktree add second HEAD &&
+ git worktree move second third
+ )
+'
+
+test_expect_success 'not move a repo with initialized submodule' '
+ (
+ cd withsub &&
+ git -C third submodule update &&
+ test_must_fail git worktree move third forth
+ )
+'
+
+test_expect_success 'remove main worktree' '
+ test_must_fail git worktree remove .
+'
+
+test_expect_success 'remove locked worktree' '
+ git worktree lock destination &&
+ test_when_finished "git worktree unlock destination" &&
+ test_must_fail git worktree remove destination
+'
+
+test_expect_success 'remove worktree with dirty tracked file' '
+ echo dirty >>destination/init.t &&
+ test_when_finished "git -C destination checkout init.t" &&
+ test_must_fail git worktree remove destination
+'
+
+test_expect_success 'remove worktree with untracked file' '
+ : >destination/untracked &&
+ test_must_fail git worktree remove destination
+'
+
+test_expect_success 'force remove worktree with untracked file' '
+ git worktree remove --force destination &&
+ test_path_is_missing destination
+'
+
+test_expect_success 'remove missing worktree' '
+ git worktree add to-be-gone &&
+ test -d .git/worktrees/to-be-gone &&
+ mv to-be-gone gone &&
+ git worktree remove to-be-gone &&
+ test_path_is_missing .git/worktrees/to-be-gone
+'
+
+test_expect_success 'NOT remove missing-but-locked worktree' '
+ git worktree add gone-but-locked &&
+ git worktree lock gone-but-locked &&
+ test -d .git/worktrees/gone-but-locked &&
+ mv gone-but-locked really-gone-now &&
+ test_must_fail git worktree remove gone-but-locked &&
+ test_path_is_dir .git/worktrees/gone-but-locked
+'
+
+test_expect_success 'proper error when worktree not found' '
+ for i in noodle noodle/bork
+ do
+ test_must_fail git worktree lock $i 2>err &&
+ test_i18ngrep "not a working tree" err || return 1
+ done
+'
+
+test_expect_success 'remove locked worktree (force)' '
+ git worktree add --detach gumby &&
+ test_when_finished "git worktree remove gumby || :" &&
+ git worktree lock gumby &&
+ test_when_finished "git worktree unlock gumby || :" &&
+ test_must_fail git worktree remove gumby &&
+ test_must_fail git worktree remove --force gumby &&
+ git worktree remove --force --force gumby
+'
+
+test_expect_success 'remove cleans up .git/worktrees when empty' '
+ git init moog &&
+ (
+ cd moog &&
+ test_commit bim &&
+ git worktree add --detach goom &&
+ test_path_exists .git/worktrees &&
+ git worktree remove goom &&
+ test_path_is_missing .git/worktrees
+ )
+'
+
+test_expect_success 'remove a repo with uninitialized submodule' '
+ (
+ cd withsub &&
+ git worktree add to-remove HEAD &&
+ git worktree remove to-remove
+ )
+'
+
+test_expect_success 'not remove a repo with initialized submodule' '
+ (
+ cd withsub &&
+ git worktree add to-remove HEAD &&
+ git -C to-remove submodule update &&
+ test_must_fail git worktree remove to-remove
+ )
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description="config file in multi worktree"
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_commit start
+'
+
+test_expect_success 'config --worktree in single worktree' '
+ git config --worktree foo.bar true &&
+ test_cmp_config true foo.bar
+'
+
+test_expect_success 'add worktrees' '
+ git worktree add wt1 &&
+ git worktree add wt2
+'
+
+test_expect_success 'config --worktree without extension' '
+ test_must_fail git config --worktree foo.bar false
+'
+
+test_expect_success 'enable worktreeConfig extension' '
+ git config extensions.worktreeConfig true &&
+ test_cmp_config true extensions.worktreeConfig
+'
+
+test_expect_success 'config is shared as before' '
+ git config this.is shared &&
+ test_cmp_config shared this.is &&
+ test_cmp_config -C wt1 shared this.is &&
+ test_cmp_config -C wt2 shared this.is
+'
+
+test_expect_success 'config is shared (set from another worktree)' '
+ git -C wt1 config that.is also-shared &&
+ test_cmp_config also-shared that.is &&
+ test_cmp_config -C wt1 also-shared that.is &&
+ test_cmp_config -C wt2 also-shared that.is
+'
+
+test_expect_success 'config private to main worktree' '
+ git config --worktree this.is for-main &&
+ test_cmp_config for-main this.is &&
+ test_cmp_config -C wt1 shared this.is &&
+ test_cmp_config -C wt2 shared this.is
+'
+
+test_expect_success 'config private to linked worktree' '
+ git -C wt1 config --worktree this.is for-wt1 &&
+ test_cmp_config for-main this.is &&
+ test_cmp_config -C wt1 for-wt1 this.is &&
+ test_cmp_config -C wt2 shared this.is
+'
+
+test_expect_success 'core.bare no longer for main only' '
+ test_config core.bare true &&
+ test "$(git rev-parse --is-bare-repository)" = true &&
+ test "$(git -C wt1 rev-parse --is-bare-repository)" = true &&
+ test "$(git -C wt2 rev-parse --is-bare-repository)" = true
+'
+
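+# With extensions.worktreeConfig enabled, core.bare written via --worktree
+# lands in that worktree's config.worktree and affects only that worktree.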
+test_expect_success 'per-worktree core.bare is picked up' '
+ git -C wt1 config --worktree core.bare true &&
+ test "$(git rev-parse --is-bare-repository)" = false &&
+ test "$(git -C wt1 rev-parse --is-bare-repository)" = true &&
+ test "$(git -C wt2 rev-parse --is-bare-repository)" = false
+'
+
+test_expect_success 'config.worktree no longer read without extension' '
+ git config --unset extensions.worktreeConfig &&
+ test_cmp_config shared this.is &&
+ test_cmp_config -C wt1 shared this.is &&
+ test_cmp_config -C wt2 shared this.is
+'
+
+test_done
test_must_fail git branch -v branch*
'
+test_expect_success 'git branch `--show-current` shows current branch' '
+ cat >expect <<-\EOF &&
+ branch-two
+ EOF
+ git checkout branch-two &&
+ git branch --show-current >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'git branch `--show-current` is silent when detached HEAD' '
+ git checkout HEAD^0 &&
+ git branch --show-current >actual &&
+ test_must_be_empty actual
+'
+
+test_expect_success 'git branch `--show-current` works properly when tag exists' '
+ cat >expect <<-\EOF &&
+ branch-and-tag-name
+ EOF
+ test_when_finished "
+ git checkout branch-one
+ git branch -D branch-and-tag-name
+ " &&
+ git checkout -b branch-and-tag-name &&
+ test_when_finished "git tag -d branch-and-tag-name" &&
+ git tag branch-and-tag-name &&
+ git branch --show-current >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'git branch `--show-current` works properly with worktrees' '
+ cat >expect <<-\EOF &&
+ branch-one
+ branch-two
+ EOF
+ git checkout branch-one &&
+ git worktree add worktree branch-two &&
+ {
+ git branch --show-current &&
+ git -C worktree branch --show-current
+ } >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'git branch shows detached HEAD properly' '
cat >expect <<EOF &&
* (HEAD detached at $(git rev-parse --short HEAD^0))
git rebase master
'
+test_expect_success 'rebase sets ORIG_HEAD to pre-rebase state' '
+ git checkout -b orig-head topic &&
+ pre="$(git rev-parse --verify HEAD)" &&
+ git rebase master &&
+ test_cmp_rev "$pre" ORIG_HEAD &&
+ ! test_cmp_rev "$pre" HEAD
+'
+
test_expect_success 'rebase, with <onto> and <upstream> specified as :/quuxery' '
test_when_finished "git branch -D torebase" &&
git checkout -b torebase my-topic-branch^ &&
)
'
+test_expect_success 'rebase -c rebase.useBuiltin=false warning' '
+ expected="rebase.useBuiltin support has been removed" &&
+
+ # Only warn when the legacy rebase is requested...
+ test_must_fail git -c rebase.useBuiltin=false rebase 2>err &&
+ test_i18ngrep "$expected" err &&
+ test_must_fail env GIT_TEST_REBASE_USE_BUILTIN=false git rebase 2>err &&
+ test_i18ngrep "$expected" err &&
+
+ # ...not when we would have used the built-in anyway
+ test_must_fail git -c rebase.useBuiltin=true rebase 2>err &&
+ test_must_be_empty err &&
+ test_must_fail env GIT_TEST_REBASE_USE_BUILTIN=true git rebase 2>err &&
+ test_must_be_empty err
+'
+
test_done
test_expect_success 'rebase -x with empty command fails' '
test_when_finished "git rebase --abort ||:" &&
- test_must_fail env GIT_TEST_REBASE_USE_BUILTIN=true \
- git rebase -x "" @ 2>actual &&
+ test_must_fail env git rebase -x "" @ 2>actual &&
test_write_lines "error: empty exec command" >expected &&
test_i18ncmp expected actual &&
- test_must_fail env GIT_TEST_REBASE_USE_BUILTIN=true \
- git rebase -x " " @ 2>actual &&
+ test_must_fail env git rebase -x " " @ 2>actual &&
test_i18ncmp expected actual
'
'
test_expect_success 'rebase -x with newline in command fails' '
test_when_finished "git rebase --abort ||:" &&
- test_must_fail env GIT_TEST_REBASE_USE_BUILTIN=true \
- git rebase -x "a${LF}b" @ 2>actual &&
+ test_must_fail env git rebase -x "a${LF}b" @ 2>actual &&
test_write_lines "error: exec commands cannot contain newlines" \
>expected &&
test_i18ncmp expected actual
(
set_cat_todo_editor &&
test_must_fail git -c rebase.instructionFormat= \
- rebase --autosquash --force -i HEAD^ >actual &&
+ rebase --autosquash --force-rebase -i HEAD^ >actual &&
git log -1 --format="pick %h %s" >expect &&
test_cmp expect actual
)
test -e F
'
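+# The exec script below creates throwaway commits whose abbreviated names
+# share a leading prefix and appends "pick" lines for them to the todo list;
+# the rebase must still resolve those names when it re-reads the todo list.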
+test_expect_success SHA1 'loose object cache vs re-reading todo list' '
+ GIT_REBASE_TODO=.git/rebase-merge/git-rebase-todo &&
+ export GIT_REBASE_TODO &&
+ write_script append-todo.sh <<-\EOS &&
+ # For values 5 and 6, this yields SHA-1s with the same first two digits
+ echo "pick $(git rev-parse --short \
+ $(printf "%s\\n" \
+ "tree $EMPTY_TREE" \
+ "author A U Thor <author@example.org> $1 +0000" \
+ "committer A U Thor <author@example.org> $1 +0000" \
+ "" \
+ "$1" |
+ git hash-object -t commit -w --stdin))" >>$GIT_REBASE_TODO
+
+ shift
+ test -z "$*" ||
+ echo "exec $0 $*" >>$GIT_REBASE_TODO
+ EOS
+
+ git rebase HEAD -x "./append-todo.sh 5 6"
+'
+
test_done
EOF
test_config sequence.editor \""$PWD"/replace-editor.sh\" &&
test_tick &&
- git rebase -i --force --root -r &&
+ git rebase -i --force-rebase --root -r &&
test "Parsnip" = "$(git show -s --format=%an HEAD^)" &&
test $(git rev-parse second-root^0) != $(git rev-parse HEAD^) &&
test $(git rev-parse second-root:second-root.t) = \
test_cmp_rev HEAD $before &&
test_tick &&
- git rebase -i --force -r HEAD^^ &&
+ git rebase -i --force-rebase -r HEAD^^ &&
test "Hank" = "$(git show -s --format=%an HEAD)" &&
test "$before" != $(git rev-parse HEAD) &&
test_cmp_graph HEAD^^.. <<-\EOF
test_commit base foo b &&
test_commit picked foo c &&
test_commit --signoff picked-signed foo d &&
+ git checkout -b topic initial &&
+ test_commit redundant-pick foo c redundant &&
+ git commit --allow-empty --allow-empty-message &&
+ git tag empty &&
+ git checkout master &&
git config advice.detachedhead false
'
test_i18ngrep ! "Changes not staged for commit:" actual
'
+test_expect_success 'cherry-pick --continue remembers --keep-redundant-commits' '
+ test_when_finished "git cherry-pick --abort || :" &&
+ pristine_detach initial &&
+ test_must_fail git cherry-pick --keep-redundant-commits picked redundant &&
+ echo c >foo &&
+ git add foo &&
+ git cherry-pick --continue
+'
+
+test_expect_success 'cherry-pick --continue remembers --allow-empty and --allow-empty-message' '
+ test_when_finished "git cherry-pick --abort || :" &&
+ pristine_detach initial &&
+ test_must_fail git cherry-pick --allow-empty --allow-empty-message \
+ picked empty &&
+ echo c >foo &&
+ git add foo &&
+ git cherry-pick --continue
+'
+
test_done
. ./test-lib.sh
# Setup some files to be removed, some with funny characters
-test_expect_success \
- 'Initialize test directory' \
- "touch -- foo bar baz 'space embedded' -q &&
- git add -- foo bar baz 'space embedded' -q &&
- git commit -m 'add normal files'"
+test_expect_success 'Initialize test directory' '
+ touch -- foo bar baz "space embedded" -q &&
+ git add -- foo bar baz "space embedded" -q &&
+ git commit -m "add normal files"
+'
-if test_have_prereq !FUNNYNAMES; then
+if test_have_prereq !FUNNYNAMES
+then
say 'Your filesystem does not allow tabs in filenames.'
fi
-test_expect_success FUNNYNAMES 'add files with funny names' "
- touch -- 'tab embedded' 'newline
-embedded' &&
- git add -- 'tab embedded' 'newline
-embedded' &&
- git commit -m 'add files with tabs and newlines'
-"
-
-test_expect_success \
- 'Pre-check that foo exists and is in index before git rm foo' \
- '[ -f foo ] && git ls-files --error-unmatch foo'
-
-test_expect_success \
- 'Test that git rm foo succeeds' \
- 'git rm --cached foo'
-
-test_expect_success \
- 'Test that git rm --cached foo succeeds if the index matches the file' \
- 'echo content >foo &&
- git add foo &&
- git rm --cached foo'
-
-test_expect_success \
- 'Test that git rm --cached foo succeeds if the index matches the file' \
- 'echo content >foo &&
- git add foo &&
- git commit -m foo &&
- echo "other content" >foo &&
- git rm --cached foo'
-
-test_expect_success \
- 'Test that git rm --cached foo fails if the index matches neither the file nor HEAD' '
- echo content >foo &&
- git add foo &&
- git commit -m foo --allow-empty &&
- echo "other content" >foo &&
- git add foo &&
- echo "yet another content" >foo &&
- test_must_fail git rm --cached foo
-'
-
-test_expect_success \
- 'Test that git rm --cached -f foo works in case where --cached only did not' \
- 'echo content >foo &&
- git add foo &&
- git commit -m foo --allow-empty &&
- echo "other content" >foo &&
- git add foo &&
- echo "yet another content" >foo &&
- git rm --cached -f foo'
-
-test_expect_success \
- 'Post-check that foo exists but is not in index after git rm foo' \
- '[ -f foo ] && test_must_fail git ls-files --error-unmatch foo'
-
-test_expect_success \
- 'Pre-check that bar exists and is in index before "git rm bar"' \
- '[ -f bar ] && git ls-files --error-unmatch bar'
-
-test_expect_success \
- 'Test that "git rm bar" succeeds' \
- 'git rm bar'
-
-test_expect_success \
- 'Post-check that bar does not exist and is not in index after "git rm -f bar"' \
- '! [ -f bar ] && test_must_fail git ls-files --error-unmatch bar'
-
-test_expect_success \
- 'Test that "git rm -- -q" succeeds (remove a file that looks like an option)' \
- 'git rm -- -q'
-
-test_expect_success FUNNYNAMES \
- "Test that \"git rm -f\" succeeds with embedded space, tab, or newline characters." \
- "git rm -f 'space embedded' 'tab embedded' 'newline
-embedded'"
+test_expect_success FUNNYNAMES 'add files with funny names' '
+ touch -- "tab embedded" "newline${LF}embedded" &&
+ git add -- "tab embedded" "newline${LF}embedded" &&
+ git commit -m "add files with tabs and newlines"
+'
+
+test_expect_success 'Pre-check that foo exists and is in index before git rm foo' '
+ test_path_is_file foo &&
+ git ls-files --error-unmatch foo
+'
+
+test_expect_success 'Test that git rm foo succeeds' '
+ git rm --cached foo
+'
+
+test_expect_success 'Test that git rm --cached foo succeeds if the index matches the file' '
+ echo content >foo &&
+ git add foo &&
+ git rm --cached foo
+'
+
+test_expect_success 'Test that git rm --cached foo succeeds if the index matches the file' '
+ echo content >foo &&
+ git add foo &&
+ git commit -m foo &&
+ echo "other content" >foo &&
+ git rm --cached foo
+'
+
+test_expect_success 'Test that git rm --cached foo fails if the index matches neither the file nor HEAD' '
+ echo content >foo &&
+ git add foo &&
+ git commit -m foo --allow-empty &&
+ echo "other content" >foo &&
+ git add foo &&
+ echo "yet another content" >foo &&
+ test_must_fail git rm --cached foo
+'
+
+test_expect_success 'Test that git rm --cached -f foo works in case where --cached only did not' '
+ echo content >foo &&
+ git add foo &&
+ git commit -m foo --allow-empty &&
+ echo "other content" >foo &&
+ git add foo &&
+ echo "yet another content" >foo &&
+ git rm --cached -f foo
+'
+
+test_expect_success 'Post-check that foo exists but is not in index after git rm foo' '
+ test_path_is_file foo &&
+ test_must_fail git ls-files --error-unmatch foo
+'
+
+test_expect_success 'Pre-check that bar exists and is in index before "git rm bar"' '
+ test_path_is_file bar &&
+ git ls-files --error-unmatch bar
+'
+
+test_expect_success 'Test that "git rm bar" succeeds' '
+ git rm bar
+'
+
+test_expect_success 'Post-check that bar does not exist and is not in index after "git rm -f bar"' '
+ test_path_is_missing bar &&
+ test_must_fail git ls-files --error-unmatch bar
+'
+
+test_expect_success 'Test that "git rm -- -q" succeeds (remove a file that looks like an option)' '
+ git rm -- -q
+'
+
+test_expect_success FUNNYNAMES 'Test that "git rm -f" succeeds with embedded space, tab, or newline characters.' '
+ git rm -f "space embedded" "tab embedded" "newline${LF}embedded"
+'
test_expect_success SANITY 'Test that "git rm -f" fails if its rm fails' '
test_when_finished "chmod 775 ." &&
test_must_fail git rm -f baz
'
-test_expect_success \
- 'When the rm in "git rm -f" fails, it should not remove the file from the index' \
- 'git ls-files --error-unmatch baz'
+test_expect_success 'When the rm in "git rm -f" fails, it should not remove the file from the index' '
+ git ls-files --error-unmatch baz
+'
test_expect_success 'Remove nonexistent file with --ignore-unmatch' '
git rm --ignore-unmatch nonexistent
test_expect_success 'Modify foo -- rm should refuse' '
echo >>foo &&
test_must_fail git rm foo baz &&
- test -f foo &&
- test -f baz &&
+ test_path_is_file foo &&
+ test_path_is_file baz &&
git ls-files --error-unmatch foo baz
'
test_expect_success 'Modified foo -- rm -f should work' '
git rm -f foo baz &&
- test ! -f foo &&
- test ! -f baz &&
+ test_path_is_missing foo &&
+ test_path_is_missing baz &&
test_must_fail git ls-files --error-unmatch foo &&
test_must_fail git ls-files --error-unmatch bar
'
test_expect_success 'foo is different in index from HEAD -- rm should refuse' '
test_must_fail git rm foo baz &&
- test -f foo &&
- test -f baz &&
+ test_path_is_file foo &&
+ test_path_is_file baz &&
git ls-files --error-unmatch foo baz
'
test_expect_success 'but with -f it should work.' '
git rm -f foo baz &&
- test ! -f foo &&
- test ! -f baz &&
+ test_path_is_missing foo &&
+ test_path_is_missing baz &&
test_must_fail git ls-files --error-unmatch foo &&
test_must_fail git ls-files --error-unmatch baz
'
test_expect_success 'Recursive without -r fails' '
test_must_fail git rm frotz &&
- test -d frotz &&
- test -f frotz/nitfol
+ test_path_is_dir frotz &&
+ test_path_is_file frotz/nitfol
'
test_expect_success 'Recursive with -r but dirty' '
echo qfwfq >>frotz/nitfol &&
test_must_fail git rm -r frotz &&
- test -d frotz &&
- test -f frotz/nitfol
+ test_path_is_dir frotz &&
+ test_path_is_file frotz/nitfol
'
test_expect_success 'Recursive with -r -f' '
git rm -f -r frotz &&
- ! test -f frotz/nitfol &&
- ! test -d frotz
+ test_path_is_missing frotz/nitfol &&
+ test_path_is_missing frotz
'
test_expect_success 'Remove nonexistent file returns nonzero exit status' '
test_expect_success 'Call "rm" from outside the work tree' '
mkdir repo &&
- (cd repo &&
- git init &&
- echo something >somefile &&
- git add somefile &&
- git commit -m "add a file" &&
- (cd .. &&
- git --git-dir=repo/.git --work-tree=repo rm somefile) &&
- test_must_fail git ls-files --error-unmatch somefile)
+ (
+ cd repo &&
+ git init &&
+ echo something >somefile &&
+ git add somefile &&
+ git commit -m "add a file" &&
+ (
+ cd .. &&
+ git --git-dir=repo/.git --work-tree=repo rm somefile
+ ) &&
+ test_must_fail git ls-files --error-unmatch somefile
+ )
'
test_expect_success 'refresh index before checking if it is up-to-date' '
-
git reset --hard &&
test-tool chmtime -86400 frotz/nitfol &&
git rm frotz/nitfol &&
- test ! -f frotz/nitfol
-
+ test_path_is_missing frotz/nitfol
'
test_expect_success 'choking "git rm" should not let it die with cruft' '
i=0 &&
while test $i -lt 12000
do
- echo "100644 1234567890123456789012345678901234567890 0 some-file-$i"
- i=$(( $i + 1 ))
+ echo "100644 1234567890123456789012345678901234567890 0 some-file-$i"
+ i=$(( $i + 1 ))
done | git update-index --index-info &&
git rm -n "some-file-*" | : &&
test_path_is_missing .git/index.lock
echo content >dir/subdir/subsubdir/file &&
git add dir/subdir/subsubdir/file &&
git rm -f dir/subdir/subsubdir/file &&
- ! test -d dir
+ test_path_is_missing dir
'
cat >expect <<EOF
git add .gitmodules &&
git commit -m "add submodule" &&
git rm submod &&
- test ! -e submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual &&
test_must_fail git config -f .gitmodules submodule.sub.url &&
git reset --hard &&
git submodule update &&
git rm submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual &&
test_must_fail git config -f .gitmodules submodule.sub.url &&
git reset --hard &&
git submodule update &&
git rm submod/ &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git submodule update &&
git -C submod checkout HEAD^ &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.modified actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual &&
test_must_fail git config -f .gitmodules submodule.sub.url &&
git reset --hard &&
git submodule update &&
git rm --cached submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno >actual &&
test_cmp expect.cached actual &&
git config -f .gitmodules submodule.sub.url &&
git reset --hard &&
git submodule update &&
git rm -n submod &&
- test -f submod/.git &&
+ test_path_is_file submod/.git &&
git diff-index --exit-code HEAD
'
git rm .gitmodules &&
git rm submod >actual 2>actual.err &&
test_must_be_empty actual.err &&
- ! test -d submod &&
- ! test -f submod/.git &&
+ test_path_is_missing submod &&
+ test_path_is_missing submod/.git &&
git status -s -uno >actual &&
test_cmp expect.both_deleted actual
'
git submodule update &&
git config -f .gitmodules foo.bar true &&
test_must_fail git rm submod >actual 2>actual.err &&
- test -s actual.err &&
- test -d submod &&
- test -f submod/.git &&
+ test_file_not_empty actual.err &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git diff-files --quiet -- submod &&
git add .gitmodules &&
git rm submod >actual 2>actual.err &&
test_must_be_empty actual.err &&
- ! test -d submod &&
- ! test -f submod/.git &&
+ test_path_is_missing submod &&
+ test_path_is_missing submod/.git &&
git status -s -uno >actual &&
test_cmp expect actual
'
echo "warning: Could not find section in .gitmodules where path=submod" >expect.err &&
git rm submod >actual 2>actual.err &&
test_i18ncmp expect.err actual.err &&
- ! test -d submod &&
- ! test -f submod/.git &&
+ test_path_is_missing submod &&
+ test_path_is_missing submod/.git &&
git status -s -uno >actual &&
test_cmp expect actual
'
git submodule update &&
echo X >submod/empty &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.modified_inside actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git submodule update &&
echo X >submod/untracked &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.modified_untracked actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git submodule update &&
test_must_fail git merge conflict2 &&
git rm submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git -C submod checkout HEAD^ &&
test_must_fail git merge conflict2 &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.conflict actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual &&
test_must_fail git config -f .gitmodules submodule.sub.url &&
echo X >submod/empty &&
test_must_fail git merge conflict2 &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.conflict actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual &&
test_must_fail git config -f .gitmodules submodule.sub.url &&
echo X >submod/untracked &&
test_must_fail git merge conflict2 &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.conflict actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git checkout conflict1 &&
git reset --hard &&
git submodule update &&
- (cd submod &&
+ (
+ cd submod &&
rm .git &&
cp -R ../.git/modules/sub .git &&
GIT_WORK_TREE=. git config --unset core.worktree
) &&
test_must_fail git merge conflict2 &&
test_must_fail git rm submod &&
- test -d submod &&
- test -d submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_dir submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.conflict actual &&
test_must_fail git rm -f submod &&
- test -d submod &&
- test -d submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_dir submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.conflict actual &&
git merge --abort &&
git reset --hard &&
test_must_fail git merge conflict2 &&
git rm submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git checkout -f master &&
git reset --hard &&
git submodule update &&
- (cd submod &&
+ (
+ cd submod &&
rm .git &&
cp -R ../.git/modules/sub .git &&
GIT_WORK_TREE=. git config --unset core.worktree &&
rm -r ../.git/modules/sub
) &&
git rm submod 2>output.err &&
- ! test -d submod &&
- ! test -d submod/.git &&
+ test_path_is_missing submod &&
+ test_path_is_missing submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
- test -s actual &&
+ test_file_not_empty actual &&
test_i18ngrep Migrating output.err
'
test_expect_success 'setup subsubmodule' '
git reset --hard &&
git submodule update &&
- (cd submod &&
+ (
+ cd submod &&
git update-index --add --cacheinfo 160000 $(git rev-parse HEAD) subsubmod &&
git config -f .gitmodules submodule.sub.url ../. &&
git config -f .gitmodules submodule.sub.path subsubmod &&
test_expect_success 'rm recursively removes work tree of unmodified submodules' '
git rm submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git submodule update --recursive &&
git -C submod/subsubmod checkout HEAD^ &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.modified_inside actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git submodule update --recursive &&
echo X >submod/subsubmod/empty &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.modified_inside actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git submodule update --recursive &&
echo X >submod/subsubmod/untracked &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.modified_untracked actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
test_expect_success "rm absorbs submodule's nested .git directory" '
git reset --hard &&
git submodule update --recursive &&
- (cd submod/subsubmod &&
+ (
+ cd submod/subsubmod &&
rm .git &&
mv ../../.git/modules/sub/modules/sub .git &&
GIT_WORK_TREE=. git config --unset core.worktree
) &&
git rm submod 2>output.err &&
- ! test -d submod &&
- ! test -d submod/subsubmod/.git &&
+ test_path_is_missing submod &&
+ test_path_is_missing submod/subsubmod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
- test -s actual &&
+ test_file_not_empty actual &&
test_i18ngrep Migrating output.err
'
. ./test-lib.sh
test_expect_success 'stash some dirty working directory' '
- echo 1 > file &&
+ echo 1 >file &&
git add file &&
echo unrelated >other-file &&
git add other-file &&
test_tick &&
git commit -m initial &&
- echo 2 > file &&
+ echo 2 >file &&
git add file &&
- echo 3 > file &&
+ echo 3 >file &&
test_tick &&
git stash &&
git diff-files --quiet &&
git diff-index --cached --quiet HEAD
'
-cat > expect << EOF
+cat >expect <<EOF
diff --git a/file b/file
index 0cfbf08..00750ed 100644
--- a/file
test_expect_success 'parents of stash' '
test $(git rev-parse stash^) = $(git rev-parse HEAD) &&
- git diff stash^2..stash > output &&
+ git diff stash^2..stash >output &&
test_cmp expect output
'
test_expect_success 'apply stashed changes (including index)' '
git reset --hard HEAD^ &&
- echo 6 > other-file &&
+ echo 6 >other-file &&
git add other-file &&
test_tick &&
git commit -m other-file &&
test_expect_success 'drop top stash' '
git reset --hard &&
- git stash list > stashlist1 &&
- echo 7 > file &&
+ git stash list >expected &&
+ echo 7 >file &&
git stash &&
git stash drop &&
- git stash list > stashlist2 &&
- test_cmp stashlist1 stashlist2 &&
+ git stash list >actual &&
+ test_cmp expected actual &&
git stash apply &&
test 3 = $(cat file) &&
test 1 = $(git show :file) &&
test_expect_success 'drop middle stash' '
git reset --hard &&
- echo 8 > file &&
+ echo 8 >file &&
git stash &&
- echo 9 > file &&
+ echo 9 >file &&
git stash &&
git stash drop stash@{1} &&
test 2 = $(git stash list | wc -l) &&
test 0 = $(git stash list | wc -l)
'
-cat > expect << EOF
+cat >expect <<EOF
diff --git a/file2 b/file2
new file mode 100644
index 0000000..1fe912c
+bar2
EOF
-cat > expect1 << EOF
+cat >expect1 <<EOF
diff --git a/file b/file
index 257cc56..5716ca5 100644
--- a/file
+bar
EOF
-cat > expect2 << EOF
+cat >expect2 <<EOF
diff --git a/file b/file
index 7601807..5716ca5 100644
--- a/file
EOF
test_expect_success 'stash branch' '
- echo foo > file &&
+ echo foo >file &&
git commit file -m first &&
- echo bar > file &&
- echo bar2 > file2 &&
+ echo bar >file &&
+ echo bar2 >file2 &&
git add file2 &&
git stash &&
- echo baz > file &&
+ echo baz >file &&
git commit file -m second &&
git stash branch stashbranch &&
test refs/heads/stashbranch = $(git symbolic-ref HEAD) &&
test $(git rev-parse HEAD) = $(git rev-parse master^) &&
- git diff --cached > output &&
+ git diff --cached >output &&
test_cmp expect output &&
- git diff > output &&
+ git diff >output &&
test_cmp expect1 output &&
git add file &&
git commit -m alternate\ second &&
- git diff master..stashbranch > output &&
+ git diff master..stashbranch >output &&
test_cmp output expect2 &&
test 0 = $(git stash list | wc -l)
'
test_expect_success 'apply -q is quiet' '
- echo foo > file &&
+ echo foo >file &&
git stash &&
- git stash apply -q > output.out 2>&1 &&
+ git stash apply -q >output.out 2>&1 &&
test_must_be_empty output.out
'
test_expect_success 'save -q is quiet' '
- git stash save --quiet > output.out 2>&1 &&
+ git stash save --quiet >output.out 2>&1 &&
test_must_be_empty output.out
'
test_expect_success 'pop -q is quiet' '
- git stash pop -q > output.out 2>&1 &&
+ git stash pop -q >output.out 2>&1 &&
test_must_be_empty output.out
'
test_expect_success 'pop -q --index works and is quiet' '
- echo foo > file &&
+ echo foo >file &&
git add file &&
git stash save --quiet &&
- git stash pop -q --index > output.out 2>&1 &&
+ git stash pop -q --index >output.out 2>&1 &&
test foo = "$(git show :file)" &&
test_must_be_empty output.out
'
test_expect_success 'drop -q is quiet' '
git stash &&
- git stash drop -q > output.out 2>&1 &&
+ git stash drop -q >output.out 2>&1 &&
test_must_be_empty output.out
'
test_expect_success 'stash -k' '
- echo bar3 > file &&
- echo bar4 > file2 &&
+ echo bar3 >file &&
+ echo bar4 >file2 &&
git add file2 &&
git stash -k &&
test bar,bar4 = $(cat file),$(cat file2)
'
test_expect_success 'stash --no-keep-index' '
- echo bar33 > file &&
- echo bar44 > file2 &&
+ echo bar33 >file &&
+ echo bar44 >file2 &&
git add file2 &&
git stash --no-keep-index &&
test bar,bar2 = $(cat file),$(cat file2)
'
test_expect_success 'stash --invalid-option' '
- echo bar5 > file &&
- echo bar6 > file2 &&
+ echo bar5 >file &&
+ echo bar6 >file2 &&
git add file2 &&
test_must_fail git stash --invalid-option &&
test_must_fail git stash save --invalid-option &&
test new = "$(cat file3)"
'
+test_expect_success 'stash --intent-to-add file' '
+ git reset --hard &&
+ echo new >file4 &&
+ git add --intent-to-add file4 &&
+ test_when_finished "git rm -f file4" &&
+ test_must_fail git stash
+'
+
test_expect_success 'stash rm then recreate' '
git reset --hard &&
git rm file &&
test foo = "$(cat file/file)"
'
+test_expect_success 'giving too many ref arguments does not modify files' '
+ git stash clear &&
+ test_when_finished "git reset --hard HEAD" &&
+ echo foo >file2 &&
+ git stash &&
+ echo bar >file2 &&
+ git stash &&
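+ # stamp a known mtime on file2 so the loop below can verify that the
+ # failing stash invocations left the working tree untouched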
+ test-tool chmtime =123456789 file2 &&
+ for type in apply pop "branch stash-branch"
+ do
+ test_must_fail git stash $type stash@{0} stash@{1} 2>err &&
+ test_i18ngrep "Too many revisions" err &&
+ test 123456789 = $(test-tool chmtime -g file2) || return 1
+ done
+'
+
+test_expect_success 'drop: too many arguments errors out (does nothing)' '
+ git stash list >expect &&
+ test_must_fail git stash drop stash@{0} stash@{1} 2>err &&
+ test_i18ngrep "Too many revisions" err &&
+ git stash list >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'show: too many arguments errors out (does nothing)' '
+ test_must_fail git stash show stash@{0} stash@{1} 2>err 1>out &&
+ test_i18ngrep "Too many revisions" err &&
+ test_must_be_empty out
+'
+
test_expect_success 'stash create - no changes' '
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git reset --hard &&
- echo foo >> file &&
+ echo foo >>file &&
STASH_ID=$(git stash create) &&
git reset --hard &&
git stash branch stash-branch ${STASH_ID} &&
- test_when_finished "git reset --hard HEAD && git checkout master && git branch -D stash-branch" &&
+ test_when_finished "git reset --hard HEAD && git checkout master &&
+ git branch -D stash-branch" &&
test $(git ls-files --modified | wc -l) -eq 1
'
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git reset --hard &&
- echo foo >> file &&
+ echo foo >>file &&
git stash &&
test_when_finished "git stash drop" &&
- echo bar >> file &&
+ echo bar >>file &&
STASH_ID=$(git stash create) &&
git reset --hard &&
git stash branch stash-branch ${STASH_ID} &&
- test_when_finished "git reset --hard HEAD && git checkout master && git branch -D stash-branch" &&
+ test_when_finished "git reset --hard HEAD && git checkout master &&
+ git branch -D stash-branch" &&
test $(git ls-files --modified | wc -l) -eq 1
'
+test_expect_success 'stash branch complains with no arguments' '
+ test_must_fail git stash branch 2>err &&
+ test_i18ngrep "No branch name specified" err
+'
+
test_expect_success 'stash show format defaults to --stat' '
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git reset --hard &&
- echo foo >> file &&
+ echo foo >>file &&
git stash &&
test_when_finished "git stash drop" &&
- echo bar >> file &&
+ echo bar >>file &&
STASH_ID=$(git stash create) &&
git reset --hard &&
cat >expected <<-EOF &&
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git reset --hard &&
- echo foo >> file &&
+ echo foo >>file &&
git stash &&
test_when_finished "git stash drop" &&
- echo bar >> file &&
+ echo bar >>file &&
STASH_ID=$(git stash create) &&
git reset --hard &&
echo "1 0 file" >expected &&
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git reset --hard &&
- echo foo >> file &&
+ echo foo >>file &&
git stash &&
test_when_finished "git stash drop" &&
- echo bar >> file &&
+ echo bar >>file &&
STASH_ID=$(git stash create) &&
git reset --hard &&
cat >expected <<-EOF &&
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git reset --hard &&
- echo foo >> file &&
+ echo foo >>file &&
STASH_ID=$(git stash create) &&
git reset --hard &&
echo "1 0 file" >expected &&
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git reset --hard &&
- echo foo >> file &&
+ echo foo >>file &&
STASH_ID=$(git stash create) &&
git reset --hard &&
cat >expected <<-EOF &&
test_cmp expected actual
'
-test_expect_success 'stash drop - fail early if specified stash is not a stash reference' '
+test_expect_success 'stash show --patience shows diff' '
+ git reset --hard &&
+ echo foo >>file &&
+ STASH_ID=$(git stash create) &&
+ git reset --hard &&
+ cat >expected <<-EOF &&
+ diff --git a/file b/file
+ index 7601807..71b52c4 100644
+ --- a/file
+ +++ b/file
+ @@ -1 +1,2 @@
+ baz
+ +foo
+ EOF
+ git stash show --patience ${STASH_ID} >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'drop: fail early if specified stash is not a stash ref' '
git stash clear &&
test_when_finished "git reset --hard HEAD && git stash clear" &&
git reset --hard &&
- echo foo > file &&
+ echo foo >file &&
git stash &&
- echo bar > file &&
+ echo bar >file &&
git stash &&
test_must_fail git stash drop $(git rev-parse stash@{0}) &&
git stash pop &&
git reset --hard HEAD
'
-test_expect_success 'stash pop - fail early if specified stash is not a stash reference' '
+test_expect_success 'pop: fail early if specified stash is not a stash ref' '
git stash clear &&
test_when_finished "git reset --hard HEAD && git stash clear" &&
git reset --hard &&
- echo foo > file &&
+ echo foo >file &&
git stash &&
- echo bar > file &&
+ echo bar >file &&
git stash &&
test_must_fail git stash pop $(git rev-parse stash@{0}) &&
git stash pop &&
test_expect_success 'ref with non-existent reflog' '
git stash clear &&
- echo bar5 > file &&
- echo bar6 > file2 &&
+ echo bar5 >file &&
+ echo bar6 >file2 &&
git add file2 &&
git stash &&
test_must_fail git rev-parse --quiet --verify does-not-exist &&
test_expect_success 'invalid ref of the form stash@{n}, n >= N' '
git stash clear &&
test_must_fail git stash drop stash@{0} &&
- echo bar5 > file &&
- echo bar6 > file2 &&
+ echo bar5 >file &&
+ echo bar6 >file2 &&
git add file2 &&
git stash &&
test_must_fail git stash drop stash@{1} &&
git stash drop
'
-test_expect_success 'stash branch should not drop the stash if the branch exists' '
+test_expect_success 'branch: do not drop the stash if the branch exists' '
git stash clear &&
echo foo >file &&
git add file &&
git rev-parse stash@{0} --
'
-test_expect_success 'stash branch should not drop the stash if the apply fails' '
+test_expect_success 'branch: should not drop the stash if the apply fails' '
git stash clear &&
git reset HEAD~1 --hard &&
echo foo >file &&
git rev-parse stash@{0} --
'
-test_expect_success 'stash apply shows status same as git status (relative to current directory)' '
+test_expect_success 'apply: show same status as git status (relative to ./)' '
git stash clear &&
echo 1 >subdir/subfile1 &&
echo 2 >subdir/subfile2 &&
test_i18ncmp expect actual
'
-cat > expect << EOF
+cat >expect <<EOF
diff --git a/HEAD b/HEAD
new file mode 100644
index 0000000..fe0cbee
test_expect_success 'stash where working directory contains "HEAD" file' '
git stash clear &&
git reset --hard &&
- echo file-not-a-ref > HEAD &&
+ echo file-not-a-ref >HEAD &&
git add HEAD &&
test_tick &&
git stash &&
git diff-files --quiet &&
git diff-index --cached --quiet HEAD &&
test "$(git rev-parse stash^)" = "$(git rev-parse HEAD)" &&
- git diff stash^..stash > output &&
+ git diff stash^..stash >output &&
test_cmp expect output
'
test_i18ncmp expect actual
'
-test_expect_success 'stash push with pathspec shows no changes when there are none' '
+test_expect_success 'push <pathspec>: show no changes when there are none' '
>foo &&
git add foo &&
git commit -m "tmp" &&
test_i18ncmp expect actual
'
-test_expect_success 'stash push with pathspec not in the repository errors out' '
+test_expect_success 'push: <pathspec> not in the repository errors out' '
>untracked &&
test_must_fail git stash push untracked &&
test_path_is_file untracked
'
+test_expect_success 'push: -q is quiet with changes' '
+ >foo &&
+ git add foo &&
+ git stash push -q >output 2>&1 &&
+ test_must_be_empty output
+'
+
+test_expect_success 'push: -q is quiet with no changes' '
+ git stash push -q >output 2>&1 &&
+ test_must_be_empty output
+'
+
+test_expect_success 'push: -q is quiet even if there is no initial commit' '
+ git init foo_dir &&
+ test_when_finished rm -rf foo_dir &&
+ (
+ cd foo_dir &&
+ >bar &&
+ test_must_fail git stash push -q >output 2>&1 &&
+ test_must_be_empty output
+ )
+'
+
test_expect_success 'untracked files are left in place when -u is not given' '
>file &&
git add file &&
test_path_is_file subdir/untracked
'
+test_expect_success 'stash with user.name and user.email set works' '
+ test_config user.name "A U Thor" &&
+ test_config user.email "a.u@thor" &&
+ git stash
+'
+
test_expect_success 'stash works when user.name and user.email are not set' '
git reset &&
>1 &&
test_i18ncmp expect actual
'
+test_expect_success 'stash -u with globs' '
+ >untracked.txt &&
+ git stash -u -- ":(glob)**/*.txt" &&
+ test_path_is_missing untracked.txt
+'
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='Test git stash show configuration.'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_commit file
+'
+
+# takes three parameters:
+# 1. the stash.showStat value (or "<unset>")
+# 2. the stash.showPatch value (or "<unset>")
+# 3. the diff options of the expected output (or nothing for no output)
+test_stat_and_patch () {
+ if test "<unset>" = "$1"
+ then
+ test_unconfig stash.showStat
+ else
+ test_config stash.showStat "$1"
+ fi &&
+
+ if test "<unset>" = "$2"
+ then
+ test_unconfig stash.showPatch
+ else
+ test_config stash.showPatch "$2"
+ fi &&
+
+ shift 2 &&
+ echo 2 >file.t &&
+ if test $# != 0
+ then
+ git diff "$@" >expect
+ fi &&
+ git stash &&
+ git stash show >actual &&
+
+ if test $# = 0
+ then
+ test_must_be_empty actual
+ else
+ test_cmp expect actual
+ fi
+}
+
+test_expect_success 'showStat unset showPatch unset' '
+ test_stat_and_patch "<unset>" "<unset>" --stat
+'
+
+test_expect_success 'showStat unset showPatch false' '
+ test_stat_and_patch "<unset>" false --stat
+'
+
+test_expect_success 'showStat unset showPatch true' '
+ test_stat_and_patch "<unset>" true --stat -p
+'
+
+test_expect_success 'showStat false showPatch unset' '
+ test_stat_and_patch false "<unset>"
+'
+
+test_expect_success 'showStat false showPatch false' '
+ test_stat_and_patch false false
+'
+
+test_expect_success 'showStat false showPatch true' '
+ test_stat_and_patch false true -p
+'
+
+test_expect_success 'showStat true showPatch unset' '
+ test_stat_and_patch true "<unset>" --stat
+'
+
+test_expect_success 'showStat true showPatch false' '
+ test_stat_and_patch true false --stat
+'
+
+test_expect_success 'showStat true showPatch true' '
+ test_stat_and_patch true true --stat -p
+'
+
+test_done
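
The table-driven tests above pin down how `git stash show` combines the `stash.showStat` and `stash.showPatch` settings. As a minimal sketch of the end-user effect (assuming a repository with something stashed), setting both to true makes a bare `git stash show` behave like `git stash show --stat -p`:

    # Show both the diffstat and the patch by default; the tests above
    # check the same combination via: test_stat_and_patch true true --stat -p
    git config stash.showStat true
    git config stash.showPatch true
    git stash show
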
ls patches/0004-This-is-an-excessively-long-subject-line-for-a-messa.patch
'
+test_expect_success 'failure to write cover-letter aborts gracefully' '
+ test_when_finished "rmdir 0000-cover-letter.patch" &&
+ mkdir 0000-cover-letter.patch &&
+ test_must_fail git format-patch --no-renames --cover-letter -1
+'
+
test_expect_success 'cover-letter inherits diff options' '
git mv file foo &&
git commit -m foo &&
test_cmp expect actual
'
+test_expect_success 'setup for --combined-all-paths' '
+ git branch side1c &&
+ git branch side2c &&
+ git checkout side1c &&
+ test_seq 1 10 >filename-side1c &&
+ git add filename-side1c &&
+ git commit -m with &&
+ git checkout side2c &&
+ test_seq 1 9 >filename-side2c &&
+ echo ten >>filename-side2c &&
+ git add filename-side2c &&
+ git commit -m iam &&
+ git checkout -b mergery side1c &&
+ git merge --no-commit side2c &&
+ git rm filename-side1c &&
+ echo eleven >>filename-side2c &&
+ git mv filename-side2c filename-merged &&
+ git add filename-merged &&
+ git commit
+'
+
+test_expect_success '--combined-all-paths and --raw' '
+ cat <<-\EOF >expect &&
+ ::100644 100644 100644 f00c965d8307308469e537302baa73048488f162 088bd5d92c2a8e0203ca8e7e4c2a5c692f6ae3f7 333b9c62519f285e1854830ade0fe1ef1d40ee1b RR filename-side1c filename-side2c filename-merged
+ EOF
+ git diff-tree -c -M --raw --combined-all-paths HEAD >actual.tmp &&
+ sed 1d <actual.tmp >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '--combined-all-paths and --cc' '
+ cat <<-\EOF >expect &&
+ --- a/filename-side1c
+ --- a/filename-side2c
+ +++ b/filename-merged
+ EOF
+ git diff-tree --cc -M --combined-all-paths HEAD >actual.tmp &&
+ grep ^[-+][-+][-+] <actual.tmp >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success FUNNYNAMES 'setup for --combined-all-paths with funny names' '
+ git branch side1d &&
+ git branch side2d &&
+ git checkout side1d &&
+ test_seq 1 10 >"$(printf "file\twith\ttabs")" &&
+ git add file* &&
+ git commit -m with &&
+ git checkout side2d &&
+ test_seq 1 9 >"$(printf "i\tam\ttabbed")" &&
+ echo ten >>"$(printf "i\tam\ttabbed")" &&
+ git add *tabbed &&
+ git commit -m iam &&
+ git checkout -b funny-names-mergery side1d &&
+ git merge --no-commit side2d &&
+ git rm *tabs &&
+ echo eleven >>"$(printf "i\tam\ttabbed")" &&
+ git mv "$(printf "i\tam\ttabbed")" "$(printf "fickle\tnaming")" &&
+ git add fickle* &&
+ git commit
+'
+
+test_expect_success FUNNYNAMES '--combined-all-paths and --raw and funny names' '
+ cat <<-\EOF >expect &&
+ ::100644 100644 100644 f00c965d8307308469e537302baa73048488f162 088bd5d92c2a8e0203ca8e7e4c2a5c692f6ae3f7 333b9c62519f285e1854830ade0fe1ef1d40ee1b RR "file\twith\ttabs" "i\tam\ttabbed" "fickle\tnaming"
+ EOF
+ git diff-tree -c -M --raw --combined-all-paths HEAD >actual.tmp &&
+ sed 1d <actual.tmp >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success FUNNYNAMES '--combined-all-paths and --raw and -z and funny names' '
+ printf "aaf8087c3cbd4db8e185a2d074cf27c53cfb75d7\0::100644 100644 100644 f00c965d8307308469e537302baa73048488f162 088bd5d92c2a8e0203ca8e7e4c2a5c692f6ae3f7 333b9c62519f285e1854830ade0fe1ef1d40ee1b RR\0file\twith\ttabs\0i\tam\ttabbed\0fickle\tnaming\0" >expect &&
+ git diff-tree -c -M --raw --combined-all-paths -z HEAD >actual &&
+ test_cmp -a expect actual
+'
+
+test_expect_success FUNNYNAMES '--combined-all-paths and --cc and funny names' '
+ cat <<-\EOF >expect &&
+ --- "a/file\twith\ttabs"
+ --- "a/i\tam\ttabbed"
+ +++ "b/fickle\tnaming"
+ EOF
+ git diff-tree --cc -M --combined-all-paths HEAD >actual.tmp &&
+ grep ^[-+][-+][-+] <actual.tmp >actual &&
+ test_cmp expect actual
+'
+
test_done
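
For orientation: `--combined-all-paths` makes combined diffs of merge commits list the path from each parent in addition to the post-merge path, which is what the `RR` raw lines and the doubled `--- a/...` headers above encode. A hedged sketch, reusing the merge commits created in the setup tests:

    # Raw format: one preimage path per parent plus the final path
    # (filename-side1c, filename-side2c -> filename-merged).
    git diff-tree -c -M --raw --combined-all-paths HEAD
    # Patch (--cc) format: one "--- a/<path>" line per parent.
    git diff-tree --cc -M --combined-all-paths HEAD
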
export GIT_CEILING_DIRECTORIES &&
cd non/git &&
test_must_fail git diff --no-index a 2>actual.err &&
- echo "usage: git diff --no-index <path> <path>" >expect.err &&
- test_cmp expect.err actual.err
+ test_i18ngrep "usage: git diff --no-index" actual.err
)
'
test_cmp expect actual
'
+test_expect_success 'diff --no-index allows external diff' '
+ test_expect_code 1 \
+ env GIT_EXTERNAL_DIFF="echo external ;:" \
+ git diff --no-index non/git/a non/git/b >actual &&
+ echo external >expect &&
+ test_cmp expect actual
+'
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='behavior of diff when reading objects in a partial clone'
+
+. ./test-lib.sh
+
+test_expect_success 'git show batches blobs' '
+ test_when_finished "rm -rf server client trace" &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ echo b >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+ git clone --bare --filter=blob:limit=0 "file://$(pwd)/server" client &&
+
+ # Ensure that there is exactly 1 negotiation by checking that there is
+ # only 1 "done" line sent. ("done" marks the end of negotiation.)
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client show HEAD &&
+ grep "git> done" trace >done_lines &&
+ test_line_count = 1 done_lines
+'
+
+test_expect_success 'diff batches blobs' '
+ test_when_finished "rm -rf server client trace" &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ echo b >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+ echo c >server/c &&
+ echo d >server/d &&
+ git -C server add c d &&
+ git -C server commit -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+ git clone --bare --filter=blob:limit=0 "file://$(pwd)/server" client &&
+
+ # Ensure that there is exactly 1 negotiation by checking that there is
+ # only 1 "done" line sent. ("done" marks the end of negotiation.)
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client diff HEAD^ HEAD &&
+ grep "git> done" trace >done_lines &&
+ test_line_count = 1 done_lines
+'
+
+test_expect_success 'diff skips same-OID blobs' '
+ test_when_finished "rm -rf server client trace" &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ echo b >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+ echo another-a >server/a &&
+ git -C server add a &&
+ git -C server commit -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+ git clone --bare --filter=blob:limit=0 "file://$(pwd)/server" client &&
+
+ echo a | git hash-object --stdin >hash-old-a &&
+ echo another-a | git hash-object --stdin >hash-new-a &&
+ echo b | git hash-object --stdin >hash-b &&
+
+ # Ensure that only a and another-a are fetched.
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client diff HEAD^ HEAD &&
+ grep "want $(cat hash-old-a)" trace &&
+ grep "want $(cat hash-new-a)" trace &&
+ ! grep "want $(cat hash-b)" trace
+'
+
+test_expect_success 'diff with rename detection batches blobs' '
+ test_when_finished "rm -rf server client trace" &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ printf "b\nb\nb\nb\nb\n" >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+ rm server/b &&
+ printf "b\nb\nb\nb\nbX\n" >server/c &&
+ git -C server add c &&
+ git -C server commit -a -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+ git clone --bare --filter=blob:limit=0 "file://$(pwd)/server" client &&
+
+ # Ensure that there is exactly 1 negotiation by checking that there is
+ # only 1 "done" line sent. ("done" marks the end of negotiation.)
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client diff -M HEAD^ HEAD >out &&
+ grep "similarity index" out &&
+ grep "git> done" trace >done_lines &&
+ test_line_count = 1 done_lines
+'
+
+test_done
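
All four tests above share the same scaffolding: a blob-less partial clone of a small server repository, with GIT_TRACE_PACKET used to count how many fetch negotiations a diff triggers. A condensed sketch of that setup, using the same server/client layout as the tests:

    # Allow filtered and on-demand fetches, then clone without any blobs.
    git -C server config uploadpack.allowfilter 1
    git -C server config uploadpack.allowanysha1inwant 1
    git clone --bare --filter=blob:limit=0 "file://$(pwd)/server" client
    # Each negotiation ends with a "done" packet, so counting those lines
    # shows whether the missing blobs were fetched in a single batch.
    GIT_TRACE_PACKET="$(pwd)/trace" git -C client diff HEAD^ HEAD
    grep "git> done" trace
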
printf "Subject: " >subject-prefix &&
- cat - subject-prefix msg-without-scissors-line >msg-with-scissors-line <<-\EOF &&
+ cat - subject-prefix msg-without-scissors-line >msg-with-scissors-line <<-\EOF
This line should not be included in the commit message with --scissors enabled.
- - >8 - - remove everything above this line - - >8 - -
EOF
-
- signoff="Signed-off-by: $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL>"
'
test_expect_success setup '
test_cmp expect actual
'
+test_expect_success '%(trailers:only=yes) shows only "key: value" trailers' '
+ git log --no-walk --pretty=format:"%(trailers:only=yes)" >actual &&
+ grep -v patch.description <trailers >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success '%(trailers:only=no) shows all trailers' '
+ git log --no-walk --pretty=format:"%(trailers:only=no)" >actual &&
+ cat trailers >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success '%(trailers:only=no,only=true) shows only "key: value" trailers' '
+	git log --no-walk --pretty=format:"%(trailers:only=no,only=true)" >actual &&
+ grep -v patch.description <trailers >expect &&
+ test_cmp expect actual
+'
+
test_expect_success '%(trailers:unfold) unfolds trailers' '
git log --no-walk --pretty="%(trailers:unfold)" >actual &&
{
test_cmp expect actual
'
+test_expect_success 'pretty format %(trailers:key=foo) shows that trailer' '
+ git log --no-walk --pretty="format:%(trailers:key=Acked-by)" >actual &&
+ echo "Acked-by: A U Thor <author@example.com>" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'pretty format %(trailers:key=foo) is case insensitive' '
+ git log --no-walk --pretty="format:%(trailers:key=AcKed-bY)" >actual &&
+ echo "Acked-by: A U Thor <author@example.com>" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'pretty format %(trailers:key=foo:) trailing colon also works' '
+ git log --no-walk --pretty="format:%(trailers:key=Acked-by:)" >actual &&
+ echo "Acked-by: A U Thor <author@example.com>" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'pretty format %(trailers:key=foo) multiple keys' '
+ git log --no-walk --pretty="format:%(trailers:key=Acked-by:,key=Signed-off-By)" >actual &&
+ grep -v patch.description <trailers >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success '%(trailers:key=nonexistent) becomes empty' '
+ git log --no-walk --pretty="x%(trailers:key=Nacked-by)x" >actual &&
+ echo "xx" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success '%(trailers:key=foo) handles multiple lines even if folded' '
+ git log --no-walk --pretty="format:%(trailers:key=Signed-Off-by)" >actual &&
+ grep -v patch.description <trailers | grep -v Acked-by >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success '%(trailers:key=foo,unfold) properly unfolds' '
+ git log --no-walk --pretty="format:%(trailers:key=Signed-Off-by,unfold)" >actual &&
+ unfold <trailers | grep Signed-off-by >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'pretty format %(trailers:key=foo,only=no) also includes nontrailer lines' '
+ git log --no-walk --pretty="format:%(trailers:key=Acked-by,only=no)" >actual &&
+ {
+ echo "Acked-by: A U Thor <author@example.com>" &&
+ grep patch.description <trailers
+ } >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success '%(trailers:key) without value is error' '
+ git log --no-walk --pretty="tformat:%(trailers:key)" >actual &&
+ echo "%(trailers:key)" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success '%(trailers:key=foo,valueonly) shows only value' '
+ git log --no-walk --pretty="format:%(trailers:key=Acked-by,valueonly)" >actual &&
+ echo "A U Thor <author@example.com>" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'pretty format %(trailers:separator) changes separator' '
+ git log --no-walk --pretty=format:"X%(trailers:separator=%x00,unfold)X" >actual &&
+ printf "XSigned-off-by: A U Thor <author@example.com>\0Acked-by: A U Thor <author@example.com>\0[ v2 updated patch description ]\0Signed-off-by: A U Thor <author@example.com>X" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'pretty format %(trailers) combining separator/key/valueonly' '
+ git commit --allow-empty -F - <<-\EOF &&
+ Important fix
+
+ The fix is explained here
+
+ Closes: #1234
+ EOF
+
+ git commit --allow-empty -F - <<-\EOF &&
+ Another fix
+
+ The fix is explained here
+
+ Closes: #567
+ Closes: #890
+ EOF
+
+ git commit --allow-empty -F - <<-\EOF &&
+ Does not close any tickets
+ EOF
+
+ git log --pretty="%s% (trailers:separator=%x2c%x20,key=Closes,valueonly)" HEAD~3.. >actual &&
+ test_write_lines \
+ "Does not close any tickets" \
+ "Another fix #567, #890" \
+ "Important fix #1234" >expect &&
+ test_cmp expect actual
+'
+
test_expect_success 'trailer parsing not fooled by --- line' '
git commit --allow-empty -F - <<-\EOF &&
this is the subject
git log $(for x in $(test_seq 200); do echo -L $((2*x)),+1:c.c; done)
'
+test_expect_success '-s shows only line-log commits' '
+ git log --format="commit %s" -L1,24:b.c >expect.raw &&
+ grep ^commit expect.raw >expect &&
+ git log --format="commit %s" -L1,24:b.c -s >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '-p shows the default patch output' '
+ git log -L1,24:b.c >expect &&
+ git log -L1,24:b.c -p >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '--raw is forbidden' '
+ test_must_fail git log -L1,24:b.c --raw
+'
+
test_done
test_expect_success 'prune: prune former HEAD after checking out branch' '
- head_sha1=$(git rev-parse HEAD) &&
+ head_oid=$(git rev-parse HEAD) &&
git checkout --quiet master &&
git prune -v >prune_actual &&
- grep "$head_sha1" prune_actual
+ grep "$head_oid" prune_actual
'
'
test_expect_success 'prune .git/shallow' '
- SHA1=$(echo hi|git commit-tree HEAD^{tree}) &&
- echo $SHA1 >.git/shallow &&
+ oid=$(echo hi|git commit-tree HEAD^{tree}) &&
+ echo $oid >.git/shallow &&
git prune --dry-run >out &&
- grep $SHA1 .git/shallow &&
- grep $SHA1 out &&
+ grep $oid .git/shallow &&
+ grep $oid out &&
git prune &&
test_path_is_missing .git/shallow
'
+test_expect_success 'prune .git/shallow when there are no loose objects' '
+ oid=$(echo hi|git commit-tree HEAD^{tree}) &&
+ echo $oid >.git/shallow &&
+ git update-ref refs/heads/shallow-tip $oid &&
+ git repack -ad &&
+ # verify assumption that all loose objects are gone
+ git count-objects | grep ^0 &&
+ git prune &&
+ echo $oid >expect &&
+ test_cmp expect .git/shallow
+'
+
test_expect_success 'prune: handle alternate object database' '
test_create_repo A &&
git -C A commit --allow-empty -m "initial commit" &&
git reset --hard HEAD^
) &&
git prune --expire=now &&
- SHA1=`git hash-object expected` &&
- git -C third-worktree show "$SHA1" >actual &&
+ oid=`git hash-object expected` &&
+ git -C third-worktree show "$oid" >actual &&
test_cmp expected actual
'
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r1 pack-objects --rev --stdout >all.pack <<-EOF &&
+ git -C r1 pack-objects --revs --stdout >all.pack <<-EOF &&
HEAD
EOF
git -C r1 index-pack ../all.pack &&
'
test_expect_success 'verify blob:none packfile has no blobs' '
- git -C r1 pack-objects --rev --stdout --filter=blob:none >filter.pack <<-EOF &&
+ git -C r1 pack-objects --revs --stdout --filter=blob:none >filter.pack <<-EOF &&
HEAD
EOF
git -C r1 index-pack ../filter.pack &&
git -C r5 commit -m "foo" &&
del=$(git -C r5 rev-parse HEAD^{tree} | sed "s|..|&/|") &&
rm r5/.git/objects/$del &&
- test_must_fail git -C r5 pack-objects --rev --stdout 2>bad_tree <<-EOF &&
+ test_must_fail git -C r5 pack-objects --revs --stdout 2>bad_tree <<-EOF &&
HEAD
EOF
grep "bad tree object" bad_tree
'
test_expect_success 'verify tree:0 packfile has no blobs or trees' '
- git -C r1 pack-objects --rev --stdout --filter=tree:0 >commitsonly.pack <<-EOF &&
+ git -C r1 pack-objects --revs --stdout --filter=tree:0 >commitsonly.pack <<-EOF &&
HEAD
EOF
git -C r1 index-pack ../commitsonly.pack &&
test_expect_success 'grab tree directly when using tree:0' '
# We should get the tree specified directly but not its blobs or subtrees.
- git -C r1 pack-objects --rev --stdout --filter=tree:0 >commitsonly.pack <<-EOF &&
+ git -C r1 pack-objects --revs --stdout --filter=tree:0 >commitsonly.pack <<-EOF &&
HEAD:
EOF
git -C r1 index-pack ../commitsonly.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r2 pack-objects --rev --stdout >all.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout >all.pack <<-EOF &&
HEAD
EOF
git -C r2 index-pack ../all.pack &&
'
test_expect_success 'verify blob:limit=500 omits all blobs' '
- git -C r2 pack-objects --rev --stdout --filter=blob:limit=500 >filter.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout --filter=blob:limit=500 >filter.pack <<-EOF &&
HEAD
EOF
git -C r2 index-pack ../filter.pack &&
'
test_expect_success 'verify blob:limit=1000' '
- git -C r2 pack-objects --rev --stdout --filter=blob:limit=1000 >filter.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout --filter=blob:limit=1000 >filter.pack <<-EOF &&
HEAD
EOF
git -C r2 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r2 pack-objects --rev --stdout --filter=blob:limit=1001 >filter.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout --filter=blob:limit=1001 >filter.pack <<-EOF &&
HEAD
EOF
git -C r2 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r2 pack-objects --rev --stdout --filter=blob:limit=10001 >filter.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout --filter=blob:limit=10001 >filter.pack <<-EOF &&
HEAD
EOF
git -C r2 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r2 pack-objects --rev --stdout --filter=blob:limit=1k >filter.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout --filter=blob:limit=1k >filter.pack <<-EOF &&
HEAD
EOF
git -C r2 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r2 pack-objects --rev --stdout --filter=blob:limit=1k >filter.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout --filter=blob:limit=1k >filter.pack <<-EOF &&
HEAD
$(git -C r2 rev-parse HEAD:large.10000)
EOF
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r2 pack-objects --rev --stdout --filter=blob:limit=1m >filter.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout --filter=blob:limit=1m >filter.pack <<-EOF &&
HEAD
EOF
git -C r2 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r3 pack-objects --rev --stdout >all.pack <<-EOF &&
+ git -C r3 pack-objects --revs --stdout >all.pack <<-EOF &&
HEAD
EOF
git -C r3 index-pack ../all.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r3 pack-objects --rev --stdout --filter=sparse:path=../pattern1 >filter.pack <<-EOF &&
+ git -C r3 pack-objects --revs --stdout --filter=sparse:path=../pattern1 >filter.pack <<-EOF &&
HEAD
EOF
git -C r3 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r3 pack-objects --rev --stdout --filter=sparse:path=../pattern2 >filter.pack <<-EOF &&
+ git -C r3 pack-objects --revs --stdout --filter=sparse:path=../pattern2 >filter.pack <<-EOF &&
HEAD
EOF
git -C r3 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r4 pack-objects --rev --stdout >all.pack <<-EOF &&
+ git -C r4 pack-objects --revs --stdout >all.pack <<-EOF &&
HEAD
EOF
git -C r4 index-pack ../all.pack &&
sort >expected &&
oid=$(git -C r4 ls-files -s pattern | awk -f print_2.awk) &&
- git -C r4 pack-objects --rev --stdout --filter=sparse:oid=$oid >filter.pack <<-EOF &&
+ git -C r4 pack-objects --revs --stdout --filter=sparse:oid=$oid >filter.pack <<-EOF &&
HEAD
EOF
git -C r4 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r4 pack-objects --rev --stdout --filter=sparse:oid=master:pattern >filter.pack <<-EOF &&
+ git -C r4 pack-objects --revs --stdout --filter=sparse:oid=master:pattern >filter.pack <<-EOF &&
HEAD
EOF
git -C r4 index-pack ../filter.pack &&
'
test_expect_success 'verify pack-objects fails w/ missing objects' '
- test_must_fail git -C r1 pack-objects --rev --stdout >miss.pack <<-EOF
+ test_must_fail git -C r1 pack-objects --revs --stdout >miss.pack <<-EOF
HEAD
EOF
'
test_expect_success 'verify pack-objects fails w/ --missing=error' '
- test_must_fail git -C r1 pack-objects --rev --stdout --missing=error >miss.pack <<-EOF
+ test_must_fail git -C r1 pack-objects --revs --stdout --missing=error >miss.pack <<-EOF
HEAD
EOF
'
test_expect_success 'verify pack-objects w/ --missing=allow-any' '
- git -C r1 pack-objects --rev --stdout --missing=allow-any >miss.pack <<-EOF
+ git -C r1 pack-objects --revs --stdout --missing=allow-any >miss.pack <<-EOF
HEAD
EOF
'
test_expect_success 'write graph' '
cd "$TRASH_DIRECTORY/full" &&
- graph1=$(git commit-graph write) &&
+ git commit-graph write &&
test_path_is_file $objdir/info/commit-graph &&
graph_read_expect "3"
'
GRAPH_BYTE_OCTOPUS=$(($GRAPH_OCTOPUS_DATA_OFFSET + 4))
GRAPH_BYTE_FOOTER=$(($GRAPH_OCTOPUS_DATA_OFFSET + 4 * $NUM_OCTOPUS_EDGES))
+corrupt_graph_setup() {
+ cd "$TRASH_DIRECTORY/full" &&
+ test_when_finished mv commit-graph-backup $objdir/info/commit-graph &&
+ cp $objdir/info/commit-graph commit-graph-backup
+}
+
+corrupt_graph_verify() {
+ grepstr=$1
+ test_must_fail git commit-graph verify 2>test_err &&
+ grep -v "^+" test_err >err &&
+ test_i18ngrep "$grepstr" err &&
+ if test "$2" != "no-copy"
+ then
+ cp $objdir/info/commit-graph commit-graph-pre-write-test
+ fi &&
+ git status --short &&
+ GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD=true git commit-graph write &&
+ git commit-graph verify
+}
+
# usage: corrupt_graph_and_verify <position> <data> <string> [<zero_pos>]
# Manipulates the commit-graph file at the position
# by inserting the data, optionally zeroing the file
pos=$1
data="${2:-\0}"
grepstr=$3
- cd "$TRASH_DIRECTORY/full" &&
+ corrupt_graph_setup &&
orig_size=$(wc -c < $objdir/info/commit-graph) &&
zero_pos=${4:-${orig_size}} &&
- test_when_finished mv commit-graph-backup $objdir/info/commit-graph &&
- cp $objdir/info/commit-graph commit-graph-backup &&
printf "$data" | dd of="$objdir/info/commit-graph" bs=1 seek="$pos" conv=notrunc &&
dd of="$objdir/info/commit-graph" bs=1 seek="$zero_pos" if=/dev/null &&
generate_zero_bytes $(($orig_size - $zero_pos)) >>"$objdir/info/commit-graph" &&
- test_must_fail git commit-graph verify 2>test_err &&
- grep -v "^+" test_err >err &&
- test_i18ngrep "$grepstr" err
+ corrupt_graph_verify "$grepstr"
+
}
+test_expect_success POSIXPERM,SANITY 'detect permission problem' '
+ corrupt_graph_setup &&
+ chmod 000 $objdir/info/commit-graph &&
+ corrupt_graph_verify "Could not open" "no-copy"
+'
+
+test_expect_success 'detect too small' '
+ corrupt_graph_setup &&
+ echo "a small graph" >$objdir/info/commit-graph &&
+ corrupt_graph_verify "too small"
+'
+
test_expect_success 'detect bad signature' '
corrupt_graph_and_verify 0 "\0" \
"graph signature"
git fsck &&
corrupt_graph_and_verify $GRAPH_BYTE_FOOTER "\00" \
"incorrect checksum" &&
+ cp commit-graph-pre-write-test $objdir/info/commit-graph &&
test_must_fail git fsck
'
'
midx_git_two_modes () {
+ git -c core.multiPackIndex=false $1 >expect &&
+ git -c core.multiPackIndex=true $1 >actual &&
if [ "$2" = "sorted" ]
then
- git -c core.multiPackIndex=false $1 | sort >expect &&
- git -c core.multiPackIndex=true $1 | sort >actual
- else
- git -c core.multiPackIndex=false $1 >expect &&
- git -c core.multiPackIndex=true $1 >actual
+ sort <expect >expect.sorted &&
+ mv expect.sorted expect &&
+ sort <actual >actual.sorted &&
+ mv actual.sorted actual
fi &&
test_cmp expect actual
}
midx_git_two_modes "rev-list --objects --all" &&
midx_git_two_modes "log --raw" &&
midx_git_two_modes "count-objects --verbose" &&
- midx_git_two_modes "cat-file --batch-all-objects --buffer --batch-check" &&
- midx_git_two_modes "cat-file --batch-all-objects --buffer --batch-check --unsorted" sorted
+ midx_git_two_modes "cat-file --batch-all-objects --batch-check" &&
+ midx_git_two_modes "cat-file --batch-all-objects --batch-check --unordered" sorted
'
}
compare_results_with_midx "one v2 pack"
+test_expect_success 'corrupt idx not opened' '
+ idx=$(test-tool read-midx $objdir | grep "\.idx\$") &&
+ mv $objdir/pack/$idx backup-$idx &&
+ test_when_finished "mv backup-\$idx \$objdir/pack/\$idx" &&
+
+ # This is the minimum size for a sha-1 based .idx; this lets
+ # us pass perfunctory tests, but anything that actually opens and reads
+ # the idx file will complain.
+ test_copy_bytes 1064 <backup-$idx >$objdir/pack/$idx &&
+
+ git -c core.multiPackIndex=true rev-list --objects --all 2>err &&
+ test_must_be_empty err
+'
+
test_expect_success 'add more objects' '
for i in $(test_seq 6 10)
do
--- /dev/null
+#!/bin/sh
+#
+# Copyright (c) 2018 Jiang Xin
+#
+
+test_description='Test git pack-redundant
+
+In order to test git-pack-redundant, we will create a number of objects and
+packs in the repository `master.git`. The relationship between packs (P1-P8)
+and objects (T, A-R) is shown in the following chart. Objects of a pack are
+marked with the letter x, objects of redundant packs are marked with an
+exclamation point, and a redundant pack itself is marked with an asterisk.
+
+ | T A B C D E F G H I J K L M N O P Q R
+ ----+--------------------------------------
+ P1 | x x x x x x x x
+ P2* | ! ! ! ! ! ! !
+ P3 | x x x x x x
+ P4* | ! ! ! ! !
+ P5 | x x x x
+ P6* | ! ! !
+ P7 | x x
+ P8* | !
+ ----+--------------------------------------
+ ALL | x x x x x x x x x x x x x x x x x x x
+
+Another repository `shared.git` has unique objects (X-Z), while other objects
+(marked with the letter s) are shared through the alternate object database
+(alt-odb) of `master.git`. The relationship between packs and objects is as
+follows:
+
+ | T A B C D E F G H I J K L M N O P Q R X Y Z
+ ----+----------------------------------------------
+ Px1 | s s s x x x
+ Px2 | s s s x x x
+'
+
+. ./test-lib.sh
+
+master_repo=master.git
+shared_repo=shared.git
+
+# Create commits in <repo> and assign each commit's oid to shell variables
+# given in the arguments (A, B, and C). E.g.:
+#
+# create_commits_in <repo> A B C
+#
+# NOTE: Avoid calling this function from a subshell since the variable
+# assignments will disappear when the subshell exits.
+create_commits_in () {
+ repo="$1" &&
+ if ! parent=$(git -C "$repo" rev-parse HEAD^{} 2>/dev/null)
+ then
+ parent=
+ fi &&
+ T=$(git -C "$repo" write-tree) &&
+ shift &&
+ while test $# -gt 0
+ do
+ name=$1 &&
+ test_tick &&
+ if test -z "$parent"
+ then
+ oid=$(echo $name | git -C "$repo" commit-tree $T)
+ else
+ oid=$(echo $name | git -C "$repo" commit-tree -p $parent $T)
+ fi &&
+ eval $name=$oid &&
+ parent=$oid &&
+ shift ||
+ return 1
+ done &&
+ git -C "$repo" update-ref refs/heads/master $oid
+}
+
+# Create a pack in <repo> and assign the pack id to the variable given in the
+# 2nd argument (<name>). Commits in the pack will be read from stdin. E.g.:
+#
+# create_pack_in <repo> <name> <<-EOF
+# ...
+# EOF
+#
+# NOTE: Commits from stdin should be given using a heredoc, not a pipe, and
+# avoid calling this function from a subshell since the variable assignments
+# will disappear when the subshell exits.
+create_pack_in () {
+ repo="$1" &&
+ name="$2" &&
+ pack=$(git -C "$repo/objects/pack" pack-objects -q pack) &&
+ eval $name=$pack &&
+ eval P$pack=$name:$pack
+}
+
+format_packfiles () {
+ sed \
+ -e "s#.*/pack-\(.*\)\.idx#\1#" \
+ -e "s#.*/pack-\(.*\)\.pack#\1#" |
+ sort -u |
+ while read p
+ do
+ if test -z "$(eval echo \${P$p})"
+ then
+ echo $p
+ else
+ eval echo "\${P$p}"
+ fi
+ done |
+ sort
+}
+
+test_expect_success 'setup master repo' '
+ git init --bare "$master_repo" &&
+ create_commits_in "$master_repo" A B C D E F G H I J K L M N O P Q R
+'
+
+#############################################################################
+# Chart of packs and objects for this test case
+#
+# | T A B C D E F G H I J K L M N O P Q R
+# ----+--------------------------------------
+# P1 | x x x x x x x x
+# P2 | x x x x x x x
+# P3 | x x x x x x
+# ----+--------------------------------------
+# ALL | x x x x x x x x x x x x x x x
+#
+#############################################################################
+test_expect_success 'master: no redundant for pack 1, 2, 3' '
+ create_pack_in "$master_repo" P1 <<-EOF &&
+ $T
+ $A
+ $B
+ $C
+ $D
+ $E
+ $F
+ $R
+ EOF
+ create_pack_in "$master_repo" P2 <<-EOF &&
+ $B
+ $C
+ $D
+ $E
+ $G
+ $H
+ $I
+ EOF
+ create_pack_in "$master_repo" P3 <<-EOF &&
+ $F
+ $I
+ $J
+ $K
+ $L
+ $M
+ EOF
+ (
+ cd "$master_repo" &&
+ git pack-redundant --all >out &&
+ test_must_be_empty out
+ )
+'
+
+#############################################################################
+# Chart of packs and objects for this test case
+#
+# | T A B C D E F G H I J K L M N O P Q R
+# ----+--------------------------------------
+# P1 | x x x x x x x x
+# P2 | x x x x x x x
+# P3* | ! ! ! ! ! !
+# P4 | x x x x x
+# P5 | x x x x
+# ----+--------------------------------------
+# ALL | x x x x x x x x x x x x x x x x x x
+#
+#############################################################################
+test_expect_success 'master: one of pack-2/pack-3 is redundant' '
+ create_pack_in "$master_repo" P4 <<-EOF &&
+ $J
+ $K
+ $L
+ $M
+ $P
+ EOF
+ create_pack_in "$master_repo" P5 <<-EOF &&
+ $G
+ $H
+ $N
+ $O
+ EOF
+ (
+ cd "$master_repo" &&
+ cat >expect <<-EOF &&
+ P3:$P3
+ EOF
+ git pack-redundant --all >out &&
+ format_packfiles <out >actual &&
+ test_cmp expect actual
+ )
+'
+
+#############################################################################
+# Chart of packs and objects for this test case
+#
+# | T A B C D E F G H I J K L M N O P Q R
+# ----+--------------------------------------
+# P1 | x x x x x x x x
+# P2* | ! ! ! ! ! ! !
+# P3 | x x x x x x
+# P4* | ! ! ! ! !
+# P5 | x x x x
+# P6* | ! ! !
+# P7 | x x
+# ----+--------------------------------------
+# ALL | x x x x x x x x x x x x x x x x x x x
+#
+#############################################################################
+test_expect_success 'master: pack 2, 4, and 6 are redundant' '
+ create_pack_in "$master_repo" P6 <<-EOF &&
+ $N
+ $O
+ $Q
+ EOF
+ create_pack_in "$master_repo" P7 <<-EOF &&
+ $P
+ $Q
+ EOF
+ (
+ cd "$master_repo" &&
+ cat >expect <<-EOF &&
+ P2:$P2
+ P4:$P4
+ P6:$P6
+ EOF
+ git pack-redundant --all >out &&
+ format_packfiles <out >actual &&
+ test_cmp expect actual
+ )
+'
+
+#############################################################################
+# Chart of packs and objects for this test case
+#
+# | T A B C D E F G H I J K L M N O P Q R
+# ----+--------------------------------------
+# P1 | x x x x x x x x
+# P2* | ! ! ! ! ! ! !
+# P3 | x x x x x x
+# P4* | ! ! ! ! !
+# P5 | x x x x
+# P6* | ! ! !
+# P7 | x x
+# P8* | !
+# ----+--------------------------------------
+# ALL | x x x x x x x x x x x x x x x x x x x
+#
+#############################################################################
+test_expect_success 'master: pack-8 (subset of pack-1) is also redundant' '
+ create_pack_in "$master_repo" P8 <<-EOF &&
+ $A
+ EOF
+ (
+ cd "$master_repo" &&
+ cat >expect <<-EOF &&
+ P2:$P2
+ P4:$P4
+ P6:$P6
+ P8:$P8
+ EOF
+ git pack-redundant --all >out &&
+ format_packfiles <out >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'master: clean loose objects' '
+ (
+ cd "$master_repo" &&
+ git prune-packed &&
+ find objects -type f | sed -e "/objects\/pack\//d" >out &&
+ test_must_be_empty out
+ )
+'
+
+test_expect_success 'master: remove redundant packs and pass fsck' '
+ (
+ cd "$master_repo" &&
+ git pack-redundant --all | xargs rm &&
+ git fsck &&
+ git pack-redundant --all >out &&
+ test_must_be_empty out
+ )
+'
+
+# The following test cases will execute inside `shared.git`, instead of
+# inside `master.git`.
+test_expect_success 'setup shared.git' '
+ git clone --mirror "$master_repo" "$shared_repo" &&
+ (
+ cd "$shared_repo" &&
+ printf "../../$master_repo/objects\n" >objects/info/alternates
+ )
+'
+
+test_expect_success 'shared: all packs are redundant, but no output without --alt-odb' '
+ (
+ cd "$shared_repo" &&
+ git pack-redundant --all >out &&
+ test_must_be_empty out
+ )
+'
+
+#############################################################################
+# Chart of packs and objects for this test case
+#
+# ================ master.git ===============
+# | T A B C D E F G H I J K L M N O P Q R <----------+
+# ----+-------------------------------------- |
+# P1 | x x x x x x x x |
+# P3 | x x x x x x |
+# P5 | x x x x |
+# P7 | x x |
+# ----+-------------------------------------- |
+# ALL | x x x x x x x x x x x x x x x x x x x |
+# |
+# |
+# ================ shared.git =============== |
+# | T A B C D E F G H I J K L M N O P Q R <objects/info/alternates>
+# ----+--------------------------------------
+# P1* | s s s s s s s s
+# P3* | s s s s s s
+# P5* | s s s s
+# P7* | s s
+# ----+--------------------------------------
+# ALL | x x x x x x x x x x x x x x x x x x x
+#
+#############################################################################
+test_expect_success 'shared: show redundant packs in stderr for verbose mode' '
+ (
+ cd "$shared_repo" &&
+ cat >expect <<-EOF &&
+ P1:$P1
+ P3:$P3
+ P5:$P5
+ P7:$P7
+ EOF
+ git pack-redundant --all --verbose >out 2>out.err &&
+ test_must_be_empty out &&
+ grep "pack$" out.err | format_packfiles >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'shared: remove redundant packs, no packs left' '
+ (
+ cd "$shared_repo" &&
+ cat >expect <<-EOF &&
+ fatal: Zero packs found!
+ EOF
+ git pack-redundant --all --alt-odb | xargs rm &&
+ git fsck &&
+ test_must_fail git pack-redundant --all --alt-odb >actual 2>&1 &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'shared: create new objects and packs' '
+ create_commits_in "$shared_repo" X Y Z &&
+ create_pack_in "$shared_repo" Px1 <<-EOF &&
+ $X
+ $Y
+ $Z
+ $A
+ $B
+ $C
+ EOF
+ create_pack_in "$shared_repo" Px2 <<-EOF
+ $X
+ $Y
+ $Z
+ $D
+ $E
+ $F
+ EOF
+'
+
+test_expect_success 'shared: no redundant without --alt-odb' '
+ (
+ cd "$shared_repo" &&
+ git pack-redundant --all >out &&
+ test_must_be_empty out
+ )
+'
+
+#############################################################################
+# Chart of packs and objects for this test case
+#
+# ================ master.git ===============
+# | T A B C D E F G H I J K L M N O P Q R <----------------+
+# ----+-------------------------------------- |
+# P1 | x x x x x x x x |
+# P3 | x x x x x x |
+# P5 | x x x x |
+# P7 | x x |
+# ----+-------------------------------------- |
+# ALL | x x x x x x x x x x x x x x x x x x x |
+# |
+# |
+# ================ shared.git ======================= |
+# | T A B C D E F G H I J K L M N O P Q R X Y Z <objects/info/alternates>
+# ----+----------------------------------------------
+# Px1 | s s s x x x
+# Px2*| s s s ! ! !
+# ----+----------------------------------------------
+# ALL | s s s s s s s s s s s s s s s s s s s x x x
+#
+#############################################################################
+test_expect_success 'shared: one pack is redundant with --alt-odb' '
+ (
+ cd "$shared_repo" &&
+ git pack-redundant --all --alt-odb >out &&
+ format_packfiles <out >actual &&
+ test_line_count = 1 actual
+ )
+'
+
+#############################################################################
+# Chart of packs and objects for this test case
+#
+# ================ master.git ===============
+# | T A B C D E F G H I J K L M N O P Q R <----------------+
+# ----+-------------------------------------- |
+# P1 | x x x x x x x x |
+# P3 | x x x x x x |
+# P5 | x x x x |
+# P7 | x x |
+# ----+-------------------------------------- |
+# ALL | x x x x x x x x x x x x x x x x x x x |
+# |
+# |
+# ================ shared.git ======================= |
+# | T A B C D E F G H I J K L M N O P Q R X Y Z <objects/info/alternates>
+# ----+----------------------------------------------
+# Px1*| s s s i i i
+# Px2*| s s s i i i
+# ----+----------------------------------------------
+# ALL | s s s s s s s s s s s s s s s s s s s i i i
+# (ignored objects, marked with i)
+#
+#############################################################################
+test_expect_success 'shared: ignore unique objects and both packs are redundant' '
+ (
+ cd "$shared_repo" &&
+ cat >expect <<-EOF &&
+ Px1:$Px1
+ Px2:$Px2
+ EOF
+ git pack-redundant --all --alt-odb >out <<-EOF &&
+ $X
+ $Y
+ $Z
+ EOF
+ format_packfiles <out >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_done
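
Taken together, these tests model the cleanup workflow that `git pack-redundant` supports; as a recap (run inside a bare repository, adding `--alt-odb` when shared packs live in an alternate object store):

    # Remove packs whose objects are all available elsewhere, drop loose
    # objects that are already packed, then verify nothing was lost.
    git pack-redundant --all | xargs rm
    git prune-packed
    git fsck
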
$shared .have
EOF
- GIT_TRACE_PACKET=$(pwd)/trace \
+ GIT_TRACE_PACKET=$(pwd)/trace GIT_TEST_PROTOCOL_VERSION= \
git push \
--receive-pack="unset GIT_TRACE_PACKET; git-receive-pack" \
fork HEAD:foo &&
test_expect_success 'git rebase with implicit use of interactive backend' '
git reset --hard D &&
clear_hook_input &&
- test_must_fail git rebase --keep --onto A B &&
+ test_must_fail git rebase --keep-empty --onto A B &&
echo C > foo &&
git add foo &&
git rebase --continue &&
test_expect_success 'git rebase --skip with implicit use of interactive backend' '
git reset --hard D &&
clear_hook_input &&
- test_must_fail git rebase --keep --onto A B &&
+ test_must_fail git rebase --keep-empty --onto A B &&
test_must_fail git rebase --skip &&
echo D > foo &&
git add foo &&
test_commit -C server 6 &&
git init client &&
- test_must_fail git -C client fetch-pack ../server \
+ # Some protocol versions (e.g. 2) support fetching
+ # unadvertised objects, so restrict this test to v0.
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= git -C client fetch-pack ../server \
$(git -C server rev-parse refs/heads/master^) 2>err &&
test_i18ngrep "Server does not allow request for unadvertised object" err
'
fetch_filter_blob_limit_zero "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server"
'
-stop_httpd
-
-
test_done
test -s "$1" &&
perl -alne '
next unless $F[1] eq "upload-pack<";
- last if $F[2] eq "0000";
+ next unless $F[2] eq "want";
print $F[2], " ", $F[3];
' "$1"
}
check_negotiation_tip
'
-stop_httpd
-
test_done
$(git rev-parse refs/tags/mark1.10) refs/tags/mark1.10
$(git rev-parse refs/tags/mark1.2) refs/tags/mark1.2
EOF
- git ls-remote --symref >actual &&
+ # Protocol v2 supports sending symrefs for refs other than HEAD, so use
+ # protocol v0 here.
+ GIT_TEST_PROTOCOL_VERSION= git ls-remote --symref >actual &&
test_cmp expect actual
'
ref: refs/heads/master HEAD
1bd44cb9d13204b0fe1958db0082f5028a16eb3a HEAD
EOF
- git ls-remote --symref . HEAD >actual &&
+ # Protocol v2 supports sending symrefs for refs other than HEAD, so use
+ # protocol v0 here.
+ GIT_TEST_PROTOCOL_VERSION= git ls-remote --symref . HEAD >actual &&
test_cmp expect actual
'
1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/heads/foo
1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/heads/master
EOF
- git ls-remote --symref --heads . >actual &&
+ # Protocol v2 supports sending symrefs for refs other than HEAD, so use
+ # protocol v0 here.
+ GIT_TEST_PROTOCOL_VERSION= git ls-remote --symref --heads . >actual &&
test_cmp expect actual
'
1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/heads/foo
1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/heads/master
EOF
- git ls-remote --symref --heads . >actual &&
+ # Protocol v2 supports sending symrefs for refs other than HEAD, so use
+ # protocol v0 here.
+ GIT_TEST_PROTOCOL_VERSION= git ls-remote --symref --heads . >actual &&
test_cmp expect actual &&
- git ls-remote --symref . "refs/heads/*" >actual &&
+ GIT_TEST_PROTOCOL_VERSION= git ls-remote --symref . "refs/heads/*" >actual &&
test_cmp expect actual
'
test_description='Merge logic in fetch'
+# NEEDSWORK: If the overspecification of the expected result is reduced, we
+# might be able to run this test in all protocol versions.
+GIT_TEST_PROTOCOL_VERSION=
+
. ./test-lib.sh
LF='
git prune &&
test_must_fail git cat-file -t $the_commit &&
+ # Some protocol versions (e.g. 2) support fetching
+ # unadvertised objects, so restrict this test to v0.
+
# fetching the hidden object should fail by default
- test_must_fail git fetch -v ../testrepo $the_commit:refs/heads/copy 2>err &&
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
+ git fetch -v ../testrepo $the_commit:refs/heads/copy 2>err &&
test_i18ngrep "Server does not allow request for unadvertised object" err &&
test_must_fail git rev-parse --verify refs/heads/copy &&
mk_empty shallow &&
(
cd shallow &&
- test_must_fail git fetch --depth=1 ../testrepo/.git $SHA1 &&
+ # Some protocol versions (e.g. 2) support fetching
+ # unadvertised objects, so restrict this test to v0.
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
+ git fetch --depth=1 ../testrepo/.git $SHA1 &&
git --git-dir=../testrepo/.git config uploadpack.allowreachablesha1inwant true &&
git fetch --depth=1 ../testrepo/.git $SHA1 &&
git cat-file commit $SHA1
mk_empty shallow &&
(
cd shallow &&
- test_must_fail ok=sigpipe git fetch ../testrepo/.git $SHA1_3 &&
- test_must_fail ok=sigpipe git fetch ../testrepo/.git $SHA1_1 &&
+ # Some protocol versions (e.g. 2) support fetching
+ # unadvertised objects, so restrict this test to v0.
+ test_must_fail ok=sigpipe env GIT_TEST_PROTOCOL_VERSION= \
+ git fetch ../testrepo/.git $SHA1_3 &&
+ test_must_fail ok=sigpipe env GIT_TEST_PROTOCOL_VERSION= \
+ git fetch ../testrepo/.git $SHA1_1 &&
git --git-dir=../testrepo/.git config uploadpack.allowreachablesha1inwant true &&
git fetch ../testrepo/.git $SHA1_1 &&
git cat-file commit $SHA1_1 &&
test_must_fail git cat-file commit $SHA1_2 &&
git fetch ../testrepo/.git $SHA1_2 &&
git cat-file commit $SHA1_2 &&
- test_must_fail ok=sigpipe git fetch ../testrepo/.git $SHA1_3
+ test_must_fail ok=sigpipe env GIT_TEST_PROTOCOL_VERSION= \
+ git fetch ../testrepo/.git $SHA1_3
)
'
done
test_cmp expect actual
'
-test_expect_success 'push --follow-tag only pushes relevant tags' '
+test_expect_success 'push --follow-tags only pushes relevant tags' '
mk_test testrepo heads/master &&
rm -fr src dst &&
git init src &&
git tag -m "future" future &&
git checkout master &&
git for-each-ref refs/heads/master refs/tags/tag >../expect &&
- git push --follow-tag ../dst master
+ git push --follow-tags ../dst master
) &&
(
cd dst &&
) &&
git add b &&
git commit -m "added submodule" &&
- git push --recurse-submodule=check origin master
+ git push --recurse-submodules=check origin master
)
'
git -C client fsck
'
-stop_httpd
-
test_done
cd clone &&
git checkout --orphan newnew &&
test_commit new-too &&
- GIT_TRACE_PACKET="$TRASH_DIRECTORY/trace" git fetch --depth=2 &&
+ # NEEDSWORK: If the overspecification of the expected result is reduced, we
+ # might be able to run this test in all protocol versions.
+ GIT_TRACE_PACKET="$TRASH_DIRECTORY/trace" GIT_TEST_PROTOCOL_VERSION= \
+ git fetch --depth=2 &&
grep "fetch-pack< ACK .* ready" ../trace &&
! grep "fetch-pack> done" ../trace
)
)
'
-stop_httpd
test_done
test_cmp expect actual
'
-stop_httpd
-
test_done
cd "$ROOT_PATH" &&
git clone $HTTPD_URL/smart/test_repo.git/ test_repo_clone &&
- check_access_log exp
+ # NEEDSWORK: If the overspecification of the expected result is reduced, we
+ # might be able to run this test in all protocol versions.
+ if test -z "$GIT_TEST_PROTOCOL_VERSION"
+ then
+ check_access_log exp
+ fi
'
test_expect_success 'clone remote repository' '
POST /smart/test_repo.git/git-receive-pack HTTP/1.1 200
EOF
test_expect_success 'used receive-pack service' '
- check_access_log exp
+ # NEEDSWORK: If the overspecification of the expected result is reduced, we
+ # might be able to run this test in all protocol versions.
+ if test -z "$GIT_TEST_PROTOCOL_VERSION"
+ then
+ check_access_log exp
+ fi
'
test_http_push_nonff "$HTTPD_DOCUMENT_ROOT_PATH"/test_repo.git \
test_i18ngrep ! "^hint: " decoded
'
-stop_httpd
test_done
)
'
-stop_httpd
test_done
test_cmp expect "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git/hooks/pre-receive.push_options
'
-stop_httpd
-
test_done
test_i18ngrep "unable to access.*/redir-to/502" stderr
'
-stop_httpd
+test_expect_success 'fetching via http alternates works' '
+ parent=$HTTPD_DOCUMENT_ROOT_PATH/alt-parent.git &&
+ git init --bare "$parent" &&
+ git -C "$parent" --work-tree=. commit --allow-empty -m foo &&
+ git -C "$parent" update-server-info &&
+ commit=$(git -C "$parent" rev-parse HEAD) &&
+
+ child=$HTTPD_DOCUMENT_ROOT_PATH/alt-child.git &&
+ git init --bare "$child" &&
+ echo "../../alt-parent.git/objects" >"$child/objects/info/alternates" &&
+ git -C "$child" update-ref HEAD $commit &&
+ git -C "$child" update-server-info &&
+
+ git -c http.followredirects=true clone "$HTTPD_URL/dumb/alt-child.git"
+'
+
test_done
< Cache-Control: no-cache, max-age=0, must-revalidate
< Content-Type: application/x-git-upload-pack-result
EOF
- GIT_TRACE_CURL=true git clone --quiet $HTTPD_URL/smart/repo.git clone 2>err &&
+ GIT_TRACE_CURL=true GIT_TEST_PROTOCOL_VERSION= \
+ git clone --quiet $HTTPD_URL/smart/repo.git clone 2>err &&
test_cmp file clone/file &&
tr '\''\015'\'' Q <err |
sed -e "
/^< Content-Length: /d
/^< Transfer-Encoding: /d
" >actual &&
- sed -e "s/^> Accept-Encoding: .*/> Accept-Encoding: ENCODINGS/" \
- actual >actual.smudged &&
- test_cmp exp actual.smudged &&
- grep "Accept-Encoding:.*gzip" actual >actual.gzip &&
- test_line_count = 2 actual.gzip
+ # NEEDSWORK: If the overspecification of the expected result is reduced, we
+ # might be able to run this test in all protocol versions.
+ if test -z "$GIT_TEST_PROTOCOL_VERSION"
+ then
+ sed -e "s/^> Accept-Encoding: .*/> Accept-Encoding: ENCODINGS/" \
+ actual >actual.smudged &&
+ test_cmp exp actual.smudged &&
+
+ grep "Accept-Encoding:.*gzip" actual >actual.gzip &&
+ test_line_count = 2 actual.gzip
+ fi
'
test_expect_success 'fetch changes via http' '
GET /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
POST /smart/repo.git/git-upload-pack HTTP/1.1 200
EOF
- check_access_log exp
+
+ # NEEDSWORK: If the overspecification of the expected result is reduced, we
+ # might be able to run this test in all protocol versions.
+ if test -z "$GIT_TEST_PROTOCOL_VERSION"
+ then
+ check_access_log exp
+ fi
'
test_expect_success 'follow redirects (301)' '
test_expect_success 'no-op half-auth fetch does not require a password' '
set_askpass wrong &&
- git --git-dir=half-auth fetch &&
+
+ # NEEDSWORK: When using HTTP(S), protocol v0 supports a "half-auth"
+ # configuration with authentication required only when downloading
+ # objects and not refs, by having the HTTP server only require
+ # authentication for the "git-upload-pack" path and not "info/refs".
+ # This is not possible with protocol v2, since both objects and refs
+ # are obtained from the "git-upload-pack" path. A solution to this is
+ # to teach the server and client to be able to inline ls-refs requests
+ # as an Extra Parameter (see pack-protocol.txt), so that "info/refs"
+ # can serve refs, just like it does in protocol v0.
+ GIT_TEST_PROTOCOL_VERSION=0 git --git-dir=half-auth fetch &&
expect_askpass none
'
git config http.cookiefile cookies.txt &&
git config http.savecookies true &&
git ls-remote $HTTPD_URL/smart_cookies/repo.git master &&
- tail -3 cookies.txt | sort >cookies_tail.txt &&
- test_cmp expect_cookies.txt cookies_tail.txt
+
+ # NEEDSWORK: If the overspecification of the expected result is reduced, we
+ # might be able to run this test in all protocol versions.
+ if test -z "$GIT_TEST_PROTOCOL_VERSION"
+ then
+ tail -3 cookies.txt | sort >cookies_tail.txt &&
+ test_cmp expect_cookies.txt cookies_tail.txt
+ fi
'
test_expect_success 'transfer.hiderefs works over smart-http' '
git init --bare test_reachable.git &&
git -C test_reachable.git remote add origin "$HTTPD_URL/smart/repo.git" &&
- test_must_fail git -C test_reachable.git fetch origin "$(git rev-parse HEAD)"
+ # Some protocol versions (e.g. 2) support fetching
+ # unadvertised objects, so restrict this test to v0.
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
+ git -C test_reachable.git fetch origin "$(git rev-parse HEAD)"
'
test_expect_success 'test allowanysha1inwant with unreachable' '
git init --bare test_reachable.git &&
git -C test_reachable.git remote add origin "$HTTPD_URL/smart/repo.git" &&
- test_must_fail git -C test_reachable.git fetch origin "$(git rev-parse HEAD)" &&
+ # Some protocol versions (e.g. 2) support fetching
+ # unadvertised objects, so restrict this test to v0.
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
+ git -C test_reachable.git fetch origin "$(git rev-parse HEAD)" &&
git -C "$server" config uploadpack.allowanysha1inwant 1 &&
git -C test_reachable.git fetch origin "$(git rev-parse HEAD)"
grep "server-side error" actual
'
-stop_httpd
test_done
# not need to send any ancestors of "c3", but we still need to send "c3"
# itself.
test_config -C client fetch.negotiationalgorithm skipping &&
- trace_fetch client origin to_fetch &&
+
+ # The ref advertisement itself is filtered when protocol v2 is used, so
+ # use v0.
+ GIT_TEST_PROTOCOL_VERSION= trace_fetch client origin to_fetch &&
have_sent c5 c4^ c2side &&
have_not_sent c4 c4^^ c4^^^
'
check_access_log exp
'
-stop_httpd
test_done
test_expect_success 'fetch notices corrupt idx' '
cp -R "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_pack.git "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
(cd "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
+ rm -f objects/pack/multi-pack-index &&
p=$(ls objects/pack/pack-*.idx) &&
chmod u+w $p &&
printf %0256d 0 | dd of=$p bs=256 count=1 seek=1 conv=notrunc
test_cmp expect actual
'
-stop_git_daemon
test_done
grep "< HTTP/1.1 500 Intentional Breakage" curl_log
'
-stop_httpd
-
test_done
}
test_expect_success 'clone myhost:src uses ssh' '
- git clone myhost:src ssh-clone &&
+ GIT_TEST_PROTOCOL_VERSION=0 git clone myhost:src ssh-clone &&
expect_ssh myhost src
'
'
test_expect_success 'bracketed hostnames are still ssh' '
- git clone "[myhost:123]:src" ssh-bracket-clone &&
+ GIT_TEST_PROTOCOL_VERSION=0 git clone "[myhost:123]:src" ssh-bracket-clone &&
expect_ssh "-p 123" myhost src
'
test_expect_success 'OpenSSH variant passes -4' '
- git clone -4 "[myhost:123]:src" ssh-ipv4-clone &&
+ GIT_TEST_PROTOCOL_VERSION=0 git clone -4 "[myhost:123]:src" ssh-ipv4-clone &&
expect_ssh "-4 -p 123" myhost src
'
test_when_finished "rm -f \"\$TRASH_DIRECTORY/uplink\"" &&
GIT_SSH="$TRASH_DIRECTORY/uplink" &&
test_when_finished "GIT_SSH=\"\$TRASH_DIRECTORY/ssh\$X\"" &&
- git clone "[myhost:123]:src" ssh-bracket-clone-sshlike-uplink &&
+ GIT_TEST_PROTOCOL_VERSION=0 git clone "[myhost:123]:src" ssh-bracket-clone-sshlike-uplink &&
expect_ssh "-p 123" myhost src
'
test_expect_success 'GIT_SSH_VARIANT overrides plink detection' '
copy_ssh_wrapper_as "$TRASH_DIRECTORY/plink" &&
- GIT_SSH_VARIANT=ssh \
- git clone "[myhost:123]:src" ssh-bracket-clone-variant-1 &&
+ GIT_TEST_PROTOCOL_VERSION=0 GIT_SSH_VARIANT=ssh \
+ git clone "[myhost:123]:src" ssh-bracket-clone-variant-1 &&
expect_ssh "-p 123" myhost src
'
test_expect_success 'ssh.variant overrides plink detection' '
copy_ssh_wrapper_as "$TRASH_DIRECTORY/plink" &&
- git -c ssh.variant=ssh \
+ GIT_TEST_PROTOCOL_VERSION=0 git -c ssh.variant=ssh \
clone "[myhost:123]:src" ssh-bracket-clone-variant-2 &&
expect_ssh "-p 123" myhost src
'
# $3 path
test_clone_url () {
counter=$(($counter + 1))
- test_might_fail git clone "$1" tmp$counter &&
+ test_might_fail env GIT_TEST_PROTOCOL_VERSION=0 git clone "$1" tmp$counter &&
shift &&
expect_ssh "$@"
}
partial_clone "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server"
'
-stop_httpd
-
test_done
! test -e "$HTTPD_ROOT_PATH/one-time-sed"
'
-stop_httpd
-
test_done
TEST_NO_CREATE_REPO=1
+# This is a protocol-specific test.
+GIT_TEST_PROTOCOL_VERSION=
+
. ./test-lib.sh
# Test protocol v1 with 'git://' transport
grep "git< version 1" log
'
-stop_httpd
-
test_done
# Client requested to use protocol v2
grep "Git-Protocol: version=2" log &&
# Server responded using protocol v2
- grep "git< version 2" log
+ grep "git< version 2" log &&
+ # Verify that the chunked encoding sending codepath is NOT exercised
+ ! grep "Send header: Transfer-Encoding: chunked" log
+'
+
+test_expect_success 'clone big repository with http:// using protocol v2' '
+ test_when_finished "rm -f log" &&
+
+ git init "$HTTPD_DOCUMENT_ROOT_PATH/big" &&
+ # Ensure that the list of wants is greater than http.postbuffer below
+ for i in $(test_seq 1 1500)
+ do
+ # do not use here-doc, because it requires a process
+ # per loop iteration
+ echo "commit refs/heads/too-many-refs-$i" &&
+ echo "committer git <git@example.com> $i +0000" &&
+ echo "data 0" &&
+ echo "M 644 inline bla.txt" &&
+ echo "data 4" &&
+ echo "bla"
+ done | git -C "$HTTPD_DOCUMENT_ROOT_PATH/big" fast-import &&
+
+ GIT_TRACE_PACKET="$(pwd)/log" GIT_TRACE_CURL="$(pwd)/log" git \
+ -c protocol.version=2 -c http.postbuffer=65536 \
+ clone "$HTTPD_URL/smart/big" big_child &&
+
+ # Client requested to use protocol v2
+ grep "Git-Protocol: version=2" log &&
+ # Server responded using protocol v2
+ grep "git< version 2" log &&
+ # Verify that the chunked encoding sending codepath is exercised
+ grep "Send header: Transfer-Encoding: chunked" log
'
test_expect_success 'fetch with http:// using protocol v2' '
test_i18ngrep "expected no other sections to be sent after no .ready." err
'
-stop_httpd
-
test_done
test_i18ngrep "fatal: remote error: unknown ref refs/heads/raster" err
'
-stop_httpd
-
REPO="$(pwd)/repo"
LOCAL_PRISTINE="$(pwd)/local_pristine"
clone "$HTTPD_URL/smart-redir-perm/repo.git" redir.git
'
-stop_httpd
test_done
check_same BROKEN_HASH6 BISECT_HEAD &&
git bisect bad BISECT_HEAD &&
check_same BROKEN_HASH5 BISECT_HEAD &&
- git bisect good BISECT_HEAD &&
+ test_must_fail git bisect good BISECT_HEAD &&
check_same BROKEN_HASH6 bisect/bad &&
git bisect reset
'
check_same BROKEN_HASH6 BISECT_HEAD &&
git bisect good BISECT_HEAD &&
check_same BROKEN_HASH8 BISECT_HEAD &&
- git bisect good BISECT_HEAD &&
+ test_must_fail git bisect good BISECT_HEAD &&
check_same BROKEN_HASH9 bisect/bad &&
git bisect reset
'
git bisect reset &&
git checkout broken &&
git bisect start broken master --no-checkout &&
- git bisect run \"\$SHELL_PATH\" -c '
+ test_must_fail git bisect run \"\$SHELL_PATH\" -c '
GOOD=\$(git for-each-ref \"--format=%(objectname)\" refs/bisect/good-*) &&
git rev-list --objects BISECT_HEAD --not \$GOOD >tmp.\$\$ &&
git pack-objects --stdout >/dev/null < tmp.\$\$
test_must_be_empty stderr
'
+test_expect_success 'gc.reflogExpire{Unreachable,}=never skips "expire" via "gc"' '
+ test_config gc.reflogExpire never &&
+ test_config gc.reflogExpireUnreachable never &&
+
+ GIT_TRACE=$(pwd)/trace.out git gc &&
+
+ # Check that git-pack-refs is run as a sanity check (done via
+	# gc_before_repack()) but that git reflog expire is not.
+ grep -E "^trace: (built-in|exec|run_command): git pack-refs --" trace.out &&
+ ! grep -E "^trace: (built-in|exec|run_command): git reflog expire --" trace.out
+'
+
+test_expect_success 'one of gc.reflogExpire{Unreachable,}=never does not skip "expire" via "gc"' '
+ >trace.out &&
+ test_config gc.reflogExpire never &&
+ GIT_TRACE=$(pwd)/trace.out git gc &&
+ grep -E "^trace: (built-in|exec|run_command): git reflog expire --" trace.out
+'
+
run_and_wait_for_auto_gc () {
# We read stdout from gc for the side effect of waiting until the
# background gc process exits, closing its fd 9. Furthermore, the
--- /dev/null
+#!/bin/sh
+
+test_description='post index change hook'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ mkdir -p dir1 &&
+ touch dir1/file1.txt &&
+ echo testing >dir1/file2.txt &&
+ git add . &&
+ git commit -m "initial"
+'
+
+test_expect_success 'test status, add, commit, others trigger hook without flags set' '
+ mkdir -p .git/hooks &&
+ write_script .git/hooks/post-index-change <<-\EOF &&
+ if test "$1" -eq 1; then
+ echo "Invalid combination of flags passed to hook; updated_workdir is set." >testfailure
+ exit 1
+ fi
+ if test "$2" -eq 1; then
+ echo "Invalid combination of flags passed to hook; updated_skipworktree is set." >testfailure
+ exit 1
+ fi
+ if test -f ".git/index.lock"; then
+ echo ".git/index.lock exists" >testfailure
+ exit 3
+ fi
+ if ! test -f ".git/index"; then
+ echo ".git/index does not exist" >testfailure
+ exit 3
+ fi
+ echo "success" >testsuccess
+ EOF
+ mkdir -p dir2 &&
+ touch dir2/file1.txt &&
+ touch dir2/file2.txt &&
+ : force index to be dirty &&
+ test-tool chmtime +60 dir1/file1.txt &&
+ git status &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ git add . &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ git commit -m "second" &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ git checkout -- dir1/file1.txt &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ git update-index &&
+ test_path_is_missing testsuccess &&
+ test_path_is_missing testfailure &&
+ git reset --soft &&
+ test_path_is_missing testsuccess &&
+ test_path_is_missing testfailure
+'
+
+test_expect_success 'test checkout and reset trigger the hook' '
+ write_script .git/hooks/post-index-change <<-\EOF &&
+ if test "$1" -eq 1 && test "$2" -eq 1; then
+ echo "Invalid combination of flags passed to hook; updated_workdir and updated_skipworktree are both set." >testfailure
+ exit 1
+ fi
+ if test "$1" -eq 0 && test "$2" -eq 0; then
+		echo "Invalid combination of flags passed to hook; neither updated_workdir nor updated_skipworktree is set." >testfailure
+ exit 2
+ fi
+ if test "$1" -eq 1; then
+ if test -f ".git/index.lock"; then
+ echo "updated_workdir set but .git/index.lock exists" >testfailure
+ exit 3
+ fi
+ if ! test -f ".git/index"; then
+ echo "updated_workdir set but .git/index does not exist" >testfailure
+ exit 3
+ fi
+ else
+		echo "updated_workdir should be set for checkout" >testfailure
+ exit 4
+ fi
+ echo "success" >testsuccess
+ EOF
+ : force index to be dirty &&
+ test-tool chmtime +60 dir1/file1.txt &&
+ git checkout master &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ test-tool chmtime +60 dir1/file1.txt &&
+ git checkout HEAD &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ test-tool chmtime +60 dir1/file1.txt &&
+ git reset --hard &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ git checkout -B test &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure
+'
+
+test_expect_success 'test reset --mixed and update-index triggers the hook' '
+ write_script .git/hooks/post-index-change <<-\EOF &&
+ if test "$1" -eq 1 && test "$2" -eq 1; then
+ echo "Invalid combination of flags passed to hook; updated_workdir and updated_skipworktree are both set." >testfailure
+ exit 1
+ fi
+ if test "$1" -eq 0 && test "$2" -eq 0; then
+		echo "Invalid combination of flags passed to hook; neither updated_workdir nor updated_skipworktree is set." >testfailure
+ exit 2
+ fi
+ if test "$2" -eq 1; then
+ if test -f ".git/index.lock"; then
+ echo "updated_skipworktree set but .git/index.lock exists" >testfailure
+ exit 3
+ fi
+ if ! test -f ".git/index"; then
+ echo "updated_skipworktree set but .git/index does not exist" >testfailure
+ exit 3
+ fi
+ else
+ echo "updated_skipworktree should be set for reset --mixed and update-index" >testfailure
+ exit 4
+ fi
+ echo "success" >testsuccess
+ EOF
+ : force index to be dirty &&
+ test-tool chmtime +60 dir1/file1.txt &&
+ git reset --mixed --quiet HEAD~1 &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ git hash-object -w --stdin <dir1/file2.txt >expect &&
+ git update-index --cacheinfo 100644 "$(cat expect)" dir1/file1.txt &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ git update-index --skip-worktree dir1/file2.txt &&
+ git update-index --remove dir1/file2.txt &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure
+'
+
+test_done
test_must_fail git checkout simple 2>errs &&
test_i18ngrep overwritten errs &&
- git checkout --merge simple 2>errs &&
- test_i18ngrep ! overwritten errs &&
- git ls-files -u &&
- test_must_fail git cat-file -t :0:two &&
- test "$(git cat-file -t :1:two)" = blob &&
- test "$(git cat-file -t :2:two)" = blob &&
- test_must_fail git cat-file -t :3:two
+ test_must_fail git read-tree --quiet -m -u HEAD simple 2>errs &&
+ test_must_be_empty errs
'
test_expect_success 'checkout to detach HEAD (with advice declined)' '
cd super3 &&
sed -e "s#url = ../#url = file://$pwd/#" <.gitmodules >.gitmodules.tmp &&
mv -f .gitmodules.tmp .gitmodules &&
- test_must_fail git submodule update --init --depth=1 2>actual &&
+ # Some protocol versions (e.g. 2) support fetching
+ # unadvertised objects, so restrict this test to v0.
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
+ git submodule update --init --depth=1 2>actual &&
test_i18ngrep "Direct fetching of that commit failed." actual &&
git -C ../submodule config uploadpack.allowReachableSHA1InWant true &&
git submodule update --init --depth=1 >actual &&
)
'
+test_expect_success 'unsetting submodules config from the working tree with "submodule--helper config --unset"' '
+ (cd super &&
+ git submodule--helper config --unset submodule.submodule.url &&
+ git submodule--helper config submodule.submodule.url >actual &&
+ test_must_be_empty actual
+ )
+'
+
+
test_expect_success 'writing submodules config with "submodule--helper config"' '
(cd super &&
echo "new_url" >expect &&
--- /dev/null
+#!/bin/sh
+#
+# Copyright (c) 2019 Denton Liu
+#
+
+test_description='Test submodule set-branch subcommand
+
+This test verifies that the set-branch subcommand of git-submodule is working
+as expected.
+'
+
+TEST_NO_CREATE_REPO=1
+. ./test-lib.sh
+
+test_expect_success 'submodule config cache setup' '
+ mkdir submodule &&
+ (cd submodule &&
+ git init &&
+ echo a >a &&
+ git add . &&
+ git commit -ma &&
+ git checkout -b topic &&
+ echo b >a &&
+ git add . &&
+ git commit -mb
+ ) &&
+ mkdir super &&
+ (cd super &&
+ git init &&
+ git submodule add ../submodule &&
+ git commit -m "add submodule"
+ )
+'
+
+test_expect_success 'ensure submodule branch is unset' '
+ (cd super &&
+ test_must_fail grep branch .gitmodules
+ )
+'
+
+test_expect_success 'test submodule set-branch --branch' '
+ (cd super &&
+ git submodule set-branch --branch topic submodule &&
+ grep "branch = topic" .gitmodules &&
+ git submodule update --remote &&
+ cat <<-\EOF >expect &&
+ b
+ EOF
+ git -C submodule show -s --pretty=%s >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'test submodule set-branch --default' '
+ (cd super &&
+ git submodule set-branch --default submodule &&
+ test_must_fail grep branch .gitmodules &&
+ git submodule update --remote &&
+ cat <<-\EOF >expect &&
+ a
+ EOF
+ git -C submodule show -s --pretty=%s >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'test submodule set-branch -b' '
+ (cd super &&
+ git submodule set-branch -b topic submodule &&
+ grep "branch = topic" .gitmodules &&
+ git submodule update --remote &&
+ cat <<-\EOF >expect &&
+ b
+ EOF
+ git -C submodule show -s --pretty=%s >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'test submodule set-branch -d' '
+ (cd super &&
+ git submodule set-branch -d submodule &&
+ test_must_fail grep branch .gitmodules &&
+ git submodule update --remote &&
+ cat <<-\EOF >expect &&
+ a
+ EOF
+ git -C submodule show -s --pretty=%s >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_done
test_must_fail git rebase -p master
'
+test_expect_success 'author.name overrides user.name' '
+ test_config user.name user &&
+ test_config user.email user@example.com &&
+ test_config author.name author &&
+ test_commit author-name-override-user &&
+ echo author user@example.com > expected-author &&
+ echo user user@example.com > expected-committer &&
+ git log --format="%an %ae" -1 > actual-author &&
+ git log --format="%cn %ce" -1 > actual-committer &&
+ test_cmp expected-author actual-author &&
+ test_cmp expected-committer actual-committer
+'
+
+test_expect_success 'author.email overrides user.email' '
+ test_config user.name user &&
+ test_config user.email user@example.com &&
+ test_config author.email author@example.com &&
+ test_commit author-email-override-user &&
+ echo user author@example.com > expected-author &&
+ echo user user@example.com > expected-committer &&
+ git log --format="%an %ae" -1 > actual-author &&
+ git log --format="%cn %ce" -1 > actual-committer &&
+ test_cmp expected-author actual-author &&
+ test_cmp expected-committer actual-committer
+'
+
+test_expect_success 'committer.name overrides user.name' '
+ test_config user.name user &&
+ test_config user.email user@example.com &&
+ test_config committer.name committer &&
+ test_commit committer-name-override-user &&
+ echo user user@example.com > expected-author &&
+ echo committer user@example.com > expected-committer &&
+ git log --format="%an %ae" -1 > actual-author &&
+ git log --format="%cn %ce" -1 > actual-committer &&
+ test_cmp expected-author actual-author &&
+ test_cmp expected-committer actual-committer
+'
+
+test_expect_success 'committer.email overrides user.email' '
+ test_config user.name user &&
+ test_config user.email user@example.com &&
+ test_config committer.email committer@example.com &&
+ test_commit committer-email-override-user &&
+ echo user user@example.com > expected-author &&
+ echo user committer@example.com > expected-committer &&
+ git log --format="%an %ae" -1 > actual-author &&
+ git log --format="%cn %ce" -1 > actual-committer &&
+ test_cmp expected-author actual-author &&
+ test_cmp expected-committer actual-committer
+'
+
+test_expect_success 'author and committer environment variables override config settings' '
+ test_config user.name user &&
+ test_config user.email user@example.com &&
+ test_config author.name author &&
+ test_config author.email author@example.com &&
+ test_config committer.name committer &&
+ test_config committer.email committer@example.com &&
+ GIT_AUTHOR_NAME=env_author && export GIT_AUTHOR_NAME &&
+ GIT_AUTHOR_EMAIL=env_author@example.com && export GIT_AUTHOR_EMAIL &&
+ GIT_COMMITTER_NAME=env_commit && export GIT_COMMITTER_NAME &&
+ GIT_COMMITTER_EMAIL=env_commit@example.com && export GIT_COMMITTER_EMAIL &&
+ test_commit env-override-conf &&
+ echo env_author env_author@example.com > expected-author &&
+ echo env_commit env_commit@example.com > expected-committer &&
+ git log --format="%an %ae" -1 > actual-author &&
+ git log --format="%cn %ce" -1 > actual-committer &&
+ sane_unset GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL &&
+ sane_unset GIT_COMMITTER_NAME GIT_COMMITTER_EMAIL &&
+ test_cmp expected-author actual-author &&
+ test_cmp expected-committer actual-committer
+'
+
test_done
test_i18ngrep "deleted:" actual &&
test_i18ngrep "new file:" actual &&
- git status --find-rename=100% >actual &&
+ git status --find-renames=100% >actual &&
test_i18ngrep "deleted:" actual &&
test_i18ngrep "new file:" actual
'
git status -M=01% >actual &&
test_i18ngrep "renamed:" actual &&
- git status --find-rename=01% >actual &&
+ git status --find-renames=01% >actual &&
test_i18ngrep "renamed:" actual
'
-test_expect_success 'copies not overridden by find-rename' '
+test_expect_success 'copies not overridden by find-renames' '
cp renamed copy &&
git add copy &&
test_i18ngrep "copied:" actual &&
test_i18ngrep "renamed:" actual &&
- git -c status.renames=copies status --find-rename=01% >actual &&
+ git -c status.renames=copies status --find-renames=01% >actual &&
test_i18ngrep "copied:" actual &&
test_i18ngrep "renamed:" actual
'
done >actual
EOF
-test_expect_success SYMLINKS 'difftool --dir-diff --symlink without unstaged changes' '
+test_expect_success SYMLINKS 'difftool --dir-diff --symlinks without unstaged changes' '
cat >expect <<-EOF &&
file
$PWD/file
sub/sub
$PWD/sub/sub
EOF
- git difftool --dir-diff --symlink \
+ git difftool --dir-diff --symlinks \
--extcmd "./.git/CHECK_SYMLINKS" branch HEAD &&
test_cmp expect actual
'
test_cmp expect actual
'
+test_expect_success 'outside worktree' '
+ echo 1 >1 &&
+ echo 2 >2 &&
+ test_expect_code 1 nongit git \
+ -c diff.tool=echo -c difftool.echo.cmd="echo \$LOCAL \$REMOTE" \
+ difftool --no-prompt --no-index ../1 ../2 >actual &&
+ echo "../1 ../2" >expect &&
+ test_cmp expect actual
+'
+
test_done
test_cmp expected actual
'
- test_expect_success "grep -w $L (with --column, --invert)" '
+ test_expect_success "grep -w $L (with --column, --invert-match)" '
{
echo ${HC}file:1:foo mmap bar
echo ${HC}file:1:foo_mmap bar
echo ${HC}file:1:foo_mmap bar mmap
echo ${HC}file:1:foo mmap bar_mmap
} >expected &&
- git grep --column --invert -w -e baz $H -- file >actual &&
+ git grep --column --invert-match -w -e baz $H -- file >actual &&
test_cmp expected actual
'
- test_expect_success "grep $L (with --column, --invert, extended OR)" '
+ test_expect_success "grep $L (with --column, --invert-match, extended OR)" '
{
echo ${HC}hello_world:6:HeLLo_world
} >expected &&
- git grep --column --invert -e ll --or --not -e _ $H -- hello_world \
+ git grep --column --invert-match -e ll --or --not -e _ $H -- hello_world \
>actual &&
test_cmp expected actual
'
- test_expect_success "grep $L (with --column, --invert, extended AND)" '
+ test_expect_success "grep $L (with --column, --invert-match, extended AND)" '
{
echo ${HC}hello_world:3:Hello world
echo ${HC}hello_world:3:Hello_world
echo ${HC}hello_world:6:HeLLo_world
} >expected &&
- git grep --column --invert --not -e _ --and --not -e ll $H -- hello_world \
+ git grep --column --invert-match --not -e _ --and --not -e ll $H -- hello_world \
>actual &&
test_cmp expected actual
'
echo ".gitignore:.*o*" &&
cat ../expect.full
} >../expect.with.ignored &&
- git grep --no-index --no-exclude o >../actual.full &&
+ git grep --no-index --no-exclude-standard o >../actual.full &&
test_cmp ../expect.with.ignored ../actual.full
)
'
echo ".gitignore:.*o*" &&
cat ../expect.full
} >../expect.with.ignored &&
- git -c grep.fallbackToNoIndex grep --no-exclude o >../actual.full &&
+ git -c grep.fallbackToNoIndex grep --no-exclude-standard o >../actual.full &&
test_cmp ../expect.with.ignored ../actual.full
)
'
git svn dcommit
'
-stop_httpd
-
test_done
)
'
-stop_httpd
-
test_done
)
'
-stop_httpd
-
test_done
( cd g && git rev-parse --symbolic --verify HEAD )
'
-stop_httpd
-
test_done
background_import_still_running
'
+###
+### series W (get-mark and empty orphan commits)
+###
+
+cat >>W-input <<-W_INPUT_END
+ commit refs/heads/W-branch
+ mark :1
+ author Full Name <user@company.tld> 1000000000 +0100
+ committer Full Name <user@company.tld> 1000000000 +0100
+ data 27
+ Intentionally empty commit
+ LFsget-mark :1
+ W_INPUT_END
+
+test_expect_success !MINGW 'W: get-mark & empty orphan commit with no newlines' '
+ sed -e s/LFs// W-input | tr L "\n" | git fast-import
+'
+
+test_expect_success !MINGW 'W: get-mark & empty orphan commit with one newline' '
+ sed -e s/LFs/L/ W-input | tr L "\n" | git fast-import
+'
+
+test_expect_success !MINGW 'W: get-mark & empty orphan commit with ugly second newline' '
+ # Technically, this should fail as it has too many linefeeds
+ # according to the grammar in fast-import.txt. But, for whatever
+ # reason, it works. Since using the correct number of newlines
+ # does not work with older (pre-2.22) versions of git, allow apps
+ # that used this second-newline workaround to keep working by
+ # checking it with this test...
+ sed -e s/LFs/LL/ W-input | tr L "\n" | git fast-import
+'
+
+test_expect_success !MINGW 'W: get-mark & empty orphan commit with erroneous third newline' '
+ # ...but do NOT allow more empty lines than that (see previous test).
+ sed -e s/LFs/LLL/ W-input | tr L "\n" | test_must_fail git fast-import
+'
+
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
'
test_expect_success 'restart p4d' '
- kill_p4d &&
+ stop_and_cleanup_p4d &&
start_p4d
'
'
test_expect_success 'restart p4d' '
- kill_p4d &&
+ stop_and_cleanup_p4d &&
start_p4d
'
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
test_path_is_file "$git"/cli_file2.t
'
-
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
git_verify "cdir 1/file11" "cdir 1/file12"
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
(
cd "$cli" &&
p4 sync ... &&
- !(p4 labels | grep GIT_TAG_ON_A_BRANCH)
+ ! p4 labels | grep GIT_TAG_ON_A_BRANCH
)
'
)
'
-
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
test_line_count \> 10 log
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
test_must_fail git p4 clone //depot/uc/...
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
git p4 clone --dest="$git" //depot
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
test_path_is_file file_to_shelve
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
-
test_done
--guess Z
--no-guess Z
--no-... Z
+ --overlay Z
EOF
'
test_completion "git --help core" "core-tutorial "
'
+test_expect_success 'completion.commands removes multiple commands' '
+ test_config completion.commands "-cherry -mergetool" &&
+ git --list-cmds=list-mainporcelain,list-complete,config >out &&
+ ! grep -E "^(cherry|mergetool)$" out
+'
+
test_expect_success 'setup for integration tests' '
echo content >file1 &&
echo more >file2 &&
fi
}
+# Check if the file exists and has a size greater than zero
+test_file_not_empty () {
+ if ! test -s "$1"
+ then
+		echo "'$1' is missing or empty."
+ false
+ fi
+}
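+
+# A hypothetical usage sketch (illustration only, not part of the helper):
+# a test would typically redirect command output to a file and then assert
+# that something was actually written, e.g.:
+#
+#	git log --oneline >log &&
+#	test_file_not_empty log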
+
test_path_is_missing () {
if test -e "$1"
then
} && (exit \"\$eval_ret\"); eval_ret=\$?; $test_cleanup"
}
+# This function can be used to schedule some commands to be run
+# unconditionally at the end of the test script, e.g. to stop a daemon:
+#
+# test_expect_success 'test git daemon' '
+# git daemon &
+# daemon_pid=$! &&
+# test_atexit 'kill $daemon_pid' &&
+# hello world
+# '
+#
+# The commands will be executed before the trash directory is removed,
+# i.e. the atexit commands will still be able to access any pidfiles or
+# socket files.
+#
+# Note that these commands will be run even when a test script run
+# with '--immediate' fails. Be careful with your atexit commands to
+# minimize any changes to the failed state.
+
+test_atexit () {
+	# We cannot detect when we are in a subshell in general, but
+	# doing so in Bash is better than nothing (the test will
+ # silently pass on other shells).
+ test "${BASH_SUBSHELL-0}" = 0 ||
+ error "bug in test script: test_atexit does nothing in a subshell"
+ test_atexit_cleanup="{ $*
+ } && (exit \"\$eval_ret\"); eval_ret=\$?; $test_atexit_cleanup"
+}
+
# Most tests can use the created repository, but some may need to create more.
# Usage: test_create_repo <directory>
test_create_repo () {
. "$GIT_BUILD_DIR"/GIT-BUILD-OPTIONS
export PERL_PATH SHELL_PATH
+# Disallow the use of abbreviated options in the test suite by default
+if test -z "${GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS}"
+then
+ GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=true
+ export GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS
+fi
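+
+# With this knob set, the option parser rejects unique abbreviations of
+# long options, so tests must spell options out in full.  A hypothetical
+# example (illustration only):
+#
+#	git branch --verb	# would normally expand to --verbose, now errors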
+
################################################################
# It appears that people try to run tests without building...
"${GIT_TEST_INSTALLED:-$GIT_BUILD_DIR}/git$X" >/dev/null
--stress)
stress=t ;;
--stress=*)
+ echo "error: --stress does not accept an argument: '$opt'" >&2
+ echo "did you mean --stress-jobs=${opt#*=} or --stress-limit=${opt#*=}?" >&2
+ exit 1
+ ;;
+ --stress-jobs=*)
+ stress=t;
stress=${opt#--*=}
case "$stress" in
*[!0-9]*|0*|"")
- echo "error: --stress=<N> requires the number of jobs to run" >&2
+ echo "error: --stress-jobs=<N> requires the number of jobs to run" >&2
exit 1
;;
*) # Good.
esac
;;
--stress-limit=*)
+ stress=t;
stress_limit=${opt#--*=}
case "$stress_limit" in
*[!0-9]*|0*|"")
my @env = keys %ENV;
my $ok = join("|", qw(
TRACE
+ TR2_
DEBUG
TEST
.*_TEST
die () {
code=$?
+ # This is responsible for running the atexit commands even when a
+ # test script run with '--immediate' fails, or when the user hits
+ # ctrl-C, i.e. when 'test_done' is not invoked at all.
+ test_atexit_handler || code=$?
if test -n "$GIT_EXIT_OK"
then
exit $code
GIT_EXIT_OK=
trap 'die' EXIT
-trap 'exit $?' INT TERM HUP
+# Disable '-x' tracing, because with some shells, notably dash, it
+# prevents running the cleanup commands when a test script run with
+# '--verbose-log -x' is interrupted.
+trap '{ code=$?; set +x; } 2>/dev/null; exit $code' INT TERM HUP
# The user-facing functions are loaded from a separate file so that
# test_perf subshells can have them too
junit_have_testcase=t
}
+test_atexit_cleanup=:
+test_atexit_handler () {
+ # In a succeeding test script 'test_atexit_handler' is invoked
+ # twice: first from 'test_done', then from 'die' in the trap on
+ # EXIT.
+ # This condition and resetting 'test_atexit_cleanup' below makes
+ # sure that the registered cleanup commands are run only once.
+ test : != "$test_atexit_cleanup" || return 0
+
+ setup_malloc_check
+ test_eval_ "$test_atexit_cleanup"
+ test_atexit_cleanup=:
+ teardown_malloc_check
+}
+
test_done () {
GIT_EXIT_OK=t
+ # Run the atexit commands _before_ the trash directory is
+ # removed, so the commands can access pidfiles and socket files.
+ test_atexit_handler
+
if test -n "$write_junit_xml" && test -n "$junit_xml_path"
then
test -n "$junit_have_testcase" || {
fi
fi
-# Provide an implementation of the 'yes' utility
+# Provide an implementation of the 'yes' utility; the upper bound
+# limit is there to help Windows that cannot stop this loop from
+# wasting cycles when the downstream stops reading, so do not be
+# tempted to turn it into an infinite loop. cf. 6129c930 ("test-lib:
+# limit the output of the yes utility", 2016-02-02)
yes () {
if test $# = 0
then
--- /dev/null
+#include "cache.h"
+#include "config.h"
+#include "json-writer.h"
+#include "quote.h"
+#include "run-command.h"
+#include "sigchain.h"
+#include "thread-utils.h"
+#include "version.h"
+#include "trace2/tr2_cfg.h"
+#include "trace2/tr2_cmd_name.h"
+#include "trace2/tr2_dst.h"
+#include "trace2/tr2_sid.h"
+#include "trace2/tr2_tgt.h"
+#include "trace2/tr2_tls.h"
+
+static int trace2_enabled;
+
+static int tr2_next_child_id; /* modify under lock */
+static int tr2_next_exec_id; /* modify under lock */
+static int tr2_next_repo_id = 1; /* modify under lock. zero is reserved */
+
+/*
+ * A table of the builtin TRACE2 targets. Each of these may be independently
+ * enabled or disabled. Each TRACE2 API method will try to write an event to
+ * *each* of the enabled targets.
+ */
+/* clang-format off */
+static struct tr2_tgt *tr2_tgt_builtins[] =
+{
+ &tr2_tgt_normal,
+ &tr2_tgt_perf,
+ &tr2_tgt_event,
+ NULL
+};
+/* clang-format on */
+
+/* clang-format off */
+#define for_each_builtin(j, tgt_j) \
+ for (j = 0, tgt_j = tr2_tgt_builtins[j]; \
+ tgt_j; \
+ j++, tgt_j = tr2_tgt_builtins[j])
+/* clang-format on */
+
+/* clang-format off */
+#define for_each_wanted_builtin(j, tgt_j) \
+ for_each_builtin(j, tgt_j) \
+ if (tr2_dst_trace_want(tgt_j->pdst))
+/* clang-format on */
+
+/*
+ * Force (rather than lazily) initialize any of the requested
+ * builtin TRACE2 targets at startup (and before we've seen an
+ * actual TRACE2 event call) so we can see if we need to setup
+ * the TR2 and TLS machinery.
+ *
+ * Return the number of builtin targets enabled.
+ */
+static int tr2_tgt_want_builtins(void)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ int sum = 0;
+
+ for_each_builtin (j, tgt_j)
+ if (tgt_j->pfn_init())
+ sum++;
+
+ return sum;
+}
+
+/*
+ * Properly terminate each builtin target. Give each target
+ * a chance to write a summary event and/or flush if necessary
+ * and then close the fd.
+ */
+static void tr2_tgt_disable_builtins(void)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ for_each_builtin (j, tgt_j)
+ tgt_j->pfn_term();
+}
+
+static int tr2main_exit_code;
+
+/*
+ * Our atexit routine should run after everything has finished.
+ *
+ * Note that events generated here might not actually appear if
+ * we are writing to fd 1 or 2 and our atexit routine runs after
+ * the pager's atexit routine (since it closes them to shutdown
+ * the pipes).
+ */
+static void tr2main_atexit_handler(void)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ /*
+ * Clear any unbalanced regions so that our atexit message
+ * does not appear nested. This improves the appearance of
+ * the trace output if someone calls die(), for example.
+ */
+ tr2tls_pop_unwind_self();
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_atexit)
+ tgt_j->pfn_atexit(us_elapsed_absolute,
+ tr2main_exit_code);
+
+ tr2_tgt_disable_builtins();
+
+ tr2tls_release();
+ tr2_sid_release();
+ tr2_cmd_name_release();
+ tr2_cfg_free_patterns();
+
+ trace2_enabled = 0;
+}
+
+static void tr2main_signal_handler(int signo)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_signal)
+ tgt_j->pfn_signal(us_elapsed_absolute, signo);
+
+ sigchain_pop(signo);
+ raise(signo);
+}
+
+void trace2_initialize_fl(const char *file, int line)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ if (trace2_enabled)
+ return;
+
+ if (!tr2_tgt_want_builtins())
+ return;
+ trace2_enabled = 1;
+
+ tr2_sid_get();
+
+ atexit(tr2main_atexit_handler);
+ sigchain_push(SIGPIPE, tr2main_signal_handler);
+ tr2tls_init();
+
+ /*
+ * Emit 'version' message on each active builtin target.
+ */
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_version_fl)
+ tgt_j->pfn_version_fl(file, line);
+}
+
+int trace2_is_enabled(void)
+{
+ return trace2_enabled;
+}
+
+void trace2_cmd_start_fl(const char *file, int line, const char **argv)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ if (!trace2_enabled)
+ return;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_start_fl)
+ tgt_j->pfn_start_fl(file, line, argv);
+}
+
+int trace2_cmd_exit_fl(const char *file, int line, int code)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ code &= 0xff;
+
+ if (!trace2_enabled)
+ return code;
+
+ tr2main_exit_code = code;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_exit_fl)
+ tgt_j->pfn_exit_fl(file, line, us_elapsed_absolute,
+ code);
+
+ return code;
+}
+
+void trace2_cmd_error_va_fl(const char *file, int line, const char *fmt,
+ va_list ap)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ if (!trace2_enabled)
+ return;
+
+ /*
+ * We expect each target function to treat 'ap' as constant
+ * and use va_copy (because an 'ap' can only be walked once).
+ */
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_error_va_fl)
+ tgt_j->pfn_error_va_fl(file, line, fmt, ap);
+}
+
+void trace2_cmd_path_fl(const char *file, int line, const char *pathname)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ if (!trace2_enabled)
+ return;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_command_path_fl)
+ tgt_j->pfn_command_path_fl(file, line, pathname);
+}
+
+void trace2_cmd_name_fl(const char *file, int line, const char *name)
+{
+ struct tr2_tgt *tgt_j;
+ const char *hierarchy;
+ int j;
+
+ if (!trace2_enabled)
+ return;
+
+ tr2_cmd_name_append_hierarchy(name);
+ hierarchy = tr2_cmd_name_get_hierarchy();
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_command_name_fl)
+ tgt_j->pfn_command_name_fl(file, line, name, hierarchy);
+}
+
+void trace2_cmd_mode_fl(const char *file, int line, const char *mode)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ if (!trace2_enabled)
+ return;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_command_mode_fl)
+ tgt_j->pfn_command_mode_fl(file, line, mode);
+}
+
+void trace2_cmd_alias_fl(const char *file, int line, const char *alias,
+ const char **argv)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ if (!trace2_enabled)
+ return;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_alias_fl)
+ tgt_j->pfn_alias_fl(file, line, alias, argv);
+}
+
+void trace2_cmd_list_config_fl(const char *file, int line)
+{
+ if (!trace2_enabled)
+ return;
+
+ tr2_cfg_list_config_fl(file, line);
+}
+
+void trace2_cmd_set_config_fl(const char *file, int line, const char *key,
+ const char *value)
+{
+ if (!trace2_enabled)
+ return;
+
+ tr2_cfg_set_fl(file, line, key, value);
+}
+
+void trace2_child_start_fl(const char *file, int line,
+ struct child_process *cmd)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ cmd->trace2_child_id = tr2tls_locked_increment(&tr2_next_child_id);
+ cmd->trace2_child_us_start = us_now;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_child_start_fl)
+ tgt_j->pfn_child_start_fl(file, line,
+ us_elapsed_absolute, cmd);
+}
+
+void trace2_child_exit_fl(const char *file, int line, struct child_process *cmd,
+ int child_exit_code)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+ uint64_t us_elapsed_child;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ if (cmd->trace2_child_us_start)
+ us_elapsed_child = us_now - cmd->trace2_child_us_start;
+ else
+ us_elapsed_child = 0;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_child_exit_fl)
+ tgt_j->pfn_child_exit_fl(file, line,
+ us_elapsed_absolute,
+ cmd->trace2_child_id, cmd->pid,
+ child_exit_code,
+ us_elapsed_child);
+}
+
+int trace2_exec_fl(const char *file, int line, const char *exe,
+ const char **argv)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ int exec_id;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ if (!trace2_enabled)
+ return -1;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ exec_id = tr2tls_locked_increment(&tr2_next_exec_id);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_exec_fl)
+ tgt_j->pfn_exec_fl(file, line, us_elapsed_absolute,
+ exec_id, exe, argv);
+
+ return exec_id;
+}
+
+void trace2_exec_result_fl(const char *file, int line, int exec_id, int code)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_exec_result_fl)
+ tgt_j->pfn_exec_result_fl(
+ file, line, us_elapsed_absolute, exec_id, code);
+}
+
+void trace2_thread_start_fl(const char *file, int line, const char *thread_name)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ if (!trace2_enabled)
+ return;
+
+ if (tr2tls_is_main_thread()) {
+ /*
+ * We should only be called from the new thread's thread-proc,
+ * so this is technically a bug. But in those cases where the
+ * main thread also runs the thread-proc function (or when we
+ * are built with threading disabled), we need to allow it.
+ *
+ * Convert this call to a region-enter so the nesting looks
+ * correct.
+ */
+ trace2_region_enter_printf_fl(file, line, NULL, NULL, NULL,
+ "thread-proc on main: %s",
+ thread_name);
+ return;
+ }
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ tr2tls_create_self(thread_name);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_thread_start_fl)
+ tgt_j->pfn_thread_start_fl(file, line,
+ us_elapsed_absolute);
+}
+
+void trace2_thread_exit_fl(const char *file, int line)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+ uint64_t us_elapsed_thread;
+
+ if (!trace2_enabled)
+ return;
+
+ if (tr2tls_is_main_thread()) {
+ /*
+ * We should only be called from the exiting thread's
+ * thread-proc, so this is technically a bug. But in
+ * those cases where the main thread also runs the
+ * thread-proc function (or when we are built with
+ * threading disabled), we need to allow it.
+ *
+ * Convert this call to a region-leave so the nesting
+ * looks correct.
+ */
+ trace2_region_leave_printf_fl(file, line, NULL, NULL, NULL,
+ "thread-proc on main");
+ return;
+ }
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ /*
+ * Clear any unbalanced regions and then get the relative time
+ * for the outer-most region (which we pushed when the thread
+ * started). This gives us the run time of the thread.
+ */
+ tr2tls_pop_unwind_self();
+ us_elapsed_thread = tr2tls_region_elasped_self(us_now);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_thread_exit_fl)
+ tgt_j->pfn_thread_exit_fl(file, line,
+ us_elapsed_absolute,
+ us_elapsed_thread);
+
+ tr2tls_unset_self();
+}
+
+void trace2_def_param_fl(const char *file, int line, const char *param,
+ const char *value)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ if (!trace2_enabled)
+ return;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_param_fl)
+ tgt_j->pfn_param_fl(file, line, param, value);
+}
+
+void trace2_def_repo_fl(const char *file, int line, struct repository *repo)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ if (!trace2_enabled)
+ return;
+
+ if (repo->trace2_repo_id)
+ return;
+
+ repo->trace2_repo_id = tr2tls_locked_increment(&tr2_next_repo_id);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_repo_fl)
+ tgt_j->pfn_repo_fl(file, line, repo);
+}
+
+void trace2_region_enter_printf_va_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, va_list ap)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ /*
+ * Print the region-enter message at the current nesting
+ * (indentation) level and then push a new level.
+ *
+ * We expect each target function to treat 'ap' as constant
+ * and use va_copy.
+ */
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_region_enter_printf_va_fl)
+ tgt_j->pfn_region_enter_printf_va_fl(
+ file, line, us_elapsed_absolute, category,
+ label, repo, fmt, ap);
+
+ tr2tls_push_self(us_now);
+}
+
+void trace2_region_enter_fl(const char *file, int line, const char *category,
+ const char *label, const struct repository *repo, ...)
+{
+ va_list ap;
+ va_start(ap, repo);
+ trace2_region_enter_printf_va_fl(file, line, category, label, repo,
+ NULL, ap);
+ va_end(ap);
+}
+
+void trace2_region_enter_printf_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ trace2_region_enter_printf_va_fl(file, line, category, label, repo, fmt,
+ ap);
+ va_end(ap);
+}
+
+#ifndef HAVE_VARIADIC_MACROS
+void trace2_region_enter_printf(const char *category, const char *label,
+ const struct repository *repo, const char *fmt,
+ ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ trace2_region_enter_printf_va_fl(NULL, 0, category, label, repo, fmt,
+ ap);
+ va_end(ap);
+}
+#endif
+
+void trace2_region_leave_printf_va_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, va_list ap)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+ uint64_t us_elapsed_region;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ /*
+	 * Get the elapsed time in the current region before we pop it
+	 * off the stack, and then print the perf message at the new
+	 * (shallower) level so that it lines up with the corresponding
+	 * push/enter.
+ */
+ us_elapsed_region = tr2tls_region_elasped_self(us_now);
+
+ tr2tls_pop_self();
+
+ /*
+ * We expect each target function to treat 'ap' as constant
+ * and use va_copy.
+ */
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_region_leave_printf_va_fl)
+ tgt_j->pfn_region_leave_printf_va_fl(
+ file, line, us_elapsed_absolute,
+ us_elapsed_region, category, label, repo, fmt,
+ ap);
+}
+
+void trace2_region_leave_fl(const char *file, int line, const char *category,
+ const char *label, const struct repository *repo, ...)
+{
+ va_list ap;
+ va_start(ap, repo);
+ trace2_region_leave_printf_va_fl(file, line, category, label, repo,
+ NULL, ap);
+ va_end(ap);
+}
+
+void trace2_region_leave_printf_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ trace2_region_leave_printf_va_fl(file, line, category, label, repo, fmt,
+ ap);
+ va_end(ap);
+}
+
+#ifndef HAVE_VARIADIC_MACROS
+void trace2_region_leave_printf(const char *category, const char *label,
+ const struct repository *repo, const char *fmt,
+ ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ trace2_region_leave_printf_va_fl(NULL, 0, category, label, repo, fmt,
+ ap);
+ va_end(ap);
+}
+#endif
+
+void trace2_data_string_fl(const char *file, int line, const char *category,
+ const struct repository *repo, const char *key,
+ const char *value)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+ uint64_t us_elapsed_region;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+ us_elapsed_region = tr2tls_region_elasped_self(us_now);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_data_fl)
+ tgt_j->pfn_data_fl(file, line, us_elapsed_absolute,
+ us_elapsed_region, category, repo,
+ key, value);
+}
+
+void trace2_data_intmax_fl(const char *file, int line, const char *category,
+ const struct repository *repo, const char *key,
+ intmax_t value)
+{
+ struct strbuf buf_string = STRBUF_INIT;
+
+ if (!trace2_enabled)
+ return;
+
+ strbuf_addf(&buf_string, "%" PRIdMAX, value);
+ trace2_data_string_fl(file, line, category, repo, key, buf_string.buf);
+ strbuf_release(&buf_string);
+}
+
+void trace2_data_json_fl(const char *file, int line, const char *category,
+ const struct repository *repo, const char *key,
+ const struct json_writer *value)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+ uint64_t us_elapsed_region;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+ us_elapsed_region = tr2tls_region_elasped_self(us_now);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_data_fl)
+ tgt_j->pfn_data_json_fl(file, line, us_elapsed_absolute,
+ us_elapsed_region, category,
+ repo, key, value);
+}
+
+void trace2_printf_va_fl(const char *file, int line, const char *fmt,
+ va_list ap)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ /*
+ * We expect each target function to treat 'ap' as constant
+ * and use va_copy.
+ */
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_printf_va_fl)
+ tgt_j->pfn_printf_va_fl(file, line, us_elapsed_absolute,
+ fmt, ap);
+}
+
+void trace2_printf_fl(const char *file, int line, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ trace2_printf_va_fl(file, line, fmt, ap);
+ va_end(ap);
+}
+
+#ifndef HAVE_VARIADIC_MACROS
+void trace2_printf(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ trace2_printf_va_fl(NULL, 0, fmt, ap);
+ va_end(ap);
+}
+#endif
--- /dev/null
+#ifndef TRACE2_H
+#define TRACE2_H
+
+struct child_process;
+struct repository;
+struct json_writer;
+
+/*
+ * The public TRACE2 routines are grouped into the following groups:
+ *
+ * [] trace2_initialize -- initialization.
+ * [] trace2_cmd_* -- emit command/control messages.
+ * [] trace2_child* -- emit child start/stop messages.
+ * [] trace2_exec* -- emit exec start/stop messages.
+ * [] trace2_thread* -- emit thread start/stop messages.
+ * [] trace2_def* -- emit definition/parameter messages.
+ * [] trace2_region* -- emit region nesting messages.
+ * [] trace2_data* -- emit region/thread/repo data messages.
+ * [] trace2_printf* -- legacy trace[1] messages.
+ */
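+
+/*
+ * As a rough orientation only (a hypothetical sketch, not code taken
+ * from an actual caller), a command typically brackets interesting
+ * work with the region and data routines declared below:
+ *
+ *     trace2_region_enter("index", "refresh", the_repository);
+ *     nr_refreshed = do_refresh();          // hypothetical work
+ *     trace2_data_intmax("index", the_repository, "refresh/count",
+ *                        nr_refreshed);
+ *     trace2_region_leave("index", "refresh", the_repository);
+ */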
+
+/*
+ * Initialize TRACE2 tracing facility if any of the builtin TRACE2
+ * targets are enabled in the environment. Emits a 'version' event.
+ *
+ * Cleanup/Termination is handled automatically by a registered
+ * atexit() routine.
+ */
+void trace2_initialize_fl(const char *file, int line);
+
+#define trace2_initialize() trace2_initialize_fl(__FILE__, __LINE__)
+
+/*
+ * Return true if trace2 is enabled.
+ */
+int trace2_is_enabled(void);
+
+/*
+ * Emit a 'start' event with the original (unmodified) argv.
+ */
+void trace2_cmd_start_fl(const char *file, int line, const char **argv);
+
+#define trace2_cmd_start(argv) trace2_cmd_start_fl(__FILE__, __LINE__, (argv))
+
+/*
+ * Emit an 'exit' event.
+ *
+ * Write the exit-code that will be passed to exit() or returned
+ * from main().
+ *
+ * Use this prior to actually calling exit().
+ * See "#define exit()" in git-compat-util.h
+ */
+int trace2_cmd_exit_fl(const char *file, int line, int code);
+
+#define trace2_cmd_exit(code) (trace2_cmd_exit_fl(__FILE__, __LINE__, (code)))
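+
+/*
+ * A hypothetical illustration of the intended call pattern (the real
+ * wiring is done via the exit() wrapper mentioned above):
+ *
+ *     result = cmd_main(argc, argv);
+ *     exit(trace2_cmd_exit(result));
+ */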
+
+/*
+ * Emit an 'error' event.
+ *
+ * Write an error message to the TRACE2 targets.
+ */
+void trace2_cmd_error_va_fl(const char *file, int line, const char *fmt,
+ va_list ap);
+
+#define trace2_cmd_error_va(fmt, ap) \
+ trace2_cmd_error_va_fl(__FILE__, __LINE__, (fmt), (ap))
+
+/*
+ * Emit a 'pathname' event with the canonical pathname of the current process.
+ * This gives post-processors a simple field to identify the command without
+ * having to parse the argv. For example, to distinguish invocations from
+ * installed versus debug executables.
+ */
+void trace2_cmd_path_fl(const char *file, int line, const char *pathname);
+
+#define trace2_cmd_path(p) trace2_cmd_path_fl(__FILE__, __LINE__, (p))
+
+/*
+ * Emit a 'cmd_name' event with the canonical name of the command.
+ * This gives post-processors a simple field to identify the command
+ * without having to parse the argv.
+ */
+void trace2_cmd_name_fl(const char *file, int line, const char *name);
+
+#define trace2_cmd_name(v) trace2_cmd_name_fl(__FILE__, __LINE__, (v))
+
+/*
+ * Emit a 'cmd_mode' event to further describe the command being run.
+ * For example, "checkout" can checkout a single file or can checkout a
+ * different branch. This gives post-processors a simple field to compare
+ * equivalent commands without having to parse the argv.
+ */
+void trace2_cmd_mode_fl(const char *file, int line, const char *mode);
+
+#define trace2_cmd_mode(sv) trace2_cmd_mode_fl(__FILE__, __LINE__, (sv))
+
+/*
+ * Emit an 'alias' expansion event.
+ */
+void trace2_cmd_alias_fl(const char *file, int line, const char *alias,
+ const char **argv);
+
+#define trace2_cmd_alias(alias, argv) \
+ trace2_cmd_alias_fl(__FILE__, __LINE__, (alias), (argv))
+
+/*
+ * Emit one or more 'def_param' events for "interesting" configuration
+ * settings.
+ *
+ * The environment variable "GIT_TR2_CONFIG_PARAMS" can be set to a
+ * list of patterns considered important. For example:
+ *
+ * GIT_TR2_CONFIG_PARAMS="core.*,remote.*.url"
+ *
+ * Note: this routine does a read-only iteration on the config data
+ * (using read_early_config()), so it must not be called until enough
+ * of the process environment has been established. This includes the
+ * location of the git and worktree directories, expansion of any "-c"
+ * and "-C" command line options, and so on.
+ */
+void trace2_cmd_list_config_fl(const char *file, int line);
+
+#define trace2_cmd_list_config() trace2_cmd_list_config_fl(__FILE__, __LINE__)
+
+/*
+ * Emit a "def_param" event for the given config key/value pair IF
+ * we consider the key to be "interesting".
+ *
+ * Use this for new/updated config settings created/updated after
+ * trace2_cmd_list_config() is called.
+ */
+void trace2_cmd_set_config_fl(const char *file, int line, const char *key,
+ const char *value);
+
+#define trace2_cmd_set_config(k, v) \
+ trace2_cmd_set_config_fl(__FILE__, __LINE__, (k), (v))
+
+/*
+ * Emit a 'child_start' event prior to spawning a child process.
+ *
+ * Before calling optionally set "cmd->trace2_child_class" to a string
+ * describing the type of the child process. For example, "editor" or
+ * "pager".
+ */
+void trace2_child_start_fl(const char *file, int line,
+ struct child_process *cmd);
+
+#define trace2_child_start(cmd) trace2_child_start_fl(__FILE__, __LINE__, (cmd))
+
+/*
+ * Emit a 'child_exit' event after the child process completes.
+ */
+void trace2_child_exit_fl(const char *file, int line, struct child_process *cmd,
+ int child_exit_code);
+
+#define trace2_child_exit(cmd, code) \
+ trace2_child_exit_fl(__FILE__, __LINE__, (cmd), (code))
+
+/*
+ * Emit an 'exec' event prior to calling one of exec(), execv(),
+ * execvp(), and so on. On Unix-derived systems, this will be the
+ * last event emitted for the current process, unless the exec
+ * fails. On Windows, exec() behaves like 'child_start' and a
+ * waitpid(), so additional events may be emitted.
+ *
+ * Returns the "exec_id".
+ */
+int trace2_exec_fl(const char *file, int line, const char *exe,
+ const char **argv);
+
+#define trace2_exec(exe, argv) trace2_exec_fl(__FILE__, __LINE__, (exe), (argv))
+
+/*
+ * Emit an 'exec_result' when possible. On Unix-derived systems,
+ * this should be called after exec() returns (which only happens
+ * when there is an error starting the new process). On Windows,
+ * this should be called after the waitpid().
+ *
+ * The "exec_id" should be the value returned from trace2_exec().
+ */
+void trace2_exec_result_fl(const char *file, int line, int exec_id, int code);
+
+#define trace2_exec_result(id, code) \
+ trace2_exec_result_fl(__FILE__, __LINE__, (id), (code))
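+
+/*
+ * A hypothetical pairing of the exec routines above (illustration only):
+ *
+ *     int exec_id = trace2_exec(prog, argv);
+ *     execv(prog, (char *const *)argv);
+ *     trace2_exec_result(exec_id, errno);   // reached only if execv() failed
+ */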
+
+/*
+ * Emit a 'thread_start' event. This must be called from inside the
+ * thread-proc to set up the trace2 TLS data for the thread.
+ *
+ * Thread names should be descriptive, like "preload_index".
+ * Thread names will be decorated with an instance number automatically.
+ */
+void trace2_thread_start_fl(const char *file, int line,
+ const char *thread_name);
+
+#define trace2_thread_start(thread_name) \
+ trace2_thread_start_fl(__FILE__, __LINE__, (thread_name))
+
+/*
+ * Emit a 'thread_exit' event. This must be called from inside the
+ * thread-proc to report thread-specific data and cleanup TLS data
+ * for the thread.
+ */
+void trace2_thread_exit_fl(const char *file, int line);
+
+#define trace2_thread_exit() trace2_thread_exit_fl(__FILE__, __LINE__)
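+
+/*
+ * A hypothetical thread-proc sketch (illustration only): the pair above
+ * is called at the very beginning and the very end of the proc.
+ *
+ *     static void *run_thread(void *data)
+ *     {
+ *             trace2_thread_start("worker");
+ *             // ... per-thread work ...
+ *             trace2_thread_exit();
+ *             return NULL;
+ *     }
+ */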
+
+/*
+ * Emit a 'param' event.
+ *
+ * Write a "<param> = <value>" pair describing some aspect of the
+ * run such as an important configuration setting or command line
+ * option that significantly changes command behavior.
+ */
+void trace2_def_param_fl(const char *file, int line, const char *param,
+ const char *value);
+
+#define trace2_def_param(param, value) \
+ trace2_def_param_fl(__FILE__, __LINE__, (param), (value))
+
+/*
+ * Tell trace2 about a newly instantiated repo object and assign
+ * a trace2-repo-id to be used in subsequent activity events.
+ *
+ * Emits a 'worktree' event for this repo instance.
+ */
+void trace2_def_repo_fl(const char *file, int line, struct repository *repo);
+
+#define trace2_def_repo(repo) trace2_def_repo_fl(__FILE__, __LINE__, repo)
+
+/*
+ * Emit a 'region_enter' event for <category>.<label> with optional
+ * repo-id and printf message.
+ *
+ * Enter a new nesting level on the current thread and remember the
+ * current time. This controls the indenting of all subsequent events
+ * on this thread.
+ */
+void trace2_region_enter_fl(const char *file, int line, const char *category,
+ const char *label, const struct repository *repo, ...);
+
+#define trace2_region_enter(category, label, repo) \
+ trace2_region_enter_fl(__FILE__, __LINE__, (category), (label), (repo))
+
+void trace2_region_enter_printf_va_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, va_list ap);
+
+#define trace2_region_enter_printf_va(category, label, repo, fmt, ap) \
+ trace2_region_enter_printf_va_fl(__FILE__, __LINE__, (category), \
+ (label), (repo), (fmt), (ap))
+
+void trace2_region_enter_printf_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, ...);
+
+#ifdef HAVE_VARIADIC_MACROS
+#define trace2_region_enter_printf(category, label, repo, ...) \
+ trace2_region_enter_printf_fl(__FILE__, __LINE__, (category), (label), \
+ (repo), __VA_ARGS__)
+#else
+/* clang-format off */
+__attribute__((format (region_enter_printf, 4, 5)))
+void trace2_region_enter_printf(const char *category, const char *label,
+ const struct repository *repo, const char *fmt,
+ ...);
+/* clang-format on */
+#endif
+
+/*
+ * Emit a 'region_leave' event for <category>.<label> with optional
+ * repo-id and printf message.
+ *
+ * Leave current nesting level and report the elapsed time spent
+ * in this nesting level.
+ */
+void trace2_region_leave_fl(const char *file, int line, const char *category,
+ const char *label, const struct repository *repo, ...);
+
+#define trace2_region_leave(category, label, repo) \
+ trace2_region_leave_fl(__FILE__, __LINE__, (category), (label), (repo))
+
+void trace2_region_leave_printf_va_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, va_list ap);
+
+#define trace2_region_leave_printf_va(category, label, repo, fmt, ap) \
+ trace2_region_leave_printf_va_fl(__FILE__, __LINE__, (category), \
+ (label), (repo), (fmt), (ap))
+
+void trace2_region_leave_printf_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, ...);
+
+#ifdef HAVE_VARIADIC_MACROS
+#define trace2_region_leave_printf(category, label, repo, ...) \
+ trace2_region_leave_printf_fl(__FILE__, __LINE__, (category), (label), \
+ (repo), __VA_ARGS__)
+#else
+/* clang-format off */
+__attribute__((format (region_leave_printf, 4, 5)))
+void trace2_region_leave_printf(const char *category, const char *label,
+ const struct repository *repo, const char *fmt,
+ ...);
+/* clang-format on */
+#endif
+
+/*
+ * Emit a key-value pair 'data' event of the form <category>.<key> = <value>.
+ * This event implicitly contains information about thread, nesting region,
+ * and optional repo-id.
+ *
+ * On event-based TRACE2 targets, this generates a 'data' event suitable
+ * for post-processing. On printf-based TRACE2 targets, this is converted
+ * into a fixed-format printf message.
+ */
+void trace2_data_string_fl(const char *file, int line, const char *category,
+ const struct repository *repo, const char *key,
+ const char *value);
+
+#define trace2_data_string(category, repo, key, value) \
+ trace2_data_string_fl(__FILE__, __LINE__, (category), (repo), (key), \
+ (value))
+
+void trace2_data_intmax_fl(const char *file, int line, const char *category,
+ const struct repository *repo, const char *key,
+ intmax_t value);
+
+#define trace2_data_intmax(category, repo, key, value) \
+ trace2_data_intmax_fl(__FILE__, __LINE__, (category), (repo), (key), \
+ (value))
+
+void trace2_data_json_fl(const char *file, int line, const char *category,
+ const struct repository *repo, const char *key,
+ const struct json_writer *jw);
+
+#define trace2_data_json(category, repo, key, value) \
+ trace2_data_json_fl(__FILE__, __LINE__, (category), (repo), (key), \
+ (value))
+
+/*
+ * Emit a 'printf' event.
+ *
+ * Write an arbitrary formatted message to the TRACE2 targets. These
+ * text messages should be considered as human-readable strings without
+ * any formatting guidelines. Post-processors may choose to ignore
+ * them.
+ */
+void trace2_printf_va_fl(const char *file, int line, const char *fmt,
+ va_list ap);
+
+#define trace2_printf_va(fmt, ap) \
+ trace2_printf_va_fl(__FILE__, __LINE__, (fmt), (ap))
+
+void trace2_printf_fl(const char *file, int line, const char *fmt, ...);
+
+#ifdef HAVE_VARIADIC_MACROS
+#define trace2_printf(...) trace2_printf_fl(__FILE__, __LINE__, __VA_ARGS__)
+#else
+/* clang-format off */
+__attribute__((format (printf, 1, 2)))
+void trace2_printf(const char *fmt, ...);
+/* clang-format on */
+#endif
+
+/*
+ * Optional platform-specific code to dump information about the
+ * current and any parent process(es). This is intended to allow
+ * post-processors to know who spawned this git instance and anything
+ * else the platform may be able to tell us about the current process.
+ */
+#if defined(GIT_WINDOWS_NATIVE)
+void trace2_collect_process_info(void);
+#else
+#define trace2_collect_process_info() \
+ do { \
+ } while (0)
+#endif
+
+#endif /* TRACE2_H */
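
The region and data macros declared above are the primary instrumentation points. As a minimal sketch of a call site (not part of this patch; the category, label, and key strings are made up for the example), a caller brackets a phase and attaches a counter to it:

    #include "cache.h"
    #include "trace2.h"

    /*
     * Illustrative only: time one phase of work and report a count.
     * The "index" category, "do_read_index" label and "read/cache_nr"
     * key are assumed names for this sketch.
     */
    static void example_read_index(struct repository *r)
    {
            intmax_t nr_entries = 0;

            trace2_region_enter("index", "do_read_index", r);

            /* ... do the actual work, counting entries into nr_entries ... */

            trace2_data_intmax("index", r, "read/cache_nr", nr_entries);
            trace2_region_leave("index", "do_read_index", r);
    }
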
--- /dev/null
+#include "cache.h"
+#include "config.h"
+#include "tr2_cfg.h"
+
+#define TR2_ENVVAR_CFG_PARAM "GIT_TR2_CONFIG_PARAMS"
+
+static struct strbuf **tr2_cfg_patterns;
+static int tr2_cfg_count_patterns;
+static int tr2_cfg_loaded;
+
+/*
+ * Parse a string containing a comma-delimited list of config keys
+ * or wildcard patterns into a list of strbufs.
+ */
+static int tr2_cfg_load_patterns(void)
+{
+ struct strbuf **s;
+ const char *envvar;
+
+ if (tr2_cfg_loaded)
+ return tr2_cfg_count_patterns;
+ tr2_cfg_loaded = 1;
+
+ envvar = getenv(TR2_ENVVAR_CFG_PARAM);
+ if (!envvar || !*envvar)
+ return tr2_cfg_count_patterns;
+
+ tr2_cfg_patterns = strbuf_split_buf(envvar, strlen(envvar), ',', -1);
+ for (s = tr2_cfg_patterns; *s; s++) {
+ struct strbuf *buf = *s;
+
+ if (buf->len && buf->buf[buf->len - 1] == ',')
+ strbuf_setlen(buf, buf->len - 1);
+ strbuf_trim_trailing_newline(*s);
+ strbuf_trim(*s);
+ }
+
+ tr2_cfg_count_patterns = s - tr2_cfg_patterns;
+ return tr2_cfg_count_patterns;
+}
+
+void tr2_cfg_free_patterns(void)
+{
+ if (tr2_cfg_patterns)
+ strbuf_list_free(tr2_cfg_patterns);
+ tr2_cfg_count_patterns = 0;
+ tr2_cfg_loaded = 0;
+}
+
+struct tr2_cfg_data {
+ const char *file;
+ int line;
+};
+
+/*
+ * See if the given config key matches any of our patterns of interest.
+ */
+static int tr2_cfg_cb(const char *key, const char *value, void *d)
+{
+ struct strbuf **s;
+ struct tr2_cfg_data *data = (struct tr2_cfg_data *)d;
+
+ for (s = tr2_cfg_patterns; *s; s++) {
+ struct strbuf *buf = *s;
+ int wm = wildmatch(buf->buf, key, WM_CASEFOLD);
+ if (wm == WM_MATCH) {
+ trace2_def_param_fl(data->file, data->line, key, value);
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+void tr2_cfg_list_config_fl(const char *file, int line)
+{
+ struct tr2_cfg_data data = { file, line };
+
+ if (tr2_cfg_load_patterns() > 0)
+ read_early_config(tr2_cfg_cb, &data);
+}
+
+void tr2_cfg_set_fl(const char *file, int line, const char *key,
+ const char *value)
+{
+ struct tr2_cfg_data data = { file, line };
+
+ if (tr2_cfg_load_patterns() > 0)
+ tr2_cfg_cb(key, value, &data);
+}
--- /dev/null
+#ifndef TR2_CFG_H
+#define TR2_CFG_H
+
+/*
+ * Iterate over all config settings and emit 'def_param' events for the
+ * "interesting" ones to TRACE2.
+ */
+void tr2_cfg_list_config_fl(const char *file, int line);
+
+/*
+ * Emit a "def_param" event for the given key/value pair IF we consider
+ * the key to be "interesting".
+ */
+void tr2_cfg_set_fl(const char *file, int line, const char *key,
+ const char *value);
+
+void tr2_cfg_free_patterns(void);
+
+#endif /* TR2_CFG_H */
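
A usage note, not part of the patch: the patterns above come from the comma-delimited GIT_TR2_CONFIG_PARAMS value and are matched against config keys with wildmatch(), so an invocation along these lines (the pattern list and output path are only assumed examples) should surface the matching settings as 'def_param' events:

    GIT_TR2_CONFIG_PARAMS="core.*,remote.*.url" \
    GIT_TR2_EVENT=/tmp/trace.log \
    git status
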
--- /dev/null
+#include "cache.h"
+#include "trace2/tr2_cmd_name.h"
+
+#define TR2_ENVVAR_PARENT_NAME "GIT_TR2_PARENT_NAME"
+
+static struct strbuf tr2cmdname_hierarchy = STRBUF_INIT;
+
+void tr2_cmd_name_append_hierarchy(const char *name)
+{
+ const char *parent_name = getenv(TR2_ENVVAR_PARENT_NAME);
+
+ strbuf_reset(&tr2cmdname_hierarchy);
+ if (parent_name && *parent_name) {
+ strbuf_addstr(&tr2cmdname_hierarchy, parent_name);
+ strbuf_addch(&tr2cmdname_hierarchy, '/');
+ }
+ strbuf_addstr(&tr2cmdname_hierarchy, name);
+
+ setenv(TR2_ENVVAR_PARENT_NAME, tr2cmdname_hierarchy.buf, 1);
+}
+
+const char *tr2_cmd_name_get_hierarchy(void)
+{
+ return tr2cmdname_hierarchy.buf;
+}
+
+void tr2_cmd_name_release(void)
+{
+ strbuf_release(&tr2cmdname_hierarchy);
+}
--- /dev/null
+#ifndef TR2_CMD_NAME_H
+#define TR2_CMD_NAME_H
+
+/*
+ * Append the current command name to the list being maintained
+ * in the environment.
+ *
+ * The hierarchy for a top-level git command is just the current
+ * command name. For a child git process, the hierarchy includes the
+ * names of the parent processes.
+ *
+ * The hierarchy for the current process will be exported to the
+ * environment and inherited by child processes.
+ */
+void tr2_cmd_name_append_hierarchy(const char *name);
+
+/*
+ * Get the command name hierarchy for the current process.
+ */
+const char *tr2_cmd_name_get_hierarchy(void);
+
+void tr2_cmd_name_release(void);
+
+#endif /* TR2_CMD_NAME_H */
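
For illustration only (the process names are assumed): a top-level `git fetch` exports its own name, and a spawned `git rev-list` appends to the inherited value, so the two processes would report:

    fetch:     GIT_TR2_PARENT_NAME unset        -> hierarchy "fetch"
    rev-list:  GIT_TR2_PARENT_NAME="fetch"      -> hierarchy "fetch/rev-list"
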
--- /dev/null
+#include "cache.h"
+#include "trace2/tr2_dst.h"
+
+/*
+ * If a Trace2 target cannot be opened for writing, we should issue a
+ * warning to stderr, but this is very annoying if the target is a pipe
+ * or socket and beyond the user's control -- especially since every
+ * git command (and sub-command) will print the message. So we silently
+ * eat these warnings and just discard the trace data.
+ *
+ * Enable the following environment variable to see these warnings.
+ */
+#define TR2_ENVVAR_DST_DEBUG "GIT_TR2_DST_DEBUG"
+
+static int tr2_dst_want_warning(void)
+{
+ static int tr2env_dst_debug = -1;
+
+ if (tr2env_dst_debug == -1) {
+ const char *env_value = getenv(TR2_ENVVAR_DST_DEBUG);
+ if (!env_value || !*env_value)
+ tr2env_dst_debug = 0;
+ else
+ tr2env_dst_debug = atoi(env_value) > 0;
+ }
+
+ return tr2env_dst_debug;
+}
+
+void tr2_dst_trace_disable(struct tr2_dst *dst)
+{
+ if (dst->need_close)
+ close(dst->fd);
+ dst->fd = 0;
+ dst->initialized = 1;
+ dst->need_close = 0;
+}
+
+static int tr2_dst_try_path(struct tr2_dst *dst, const char *tgt_value)
+{
+ int fd = open(tgt_value, O_WRONLY | O_APPEND | O_CREAT, 0666);
+ if (fd == -1) {
+ if (tr2_dst_want_warning())
+ warning("trace2: could not open '%s' for '%s' tracing: %s",
+ tgt_value, dst->env_var_name, strerror(errno));
+
+ tr2_dst_trace_disable(dst);
+ return 0;
+ }
+
+ dst->fd = fd;
+ dst->need_close = 1;
+ dst->initialized = 1;
+
+ return dst->fd;
+}
+
+#ifndef NO_UNIX_SOCKETS
+#define PREFIX_AF_UNIX "af_unix:"
+#define PREFIX_AF_UNIX_STREAM "af_unix:stream:"
+#define PREFIX_AF_UNIX_DGRAM "af_unix:dgram:"
+
+static int tr2_dst_try_uds_connect(const char *path, int sock_type, int *out_fd)
+{
+ int fd;
+ struct sockaddr_un sa;
+
+ fd = socket(AF_UNIX, sock_type, 0);
+ if (fd == -1)
+ return errno;
+
+ sa.sun_family = AF_UNIX;
+ strlcpy(sa.sun_path, path, sizeof(sa.sun_path));
+
+ if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) == -1) {
+ int e = errno;
+ close(fd);
+ return e;
+ }
+
+ *out_fd = fd;
+ return 0;
+}
+
+#define TR2_DST_UDS_TRY_STREAM (1 << 0)
+#define TR2_DST_UDS_TRY_DGRAM (1 << 1)
+
+static int tr2_dst_try_unix_domain_socket(struct tr2_dst *dst,
+ const char *tgt_value)
+{
+ unsigned int uds_try = 0;
+ int fd;
+ int e;
+ const char *path = NULL;
+
+ /*
+ * Allow "af_unix:[<type>:]<absolute_path>"
+ *
+ * Trace2 always writes complete individual messages (without
+ * chunking), so we can talk to either DGRAM or STREAM type sockets.
+ *
+ * Allow the user to explicitly request the socket type.
+ *
+ * If they omit the socket type, try one and then the other.
+ */
+
+ if (skip_prefix(tgt_value, PREFIX_AF_UNIX_STREAM, &path))
+ uds_try |= TR2_DST_UDS_TRY_STREAM;
+
+ else if (skip_prefix(tgt_value, PREFIX_AF_UNIX_DGRAM, &path))
+ uds_try |= TR2_DST_UDS_TRY_DGRAM;
+
+ else if (skip_prefix(tgt_value, PREFIX_AF_UNIX, &path))
+ uds_try |= TR2_DST_UDS_TRY_STREAM | TR2_DST_UDS_TRY_DGRAM;
+
+ if (!path || !*path) {
+ if (tr2_dst_want_warning())
+ warning("trace2: invalid AF_UNIX value '%s' for '%s' tracing",
+ tgt_value, dst->env_var_name);
+
+ tr2_dst_trace_disable(dst);
+ return 0;
+ }
+
+ if (!is_absolute_path(path) ||
+ strlen(path) >= sizeof(((struct sockaddr_un *)0)->sun_path)) {
+ if (tr2_dst_want_warning())
+ warning("trace2: invalid AF_UNIX path '%s' for '%s' tracing",
+ path, dst->env_var_name);
+
+ tr2_dst_trace_disable(dst);
+ return 0;
+ }
+
+ if (uds_try & TR2_DST_UDS_TRY_STREAM) {
+ e = tr2_dst_try_uds_connect(path, SOCK_STREAM, &fd);
+ if (!e)
+ goto connected;
+ if (e != EPROTOTYPE)
+ goto error;
+ }
+ if (uds_try & TR2_DST_UDS_TRY_DGRAM) {
+ e = tr2_dst_try_uds_connect(path, SOCK_DGRAM, &fd);
+ if (!e)
+ goto connected;
+ }
+
+error:
+ if (tr2_dst_want_warning())
+ warning("trace2: could not connect to socket '%s' for '%s' tracing: %s",
+ path, dst->env_var_name, strerror(e));
+
+ tr2_dst_trace_disable(dst);
+ return 0;
+
+connected:
+ dst->fd = fd;
+ dst->need_close = 1;
+ dst->initialized = 1;
+
+ return dst->fd;
+}
+#endif
+
+static void tr2_dst_malformed_warning(struct tr2_dst *dst,
+ const char *tgt_value)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ strbuf_addf(&buf, "trace2: unknown value for '%s': '%s'",
+ dst->env_var_name, tgt_value);
+ warning("%s", buf.buf);
+
+ strbuf_release(&buf);
+}
+
+int tr2_dst_get_trace_fd(struct tr2_dst *dst)
+{
+ const char *tgt_value;
+
+ /* don't open twice */
+ if (dst->initialized)
+ return dst->fd;
+
+ dst->initialized = 1;
+
+ tgt_value = getenv(dst->env_var_name);
+
+ if (!tgt_value || !strcmp(tgt_value, "") || !strcmp(tgt_value, "0") ||
+ !strcasecmp(tgt_value, "false")) {
+ dst->fd = 0;
+ return dst->fd;
+ }
+
+ if (!strcmp(tgt_value, "1") || !strcasecmp(tgt_value, "true")) {
+ dst->fd = STDERR_FILENO;
+ return dst->fd;
+ }
+
+ if (strlen(tgt_value) == 1 && isdigit(*tgt_value)) {
+ dst->fd = atoi(tgt_value);
+ return dst->fd;
+ }
+
+ if (is_absolute_path(tgt_value))
+ return tr2_dst_try_path(dst, tgt_value);
+
+#ifndef NO_UNIX_SOCKETS
+ if (starts_with(tgt_value, PREFIX_AF_UNIX))
+ return tr2_dst_try_unix_domain_socket(dst, tgt_value);
+#endif
+
+ /* Always warn about malformed values. */
+ tr2_dst_malformed_warning(dst, tgt_value);
+ tr2_dst_trace_disable(dst);
+ return 0;
+}
+
+int tr2_dst_trace_want(struct tr2_dst *dst)
+{
+ return !!tr2_dst_get_trace_fd(dst);
+}
+
+void tr2_dst_write_line(struct tr2_dst *dst, struct strbuf *buf_line)
+{
+ int fd = tr2_dst_get_trace_fd(dst);
+
+ strbuf_complete_line(buf_line); /* ensure final NL on buffer */
+
+ /*
+ * We do not use write_in_full() because we do not want
+ * a short-write to try again. We are using O_APPEND mode
+ * files and the kernel handles the atomic seek+write. If
+ * another thread or git process is concurrently writing to
+ * this fd or file, our remainder-write may not be contiguous
+ * with our initial write of this message. And that will
+ * confuse readers. So just don't bother.
+ *
+ * It is assumed that TRACE2 messages are short enough that
+ * the system can write them in 1 attempt and we won't see
+ * a short-write.
+ *
+ * If we get an IO error, just close the trace dst.
+ */
+ if (write(fd, buf_line->buf, buf_line->len) >= 0)
+ return;
+
+ if (tr2_dst_want_warning())
+ warning("unable to write trace to '%s': %s", dst->env_var_name,
+ strerror(errno));
+ tr2_dst_trace_disable(dst);
+}
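
To summarize the parsing above, each destination variable (GIT_TR2, GIT_TR2_PERF, or GIT_TR2_EVENT, defined with the individual targets below) accepts roughly the following values:

    "" / "0" / "false"             -> tracing disabled
    "1" / "true"                   -> write to stderr
    a single digit, e.g. "2".."9"  -> write to that already-open file descriptor
    an absolute pathname           -> open the file in append mode
    "af_unix:[stream:|dgram:]<absolute-path>"
                                   -> connect to a Unix-domain socket
                                      (only when Unix sockets are supported)
    anything else                  -> warn and disable tracing
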
--- /dev/null
+#ifndef TR2_DST_H
+#define TR2_DST_H
+
+struct strbuf;
+
+struct tr2_dst {
+ const char *const env_var_name;
+ int fd;
+ unsigned int initialized : 1;
+ unsigned int need_close : 1;
+};
+
+/*
+ * Disable TRACE2 on the destination. In TRACE2 a destination (DST)
+ * wraps a file descriptor; it is associated with a TARGET which
+ * defines the formatting.
+ */
+void tr2_dst_trace_disable(struct tr2_dst *dst);
+
+/*
+ * Return the file descriptor for the DST.
+ * If 0, the dst is closed or disabled.
+ */
+int tr2_dst_get_trace_fd(struct tr2_dst *dst);
+
+/*
+ * Return true if the DST is opened for writing.
+ */
+int tr2_dst_trace_want(struct tr2_dst *dst);
+
+/*
+ * Write a single line/message to the trace file.
+ */
+void tr2_dst_write_line(struct tr2_dst *dst, struct strbuf *buf_line);
+
+#endif /* TR2_DST_H */
--- /dev/null
+#include "cache.h"
+#include "trace2/tr2_sid.h"
+
+#define TR2_ENVVAR_PARENT_SID "GIT_TR2_PARENT_SID"
+
+static struct strbuf tr2sid_buf = STRBUF_INIT;
+static int tr2sid_nr_git_parents;
+
+/*
+ * Compute a "unique" session id (SID) for the current process. This allows
+ * all events from this process to have a single label (much like a PID).
+ *
+ * Export this into our environment so that all child processes inherit it.
+ *
+ * If we were started by another git instance, use our parent's SID as a
+ * prefix. (This lets us track parent/child relationships even if there
+ * is an intermediate shell process.)
+ *
+ * Additionally, count the number of nested git processes.
+ */
+static void tr2_sid_compute(void)
+{
+ uint64_t us_now;
+ const char *parent_sid;
+
+ if (tr2sid_buf.len)
+ return;
+
+ parent_sid = getenv(TR2_ENVVAR_PARENT_SID);
+ if (parent_sid && *parent_sid) {
+ const char *p;
+ for (p = parent_sid; *p; p++)
+ if (*p == '/')
+ tr2sid_nr_git_parents++;
+
+ strbuf_addstr(&tr2sid_buf, parent_sid);
+ strbuf_addch(&tr2sid_buf, '/');
+ tr2sid_nr_git_parents++;
+ }
+
+ us_now = getnanotime() / 1000;
+ strbuf_addf(&tr2sid_buf, "%" PRIuMAX "-%" PRIdMAX, (uintmax_t)us_now,
+ (intmax_t)getpid());
+
+ setenv(TR2_ENVVAR_PARENT_SID, tr2sid_buf.buf, 1);
+}
+
+const char *tr2_sid_get(void)
+{
+ if (!tr2sid_buf.len)
+ tr2_sid_compute();
+
+ return tr2sid_buf.buf;
+}
+
+int tr2_sid_depth(void)
+{
+ if (!tr2sid_buf.len)
+ tr2_sid_compute();
+
+ return tr2sid_nr_git_parents;
+}
+
+void tr2_sid_release(void)
+{
+ strbuf_release(&tr2sid_buf);
+}
--- /dev/null
+#ifndef TR2_SID_H
+#define TR2_SID_H
+
+/*
+ * Get our session id. Compute if necessary.
+ */
+const char *tr2_sid_get(void);
+
+/*
+ * Get our process depth. A top-level git process invoked from the
+ * command line will have depth=0. A child git process will have
+ * depth=1 and so on.
+ */
+int tr2_sid_depth(void);
+
+void tr2_sid_release(void);
+
+#endif /* TR2_SID_H */
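
As an assumed illustration of the SID format computed above (a microsecond timestamp, a dash, the pid, with a parent's SID prefixed and joined by '/'):

    top-level git process:  1554928868519716-12345
    child git process:      1554928868519716-12345/1554928868523042-12377
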
--- /dev/null
+#include "cache.h"
+#include "tr2_tbuf.h"
+
+void tr2_tbuf_local_time(struct tr2_tbuf *tb)
+{
+ struct timeval tv;
+ struct tm tm;
+ time_t secs;
+
+ gettimeofday(&tv, NULL);
+ secs = tv.tv_sec;
+ localtime_r(&secs, &tm);
+
+ xsnprintf(tb->buf, sizeof(tb->buf), "%02d:%02d:%02d.%06ld", tm.tm_hour,
+ tm.tm_min, tm.tm_sec, (long)tv.tv_usec);
+}
+
+void tr2_tbuf_utc_time(struct tr2_tbuf *tb)
+{
+ struct timeval tv;
+ struct tm tm;
+ time_t secs;
+
+ gettimeofday(&tv, NULL);
+ secs = tv.tv_sec;
+ gmtime_r(&secs, &tm);
+
+ xsnprintf(tb->buf, sizeof(tb->buf),
+ "%4d-%02d-%02d %02d:%02d:%02d.%06ld", tm.tm_year + 1900,
+ tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec,
+ (long)tv.tv_usec);
+}
--- /dev/null
+#ifndef TR2_TBUF_H
+#define TR2_TBUF_H
+
+/*
+ * A simple wrapper around a fixed buffer to avoid C syntax
+ * quirks and the need to pass around an additional size_t
+ * argument.
+ */
+struct tr2_tbuf {
+ char buf[32];
+};
+
+/*
+ * Fill buffer with formatted local time string.
+ */
+void tr2_tbuf_local_time(struct tr2_tbuf *tb);
+
+/*
+ * Fill buffer with formatted UTC time string.
+ */
+void tr2_tbuf_utc_time(struct tr2_tbuf *tb);
+
+#endif /* TR2_TBUF_H */
--- /dev/null
+#ifndef TR2_TGT_H
+#define TR2_TGT_H
+
+struct child_process;
+struct repository;
+struct json_writer;
+
+/*
+ * Function prototypes for a TRACE2 "target" vtable.
+ */
+
+typedef int(tr2_tgt_init_t)(void);
+typedef void(tr2_tgt_term_t)(void);
+
+typedef void(tr2_tgt_evt_version_fl_t)(const char *file, int line);
+
+typedef void(tr2_tgt_evt_start_fl_t)(const char *file, int line,
+ const char **argv);
+typedef void(tr2_tgt_evt_exit_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute, int code);
+typedef void(tr2_tgt_evt_signal_t)(uint64_t us_elapsed_absolute, int signo);
+typedef void(tr2_tgt_evt_atexit_t)(uint64_t us_elapsed_absolute, int code);
+
+typedef void(tr2_tgt_evt_error_va_fl_t)(const char *file, int line,
+ const char *fmt, va_list ap);
+
+typedef void(tr2_tgt_evt_command_path_fl_t)(const char *file, int line,
+ const char *command_path);
+typedef void(tr2_tgt_evt_command_name_fl_t)(const char *file, int line,
+ const char *name,
+ const char *hierarchy);
+typedef void(tr2_tgt_evt_command_mode_fl_t)(const char *file, int line,
+ const char *mode);
+
+typedef void(tr2_tgt_evt_alias_fl_t)(const char *file, int line,
+ const char *alias, const char **argv);
+
+typedef void(tr2_tgt_evt_child_start_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ const struct child_process *cmd);
+typedef void(tr2_tgt_evt_child_exit_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute, int cid,
+ int pid, int code,
+ uint64_t us_elapsed_child);
+
+typedef void(tr2_tgt_evt_thread_start_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute);
+typedef void(tr2_tgt_evt_thread_exit_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_thread);
+
+typedef void(tr2_tgt_evt_exec_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute, int exec_id,
+ const char *exe, const char **argv);
+typedef void(tr2_tgt_evt_exec_result_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ int exec_id, int code);
+
+typedef void(tr2_tgt_evt_param_fl_t)(const char *file, int line,
+ const char *param, const char *value);
+
+typedef void(tr2_tgt_evt_repo_fl_t)(const char *file, int line,
+ const struct repository *repo);
+
+typedef void(tr2_tgt_evt_region_enter_printf_va_fl_t)(
+ const char *file, int line, uint64_t us_elapsed_absolute,
+ const char *category, const char *label, const struct repository *repo,
+ const char *fmt, va_list ap);
+typedef void(tr2_tgt_evt_region_leave_printf_va_fl_t)(
+ const char *file, int line, uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region, const char *category, const char *label,
+ const struct repository *repo, const char *fmt, va_list ap);
+
+typedef void(tr2_tgt_evt_data_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region,
+ const char *category,
+ const struct repository *repo,
+ const char *key, const char *value);
+typedef void(tr2_tgt_evt_data_json_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region,
+ const char *category,
+ const struct repository *repo,
+ const char *key,
+ const struct json_writer *value);
+
+typedef void(tr2_tgt_evt_printf_va_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ const char *fmt, va_list ap);
+
+/*
+ * "vtable" for a TRACE2 target. Use NULL if a target does not want
+ * to emit that message.
+ */
+/* clang-format off */
+struct tr2_tgt {
+ struct tr2_dst *pdst;
+
+ tr2_tgt_init_t *pfn_init;
+ tr2_tgt_term_t *pfn_term;
+
+ tr2_tgt_evt_version_fl_t *pfn_version_fl;
+ tr2_tgt_evt_start_fl_t *pfn_start_fl;
+ tr2_tgt_evt_exit_fl_t *pfn_exit_fl;
+ tr2_tgt_evt_signal_t *pfn_signal;
+ tr2_tgt_evt_atexit_t *pfn_atexit;
+ tr2_tgt_evt_error_va_fl_t *pfn_error_va_fl;
+ tr2_tgt_evt_command_path_fl_t *pfn_command_path_fl;
+ tr2_tgt_evt_command_name_fl_t *pfn_command_name_fl;
+ tr2_tgt_evt_command_mode_fl_t *pfn_command_mode_fl;
+ tr2_tgt_evt_alias_fl_t *pfn_alias_fl;
+ tr2_tgt_evt_child_start_fl_t *pfn_child_start_fl;
+ tr2_tgt_evt_child_exit_fl_t *pfn_child_exit_fl;
+ tr2_tgt_evt_thread_start_fl_t *pfn_thread_start_fl;
+ tr2_tgt_evt_thread_exit_fl_t *pfn_thread_exit_fl;
+ tr2_tgt_evt_exec_fl_t *pfn_exec_fl;
+ tr2_tgt_evt_exec_result_fl_t *pfn_exec_result_fl;
+ tr2_tgt_evt_param_fl_t *pfn_param_fl;
+ tr2_tgt_evt_repo_fl_t *pfn_repo_fl;
+ tr2_tgt_evt_region_enter_printf_va_fl_t *pfn_region_enter_printf_va_fl;
+ tr2_tgt_evt_region_leave_printf_va_fl_t *pfn_region_leave_printf_va_fl;
+ tr2_tgt_evt_data_fl_t *pfn_data_fl;
+ tr2_tgt_evt_data_json_fl_t *pfn_data_json_fl;
+ tr2_tgt_evt_printf_va_fl_t *pfn_printf_va_fl;
+};
+/* clang-format on */
+
+extern struct tr2_tgt tr2_tgt_event;
+extern struct tr2_tgt tr2_tgt_normal;
+extern struct tr2_tgt tr2_tgt_perf;
+
+#endif /* TR2_TGT_H */
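
As a sketch only: a new target supplies a tr2_dst plus one of these vtables, leaving NULL for every event it ignores. The env var name and identifiers below are hypothetical, and a real target would additionally have to be registered with the dispatch code in trace2.c, which is outside this excerpt.

    #include "cache.h"
    #include "trace2/tr2_dst.h"
    #include "trace2/tr2_tgt.h"

    /* Hypothetical destination controlled by a made-up env var. */
    static struct tr2_dst tr2dst_example = { "GIT_TR2_EXAMPLE", 0, 0, 0 };

    static int fn_init(void)
    {
            return tr2_dst_trace_want(&tr2dst_example);
    }

    static void fn_term(void)
    {
            tr2_dst_trace_disable(&tr2dst_example);
    }

    /* Only init/term are implemented; every event pointer is left NULL. */
    struct tr2_tgt tr2_tgt_example = {
            &tr2dst_example,

            fn_init,
            fn_term,

            NULL, /* version */
            NULL, /* start */
            NULL, /* exit */
            NULL, /* signal */
            NULL, /* atexit */
            NULL, /* error */
            NULL, /* command_path */
            NULL, /* command_name */
            NULL, /* command_mode */
            NULL, /* alias */
            NULL, /* child_start */
            NULL, /* child_exit */
            NULL, /* thread_start */
            NULL, /* thread_exit */
            NULL, /* exec */
            NULL, /* exec_result */
            NULL, /* param */
            NULL, /* repo */
            NULL, /* region_enter */
            NULL, /* region_leave */
            NULL, /* data */
            NULL, /* data_json */
            NULL, /* printf */
    };
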
--- /dev/null
+#include "cache.h"
+#include "config.h"
+#include "json-writer.h"
+#include "run-command.h"
+#include "version.h"
+#include "trace2/tr2_dst.h"
+#include "trace2/tr2_tbuf.h"
+#include "trace2/tr2_sid.h"
+#include "trace2/tr2_tgt.h"
+#include "trace2/tr2_tls.h"
+
+static struct tr2_dst tr2dst_event = { "GIT_TR2_EVENT", 0, 0, 0 };
+
+/*
+ * The version number of the JSON data generated by the EVENT target
+ * in this source file. Update this if you make a significant change
+ * to the JSON fields or message structure. You probably do not need
+ * to update this if you just add another call to one of the existing
+ * TRACE2 API methods.
+ */
+#define TR2_EVENT_VERSION "1"
+
+/*
+ * Region nesting limit for messages written to the event target.
+ *
+ * The "region_enter" and "region_leave" messages (especially recursive
+ * messages such as those produced while diving the worktree or index)
+ * are primarily intended for the performance target during debugging.
+ *
+ * Some of the outer-most messages, however, may be of interest to the
+ * event target. Set this environment variable to a larger integer for
+ * more detail in the event target.
+ */
+#define TR2_ENVVAR_EVENT_NESTING "GIT_TR2_EVENT_NESTING"
+static int tr2env_event_nesting_wanted = 2;
+
+/*
+ * Set this environment variable to true to omit the <time>, <file>, and
+ * <line> fields from most events.
+ */
+#define TR2_ENVVAR_EVENT_BRIEF "GIT_TR2_EVENT_BRIEF"
+static int tr2env_event_brief;
+
+static int fn_init(void)
+{
+ int want = tr2_dst_trace_want(&tr2dst_event);
+ int want_nesting;
+ int want_brief;
+ char *nesting;
+ char *brief;
+
+ if (!want)
+ return want;
+
+ nesting = getenv(TR2_ENVVAR_EVENT_NESTING);
+ if (nesting && ((want_nesting = atoi(nesting)) > 0))
+ tr2env_event_nesting_wanted = want_nesting;
+
+ brief = getenv(TR2_ENVVAR_EVENT_BRIEF);
+ if (brief && ((want_brief = atoi(brief)) > 0))
+ tr2env_event_brief = want_brief;
+
+ return want;
+}
+
+static void fn_term(void)
+{
+ tr2_dst_trace_disable(&tr2dst_event);
+}
+
+/*
+ * Append common key-value pairs to the currently open JSON object.
+ * "event:"<event_name>"
+ * "sid":"<sid>"
+ * "thread":"<thread_name>"
+ * "time":"<time>"
+ * "file":"<filename>"
+ * "line":<line_number>
+ * "repo":<repo_id>
+ */
+static void event_fmt_prepare(const char *event_name, const char *file,
+ int line, const struct repository *repo,
+ struct json_writer *jw)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ struct tr2_tbuf tb_now;
+
+ jw_object_string(jw, "event", event_name);
+ jw_object_string(jw, "sid", tr2_sid_get());
+ jw_object_string(jw, "thread", ctx->thread_name.buf);
+
+ /*
+ * In brief mode, only emit <time> on these 2 event types.
+ */
+ if (!tr2env_event_brief || !strcmp(event_name, "version") ||
+ !strcmp(event_name, "atexit")) {
+ tr2_tbuf_utc_time(&tb_now);
+ jw_object_string(jw, "time", tb_now.buf);
+ }
+
+ if (!tr2env_event_brief && file && *file) {
+ jw_object_string(jw, "file", file);
+ jw_object_intmax(jw, "line", line);
+ }
+
+ if (repo)
+ jw_object_intmax(jw, "repo", repo->trace2_repo_id);
+}
+
+static void fn_version_fl(const char *file, int line)
+{
+ const char *event_name = "version";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_string(&jw, "evt", TR2_EVENT_VERSION);
+ jw_object_string(&jw, "exe", git_version_string);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_start_fl(const char *file, int line, const char **argv)
+{
+ const char *event_name = "start";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_inline_begin_array(&jw, "argv");
+ jw_array_argv(&jw, argv);
+ jw_end(&jw);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_exit_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ int code)
+{
+ const char *event_name = "exit";
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_abs = (double)us_elapsed_absolute / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_double(&jw, "t_abs", 6, t_abs);
+ jw_object_intmax(&jw, "code", code);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_signal(uint64_t us_elapsed_absolute, int signo)
+{
+ const char *event_name = "signal";
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_abs = (double)us_elapsed_absolute / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, __FILE__, __LINE__, NULL, &jw);
+ jw_object_double(&jw, "t_abs", 6, t_abs);
+ jw_object_intmax(&jw, "signo", signo);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_atexit(uint64_t us_elapsed_absolute, int code)
+{
+ const char *event_name = "atexit";
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_abs = (double)us_elapsed_absolute / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, __FILE__, __LINE__, NULL, &jw);
+ jw_object_double(&jw, "t_abs", 6, t_abs);
+ jw_object_intmax(&jw, "code", code);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void maybe_add_string_va(struct json_writer *jw, const char *field_name,
+ const char *fmt, va_list ap)
+{
+ if (fmt && *fmt) {
+ va_list copy_ap;
+ struct strbuf buf = STRBUF_INIT;
+
+ va_copy(copy_ap, ap);
+ strbuf_vaddf(&buf, fmt, copy_ap);
+ va_end(copy_ap);
+
+ jw_object_string(jw, field_name, buf.buf);
+ strbuf_release(&buf);
+ return;
+ }
+}
+
+static void fn_error_va_fl(const char *file, int line, const char *fmt,
+ va_list ap)
+{
+ const char *event_name = "error";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ maybe_add_string_va(&jw, "msg", fmt, ap);
+ /*
+ * Also emit the format string as a field in case
+ * post-processors want to aggregate common error
+ * messages by type without argument fields (such
+ * as pathnames or branch names) cluttering it up.
+ */
+ if (fmt && *fmt)
+ jw_object_string(&jw, "fmt", fmt);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_command_path_fl(const char *file, int line, const char *pathname)
+{
+ const char *event_name = "cmd_path";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_string(&jw, "path", pathname);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_command_name_fl(const char *file, int line, const char *name,
+ const char *hierarchy)
+{
+ const char *event_name = "cmd_name";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_string(&jw, "name", name);
+ if (hierarchy && *hierarchy)
+ jw_object_string(&jw, "hierarchy", hierarchy);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_command_mode_fl(const char *file, int line, const char *mode)
+{
+ const char *event_name = "cmd_mode";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_string(&jw, "name", mode);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_alias_fl(const char *file, int line, const char *alias,
+ const char **argv)
+{
+ const char *event_name = "alias";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_string(&jw, "alias", alias);
+ jw_object_inline_begin_array(&jw, "argv");
+ jw_array_argv(&jw, argv);
+ jw_end(&jw);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_child_start_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ const struct child_process *cmd)
+{
+ const char *event_name = "child_start";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_intmax(&jw, "child_id", cmd->trace2_child_id);
+ if (cmd->trace2_hook_name) {
+ jw_object_string(&jw, "child_class", "hook");
+ jw_object_string(&jw, "hook_name", cmd->trace2_hook_name);
+ } else {
+ const char *child_class =
+ cmd->trace2_child_class ? cmd->trace2_child_class : "?";
+ jw_object_string(&jw, "child_class", child_class);
+ }
+ if (cmd->dir)
+ jw_object_string(&jw, "cd", cmd->dir);
+ jw_object_bool(&jw, "use_shell", cmd->use_shell);
+ jw_object_inline_begin_array(&jw, "argv");
+ if (cmd->git_cmd)
+ jw_array_string(&jw, "git");
+ jw_array_argv(&jw, cmd->argv);
+ jw_end(&jw);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_child_exit_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, int cid, int pid,
+ int code, uint64_t us_elapsed_child)
+{
+ const char *event_name = "child_exit";
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_rel = (double)us_elapsed_child / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_intmax(&jw, "child_id", cid);
+ jw_object_intmax(&jw, "pid", pid);
+ jw_object_intmax(&jw, "code", code);
+ jw_object_double(&jw, "t_rel", 6, t_rel);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+
+ jw_release(&jw);
+}
+
+static void fn_thread_start_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute)
+{
+ const char *event_name = "thread_start";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_thread_exit_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_thread)
+{
+ const char *event_name = "thread_exit";
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_rel = (double)us_elapsed_thread / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_double(&jw, "t_rel", 6, t_rel);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_exec_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ int exec_id, const char *exe, const char **argv)
+{
+ const char *event_name = "exec";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_intmax(&jw, "exec_id", exec_id);
+ if (exe)
+ jw_object_string(&jw, "exe", exe);
+ jw_object_inline_begin_array(&jw, "argv");
+ jw_array_argv(&jw, argv);
+ jw_end(&jw);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_exec_result_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, int exec_id,
+ int code)
+{
+ const char *event_name = "exec_result";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_intmax(&jw, "exec_id", exec_id);
+ jw_object_intmax(&jw, "code", code);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_param_fl(const char *file, int line, const char *param,
+ const char *value)
+{
+ const char *event_name = "def_param";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_string(&jw, "param", param);
+ jw_object_string(&jw, "value", value);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_repo_fl(const char *file, int line,
+ const struct repository *repo)
+{
+ const char *event_name = "def_repo";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, repo, &jw);
+ jw_object_string(&jw, "worktree", repo->worktree);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_region_enter_printf_va_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ const char *category,
+ const char *label,
+ const struct repository *repo,
+ const char *fmt, va_list ap)
+{
+ const char *event_name = "region_enter";
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ if (ctx->nr_open_regions <= tr2env_event_nesting_wanted) {
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, repo, &jw);
+ jw_object_intmax(&jw, "nesting", ctx->nr_open_regions);
+ if (category)
+ jw_object_string(&jw, "category", category);
+ if (label)
+ jw_object_string(&jw, "label", label);
+ maybe_add_string_va(&jw, "msg", fmt, ap);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+ }
+}
+
+static void fn_region_leave_printf_va_fl(
+ const char *file, int line, uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region, const char *category, const char *label,
+ const struct repository *repo, const char *fmt, va_list ap)
+{
+ const char *event_name = "region_leave";
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ if (ctx->nr_open_regions <= tr2env_event_nesting_wanted) {
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_rel = (double)us_elapsed_region / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, repo, &jw);
+ jw_object_double(&jw, "t_rel", 6, t_rel);
+ jw_object_intmax(&jw, "nesting", ctx->nr_open_regions);
+ if (category)
+ jw_object_string(&jw, "category", category);
+ if (label)
+ jw_object_string(&jw, "label", label);
+ maybe_add_string_va(&jw, "msg", fmt, ap);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+ }
+}
+
+static void fn_data_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region, const char *category,
+ const struct repository *repo, const char *key,
+ const char *value)
+{
+ const char *event_name = "data";
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ if (ctx->nr_open_regions <= tr2env_event_nesting_wanted) {
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_abs = (double)us_elapsed_absolute / 1000000.0;
+ double t_rel = (double)us_elapsed_region / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, repo, &jw);
+ jw_object_double(&jw, "t_abs", 6, t_abs);
+ jw_object_double(&jw, "t_rel", 6, t_rel);
+ jw_object_intmax(&jw, "nesting", ctx->nr_open_regions);
+ jw_object_string(&jw, "category", category);
+ jw_object_string(&jw, "key", key);
+ jw_object_string(&jw, "value", value);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+ }
+}
+
+static void fn_data_json_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region, const char *category,
+ const struct repository *repo, const char *key,
+ const struct json_writer *value)
+{
+ const char *event_name = "data_json";
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ if (ctx->nr_open_regions <= tr2env_event_nesting_wanted) {
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_abs = (double)us_elapsed_absolute / 1000000.0;
+ double t_rel = (double)us_elapsed_region / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, repo, &jw);
+ jw_object_double(&jw, "t_abs", 6, t_abs);
+ jw_object_double(&jw, "t_rel", 6, t_rel);
+ jw_object_intmax(&jw, "nesting", ctx->nr_open_regions);
+ jw_object_string(&jw, "category", category);
+ jw_object_string(&jw, "key", key);
+ jw_object_sub_jw(&jw, "value", value);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+ }
+}
+
+struct tr2_tgt tr2_tgt_event = {
+ &tr2dst_event,
+
+ fn_init,
+ fn_term,
+
+ fn_version_fl,
+ fn_start_fl,
+ fn_exit_fl,
+ fn_signal,
+ fn_atexit,
+ fn_error_va_fl,
+ fn_command_path_fl,
+ fn_command_name_fl,
+ fn_command_mode_fl,
+ fn_alias_fl,
+ fn_child_start_fl,
+ fn_child_exit_fl,
+ fn_thread_start_fl,
+ fn_thread_exit_fl,
+ fn_exec_fl,
+ fn_exec_result_fl,
+ fn_param_fl,
+ fn_repo_fl,
+ fn_region_enter_printf_va_fl,
+ fn_region_leave_printf_va_fl,
+ fn_data_fl,
+ fn_data_json_fl,
+ NULL, /* printf */
+};
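
For orientation, the 'version' event assembled by the functions above comes out as a single line of JSON, roughly like this (wrapped here for readability; the sid, thread, time, file, line, and version values are assumed):

    {"event":"version","sid":"1554928868519716-12345","thread":"main",
     "time":"2019-04-10 13:21:08.519716","file":"common-main.c","line":39,
     "evt":"1","exe":"2.21.0"}
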
--- /dev/null
+#include "cache.h"
+#include "config.h"
+#include "run-command.h"
+#include "quote.h"
+#include "version.h"
+#include "trace2/tr2_dst.h"
+#include "trace2/tr2_tbuf.h"
+#include "trace2/tr2_tgt.h"
+#include "trace2/tr2_tls.h"
+
+static struct tr2_dst tr2dst_normal = { "GIT_TR2", 0, 0, 0 };
+
+/*
+ * Set this environment variable to true to omit the "<time> <file>:<line>"
+ * fields from each line written to the builtin normal target.
+ *
+ * Unit tests may want to use this to help with testing.
+ */
+#define TR2_ENVVAR_NORMAL_BRIEF "GIT_TR2_BRIEF"
+static int tr2env_normal_brief;
+
+#define TR2FMT_NORMAL_FL_WIDTH (50)
+
+static int fn_init(void)
+{
+ int want = tr2_dst_trace_want(&tr2dst_normal);
+ int want_brief;
+ char *brief;
+
+ if (!want)
+ return want;
+
+ brief = getenv(TR2_ENVVAR_NORMAL_BRIEF);
+ if (brief && *brief &&
+ ((want_brief = git_parse_maybe_bool(brief)) != -1))
+ tr2env_normal_brief = want_brief;
+
+ return want;
+}
+
+static void fn_term(void)
+{
+ tr2_dst_trace_disable(&tr2dst_normal);
+}
+
+static void normal_fmt_prepare(const char *file, int line, struct strbuf *buf)
+{
+ strbuf_setlen(buf, 0);
+
+ if (!tr2env_normal_brief) {
+ struct tr2_tbuf tb_now;
+
+ tr2_tbuf_local_time(&tb_now);
+ strbuf_addstr(buf, tb_now.buf);
+ strbuf_addch(buf, ' ');
+
+ if (file && *file)
+ strbuf_addf(buf, "%s:%d ", file, line);
+ while (buf->len < TR2FMT_NORMAL_FL_WIDTH)
+ strbuf_addch(buf, ' ');
+ }
+}
+
+static void normal_io_write_fl(const char *file, int line,
+ const struct strbuf *buf_payload)
+{
+ struct strbuf buf_line = STRBUF_INIT;
+
+ normal_fmt_prepare(file, line, &buf_line);
+ strbuf_addbuf(&buf_line, buf_payload);
+ tr2_dst_write_line(&tr2dst_normal, &buf_line);
+ strbuf_release(&buf_line);
+}
+
+static void fn_version_fl(const char *file, int line)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "version %s", git_version_string);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_start_fl(const char *file, int line, const char **argv)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, "start ");
+ sq_quote_argv_pretty(&buf_payload, argv);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_exit_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ int code)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+ double elapsed = (double)us_elapsed_absolute / 1000000.0;
+
+ strbuf_addf(&buf_payload, "exit elapsed:%.6f code:%d", elapsed, code);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_signal(uint64_t us_elapsed_absolute, int signo)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+ double elapsed = (double)us_elapsed_absolute / 1000000.0;
+
+ strbuf_addf(&buf_payload, "signal elapsed:%.6f code:%d", elapsed,
+ signo);
+ normal_io_write_fl(__FILE__, __LINE__, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_atexit(uint64_t us_elapsed_absolute, int code)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+ double elapsed = (double)us_elapsed_absolute / 1000000.0;
+
+ strbuf_addf(&buf_payload, "atexit elapsed:%.6f code:%d", elapsed, code);
+ normal_io_write_fl(__FILE__, __LINE__, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void maybe_append_string_va(struct strbuf *buf, const char *fmt,
+ va_list ap)
+{
+ if (fmt && *fmt) {
+ va_list copy_ap;
+
+ va_copy(copy_ap, ap);
+ strbuf_vaddf(buf, fmt, copy_ap);
+ va_end(copy_ap);
+ return;
+ }
+}
+
+static void fn_error_va_fl(const char *file, int line, const char *fmt,
+ va_list ap)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, "error ");
+ maybe_append_string_va(&buf_payload, fmt, ap);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_command_path_fl(const char *file, int line, const char *pathname)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "cmd_path %s", pathname);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_command_name_fl(const char *file, int line, const char *name,
+ const char *hierarchy)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "cmd_name %s", name);
+ if (hierarchy && *hierarchy)
+ strbuf_addf(&buf_payload, " (%s)", hierarchy);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_command_mode_fl(const char *file, int line, const char *mode)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "cmd_mode %s", mode);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_alias_fl(const char *file, int line, const char *alias,
+ const char **argv)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "alias %s ->", alias);
+ sq_quote_argv_pretty(&buf_payload, argv);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_child_start_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ const struct child_process *cmd)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "child_start[%d] ", cmd->trace2_child_id);
+
+ if (cmd->dir) {
+ strbuf_addstr(&buf_payload, " cd");
+ sq_quote_buf_pretty(&buf_payload, cmd->dir);
+ strbuf_addstr(&buf_payload, "; ");
+ }
+
+ /*
+ * TODO if (cmd->env) { Consider dumping changes to environment. }
+ * See trace_add_env() in run-command.c as used by original trace.c
+ */
+
+ if (cmd->git_cmd)
+ strbuf_addstr(&buf_payload, "git");
+ sq_quote_argv_pretty(&buf_payload, cmd->argv);
+
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_child_exit_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, int cid, int pid,
+ int code, uint64_t us_elapsed_child)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+ double elapsed = (double)us_elapsed_child / 1000000.0;
+
+ strbuf_addf(&buf_payload, "child_exit[%d] pid:%d code:%d elapsed:%.6f",
+ cid, pid, code, elapsed);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_exec_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ int exec_id, const char *exe, const char **argv)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "exec[%d] ", exec_id);
+ if (exe)
+ strbuf_addstr(&buf_payload, exe);
+ sq_quote_argv_pretty(&buf_payload, argv);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_exec_result_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, int exec_id,
+ int code)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "exec_result[%d] code:%d", exec_id, code);
+ if (code > 0)
+ strbuf_addf(&buf_payload, " err:%s", strerror(code));
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_param_fl(const char *file, int line, const char *param,
+ const char *value)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "def_param %s=%s", param, value);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_repo_fl(const char *file, int line,
+ const struct repository *repo)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, "worktree ");
+ sq_quote_buf_pretty(&buf_payload, repo->worktree);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_printf_va_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, const char *fmt,
+ va_list ap)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ maybe_append_string_va(&buf_payload, fmt, ap);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+struct tr2_tgt tr2_tgt_normal = {
+ &tr2dst_normal,
+
+ fn_init,
+ fn_term,
+
+ fn_version_fl,
+ fn_start_fl,
+ fn_exit_fl,
+ fn_signal,
+ fn_atexit,
+ fn_error_va_fl,
+ fn_command_path_fl,
+ fn_command_name_fl,
+ fn_command_mode_fl,
+ fn_alias_fl,
+ fn_child_start_fl,
+ fn_child_exit_fl,
+ NULL, /* thread_start */
+ NULL, /* thread_exit */
+ fn_exec_fl,
+ fn_exec_result_fl,
+ fn_param_fl,
+ fn_repo_fl,
+ NULL, /* region_enter */
+ NULL, /* region_leave */
+ NULL, /* data */
+ NULL, /* data_json */
+ fn_printf_va_fl,
+};
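
For orientation, pointing GIT_TR2 at a file produces lines roughly like the following from the functions above (the timestamps and file:line values are assumed, the padding to column 50 is approximate, and with GIT_TR2_BRIEF set the leading time and location columns are omitted):

    13:21:08.519716 common-main.c:39                 version 2.21.0
    13:21:08.520102 git.c:415                        cmd_name status (status)
    13:21:08.530341 git.c:669                        exit elapsed:0.010625 code:0
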
--- /dev/null
+#include "cache.h"
+#include "config.h"
+#include "run-command.h"
+#include "quote.h"
+#include "version.h"
+#include "json-writer.h"
+#include "trace2/tr2_dst.h"
+#include "trace2/tr2_sid.h"
+#include "trace2/tr2_tbuf.h"
+#include "trace2/tr2_tgt.h"
+#include "trace2/tr2_tls.h"
+
+static struct tr2_dst tr2dst_perf = { "GIT_TR2_PERF", 0, 0, 0 };
+
+/*
+ * Set this environment variable to true to omit the "<time> <file>:<line>"
+ * fields from each line written to the builtin performance target.
+ *
+ * Unit tests may want to use this to help with testing.
+ */
+#define TR2_ENVVAR_PERF_BRIEF "GIT_TR2_PERF_BRIEF"
+static int tr2env_perf_brief;
+
+#define TR2FMT_PERF_FL_WIDTH (50)
+#define TR2FMT_PERF_MAX_EVENT_NAME (12)
+#define TR2FMT_PERF_REPO_WIDTH (4)
+#define TR2FMT_PERF_CATEGORY_WIDTH (10)
+
+#define TR2_DOTS_BUFFER_SIZE (100)
+#define TR2_INDENT (2)
+#define TR2_INDENT_LENGTH(ctx) (((ctx)->nr_open_regions - 1) * TR2_INDENT)
+
+static struct strbuf dots = STRBUF_INIT;
+
+static int fn_init(void)
+{
+ int want = tr2_dst_trace_want(&tr2dst_perf);
+ int want_brief;
+ char *brief;
+
+ if (!want)
+ return want;
+
+ strbuf_addchars(&dots, '.', TR2_DOTS_BUFFER_SIZE);
+
+ brief = getenv(TR2_ENVVAR_PERF_BRIEF);
+ if (brief && *brief &&
+ ((want_brief = git_parse_maybe_bool(brief)) != -1))
+ tr2env_perf_brief = want_brief;
+
+ return want;
+}
+
+static void fn_term(void)
+{
+ tr2_dst_trace_disable(&tr2dst_perf);
+
+ strbuf_release(&dots);
+}
+
+/*
+ * Format trace line prefix in human-readable classic format for
+ * the performance target:
+ * "[<time> [<file>:<line>] <bar>] <nr_parents> <bar>
+ * <thread_name> <bar> <event_name> <bar> [<repo>] <bar>
+ * [<elapsed_absolute>] [<elapsed_relative>] <bar>
+ * [<category>] <bar> [<dots>] "
+ */
+static void perf_fmt_prepare(const char *event_name,
+ struct tr2tls_thread_ctx *ctx, const char *file,
+ int line, const struct repository *repo,
+ uint64_t *p_us_elapsed_absolute,
+ uint64_t *p_us_elapsed_relative,
+ const char *category, struct strbuf *buf)
+{
+ int len;
+
+ strbuf_setlen(buf, 0);
+
+ if (!tr2env_perf_brief) {
+ struct tr2_tbuf tb_now;
+
+ tr2_tbuf_local_time(&tb_now);
+ strbuf_addstr(buf, tb_now.buf);
+ strbuf_addch(buf, ' ');
+
+ if (file && *file)
+ strbuf_addf(buf, "%s:%d ", file, line);
+ while (buf->len < TR2FMT_PERF_FL_WIDTH)
+ strbuf_addch(buf, ' ');
+
+ strbuf_addstr(buf, "| ");
+ }
+
+ strbuf_addf(buf, "d%d | ", tr2_sid_depth());
+ strbuf_addf(buf, "%-*s | %-*s | ", TR2_MAX_THREAD_NAME,
+ ctx->thread_name.buf, TR2FMT_PERF_MAX_EVENT_NAME,
+ event_name);
+
+ len = buf->len + TR2FMT_PERF_REPO_WIDTH;
+ if (repo)
+ strbuf_addf(buf, "r%d ", repo->trace2_repo_id);
+ while (buf->len < len)
+ strbuf_addch(buf, ' ');
+ strbuf_addstr(buf, "| ");
+
+ if (p_us_elapsed_absolute)
+ strbuf_addf(buf, "%9.6f | ",
+ ((double)(*p_us_elapsed_absolute)) / 1000000.0);
+ else
+ strbuf_addf(buf, "%9s | ", " ");
+
+ if (p_us_elapsed_relative)
+ strbuf_addf(buf, "%9.6f | ",
+ ((double)(*p_us_elapsed_relative)) / 1000000.0);
+ else
+ strbuf_addf(buf, "%9s | ", " ");
+
+ strbuf_addf(buf, "%-*s | ", TR2FMT_PERF_CATEGORY_WIDTH,
+ (category ? category : ""));
+
+ if (ctx->nr_open_regions > 0) {
+ int len_indent = TR2_INDENT_LENGTH(ctx);
+ while (len_indent > dots.len) {
+ strbuf_addbuf(buf, &dots);
+ len_indent -= dots.len;
+ }
+ strbuf_addf(buf, "%.*s", len_indent, dots.buf);
+ }
+}
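
For orientation, a line assembled by perf_fmt_prepare() above has one column per field in the preceding comment; an assumed example (values and column widths are approximate, not taken from a real run):

    13:21:08.519716 read-cache.c:1715                | d0 | main       | region_enter | r1  |  0.002900 |           | index      | label:do_read_index
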
+
+static void perf_io_write_fl(const char *file, int line, const char *event_name,
+ const struct repository *repo,
+ uint64_t *p_us_elapsed_absolute,
+ uint64_t *p_us_elapsed_relative,
+ const char *category,
+ const struct strbuf *buf_payload)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ struct strbuf buf_line = STRBUF_INIT;
+
+ perf_fmt_prepare(event_name, ctx, file, line, repo,
+ p_us_elapsed_absolute, p_us_elapsed_relative, category,
+ &buf_line);
+ strbuf_addbuf(&buf_line, buf_payload);
+ tr2_dst_write_line(&tr2dst_perf, &buf_line);
+ strbuf_release(&buf_line);
+}
+
+static void fn_version_fl(const char *file, int line)
+{
+ const char *event_name = "version";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, git_version_string);
+
+ perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_start_fl(const char *file, int line, const char **argv)
+{
+ const char *event_name = "start";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ sq_quote_argv_pretty(&buf_payload, argv);
+
+ perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_exit_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ int code)
+{
+ const char *event_name = "exit";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "code:%d", code);
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_signal(uint64_t us_elapsed_absolute, int signo)
+{
+ const char *event_name = "signal";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "signo:%d", signo);
+
+ perf_io_write_fl(__FILE__, __LINE__, event_name, NULL,
+ &us_elapsed_absolute, NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_atexit(uint64_t us_elapsed_absolute, int code)
+{
+ const char *event_name = "atexit";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "code:%d", code);
+
+ perf_io_write_fl(__FILE__, __LINE__, event_name, NULL,
+ &us_elapsed_absolute, NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void maybe_append_string_va(struct strbuf *buf, const char *fmt,
+ va_list ap)
+{
+ if (fmt && *fmt) {
+ va_list copy_ap;
+
+ va_copy(copy_ap, ap);
+ strbuf_vaddf(buf, fmt, copy_ap);
+ va_end(copy_ap);
+ return;
+ }
+}
+
+static void fn_error_va_fl(const char *file, int line, const char *fmt,
+ va_list ap)
+{
+ const char *event_name = "error";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ maybe_append_string_va(&buf_payload, fmt, ap);
+
+ perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_command_path_fl(const char *file, int line, const char *pathname)
+{
+ const char *event_name = "cmd_path";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, pathname);
+
+ perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_command_name_fl(const char *file, int line, const char *name,
+ const char *hierarchy)
+{
+ const char *event_name = "cmd_name";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, name);
+ if (hierarchy && *hierarchy)
+ strbuf_addf(&buf_payload, " (%s)", hierarchy);
+
+ perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_command_mode_fl(const char *file, int line, const char *mode)
+{
+ const char *event_name = "cmd_mode";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, mode);
+
+ perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_alias_fl(const char *file, int line, const char *alias,
+ const char **argv)
+{
+ const char *event_name = "alias";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "alias:%s argv:", alias);
+ sq_quote_argv_pretty(&buf_payload, argv);
+
+ perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_child_start_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ const struct child_process *cmd)
+{
+ const char *event_name = "child_start";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ if (cmd->trace2_hook_name) {
+ strbuf_addf(&buf_payload, "[ch%d] class:hook hook:%s",
+ cmd->trace2_child_id, cmd->trace2_hook_name);
+ } else {
+ const char *child_class =
+ cmd->trace2_child_class ? cmd->trace2_child_class : "?";
+ strbuf_addf(&buf_payload, "[ch%d] class:%s",
+ cmd->trace2_child_id, child_class);
+ }
+
+ if (cmd->dir) {
+ strbuf_addstr(&buf_payload, " cd:");
+ sq_quote_buf_pretty(&buf_payload, cmd->dir);
+ }
+
+ strbuf_addstr(&buf_payload, " argv:");
+ if (cmd->git_cmd)
+ strbuf_addstr(&buf_payload, " git");
+ sq_quote_argv_pretty(&buf_payload, cmd->argv);
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_child_exit_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, int cid, int pid,
+ int code, uint64_t us_elapsed_child)
+{
+ const char *event_name = "child_exit";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "[ch%d] pid:%d code:%d", cid, pid, code);
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ &us_elapsed_child, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_thread_start_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute)
+{
+ const char *event_name = "thread_start";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_thread_exit_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_thread)
+{
+ const char *event_name = "thread_exit";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ &us_elapsed_thread, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_exec_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ int exec_id, const char *exe, const char **argv)
+{
+ const char *event_name = "exec";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "id:%d ", exec_id);
+ strbuf_addstr(&buf_payload, "argv:");
+ if (exe)
+ strbuf_addf(&buf_payload, " %s", exe);
+ sq_quote_argv_pretty(&buf_payload, argv);
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_exec_result_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, int exec_id,
+ int code)
+{
+ const char *event_name = "exec_result";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "id:%d code:%d", exec_id, code);
+ if (code > 0)
+ strbuf_addf(&buf_payload, " err:%s", strerror(code));
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_param_fl(const char *file, int line, const char *param,
+ const char *value)
+{
+ const char *event_name = "def_param";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "%s:%s", param, value);
+
+ perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_repo_fl(const char *file, int line,
+ const struct repository *repo)
+{
+ const char *event_name = "def_repo";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, "worktree:");
+ sq_quote_buf_pretty(&buf_payload, repo->worktree);
+
+ perf_io_write_fl(file, line, event_name, repo, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_region_enter_printf_va_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ const char *category,
+ const char *label,
+ const struct repository *repo,
+ const char *fmt, va_list ap)
+{
+ const char *event_name = "region_enter";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ if (label)
+ strbuf_addf(&buf_payload, "label:%s ", label);
+ maybe_append_string_va(&buf_payload, fmt, ap);
+
+ perf_io_write_fl(file, line, event_name, repo, &us_elapsed_absolute,
+ NULL, category, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_region_leave_printf_va_fl(
+ const char *file, int line, uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region, const char *category, const char *label,
+ const struct repository *repo, const char *fmt, va_list ap)
+{
+ const char *event_name = "region_leave";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ if (label)
+ strbuf_addf(&buf_payload, "label:%s ", label);
+ maybe_append_string_va(&buf_payload, fmt, ap);
+
+ perf_io_write_fl(file, line, event_name, repo, &us_elapsed_absolute,
+ &us_elapsed_region, category, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_data_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region, const char *category,
+ const struct repository *repo, const char *key,
+ const char *value)
+{
+ const char *event_name = "data";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "%s:%s", key, value);
+
+ perf_io_write_fl(file, line, event_name, repo, &us_elapsed_absolute,
+ &us_elapsed_region, category, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_data_json_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region, const char *category,
+ const struct repository *repo, const char *key,
+ const struct json_writer *value)
+{
+ const char *event_name = "data_json";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "%s:%s", key, value->json.buf);
+
+ perf_io_write_fl(file, line, event_name, repo, &us_elapsed_absolute,
+ &us_elapsed_region, category, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_printf_va_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, const char *fmt,
+ va_list ap)
+{
+ const char *event_name = "printf";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ maybe_append_string_va(&buf_payload, fmt, ap);
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+struct tr2_tgt tr2_tgt_perf = {
+ &tr2dst_perf,
+
+ fn_init,
+ fn_term,
+
+ fn_version_fl,
+ fn_start_fl,
+ fn_exit_fl,
+ fn_signal,
+ fn_atexit,
+ fn_error_va_fl,
+ fn_command_path_fl,
+ fn_command_name_fl,
+ fn_command_mode_fl,
+ fn_alias_fl,
+ fn_child_start_fl,
+ fn_child_exit_fl,
+ fn_thread_start_fl,
+ fn_thread_exit_fl,
+ fn_exec_fl,
+ fn_exec_result_fl,
+ fn_param_fl,
+ fn_repo_fl,
+ fn_region_enter_printf_va_fl,
+ fn_region_leave_printf_va_fl,
+ fn_data_fl,
+ fn_data_json_fl,
+ fn_printf_va_fl,
+};
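The table above is the whole contract between the trace2 front end and this target: each public trace2 call simply walks the list of active targets and invokes the matching slot. Below is a minimal, self-contained sketch of that dispatch-over-a-table idea; the struct tr2_tgt layout and the tr2_tgt_perf/tr2_tgt_normal names come from the surrounding code, while my_tgt, emit_cmd_mode and the two *_cmd_mode helpers are invented purely for illustration.

/*
 * Hedged sketch of a cut-down "target vtable" dispatcher in the spirit
 * of trace2's tr2_tgt tables.  my_tgt, emit_cmd_mode, perf_cmd_mode and
 * normal_cmd_mode are hypothetical names; only the fan-out over a
 * NULL-terminated table mirrors the real code.
 */
#include <stdio.h>

struct my_tgt {
	const char *name;
	void (*pfn_command_mode)(const char *mode);
};

static void perf_cmd_mode(const char *mode)
{
	printf("perf: cmd_mode %s\n", mode);
}

static void normal_cmd_mode(const char *mode)
{
	printf("normal: cmd_mode %s\n", mode);
}

static struct my_tgt tgt_perf = { "perf", perf_cmd_mode };
static struct my_tgt tgt_normal = { "normal", normal_cmd_mode };

static struct my_tgt *targets[] = { &tgt_normal, &tgt_perf, NULL };

/* The public API call fans out to every target providing the slot. */
static void emit_cmd_mode(const char *mode)
{
	struct my_tgt **t;

	for (t = targets; *t; t++)
		if ((*t)->pfn_command_mode)
			(*t)->pfn_command_mode(mode);
}

int main(void)
{
	emit_cmd_mode("interactive");
	return 0;
}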
--- /dev/null
+#include "cache.h"
+#include "thread-utils.h"
+#include "trace2/tr2_tls.h"
+
+/*
+ * Initial size of the thread stack for nested regions.
+ * This is used to store nested region start times. Note that
+ * this stack is per-thread and not per-trace-key.
+ */
+#define TR2_REGION_NESTING_INITIAL_SIZE (100)
+
+static struct tr2tls_thread_ctx *tr2tls_thread_main;
+static uint64_t tr2tls_us_start_main;
+
+static pthread_mutex_t tr2tls_mutex;
+static pthread_key_t tr2tls_key;
+
+static int tr2_next_thread_id; /* modify under lock */
+
+struct tr2tls_thread_ctx *tr2tls_create_self(const char *thread_name)
+{
+ uint64_t us_now = getnanotime() / 1000;
+ struct tr2tls_thread_ctx *ctx = xcalloc(1, sizeof(*ctx));
+
+ /*
+ * Implicitly "tr2tls_push_self()" to capture the thread's start
+ * time in array_us_start[0]. For the main thread this gives us the
+ * application run time.
+ */
+ ctx->alloc = TR2_REGION_NESTING_INITIAL_SIZE;
+ ctx->array_us_start = (uint64_t *)xcalloc(ctx->alloc, sizeof(uint64_t));
+ ctx->array_us_start[ctx->nr_open_regions++] = us_now;
+
+ ctx->thread_id = tr2tls_locked_increment(&tr2_next_thread_id);
+
+ strbuf_init(&ctx->thread_name, 0);
+ if (ctx->thread_id)
+ strbuf_addf(&ctx->thread_name, "th%02d:", ctx->thread_id);
+ strbuf_addstr(&ctx->thread_name, thread_name);
+ if (ctx->thread_name.len > TR2_MAX_THREAD_NAME)
+ strbuf_setlen(&ctx->thread_name, TR2_MAX_THREAD_NAME);
+
+ pthread_setspecific(tr2tls_key, ctx);
+
+ return ctx;
+}
+
+struct tr2tls_thread_ctx *tr2tls_get_self(void)
+{
+ struct tr2tls_thread_ctx *ctx = pthread_getspecific(tr2tls_key);
+
+ /*
+ * If the thread-proc did not call trace2_thread_start(), we won't
+ * have any TLS data associated with the current thread. Fix it
+ * here and silently continue.
+ */
+ if (!ctx)
+ ctx = tr2tls_create_self("unknown");
+
+ return ctx;
+}
+
+int tr2tls_is_main_thread(void)
+{
+ struct tr2tls_thread_ctx *ctx = pthread_getspecific(tr2tls_key);
+
+ return ctx == tr2tls_thread_main;
+}
+
+void tr2tls_unset_self(void)
+{
+ struct tr2tls_thread_ctx *ctx;
+
+ ctx = tr2tls_get_self();
+
+ pthread_setspecific(tr2tls_key, NULL);
+
+ free(ctx->array_us_start);
+ free(ctx);
+}
+
+void tr2tls_push_self(uint64_t us_now)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+
+ ALLOC_GROW(ctx->array_us_start, ctx->nr_open_regions + 1, ctx->alloc);
+ ctx->array_us_start[ctx->nr_open_regions++] = us_now;
+}
+
+void tr2tls_pop_self(void)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+
+ if (!ctx->nr_open_regions)
+ BUG("no open regions in thread '%s'", ctx->thread_name.buf);
+
+ ctx->nr_open_regions--;
+}
+
+void tr2tls_pop_unwind_self(void)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+
+ while (ctx->nr_open_regions > 1)
+ tr2tls_pop_self();
+}
+
+uint64_t tr2tls_region_elasped_self(uint64_t us)
+{
+ struct tr2tls_thread_ctx *ctx;
+ uint64_t us_start;
+
+ ctx = tr2tls_get_self();
+ if (!ctx->nr_open_regions)
+ return 0;
+
+ us_start = ctx->array_us_start[ctx->nr_open_regions - 1];
+
+ return us - us_start;
+}
+
+uint64_t tr2tls_absolute_elapsed(uint64_t us)
+{
+ if (!tr2tls_thread_main)
+ return 0;
+
+ return us - tr2tls_us_start_main;
+}
+
+void tr2tls_init(void)
+{
+ pthread_key_create(&tr2tls_key, NULL);
+ init_recursive_mutex(&tr2tls_mutex);
+
+ tr2tls_thread_main = tr2tls_create_self("main");
+ /*
+ * Keep a copy of the absolute start time of the main thread
+ * in a fixed variable since other threads need to access it.
+ * This also eliminates the need to lock accesses to the main
+ * thread's array (because of reallocs).
+ */
+ tr2tls_us_start_main = tr2tls_thread_main->array_us_start[0];
+}
+
+void tr2tls_release(void)
+{
+ tr2tls_unset_self();
+ tr2tls_thread_main = NULL;
+
+ pthread_mutex_destroy(&tr2tls_mutex);
+ pthread_key_delete(tr2tls_key);
+}
+
+int tr2tls_locked_increment(int *p)
+{
+ int current_value;
+
+ pthread_mutex_lock(&tr2tls_mutex);
+ current_value = *p;
+ *p = current_value + 1;
+ pthread_mutex_unlock(&tr2tls_mutex);
+
+ return current_value;
+}
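tr2tls_locked_increment() is just a mutex-guarded post-increment used to hand out thread ids. For readers unfamiliar with the pattern, here is a standalone sketch of the same idea, independent of the tr2tls code; the names next_id and alloc_id are made up for the example (build with -pthread).

/*
 * Minimal sketch of the locked-increment pattern used above to hand
 * out thread ids.  next_id and alloc_id are invented names; this is
 * not part of trace2.  Link with -pthread.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t id_mutex = PTHREAD_MUTEX_INITIALIZER;
static int next_id;

static int alloc_id(void)
{
	int id;

	pthread_mutex_lock(&id_mutex);
	id = next_id++;		/* return the old value, like the real helper */
	pthread_mutex_unlock(&id_mutex);
	return id;
}

int main(void)
{
	printf("%d\n", alloc_id());	/* 0 */
	printf("%d\n", alloc_id());	/* 1 */
	return 0;
}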
--- /dev/null
+#ifndef TR2_TLS_H
+#define TR2_TLS_H
+
+#include "strbuf.h"
+
+/*
+ * Arbitrary limit for thread names for column alignment.
+ */
+#define TR2_MAX_THREAD_NAME (24)
+
+struct tr2tls_thread_ctx {
+ struct strbuf thread_name;
+ uint64_t *array_us_start;
+ int alloc;
+ int nr_open_regions; /* plays role of "nr" in ALLOC_GROW */
+ int thread_id;
+};
+
+/*
+ * Create TLS data for the current thread. This gives us a place to
+ * put per-thread data, such as thread start time, function nesting
+ * and a per-thread label for our messages.
+ *
+ * We assume the first thread is "main". Other threads are given
+ * non-zero thread-ids to help distinguish messages from concurrent
+ * threads.
+ *
+ * Truncate the thread name if necessary to help with column alignment
+ * in printf-style messages.
+ *
+ * In this and all following functions the term "self" refers to the
+ * current thread.
+ */
+struct tr2tls_thread_ctx *tr2tls_create_self(const char *thread_name);
+
+/*
+ * Get our TLS data.
+ */
+struct tr2tls_thread_ctx *tr2tls_get_self(void);
+
+/*
+ * Return true if the current thread is the main thread.
+ */
+int tr2tls_is_main_thread(void);
+
+/*
+ * Free our TLS data.
+ */
+void tr2tls_unset_self(void);
+
+/*
+ * Begin a new nested region and remember the start time.
+ */
+void tr2tls_push_self(uint64_t us_now);
+
+/*
+ * End the innermost nested region.
+ */
+void tr2tls_pop_self(void);
+
+/*
+ * Pop any extra (above the first) open regions on the current
+ * thread and discard. During a thread-exit, we should only
+ * have region[0] that was pushed in trace2_thread_start() if
+ * the thread exits normally.
+ */
+void tr2tls_pop_unwind_self(void);
+
+/*
+ * Compute the elapsed time between the start of the innermost
+ * region in the current thread and the given time (usually now).
+ */
+uint64_t tr2tls_region_elasped_self(uint64_t us);
+
+/*
+ * Compute the elapsed time between the start of the main thread
+ * and the given time (usually now). This is assumed to
+ * be the absolute run time of the process.
+ */
+uint64_t tr2tls_absolute_elapsed(uint64_t us);
+
+/*
+ * Initialize the tr2 TLS system.
+ */
+void tr2tls_init(void);
+
+/*
+ * Free all tr2 TLS resources.
+ */
+void tr2tls_release(void);
+
+/*
+ * Protected increment of an integer.
+ */
+int tr2tls_locked_increment(int *p);
+
+#endif /* TR2_TLS_H */
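The push/pop API above keeps one stack of start times per thread so that a region_leave event can report how long the innermost region took. A standalone sketch of that bookkeeping follows; it is not the tr2tls implementation, just the same idea with a fixed-size stack. The names region_enter, region_leave and now_us are invented, and now_us() is a trivial stand-in for getnanotime()/1000.

/*
 * Hedged sketch of per-thread nested-region timing.  A real
 * implementation (like tr2tls) keeps this stack in thread-local
 * storage and grows it on demand; here it is a single fixed array
 * with no overflow check, purely for illustration.
 */
#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define MAX_NESTING 100

static uint64_t us_start[MAX_NESTING];	/* per-thread in the real code */
static int nr_open;

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static void region_enter(void)
{
	us_start[nr_open++] = now_us();		/* like tr2tls_push_self() */
}

static uint64_t region_leave(void)
{
	uint64_t elapsed = now_us() - us_start[nr_open - 1];

	nr_open--;				/* like tr2tls_pop_self() */
	return elapsed;
}

int main(void)
{
	region_enter();
	region_enter();				/* nested */
	printf("inner: %luus\n", (unsigned long)region_leave());
	printf("outer: %luus\n", (unsigned long)region_leave());
	return 0;
}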
const struct trailer_info *info,
const struct process_trailer_options *opts)
{
+ size_t origlen = out->len;
size_t i;
/* If we want the whole block untouched, we can take the fast path. */
- if (!opts->only_trailers && !opts->unfold) {
+ if (!opts->only_trailers && !opts->unfold && !opts->filter && !opts->separator) {
strbuf_add(out, info->trailer_start,
info->trailer_end - info->trailer_start);
return;
struct strbuf val = STRBUF_INIT;
parse_trailer(&tok, &val, NULL, trailer, separator_pos);
- if (opts->unfold)
- unfold_value(&val);
-
- strbuf_addf(out, "%s: %s\n", tok.buf, val.buf);
+ if (!opts->filter || opts->filter(&tok, opts->filter_data)) {
+ if (opts->unfold)
+ unfold_value(&val);
+
+ if (opts->separator && out->len != origlen)
+ strbuf_addbuf(out, opts->separator);
+ if (!opts->value_only)
+ strbuf_addf(out, "%s: ", tok.buf);
+ strbuf_addbuf(out, &val);
+ if (!opts->separator)
+ strbuf_addch(out, '\n');
+ }
strbuf_release(&tok);
strbuf_release(&val);
} else if (!opts->only_trailers) {
+ if (opts->separator && out->len != origlen) {
+ strbuf_addbuf(out, opts->separator);
+ }
strbuf_addstr(out, trailer);
+ if (opts->separator) {
+ strbuf_rtrim(out);
+ }
}
}
int only_input;
int unfold;
int no_divider;
+ int value_only;
+ const struct strbuf *separator;
+ int (*filter)(const struct strbuf *, void *);
+ void *filter_data;
};
#define PROCESS_TRAILER_OPTIONS_INIT {0}
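The new filter, filter_data, value_only and separator fields let a caller pick individual trailers and control how the surviving entries are joined. Here is a standalone sketch of that callback-plus-userdata pattern; it does not use the real trailer or strbuf APIs, and the names trailer_opts, want_key and format_one are invented for the example.

/*
 * Hedged sketch of the filter-callback pattern the new options enable:
 * the caller supplies a predicate and an opaque pointer, and the
 * formatting loop emits only the entries the predicate accepts,
 * joining them with an optional separator.
 */
#include <stdio.h>
#include <string.h>

struct trailer_opts {
	int value_only;
	const char *separator;
	int (*filter)(const char *key, void *data);
	void *data;
};

static int want_key(const char *key, void *data)
{
	return !strcmp(key, (const char *)data);	/* keep only one key */
}

static void format_one(const struct trailer_opts *opts, int *first,
		       const char *key, const char *val)
{
	if (opts->filter && !opts->filter(key, opts->data))
		return;
	if (opts->separator && !*first)
		fputs(opts->separator, stdout);
	if (!opts->value_only)
		printf("%s: ", key);
	fputs(val, stdout);
	if (!opts->separator)
		putchar('\n');
	*first = 0;
}

int main(void)
{
	struct trailer_opts opts = {
		.value_only = 1,
		.separator = ", ",
		.filter = want_key,
		.data = (void *)"Signed-off-by",
	};
	int first = 1;

	format_one(&opts, &first, "Signed-off-by", "A U Thor <a@example.com>");
	format_one(&opts, &first, "Acked-by", "Someone Else <s@example.com>");
	format_one(&opts, &first, "Signed-off-by", "C O Mitter <c@example.com>");
	putchar('\n');
	return 0;
}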
argv_array_pushf(&helper->env_array, "%s=%s",
GIT_DIR_ENVIRONMENT, get_git_dir());
+ helper->trace2_child_class = helper->args.argv[0]; /* "remote-<name>" */
+
code = start_command(helper);
if (code < 0 && errno == ENOENT)
die(_("unable to find remote helper for '%s'"), data->name);
int ret = 0;
struct git_transport_data *data = transport->data;
struct ref *refs = NULL;
- char *dest = xstrdup(transport->url);
struct fetch_pack_args args;
struct ref *refs_tmp = NULL;
switch (data->version) {
case protocol_v2:
- refs = fetch_pack(&args, data->fd, data->conn,
+ refs = fetch_pack(&args, data->fd,
refs_tmp ? refs_tmp : transport->remote_refs,
- dest, to_fetch, nr_heads, &data->shallow,
+ to_fetch, nr_heads, &data->shallow,
&transport->pack_lockfile, data->version);
break;
case protocol_v1:
case protocol_v0:
- refs = fetch_pack(&args, data->fd, data->conn,
+ refs = fetch_pack(&args, data->fd,
refs_tmp ? refs_tmp : transport->remote_refs,
- dest, to_fetch, nr_heads, &data->shallow,
+ to_fetch, nr_heads, &data->shallow,
&transport->pack_lockfile, data->version);
break;
case protocol_unknown_version:
free_refs(refs_tmp);
free_refs(refs);
- free(dest);
return ret;
}
proc.argv = argv;
proc.in = -1;
+ proc.trace2_hook_name = "pre-push";
if (start_command(&proc)) {
finish_command(&proc);
{ 0x0E34, 0x0E3A },
{ 0x0E47, 0x0E4E },
{ 0x0EB1, 0x0EB1 },
-{ 0x0EB4, 0x0EB9 },
-{ 0x0EBB, 0x0EBC },
+{ 0x0EB4, 0x0EBC },
{ 0x0EC8, 0x0ECD },
{ 0x0F18, 0x0F19 },
{ 0x0F35, 0x0F35 },
{ 0xA980, 0xA982 },
{ 0xA9B3, 0xA9B3 },
{ 0xA9B6, 0xA9B9 },
-{ 0xA9BC, 0xA9BC },
+{ 0xA9BC, 0xA9BD },
{ 0xA9E5, 0xA9E5 },
{ 0xAA29, 0xAA2E },
{ 0xAA31, 0xAA32 },
{ 0x11727, 0x1172B },
{ 0x1182F, 0x11837 },
{ 0x11839, 0x1183A },
+{ 0x119D4, 0x119D7 },
+{ 0x119DA, 0x119DB },
+{ 0x119E0, 0x119E0 },
{ 0x11A01, 0x11A0A },
{ 0x11A33, 0x11A38 },
{ 0x11A3B, 0x11A3E },
{ 0x11D95, 0x11D95 },
{ 0x11D97, 0x11D97 },
{ 0x11EF3, 0x11EF4 },
+{ 0x13430, 0x13438 },
{ 0x16AF0, 0x16AF4 },
{ 0x16B30, 0x16B36 },
+{ 0x16F4F, 0x16F4F },
{ 0x16F8F, 0x16F92 },
{ 0x1BC9D, 0x1BC9E },
{ 0x1BCA0, 0x1BCA3 },
{ 0x1E01B, 0x1E021 },
{ 0x1E023, 0x1E024 },
{ 0x1E026, 0x1E02A },
+{ 0x1E130, 0x1E136 },
+{ 0x1E2EC, 0x1E2EF },
{ 0x1E8D0, 0x1E8D6 },
{ 0x1E944, 0x1E94A },
{ 0xE0001, 0xE0001 },
{ 0xFE68, 0xFE6B },
{ 0xFF01, 0xFF60 },
{ 0xFFE0, 0xFFE6 },
-{ 0x16FE0, 0x16FE1 },
-{ 0x17000, 0x187F1 },
+{ 0x16FE0, 0x16FE3 },
+{ 0x17000, 0x187F7 },
{ 0x18800, 0x18AF2 },
{ 0x1B000, 0x1B11E },
+{ 0x1B150, 0x1B152 },
+{ 0x1B164, 0x1B167 },
{ 0x1B170, 0x1B2FB },
{ 0x1F004, 0x1F004 },
{ 0x1F0CF, 0x1F0CF },
{ 0x1F680, 0x1F6C5 },
{ 0x1F6CC, 0x1F6CC },
{ 0x1F6D0, 0x1F6D2 },
+{ 0x1F6D5, 0x1F6D5 },
{ 0x1F6EB, 0x1F6EC },
-{ 0x1F6F4, 0x1F6F9 },
-{ 0x1F910, 0x1F93E },
-{ 0x1F940, 0x1F970 },
+{ 0x1F6F4, 0x1F6FA },
+{ 0x1F7E0, 0x1F7EB },
+{ 0x1F90D, 0x1F971 },
{ 0x1F973, 0x1F976 },
-{ 0x1F97A, 0x1F97A },
-{ 0x1F97C, 0x1F9A2 },
-{ 0x1F9B0, 0x1F9B9 },
-{ 0x1F9C0, 0x1F9C2 },
-{ 0x1F9D0, 0x1F9FF },
+{ 0x1F97A, 0x1F9A2 },
+{ 0x1F9A5, 0x1F9AA },
+{ 0x1F9AE, 0x1F9CA },
+{ 0x1F9CD, 0x1F9FF },
+{ 0x1FA70, 0x1FA73 },
+{ 0x1FA78, 0x1FA7A },
+{ 0x1FA80, 0x1FA82 },
+{ 0x1FA90, 0x1FA95 },
{ 0x20000, 0x2FFFD },
{ 0x30000, 0x3FFFD }
};
enum unpack_trees_error_types e,
const char *path)
{
+ if (o->quiet)
+ return -1;
+
if (!o->show_all_errors)
return error(ERRORMSG(o, e), super_prefixed(path));
flags |= SUBMODULE_MOVE_HEAD_FORCE;
if (submodule_move_head(ce->name, old_id, new_id, flags))
- return o->gently ? -1 :
- add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name);
+ return add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name);
return 0;
}
}
}
-/*
- * Unlink the last component and schedule the leading directories for
- * removal, such that empty directories get removed.
- */
-static void unlink_entry(const struct cache_entry *ce)
-{
- const struct submodule *sub = submodule_from_ce(ce);
- if (sub) {
- /* state.force is set at the caller. */
- submodule_move_head(ce->name, "HEAD", NULL,
- SUBMODULE_MOVE_HEAD_FORCE);
- }
- if (!check_leading_path(ce->name, ce_namelen(ce)))
- return;
- if (remove_or_warn(ce->ce_mode, ce->name))
- return;
- schedule_dir_for_removal(ce->name, ce_namelen(ce));
-}
-
static struct progress *get_progress(struct unpack_trees_options *o)
{
unsigned cnt = 0, total = 0;
unlink_entry(ce);
}
}
- remove_marked_cache_entries(index);
+ remove_marked_cache_entries(index, 0);
remove_scheduled_dirs();
if (should_update_submodules() && o->update && !o->dry_run)
* below.
*/
struct oid_array to_fetch = OID_ARRAY_INIT;
- int fetch_if_missing_store = fetch_if_missing;
- fetch_if_missing = 0;
for (i = 0; i < index->cache_nr; i++) {
struct cache_entry *ce = index->cache[i];
- if ((ce->ce_flags & CE_UPDATE) &&
- !S_ISGITLINK(ce->ce_mode)) {
- if (!has_object_file(&ce->oid))
- oid_array_append(&to_fetch, &ce->oid);
- }
+
+ if (!(ce->ce_flags & CE_UPDATE) ||
+ S_ISGITLINK(ce->ce_mode))
+ continue;
+ if (!oid_object_info_extended(the_repository, &ce->oid,
+ NULL,
+ OBJECT_INFO_FOR_PREFETCH))
+ continue;
+ oid_array_append(&to_fetch, &ce->oid);
}
if (to_fetch.nr)
fetch_objects(repository_format_partial_clone,
to_fetch.oid, to_fetch.nr);
- fetch_if_missing = fetch_if_missing_store;
oid_array_clear(&to_fetch);
}
for (i = 0; i < index->cache_nr; i++) {
* instead of ODB since we already know what these trees contain.
*/
static int traverse_by_cache_tree(int pos, int nr_entries, int nr_names,
- struct name_entry *names,
struct traverse_info *info)
{
struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
* unprocessed entries before 'pos'.
*/
bottom = o->cache_bottom;
- ret = traverse_by_cache_tree(pos, nr_entries, n, names, info);
+ ret = traverse_by_cache_tree(pos, nr_entries, n, info);
o->cache_bottom = bottom;
return ret;
}
static int unpack_failed(struct unpack_trees_options *o, const char *message)
{
discard_index(&o->result);
- if (!o->gently && !o->exiting_early) {
+ if (!o->quiet && !o->exiting_early) {
if (message)
return error("%s", message);
return -1;
WRITE_TREE_SILENT |
WRITE_TREE_REPAIR);
}
+
+ o->result.updated_workdir = 1;
discard_index(o->dst_index);
*o->dst_index = o->result;
} else {
static int reject_merge(const struct cache_entry *ce,
struct unpack_trees_options *o)
{
- return o->gently ? -1 :
- add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
+ return add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
}
static int same(const struct cache_entry *a, const struct cache_entry *b)
int r = check_submodule_move_head(ce,
"HEAD", oid_to_hex(&ce->oid), o);
if (r)
- return o->gently ? -1 :
- add_rejected_path(o, error_type, ce->name);
+ return add_rejected_path(o, error_type, ce->name);
return 0;
}
}
if (errno == ENOENT)
return 0;
- return o->gently ? -1 :
- add_rejected_path(o, error_type, ce->name);
+ return add_rejected_path(o, error_type, ce->name);
}
int verify_uptodate(const struct cache_entry *ce,
*/
static int verify_clean_submodule(const char *old_sha1,
const struct cache_entry *ce,
- enum unpack_trees_error_types error_type,
struct unpack_trees_options *o)
{
if (!submodule_from_ce(ce))
}
static int verify_clean_subdirectory(const struct cache_entry *ce,
- enum unpack_trees_error_types error_type,
struct unpack_trees_options *o)
{
/*
if (!sub_head && oideq(&oid, &ce->oid))
return 0;
return verify_clean_submodule(sub_head ? NULL : oid_to_hex(&oid),
- ce, error_type, o);
+ ce, o);
}
/*
d.exclude_per_dir = o->dir->exclude_per_dir;
i = read_directory(&d, o->src_index, pathbuf, namelen+1, NULL);
if (i)
- return o->gently ? -1 :
- add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);
+ return add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);
free(pathbuf);
return cnt;
}
* files that are in "foo/" we would lose
* them.
*/
- if (verify_clean_subdirectory(ce, error_type, o) < 0)
+ if (verify_clean_subdirectory(ce, o) < 0)
return -1;
return 0;
}
return 0;
}
- return o->gently ? -1 :
- add_rejected_path(o, error_type, name);
+ return add_rejected_path(o, error_type, name);
}
/*
return error("Cannot do a bind merge of %d trees",
o->merge_size);
if (a && old)
- return o->gently ? -1 :
+ return o->quiet ? -1 :
error(ERRORMSG(o, ERROR_BIND_OVERLAP),
super_prefixed(a->name),
super_prefixed(old->name));
if (o->update && S_ISGITLINK(old->ce_mode) &&
should_update_submodules() && !verify_uptodate(old, o))
update |= CE_UPDATE;
- add_entry(o, old, update, 0);
+ add_entry(o, old, update, CE_STAGEMASK);
return 0;
}
return merged_entry(a, old, o);
diff_index_cached,
debug_unpack,
skip_sparse_checkout,
- gently,
+ quiet,
exiting_early,
show_all_errors,
dry_run;
static NORETURN void usage_builtin(const char *err, va_list params)
{
vreportf("usage: ", err, params);
+
+ /*
+ * When we detect a usage error *before* the command dispatch in
+ * cmd_main(), we don't know what verb to report. Force it to this
+ * to facilitate post-processing.
+ */
+ trace2_cmd_name("_usage_");
+
+ /*
+ * Currently, the (err, params) are usually just the static usage
+ * string which isn't very useful here. Usually, the call site
+ * manually calls fprintf(stderr,...) with the actual detailed
+ * syntax error before calling usage().
+ *
+ * TODO It would be nice to update the call sites to pass both
+ * the static usage string and the detailed error message.
+ */
+
exit(129);
}
static NORETURN void die_builtin(const char *err, va_list params)
{
+ /*
+ * We call this trace2 function first and expect it to va_copy 'params'
+ * before using it (because an 'ap' can only be walked once).
+ */
+ trace2_cmd_error_va(err, params);
+
vreportf("fatal: ", err, params);
+
exit(128);
}
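The comments in die_builtin() and error_builtin() hinge on one C rule: a va_list may only be walked once, so any consumer that is not the last must operate on a copy made with va_copy(). A standalone illustration of that constraint follows; log_twice and report are made-up names, not Git functions.

/*
 * Hedged sketch of the va_copy constraint described above: the first
 * consumer formats from a copy, leaving the original 'ap' intact for
 * the second consumer.
 */
#include <stdarg.h>
#include <stdio.h>

static void log_twice(const char *fmt, va_list ap)
{
	va_list copy;

	va_copy(copy, ap);		/* first consumer uses the copy ... */
	vfprintf(stderr, fmt, copy);
	va_end(copy);

	vfprintf(stderr, fmt, ap);	/* ... so 'ap' is still walkable here */
}

static void report(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	log_twice(fmt, ap);
	va_end(ap);
}

int main(void)
{
	report("fatal: %s\n", "example message");
	return 0;
}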
static void error_builtin(const char *err, va_list params)
{
+ /*
+ * We call this trace2 function first and expect it to va_copy 'params'
+ * before using it (because an 'ap' can only be walked once).
+ */
+ trace2_cmd_error_va(err, params);
+
vreportf("error: ", err, params);
}
DIR *dir;
struct dirent *d;
int ret = 0;
- struct repository_format format;
+ struct repository_format format = REPOSITORY_FORMAT_INIT;
submodule_gitdir = git_pathdup_submodule(path, "%s", "");
if (!submodule_gitdir)
read_repository_format(&format, sb.buf);
if (format.version != 0) {
strbuf_release(&sb);
+ clear_repository_format(&format);
return 1;
}
+ clear_repository_format(&format);
/* Replace config by worktrees. */
strbuf_setlen(&sb, sb.len - strlen("config"));
void wt_status_collect(struct wt_status *s)
{
+ trace2_region_enter("status", "worktrees", s->repo);
wt_status_collect_changes_worktree(s);
- if (s->is_initial)
+ trace2_region_leave("status", "worktrees", s->repo);
+
+ if (s->is_initial) {
+ trace2_region_enter("status", "initial", s->repo);
wt_status_collect_changes_initial(s);
- else
+ trace2_region_leave("status", "initial", s->repo);
+ } else {
+ trace2_region_enter("status", "index", s->repo);
wt_status_collect_changes_index(s);
+ trace2_region_leave("status", "index", s->repo);
+ }
+
+ trace2_region_enter("status", "untracked", s->repo);
wt_status_collect_untracked(s);
+ trace2_region_leave("status", "untracked", s->repo);
wt_status_get_state(s->repo, &s->state, s->branch && !strcmp(s->branch, "HEAD"));
if (s->state.merge_in_progress && !has_unmerged(s))
void wt_status_print(struct wt_status *s)
{
+ trace2_data_intmax("status", s->repo, "count/changed", s->change.nr);
+ trace2_data_intmax("status", s->repo, "count/untracked",
+ s->untracked.nr);
+ trace2_data_intmax("status", s->repo, "count/ignored", s->ignored.nr);
+
+ trace2_region_enter("status", "print", s->repo);
+
switch (s->status_format) {
case STATUS_FORMAT_SHORT:
wt_shortstatus_print(s);
wt_longstatus_print(s);
break;
}
+
+ trace2_region_leave("status", "print", s->repo);
}
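The wt-status changes above show the call-site discipline for this kind of instrumentation: every phase is bracketed by an enter/leave pair on each branch, and counters are reported once the numbers are known. A standalone sketch of that shape follows; the region() and data() helpers are stand-ins that merely print, where the real calls are trace2_region_enter/trace2_region_leave and trace2_data_intmax, and collect_changes is an invented example.

/*
 * Hedged sketch of the instrumentation pattern used in
 * wt_status_collect()/wt_status_print(): pair enter/leave on every
 * branch and emit data events for interesting counts.
 */
#include <stdio.h>

static void region(const char *what, const char *category, const char *label)
{
	fprintf(stderr, "%s: %s/%s\n", what, category, label);
}

static void data(const char *category, const char *key, long value)
{
	fprintf(stderr, "data: %s/%s = %ld\n", category, key, value);
}

static int collect_changes(int initial)
{
	int nr;

	if (initial) {
		region("enter", "status", "initial");
		nr = 3;				/* pretend work */
		region("leave", "status", "initial");
	} else {
		region("enter", "status", "index");
		nr = 5;				/* pretend work */
		region("leave", "status", "index");
	}
	return nr;
}

int main(void)
{
	data("status", "count/changed", collect_changes(0));
	return 0;
}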
/**
git_xmerge_style = XDL_MERGE_DIFF3;
else if (!strcmp(value, "merge"))
git_xmerge_style = 0;
+ /*
+ * Please update _git_checkout() in
+ * git-completion.bash when you add new merge config
+ */
else
die("unknown style '%s' given for '%s'",
value, var);