# A list of macros that should be interpreted as foreach loops instead of as
# function calls.
-ForEachMacros: ['for_each_string_list_item']
+ForEachMacros: ['for_each_string_list_item', 'for_each_wanted_builtin', 'for_each_builtin', 'for_each_ut']
# The maximum number of consecutive empty lines to keep.
MaxEmptyLinesToKeep: 1
a mailing list (git@vger.kernel.org) for code submissions, code
reviews, and bug reports.
-Nevertheless, you can use [submitGit](http://submitgit.herokuapp.com/) to
+Nevertheless, you can use [GitGitGadget](https://gitgitgadget.github.io/) to
conveniently send your Pull Request commits to our mailing list.
Please read ["A note from the maintainer"](https://git.kernel.org/pub/scm/git/git.git/plain/MaintNotes?h=todo)
Thanks for taking the time to contribute to Git! Please be advised that the
Git community does not use github.com for their contributions. Instead, we use
a mailing list (git@vger.kernel.org) for code submissions, code reviews, and
-bug reports. Nevertheless, you can use submitGit to conveniently send your Pull
-Requests commits to our mailing list.
+bug reports. Nevertheless, you can use GitGitGadget (https://gitgitgadget.github.io/)
+to conveniently send your Pull Request commits to our mailing list.
Please read the "guidelines for contributing" linked above!
/git-init-db
/git-interpret-trailers
/git-instaweb
-/git-legacy-rebase
+/git-legacy-stash
/git-log
/git-ls-files
/git-ls-remote
/git-remote-ftps
/git-remote-fd
/git-remote-ext
-/git-remote-testgit
/git-remote-testpy
/git-remote-testsvn
/git-repack
compiler:
addons:
before_install:
- - env: jobname=Windows
- os: linux
- compiler:
- addons:
- before_install:
- script:
- - >
- test "$TRAVIS_REPO_SLUG" != "git/git" ||
- ci/run-windows-build.sh $TRAVIS_BRANCH $(git rev-parse HEAD)
- after_failure:
- env: jobname=Linux32
os: linux
compiler:
manpage-base-url.xsl
SubmittingPatches.txt
tmp-doc-diff/
+GIT-ASCIIDOCFLAGS
+/GIT-EXCLUDED-PROGRAMS
or commands:
Literal examples (e.g. use of command-line options, command names,
- branch names, configuration and environment variables) must be
- typeset in monospace (i.e. wrapped with backticks):
+ branch names, URLs, pathnames (files and directories), configuration and
+ environment variables) must be typeset in monospace (i.e. wrapped with
+ backticks):
`--pretty=oneline`
`git rev-list`
`remote.pushDefault`
+ `http://git.example.com`
+ `.git/config`
`GIT_DIR`
`HEAD`
SP_ARTICLES =
OBSOLETE_HTML =
+-include GIT-EXCLUDED-PROGRAMS
+
MAN1_TXT += $(filter-out \
+ $(patsubst %,%.txt,$(EXCLUDED_PROGRAMS)) \
$(addsuffix .txt, $(ARTICLES) $(SP_ARTICLES)), \
$(wildcard git-*.txt))
MAN1_TXT += git.txt
MAN1_TXT += gitk.txt
-MAN1_TXT += gitremote-helpers.txt
MAN1_TXT += gitweb.txt
MAN5_TXT += gitattributes.txt
MAN7_TXT += giteveryday.txt
MAN7_TXT += gitglossary.txt
MAN7_TXT += gitnamespaces.txt
+MAN7_TXT += gitremote-helpers.txt
MAN7_TXT += gitrevisions.txt
MAN7_TXT += gitsubmodules.txt
MAN7_TXT += gittutorial-2.txt
show_tool_names can_merge "* " || :' >mergetools-merge.txt && \
date >$@
+TRACK_ASCIIDOCFLAGS = $(subst ','\'',$(ASCIIDOC_COMMON):$(ASCIIDOC_HTML):$(ASCIIDOC_DOCBOOK))
+
+GIT-ASCIIDOCFLAGS: FORCE
+ @FLAGS='$(TRACK_ASCIIDOCFLAGS)'; \
+ if test x"$$FLAGS" != x"`cat GIT-ASCIIDOCFLAGS 2>/dev/null`" ; then \
+ echo >&2 " * new asciidoc flags"; \
+ echo "$$FLAGS" >GIT-ASCIIDOCFLAGS; \
+ fi
+
clean:
$(RM) *.xml *.xml+ *.html *.html+ *.1 *.5 *.7
$(RM) *.texi *.texi+ *.texi++ git.info gitman.info
$(RM) SubmittingPatches.txt
$(RM) $(cmds_txt) $(mergetools_txt) *.made
$(RM) manpage-base-url.xsl
+ $(RM) GIT-ASCIIDOCFLAGS
-$(MAN_HTML): %.html : %.txt asciidoc.conf
+$(MAN_HTML): %.html : %.txt asciidoc.conf asciidoctor-extensions.rb GIT-ASCIIDOCFLAGS
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
$(TXT_TO_HTML) -d manpage -o $@+ $< && \
mv $@+ $@
-$(OBSOLETE_HTML): %.html : %.txto asciidoc.conf
+$(OBSOLETE_HTML): %.html : %.txto asciidoc.conf asciidoctor-extensions.rb GIT-ASCIIDOCFLAGS
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
$(TXT_TO_HTML) -o $@+ $< && \
mv $@+ $@
manpage-base-url.xsl: manpage-base-url.xsl.in
$(QUIET_GEN)sed "s|@@MAN_BASE_URL@@|$(MAN_BASE_URL)|" $< > $@
-%.1 %.5 %.7 : %.xml manpage-base-url.xsl
+%.1 %.5 %.7 : %.xml manpage-base-url.xsl $(wildcard manpage*.xsl)
$(QUIET_XMLTO)$(RM) $@ && \
$(XMLTO) -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
-%.xml : %.txt asciidoc.conf
+%.xml : %.txt asciidoc.conf asciidoctor-extensions.rb GIT-ASCIIDOCFLAGS
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
$(TXT_TO_XML) -d manpage -o $@+ $< && \
mv $@+ $@
-user-manual.xml: user-manual.txt user-manual.conf
+user-manual.xml: user-manual.txt user-manual.conf asciidoctor-extensions.rb GIT-ASCIIDOCFLAGS
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
$(TXT_TO_XML) -d book -o $@+ $< && \
mv $@+ $@
$(QUIET_GEN)cd technical && '$(SHELL_PATH_SQ)' ./api-index.sh
technical/%.html: ASCIIDOC_EXTRA += -a git-relative-html-prefix=../
-$(patsubst %,%.html,$(API_DOCS) technical/api-index $(TECH_DOCS)): %.html : %.txt asciidoc.conf
+$(patsubst %,%.html,$(API_DOCS) technical/api-index $(TECH_DOCS)): %.html : %.txt \
+ asciidoc.conf GIT-ASCIIDOCFLAGS
$(QUIET_ASCIIDOC)$(TXT_TO_HTML) $*.txt
SubmittingPatches.txt: SubmittingPatches
WEBDOC_DEST = /pub/software/scm/git/docs
howto/%.html: ASCIIDOC_EXTRA += -a git-relative-html-prefix=../
-$(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt
+$(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt GIT-ASCIIDOCFLAGS
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
sed -e '1,/^$$/d' $< | \
$(TXT_TO_HTML) - >$@+ && \
--- /dev/null
+Git 2.22 Release Notes
+======================
+
+Updates since v2.21
+-------------------
+
+UI, Workflows & Features
+
+ * "git checkout --no-overlay" can be used to trigger a new mode of
+ checking out paths out of the tree-ish, that allows paths that
+ match the pathspec that are in the current index and working tree
+ and are not in the tree-ish.
+
+ * The %(trailers) formatter in "git log --format=..." now allows
+   trailers to be picked selectively by keyword, only their values to
+   be shown, etc.
+
+ * Four new configuration variables {author,committer}.{name,email}
+ have been introduced to override user.{name,email} in more specific
+ cases.
+
+ * Command-line completion (in contrib/) learned to tab-complete the
+ "git submodule absorbgitdirs" subcommand.
+
+ * "git branch" learned a new subcommand "--show-current".
+
+ * Output from "diff --cc" did not show the original paths when the
+ merge involved renames. A new option adds the paths in the
+ original trees to the output.
+
+ * The command line completion (in contrib/) has been taught to
+ complete more subcommand parameters.
+
+ * The final report from "git bisect" used to show the suspected
+ culprit using a raw "diff-tree", with which there is no output for
+ a merge commit. This has been updated to use a more modern and
+ human readable output that still is concise enough.
+
+ * "git rebase --rebase-merges" replaces its old "--preserve-merges"
+ option; the latter is now marked as deprecated.
+
+ * Error message given while cloning with --recurse-submodules has
+ been updated.
+
+ * The completion helper code now pays attention to repository-local
+ configuration (when available), which allows --list-cmds to honour
+ a repository specific setting of completion.commands, for example.
+
+ * "git mergetool" learned to offer Sublime Merge (smerge) as one of
+ its backends.
+
+ * A new hook "post-index-change" is called when the on-disk index
+ file changes, which can help e.g. a virtualized working tree
+ implementation.
+
+ * "git difftool" can now run outside a repository.
+
+ * "git checkout -m <other>" was about carrying the differences
+ between HEAD and the working-tree files forward while checking out
+ another branch, and ignored the differences between HEAD and the
+ index. The command has been taught to abort when the index and the
+ HEAD are different.
+
+ * A progress indicator has been added to the "index-pack" step, which
+ often makes users wait for completion during "git clone".
+
+ * "git submodule" learns "set-branch" subcommand that allows the
+ submodule.*.branch settings to be modified.
+
+ * "git merge-recursive" backend recently learned a new heuristics to
+ infer file movement based on how other files in the same directory
+ moved. As this is inherently less robust heuristics than the one
+ based on the content similarity of the file itself (rather than
+ based on what its neighbours are doing), it sometimes gives an
+ outcome unexpected by the end users. This has been toned down to
+ leave the renamed paths in higher/conflicted stages in the index so
+ that the user can examine and confirm the result.
+
+ * "git tag" learned to give an advice suggesting it might be a
+ mistake when creating an annotated or signed tag that points at
+ another tag.
+
+ * The "git pack-objects" command learned to report the number of
+ objects it packed via the trace2 mechanism.
+
+ * The list of conflicted paths shown in the editor while concluding a
+ conflicted merge was shown above the scissors line when the
+ clean-up mode is set to "scissors", even though it was commented
+ out just like the list of updated paths and other information to
+ help the user explain the merge better.
+
+ * The trace2 tracing facility learned to auto-generate a filename
+ when told to log to a directory.
+
+ * "git clone" learned a new --server-option option when talking over
+ the protocol version 2.
+
+ * The connectivity bitmaps are created by default in bare
+ repositories now; also the pathname hash-cache is created by
+ default to avoid making crappy deltas when repacking.
+
+
+Performance, Internal Implementation, Development Support etc.
+
+ * The diff machinery, one of the oldest parts of the system, which
+   long predates the parse-options API, uses a fairly long and complex
+ handcrafted option parser. This is being rewritten to use the
+ parse-options API.
+
+ * The implementation of pack-redundant has been updated for
+ performance in a repository with many packfiles.
+
+ * A more structured way to obtain execution trace has been added.
+
+ * "git prune" has been taught to take advantage of reachability
+ bitmap when able.
+
+ * The command line parser of "git commit-tree" has been rewritten to
+ use the parse-options API.
+
+ * Suggest GitGitGadget instead of submitGit as a way to submit
+   patches based on GitHub PRs to us.
+
+ * The test framework has been updated to help developers by making it
+ easier to run most of the tests under different versions of
+ over-the-wire protocols.
+
+ * Dev support update to make it easier to compare two formatted
+ results from our documentation.
+
+ * The scripted "git rebase" implementation has been retired.
+
+ * "git multi-pack-index verify" did not scale well with the number of
+ packfiles, which is being improved.
+
+ * "git stash" has been rewritten in C.
+
+ * The "check-docs" Makefile target to support developers has been
+ updated.
+
+ * The tests have been updated not to rely on the abbreviated option
+ names the parse-options API offers, to protect us from an
+ abbreviated form of an option that used to be unique within the
+   command becoming non-unique when a new option that shares the same
+ prefix is added.
+
+ * The scripted version of "git rebase -i" wrote and rewrote the todo
+ list many times during a single step of its operation, and the
+ recent C-rewrite made a faithful conversion of the logic to C. The
+ implementation has been updated to carry necessary information
+ around in-core to avoid rewriting the same file over and over
+ unnecessarily.
+
+ * Test framework update to more robustly clean up leftover files and
+ processes after tests are done.
+
+ * Conversion from unsigned char[20] to struct object_id continues.
+
+ * While running "git diff" in a lazy clone, we can know upfront which
+   missing blobs we will need, instead of waiting for the on-demand
+   machinery to discover them one by one. The code now aims to achieve
+   better performance by batching the requests for these promised
+   blobs.
+
+ * During an initial "git clone --depth=..." partial clone, it is
+ pointless to spend cycles for a large portion of the connectivity
+ check that enumerates and skips promisor objects (which by
+   definition are all objects fetched from the other side). This has
+ been optimized out.
+
+ * Mechanically and systematically drop "extern" from function
+   declarations.
+
+ * The script to aggregate perf results unconditionally depended on
+ libjson-perl even though it did not have to, which has been
+ corrected.
+
+ * The internal implementation of "git rebase -i" has been updated to
+ avoid forking a separate "rebase--interactive" process.
+
+
+Fixes since v2.21
+-----------------
+
+ * "git prune-packed" did not notice and complain against excess
+ arguments given from the command line, which now it does.
+ (merge 9b0bd87ed2 rj/prune-packed-excess-args later to maint).
+
+ * Split-index fix.
+ (merge 6e37c8ed3c nd/split-index-null-base-fix later to maint).
+
+ * "git diff --no-index" may still want to access Git goodies like
+ --ext-diff and --textconv, but so far these have been ignored,
+ which has been corrected.
+ (merge 287ab28bfa jk/diff-no-index-initialize later to maint).
+
+ * Unify RPC code for smart http in protocol v0/v1 and v2, which fixes
+ a bug in the latter (lack of authentication retry) and generally
+ improves the code base.
+ (merge a97d00799a jt/http-auth-proto-v2-fix later to maint).
+
+ * The include file compat/bswap.h has been updated so that it is safe
+ to (accidentally) include it more than once.
+ (merge 33aa579a55 jk/guard-bswap-header later to maint).
+
+ * The set of header files used by "make hdr-check" unconditionally
+ included sha256/gcrypt.h, even when it is not used, causing the
+ make target to fail. We now skip it when GCRYPT_SHA256 is not in
+ use.
+ (merge f23aa18e7f rj/hdr-check-gcrypt-fix later to maint).
+
+ * The Makefile uses the 'find' utility to enumerate all the *.h header
+ files, which is expensive on platforms with slow filesystems; it
+ now optionally uses "ls-files" if working within a repository,
+ which is a trick similar to how all sources are enumerated to run
+ ETAGS on.
+ (merge 92b88eba9f js/find-lib-h-with-ls-files-when-possible later to maint).
+
+ * "git rebase" that was reimplemented in C did not set ORIG_HEAD
+ correctly, which has been corrected.
+ (merge cbd29ead92 js/rebase-orig-head-fix later to maint).
+
+ * Dev support.
+ (merge f545737144 js/stress-test-ui-tweak later to maint).
+
+ * CFLAGS now can be tweaked when invoking Make while using
+ DEVELOPER=YesPlease; this did not work well before.
+ (merge 6d5d4b4e93 ab/makefile-help-devs-more later to maint).
+
+ * "git fsck --connectivity-only" omits computation necessary to sift
+ the objects that are not reachable from any of the refs into
+ unreachable and dangling. This is now enabled when dangling
+ objects are requested (which is done by default, but can be
+ overridden with the "--no-dangling" option).
+ (merge 8d8c2a5aef jk/fsck-doc later to maint).
+
+ * On platforms where "git fetch" is killed with SIGPIPE (e.g. OSX),
+   the upload-pack that runs on the other end and hangs up after
+   detecting an error could cause "git fetch" to die with a signal,
+   which led to a flaky test. "git fetch" now ignores SIGPIPE during
+ the network portion of its operation (this is not a problem as we
+ check the return status from our write(2)s).
+ (merge 143588949c jk/no-sigpipe-during-network-transport later to maint).
+
+ * A recent update broke "is this object available to us?" check for
+ well-known objects like an empty tree (which should yield "yes",
+ even when there is no on-disk object for an empty tree), which has
+ been corrected.
+ (merge f06ab027ef jk/virtual-objects-do-exist later to maint).
+
+ * The setup code has been cleaned up to avoid leaks around the
+ repository_format structure.
+ (merge e8805af1c3 ma/clear-repository-format later to maint).
+
+ * "git config --type=color ..." is meant to replace "git config --get-color"
+ but there is a slight difference that wasn't documented, which is
+ now fixed.
+ (merge cd8e7593b9 jk/config-type-color-ends-with-lf later to maint).
+
+ * When the "clean" filter can reduce the size of a huge file in the
+ working tree down to a small "token" (a la Git LFS), there is no
+   point in allocating a huge scratch area upfront, but the buffer used
+   to be sized based on the original file size. The convert mechanism
+   now allocates the bare minimum and reallocates as it receives the
+   output from the clean filter process.
+ (merge 02156ab031 jh/resize-convert-scratch-buffer later to maint).
+
+ * "git rebase" uses the refs/rewritten/ hierarchy to store its
+ intermediate states, which inherently makes the hierarchy per
+ worktree, but it didn't quite work well.
+ (merge b9317d55a3 nd/rewritten-ref-is-per-worktree later to maint).
+
+ * "git log -L<from>,<to>:<path>" with "-s" did not suppress the patch
+ output as it should. This has been corrected.
+ (merge 05314efaea jk/line-log-with-patch later to maint).
+
+ * "git worktree add" used to do a "find an available name with stat
+ and then mkdir", which is race-prone. This has been fixed by using
+ mkdir and reacting to EEXIST in a loop.
+ (merge 7af01f2367 ms/worktree-add-atomic-mkdir later to maint).
+
+ * Build update for SHA-1 with collision detection.
+ (merge 07a20f569b jk/sha1dc later to maint).
+
+ * Build procedure has been fixed around use of asciidoctor instead of
+ asciidoc.
+ (merge 185f9a0ea0 ma/asciidoctor-fixes later to maint).
+
+ * The remote-http transport did not anonymize URLs reported in its
+   error messages in some places.
+ (merge c1284b21f2 js/anonymize-remote-curl-diag later to maint).
+
+ * Error messages given from the http transport have been updated so
+ that they can be localized.
+ (merge ed8b4132c8 js/remote-curl-i18n later to maint).
+
+ * "git init" forgot to read platform-specific repository
+   configuration, which made the Windows port ignore settings of
+ core.hidedotfiles, for example.
+
+ * A corner-case object name ambiguity while the sequencer machinery
+ is working (e.g. "rebase -i -x") has been fixed.
+
+ * "git format-patch" did not diagnose an error while opening the
+ output file for the cover-letter, which has been corrected.
+ (merge 2fe95f494c jc/format-patch-error-check later to maint).
+
+ * "git checkout -f <branch>" while the index has an unmerged path
+ incorrectly left some paths in an unmerged state, which has been
+ corrected.
+
+ * A corner case bug in the refs API has been corrected.
+ (merge d3322eb28b jk/refs-double-abort later to maint).
+
+ * Unicode update.
+ (merge 584b62c37b bb/unicode-12 later to maint).
+
+ * dumb-http walker has been updated to share more error recovery
+ strategy with the normal codepath.
+
+ * A buglet in configuration parser has been fixed.
+ (merge 19e7fdaa58 nd/include-if-wildmatch later to maint).
+
+ * The documentation for "git read-tree --reset -u" has been updated.
+ (merge b5a0bd694c nd/read-tree-reset-doc later to maint).
+
+ * Code clean-up around a much-less-important-than-it-used-to-be
+   update_server_info() function.
+ (merge b3223761c8 jk/server-info-rabbit-hole later to maint).
+
+ * The message given when "git commit -a <paths>" errors out has been
+ updated.
+ (merge 5a1dbd48bc nd/commit-a-with-paths-msg-update later to maint).
+
+ * "git cherry-pick --options A..B", after giving control back to the
+ user to ask help resolving a conflicted step, did not honor the
+ options it originally received, which has been corrected.
+
+ * Various glitches in "git gc" around reflog handling have been fixed.
+
+ * The code to read from the commit-graph file has been cleaned up,
+   with more careful error checking before using data read from it.
+
+ * Performance fix around "git fetch" that grabs many refs.
+ (merge b764300912 jt/fetch-pack-wanted-refs-optim later to maint).
+
+ * Protocol v2 support in "git fetch-pack" of shallow clones has been
+ corrected.
+
+ * Performance fix around "git blame", especially in a linear history
+ (which is the norm we should optimize for).
+ (merge f892014943 dk/blame-keep-origin-blob later to maint).
+
+ * Performance fix for "rev-list --parents -- pathspec".
+ (merge 8320b1dbe7 jk/revision-rewritten-parents-in-prio-queue later to maint).
+
+ * Updating the display with progress message has been cleaned up to
+ deal better with overlong messages.
+ (merge 545dc345eb sg/overlong-progress-fix later to maint).
+
+ * "git blame -- path" in a non-bare repository starts blaming from
+ the working tree, and the same command in a bare repository errors
+ out because there is no working tree by definition. The command
+ has been taught to instead start blaming from the commit at HEAD,
+ which is more useful.
+ (merge a544fb08f8 sg/blame-in-bare-start-at-head later to maint).
+
+ * An underallocation in the code to read the untracked cache
+ extension has been corrected.
+ (merge 3a7b45a623 js/untracked-cache-allocfix later to maint).
+
+ * The code is updated to check the result of memory allocation before
+ it is used in more places, by using xmalloc and/or xcalloc calls.
+ (merge 999b951b28 jk/xmalloc later to maint).
+
+ * The GETTEXT_POISON test option has been quite broken ever since it
+ was made runtime-tunable, which has been fixed.
+ (merge f88b9cb603 jc/gettext-test-fix later to maint).
+
+ * Test fix on APFS, which is incapable of storing paths in Latin-1.
+ (merge 3889149619 js/iso8895-test-on-apfs later to maint).
+
+ * "git submodule foreach <command> --quiet" did not pass the option
+ down correctly, which has been corrected.
+ (merge a282f5a906 nd/submodule-foreach-quiet later to maint).
+
+ * "git send-email" has been taught to use quoted-printable when the
+ payload contains carriage-return. The use of the mechanism is in
+   line with the design that originally added the codepath choosing QP
+ when the payload has overly long lines.
+ (merge 74d76a1701 bc/send-email-qp-cr later to maint).
+
+ * The recently added feature to add addresses that are on
+ anything-by: trailers in 'git send-email' was found to be way too
+   eager and considered nonsense strings as if they could be a
+   legitimate beginning of a *-by: trailer. This has been tightened.
+
+ * Builds with gettext broke on recent macOS w/ Homebrew, which
+ seems to have stopped including from /usr/local/include; this
+ has been corrected.
+ (merge 92a1377a2a js/macos-gettext-build later to maint).
+
+ * Running "git add" on a repository created inside the current
+ repository is an explicit indication that the user wants to add it
+ as a submodule, but when the HEAD of the inner repository is on an
+ unborn branch, it cannot be added as a submodule. Worse, the files
+ in its working tree can be added as if they are a part of the outer
+ repository, which is not what the user wants. These problems are
+ being addressed.
+ (merge f937bc2f86 km/empty-repo-is-still-a-repo later to maint).
+
+ * "git cherry-pick" run with the "-x" or the "--signoff" option used
+ to (and more importantly, ought to) clean up the commit log message
+ with the --cleanup=space option by default, but this has been
+ broken since late 2017. This has been fixed.
+
+ * When given a tag that points at a commit-ish, "git replace --graft"
+ failed to peel the tag before writing a replace ref, which did not
+ make sense because the old graft mechanism the feature wants to
+   mimic only allowed replacing one commit object with another.
+ This has been fixed.
+ (merge ee521ec4cb cc/replace-graft-peel-tags later to maint).
+
+ * Code tightening against a "wrong" object appearing where an object
+ of a different type is expected, instead of blindly assuming that
+   the connections between objects are correctly made.
+ (merge 97dd512af7 tb/unexpected later to maint).
+
+ * An earlier update for MinGW and Cygwin accidentally broke MSVC build,
+ which has been fixed.
+ (merge 22c3634c0f ss/msvc-path-utils-fix later to maint).
+
+ * %(push:track) token used in the --format option to "git
+ for-each-ref" and friends was not showing the right branch, which
+ has been fixed.
+ (merge c646d0934e dr/ref-filter-push-track-fix later to maint).
+
+ * "make check-docs", "git help -a", etc. did not account for cases
+ where a particular build may deliberately omit some subcommands,
+ which has been corrected.
+
+ * The logic to tell if a Git repository has a working tree protects
+ "git branch -D" from removing the branch that is currently checked
+ out by mistake. The implementation of this logic was broken for
+   repositories with unusual names, which unfortunately is the norm for
+ submodules these days. This has been fixed.
+ (merge f3534c98e4 jt/submodule-repo-is-with-worktree later to maint).
+
+ * AIX shared the same build issues with other BSDs around fileno(fp),
+ which has been corrected.
+ (merge ee662bf5c6 cc/aix-has-fileno-as-a-macro later to maint).
+
+ * The autoconf generated configure script failed to use the right
+ gettext() implementations from -libintl by ignoring useless stub
+ implementations shipped in some C library, which has been
+ corrected.
+ (merge b71e56a683 vk/autoconf-gettext later to maint).
+
+ * Fix index-pack perf test so that the repeated invocations always
+ run in an empty repository, which emulates the initial clone
+ situation better.
+ (merge 775c71e16d jk/p5302-avoid-collision-check-cost later to maint).
+
+ * A "ls-files" that emulates "find" to enumerate files in the working
+ tree resulted in duplicated Makefile rules that caused the build to
+ issue an unnecessary warning during a trial build after merge
+ conflicts are resolved in working tree *.h files but before the
+ resolved results are added to the index. This has been corrected.
+
+ * "git chery-pick" (and "revert" that shares the same runtime engine)
+ that deals with multiple commits got confused when the final step
+ gets stopped with a conflict and the user concluded the sequence
+ with "git commit". Attempt to fix it by cleaning up the state
+ files used by these commands in such a situation.
+ (merge 4a72486de9 pw/clean-sequencer-state-upon-final-commit later to maint).
+
+ * Code cleanup, docfix, build fix, etc.
+ (merge 11f470aee7 jc/test-yes-doc later to maint).
+ (merge 90503a240b js/doc-symref-in-proto-v1 later to maint).
+ (merge 5c326d1252 jk/unused-params later to maint).
+ (merge 68cabbfda3 dl/doc-submodule-wo-subcommand later to maint).
+ (merge 9903623761 ab/receive-pack-use-after-free-fix later to maint).
+ (merge 1ede45e44b en/merge-options-doc later to maint).
+ (merge 3e14dd2c8e rd/doc-hook-used-in-sample later to maint).
+ (merge c271dc28fd nd/no-more-check-racy later to maint).
+ (merge e6e15194a8 yb/utf-16le-bom-spellfix later to maint).
+ (merge bb101aaf0c rd/attr.c-comment-typofix later to maint).
+ (merge 716a5af812 rd/gc-prune-doc-fix later to maint).
+ (merge 50b206371d js/untravis-windows later to maint).
+ (merge dbf47215e3 js/rebase-recreate-merge later to maint).
+ (merge 56cb2d30f8 dl/reset-doc-no-wrt-abbrev later to maint).
+ (merge 64eca306a2 ja/dir-rename-doc-markup-fix later to maint).
+ (merge af91b0230c dl/ignore-docs later to maint).
+ (merge 59a06e947b ra/t3600-test-path-funcs later to maint).
+ (merge e041d0781b ar/t4150-remove-cruft later to maint).
+ (merge 8d75a1d183 ma/asciidoctor-fixes-more later to maint).
+ (merge 74cc547b0f mh/pack-protocol-doc-fix later to maint).
+ (merge ed31851fa6 ab/doc-misc-typofixes later to maint).
+ (merge a7256debd4 nd/checkout-m-doc-update later to maint).
+ (merge 3a9e1ad78d jt/t5551-protocol-v2-does-not-have-half-auth later to maint).
+ (merge 0b918b75af sg/t5318-cleanup later to maint).
+ (merge 68ed71b53c cb/doco-mono later to maint).
+ (merge a34dca2451 nd/interpret-trailers-docfix later to maint).
+ (merge cf7b857a77 en/fast-import-parsing-fix later to maint).
+ (merge fe61ccbc35 po/rerere-doc-fmt later to maint).
+ (merge ffea0248bf po/describe-not-necessarily-7 later to maint).
+ (merge 7cb7283adb tg/ls-files-debug-format-fix later to maint).
+ (merge f64a21bd82 tz/doc-apostrophe-no-longer-needed later to maint).
+ (merge dbe7b41019 js/t3301-unbreak-notes-test later to maint).
+ (merge d8083e4180 km/t3000-retitle later to maint).
+ (merge 9e4cbccbd7 tz/git-svn-doc-markup-fix later to maint).
+ (merge da9ca955a7 jk/ls-files-doc-markup-fix later to maint).
Some parts of the system have dedicated maintainers with their own
repositories.
-- 'git-gui/' comes from git-gui project, maintained by Pat Thoyts:
+- `git-gui/` comes from git-gui project, maintained by Pat Thoyts:
git://repo.or.cz/git-gui.git
-- 'gitk-git/' comes from Paul Mackerras's gitk project:
+- `gitk-git/` comes from Paul Mackerras's gitk project:
git://ozlabs.org/~paulus/gitk
-- 'po/' comes from the localization coordinator, Jiang Xin:
+- `po/` comes from the localization coordinator, Jiang Xin:
https://github.com/git-l10n/git-po/
def process(parent, target, attrs)
if parent.document.basebackend? 'html'
prefix = parent.document.attr('git-relative-html-prefix')
- %(<a href="#{prefix}#{target}.html">#{target}(#{attrs[1]})</a>\n)
+ %(<a href="#{prefix}#{target}.html">#{target}(#{attrs[1]})</a>)
elsif parent.document.basebackend? 'docbook'
"<citerefentry>\n" \
"<refentrytitle>#{target}</refentrytitle>" \
"<manvolnum>#{attrs[1]}</manvolnum>\n" \
- "</citerefentry>\n"
+ "</citerefentry>"
end
end
end
include::config/tag.txt[]
+include::config/trace2.txt[]
+
include::config/transfer.txt[]
include::config/uploadarchive.txt[]
waitingForEditor::
Print a message to the terminal whenever Git is waiting for
editor input from the user.
+ nestedTag::
+ Advice shown if a user attempts to recursively tag a tag object.
--
so that the local merge commits are included in the rebase (see
linkgit:git-rebase[1] for details).
+
-When preserve, also pass `--preserve-merges` along to 'git rebase'
-so that locally committed merge commits will not be flattened
-by running 'git pull'.
+When `preserve` (deprecated in favor of `merges`), also pass
+`--preserve-merges` along to 'git rebase' so that locally committed merge
+commits will not be flattened by running 'git pull'.
+
When the value is `interactive`, the rebase is run in interactive mode.
+
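As a sketch, assuming the non-deprecated `merges` value is wanted, one might
set it per repository like this (run inside whatever repository it should
apply to):

------------------------------------------------
$ git config pull.rebase merges
$ git pull
------------------------------------------------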
core.excludesFile::
Specifies the pathname to the file that contains patterns to
describe paths that are not meant to be tracked, in addition
- to '.gitignore' (per-directory) and '.git/info/exclude'.
+ to `.gitignore` (per-directory) and `.git/info/exclude`.
Defaults to `$XDG_CONFIG_HOME/git/ignore`.
If `$XDG_CONFIG_HOME` is either not set or empty, `$HOME/.config/git/ignore`
is used instead. See linkgit:gitignore[5].
command-line argument and write the password on its STDOUT.
core.attributesFile::
- In addition to '.gitattributes' (per-directory) and
- '.git/info/attributes', Git looks into this file for attributes
+ In addition to `.gitattributes` (per-directory) and
+ `.git/info/attributes`, Git looks into this file for attributes
(see linkgit:gitattributes[5]). Path expansions are made the same
way as for `core.excludesFile`. Its default value is
`$XDG_CONFIG_HOME/git/attributes`. If `$XDG_CONFIG_HOME` is either not
core.hooksPath::
By default Git will look for your hooks in the
- '$GIT_DIR/hooks' directory. Set this to different path,
- e.g. '/etc/git/hooks', and Git will try to find your hooks in
- that directory, e.g. '/etc/git/hooks/pre-receive' instead of
- in '$GIT_DIR/hooks/pre-receive'.
+	`$GIT_DIR/hooks` directory. Set this to a different path,
+ e.g. `/etc/git/hooks`, and Git will try to find your hooks in
+ that directory, e.g. `/etc/git/hooks/pre-receive` instead of
+ in `$GIT_DIR/hooks/pre-receive`.
+
The path can be either absolute or relative. A relative path is
taken as relative to the directory where the hooks are run (see
diff.dirstat::
A comma separated list of `--dirstat` parameters specifying the
- default behavior of the `--dirstat` option to linkgit:git-diff[1]`
+ default behavior of the `--dirstat` option to linkgit:git-diff[1]
and friends. The defaults can be overridden on the command line
(using `--dirstat=<param1,param2,...>`). The fallback defaults
(when not changed by `diff.dirstat`) are `changes,noncumulative,3`.
environment variable. The command is called with parameters
as described under "git Diffs" in linkgit:git[1]. Note: if
you want to use an external diff program only on a subset of
- your files, you might want to use linkgit:gitattributes[5] instead.
+ your files, you might want to use linkgit:gitattributes[5] instead.
diff.ignoreSubmodules::
Sets the default value of --ignore-submodules. Note that this
vice versa by configuring the `fsck.<msg-id>` setting where the
`<msg-id>` is the fsck message ID and the value is one of `error`,
`warn` or `ignore`. For convenience, fsck prefixes the error/warning
-with the message ID, e.g. "missingEmail: invalid author/committer line
-- missing email" means that setting `fsck.missingEmail = ignore` will
-hide that issue.
+with the message ID, e.g. "missingEmail: invalid author/committer
+line - missing email" means that setting `fsck.missingEmail = ignore`
+will hide that issue.
+
In general, it is better to enumerate existing objects with problems
with `fsck.skipList`, instead of listing the kind of breakages these
gc.aggressiveDepth::
The depth parameter used in the delta compression
algorithm used by 'git gc --aggressive'. This defaults
- to 50.
+ to 50, which is the default for the `--depth` option when
+ `--aggressive` isn't in use.
++
+See the documentation for the `--depth` option in
+linkgit:git-repack[1] for more details.
gc.aggressiveWindow::
The window size parameter used in the delta compression
algorithm used by 'git gc --aggressive'. This defaults
- to 250.
+ to 250, which is a much more aggressive window size than
+ the default `--window` of 10.
++
+See the documentation for the `--window` option in
+linkgit:git-repack[1] for more details.
gc.auto::
When there are approximately more than this many loose
objects in the repository, `git gc --auto` will pack them.
Some Porcelain commands use this command to perform a
light-weight garbage collection from time to time. The
- default value is 6700. Setting this to 0 disables it.
+ default value is 6700.
++
+Setting this to 0 disables not only automatic packing based on the
+number of loose objects, but any other heuristic `git gc --auto` will
+otherwise use to determine if there's work to do, such as
+`gc.autoPackLimit`.
gc.autoPackLimit::
When there are more than this many packs that are not
marked with `*.keep` file in the repository, `git gc
--auto` consolidates them into one larger pack. The
- default value is 50. Setting this to 0 disables it.
+ default value is 50. Setting this to 0 disables it.
+ Setting `gc.auto` to 0 will also disable this.
++
+See the `gc.bigPackThreshold` configuration variable below. When in
+use, it'll affect how the auto pack limit works.
gc.autoDetach::
Make `git gc --auto` return immediately and run in background
this configuration variable is ignored, all packs except the base pack
will be repacked. After this the number of packs should go below
gc.autoPackLimit and gc.bigPackThreshold should be respected again.
++
+If the amount of memory estimated for `git repack` to run smoothly is
+not available and `gc.bigPackThreshold` is not set, the largest pack
+will also be excluded (this is the equivalent of running `git gc` with
+`--keep-base-pack`).
gc.writeCommitGraph::
If true, then gc will rewrite the commit-graph file when
- linkgit:git-gc[1] is run. When using linkgit:git-gc[1]
- '--auto' the commit-graph will be updated if housekeeping is
+ linkgit:git-gc[1] is run. When using `git gc --auto`
+ the commit-graph will be updated if housekeeping is
required. Default is false. See linkgit:git-commit-graph[1]
for details.
With "<pattern>" (e.g. "refs/stash")
in the middle, the setting applies only to the refs that
match the <pattern>.
++
+These types of entries are generally created as a result of using `git
+commit --amend` or `git rebase` and are the commits prior to the amend
+or rebase occurring. Since these changes are not part of the current
+project most users will want to expire them sooner, which is why the
+default is more aggressive than `gc.reflogExpire`.
gc.rerereResolved::
Records of conflicted merge you resolved earlier are
gpg.<format>.program::
Use this to customize the program used for the signing format you
chose. (see `gpg.program` and `gpg.format`) `gpg.program` can still
- be used as a legacy synonym for `gpg.openpgp.program`. The default
+ be used as a legacy synonym for `gpg.openpgp.program`. The default
value for `gpg.x509.program` is "gpgsm".
is turned off.
merge.renames::
- Whether and how Git detects renames. If set to "false",
- rename detection is disabled. If set to "true", basic rename
- detection is enabled. Defaults to the value of diff.renames.
+ Whether Git detects renames. If set to "false", rename detection
+ is disabled. If set to "true", basic rename detection is enabled.
+ Defaults to the value of diff.renames.
+
+merge.directoryRenames::
+ Whether Git detects directory renames, affecting what happens at
+ merge time to new files added to a directory on one side of
+ history when that directory was renamed on the other side of
+ history. If merge.directoryRenames is set to "false", directory
+ rename detection is disabled, meaning that such new files will be
+ left behind in the old directory. If set to "true", directory
+ rename detection is enabled, meaning that such new files will be
+ moved into the new directory. If set to "conflict", a conflict
+ will be reported for such paths. If merge.renames is false,
+ merge.directoryRenames is ignored and treated as false. Defaults
+ to "conflict".
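A minimal sketch of pinning this behaviour explicitly; the value shown is
simply the documented default:

------------------------------------------------
$ git config merge.directoryRenames conflict
------------------------------------------------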
merge.renormalize::
Tell Git that canonical representation of files in the
bitmapped and non-bitmapped objects (e.g., when serving a fetch
between an older, bitmapped pack and objects that have been
pushed since the last gc). The downside is that it consumes 4
- bytes per object of disk space, and that JGit's bitmap
- implementation does not understand it, causing it to complain if
- Git and JGit are used on the same repository. Defaults to false.
+ bytes per object of disk space. Defaults to true.
so that the local merge commits are included in the rebase (see
linkgit:git-rebase[1] for details).
+
-When preserve, also pass `--preserve-merges` along to 'git rebase'
-so that locally committed merge commits will not be flattened
-by running 'git pull'.
+When `preserve` (deprecated in favor of `merges`), also pass
+`--preserve-merges` along to 'git rebase' so that locally committed merge
+commits will not be flattened by running 'git pull'.
+
When the value is `interactive`, the rebase is run in interactive mode.
+
rebase.useBuiltin::
- Set to `false` to use the legacy shellscript implementation of
- linkgit:git-rebase[1]. Is `true` by default, which means use
- the built-in rewrite of it in C.
-+
-The C rewrite is first included with Git version 2.20. This option
-serves an an escape hatch to re-enable the legacy version in case any
-bugs are found in the rewrite. This option and the shellscript version
-of linkgit:git-rebase[1] will be removed in some future release.
-+
-If you find some reason to set this option to `false` other than
-one-off testing you should report the behavior difference as a bug in
-git.
+ Unused configuration variable. Used in Git versions 2.20 and
+ 2.21 as an escape hatch to enable the legacy shellscript
+ implementation of rebase. Now the built-in rewrite of it in C
+ is always used. Setting this will emit a warning, to alert any
+ remaining users that setting this now does nothing.
rebase.stat::
Whether to show a diffstat of what changed upstream since the last
packs created for clones and fetches, at the cost of some disk
space and extra time spent on the initial repack. This has
no effect if multiple packfiles are created.
- Defaults to false.
+ Defaults to true on bare repos, false otherwise.
--- /dev/null
+Trace2 config settings are only read from the system and global
+config files; repository local and worktree config files and `-c`
+command line arguments are not respected.
+
+trace2.normalTarget::
+ This variable controls the normal target destination.
+ It may be overridden by the `GIT_TR2` environment variable.
+ The following table shows possible values.
+
+trace2.perfTarget::
+ This variable controls the performance target destination.
+ It may be overridden by the `GIT_TR2_PERF` environment variable.
+ The following table shows possible values.
+
+trace2.eventTarget::
+ This variable controls the event target destination.
+ It may be overridden by the `GIT_TR2_EVENT` environment variable.
+ The following table shows possible values.
++
+include::../trace2-target-values.txt[]
+
+trace2.normalBrief::
+ Boolean. When true `time`, `filename`, and `line` fields are
+ omitted from normal output. May be overridden by the
+ `GIT_TR2_BRIEF` environment variable. Defaults to false.
+
+trace2.perfBrief::
+ Boolean. When true `time`, `filename`, and `line` fields are
+ omitted from PERF output. May be overridden by the
+ `GIT_TR2_PERF_BRIEF` environment variable. Defaults to false.
+
+trace2.eventBrief::
+ Boolean. When true `time`, `filename`, and `line` fields are
+ omitted from event output. May be overridden by the
+ `GIT_TR2_EVENT_BRIEF` environment variable. Defaults to false.
+
+trace2.eventNesting::
+ Integer. Specifies desired depth of nested regions in the
+ event output. Regions deeper than this value will be
+ omitted. May be overridden by the `GIT_TR2_EVENT_NESTING`
+ environment variable. Defaults to 2.
+
+trace2.configParams::
+ A comma-separated list of patterns of "important" config
+ settings that should be recorded in the trace2 output.
+ For example, `core.*,remote.*.url` would cause the trace2
+ output to contain events listing each configured remote.
+ May be overridden by the `GIT_TR2_CONFIG_PARAMS` environment
+ variable. Unset by default.
+
+trace2.destinationDebug::
+ Boolean. When true Git will print error messages when a
+ trace target destination cannot be opened for writing.
+ By default, these errors are suppressed and tracing is
+ silently disabled. May be overridden by the
+ `GIT_TR2_DST_DEBUG` environment variable.
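As a sketch of how these settings might be used together, assuming an
absolute pathname is an accepted target value per the values table included
above, and remembering that trace2.* is only read from the system and global
config files:

------------------------------------------------
# Enable performance tracing for all of this user's repositories.
$ git config --global trace2.perfTarget /tmp/git-trace2.perf
$ git config --global trace2.perfBrief true

# One-off override through the environment, without touching config.
$ GIT_TR2_PERF=/tmp/git-trace2.perf git status
------------------------------------------------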
-user.email::
- Your email address to be recorded in any newly created commits.
- Can be overridden by the `GIT_AUTHOR_EMAIL`, `GIT_COMMITTER_EMAIL`, and
- `EMAIL` environment variables. See linkgit:git-commit-tree[1].
-
user.name::
- Your full name to be recorded in any newly created commits.
- Can be overridden by the `GIT_AUTHOR_NAME` and `GIT_COMMITTER_NAME`
- environment variables. See linkgit:git-commit-tree[1].
+user.email::
+author.name::
+author.email::
+committer.name::
+committer.email::
+ The `user.name` and `user.email` variables determine what ends
+ up in the `author` and `committer` field of commit
+ objects.
+ If you need the `author` or `committer` to be different, the
+ `author.name`, `author.email`, `committer.name` or
+ `committer.email` variables can be set.
+ Also, all of these can be overridden by the `GIT_AUTHOR_NAME`,
+ `GIT_AUTHOR_EMAIL`, `GIT_COMMITTER_NAME`,
+ `GIT_COMMITTER_EMAIL` and `EMAIL` environment variables.
+ See linkgit:git-commit-tree[1] for more information.
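A short sketch of how these variables interact; the names and addresses below
are placeholders:

------------------------------------------------
# Fallback identity used for both author and committer.
$ git config --global user.name "A U Thor"
$ git config --global user.email author@example.com

# Override only the committer identity in this repository.
$ git config committer.name "C O Mitter"
$ git config committer.email committer@example.com

# Environment variables still win over all of the above.
$ GIT_AUTHOR_EMAIL=elsewhere@example.com git commit -m "example"
------------------------------------------------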
user.useConfigOnly::
Instruct Git to avoid trying to guess defaults for `user.email`
. there are more "src" modes and "src" sha1
. status is concatenated status characters for each parent
. no optional "score" number
-. single path, only for "dst"
+. tab-separated pathname(s) of the file
-Example:
+For `-c` and `--cc`, only the destination or final path is shown even
+if the file was renamed on any side of history. With
+`--combined-all-paths`, the name of the path in each parent is shown
+followed by the name of the path in the merge commit.
+
+Examples for `-c` and `--cc` without `--combined-all-paths`:
+------------------------------------------------
+::100644 100644 100644 fabadb8 cc95eb0 4866510 MM desc.c
+::100755 100755 100755 52b7a2d 6d1ac04 d2ac7d7 RM bar.sh
+::100644 100644 100644 e07d6c5 9042e82 ee91881 RR phooey.c
+------------------------------------------------
+
+Examples when `--combined-all-paths` added to either `-c` or `--cc`:
------------------------------------------------
-::100644 100644 100644 fabadb8 cc95eb0 4866510 MM describe.c
+::100644 100644 100644 fabadb8 cc95eb0 4866510 MM desc.c desc.c desc.c
+::100755 100755 100755 52b7a2d 6d1ac04 d2ac7d7 RM foo.sh bar.sh bar.sh
+::100644 100644 100644 e07d6c5 9042e82 ee91881 RR fooey.c fuey.c phooey.c
------------------------------------------------
Note that 'combined diff' lists only files which were modified from
Similar to two-line header for traditional 'unified' diff
format, `/dev/null` is used to signal created or deleted
files.
++
+However, if the --combined-all-paths option is provided, instead of a
+two-line from-file/to-file you get an N+1 line from-file/to-file header,
+where N is the number of parents in the merge commit:
+
+ --- a/file
+ --- a/file
+ --- a/file
+ +++ b/file
++
+This extended format can be useful if rename or copy detection is
+active, to allow you to see the original name of the file in different
+parents.
4. Chunk header format is modified to prevent people from
accidentally feeding it to `patch -p1`. Combined diff format
-U<n>::
--unified=<n>::
Generate diffs with <n> lines of context instead of
- the usual three.
+ the usual three. Implies `--patch`.
ifndef::git-format-patch[]
Implies `-p`.
endif::git-format-patch[]
+--output=<file>::
+ Output to a specific file instead of stdout.
+
+--output-indicator-new=<char>::
+--output-indicator-old=<char>::
+--output-indicator-context=<char>::
+ Specify the character used to indicate new, old or context
+ lines in the generated patch. Normally they are '+', '-' and
+ ' ' respectively.
+
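For illustration, a hypothetical invocation combining these options; the
output file name and indicator characters are arbitrary:

------------------------------------------------
$ git diff --output=changes.patch \
	--output-indicator-new='>' --output-indicator-old='<' HEAD~1 HEAD
------------------------------------------------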
ifndef::git-format-patch[]
--raw::
ifndef::git-log[]
number of modified files, as well as number of added and deleted
lines.
+-X[<param1,param2,...>]::
--dirstat[=<param1,param2,...>]::
Output the distribution of relative amount of changes for each
sub-directory. The behavior of `--dirstat` can be customized by
and accumulating child directory counts in the parent directories:
`--dirstat=files,10,cumulative`.
+--cumulative::
+ Synonym for --dirstat=cumulative
+
+--dirstat-by-file[=<param1,param2>...]::
+ Synonym for --dirstat=files,param1,param2...
+
--summary::
Output a condensed summary of extended header information
such as creations, renames and mode changes.
Turn off rename detection, even when the configuration
file gives the default to do so.
+--[no-]rename-empty::
+ Whether to use empty blobs as rename source.
+
ifndef::git-format-patch[]
--check::
Warn if changes introduce conflict markers or whitespace errors.
--binary::
In addition to `--full-index`, output a binary diff that
- can be applied with `git-apply`.
+ can be applied with `git-apply`. Implies `--patch`.
--abbrev[=<n>]::
Instead of showing the full 40-byte hexadecimal object
doc-diff [options] <from> <to> [-- <diff-options>]
doc-diff (-c|--clean)
--
-j=n parallel argument to pass to make
-f force rebuild; do not rely on cached results
-c,clean cleanup temporary working files
+j=n parallel argument to pass to make
+f force rebuild; do not rely on cached results
+c,clean cleanup temporary working files
+from-asciidoc use asciidoc with the 'from'-commit
+from-asciidoctor use asciidoctor with the 'from'-commit
+asciidoc use asciidoc with both commits
+to-asciidoc use asciidoc with the 'to'-commit
+to-asciidoctor use asciidoctor with the 'to'-commit
+asciidoctor use asciidoctor with both commits
+cut-header-footer cut away header and footer
"
SUBDIRECTORY_OK=1
. "$(git --exec-path)/git-sh-setup"
parallel=
force=
clean=
+from_program=
+to_program=
+cut_header_footer=
while test $# -gt 0
do
case "$1" in
clean=t ;;
-f)
force=t ;;
+ --from-asciidoctor)
+ from_program=-asciidoctor ;;
+ --to-asciidoctor)
+ to_program=-asciidoctor ;;
+ --asciidoctor)
+ from_program=-asciidoctor
+ to_program=-asciidoctor ;;
+ --from-asciidoc)
+ from_program=-asciidoc ;;
+ --to-asciidoc)
+ to_program=-asciidoc ;;
+ --asciidoc)
+ from_program=-asciidoc
+ to_program=-asciidoc ;;
+ --cut-header-footer)
+ cut_header_footer=-cut-header-footer ;;
--)
shift; break ;;
*)
ln -s "$dots/config.mak" "$tmp/worktree/config.mak"
fi
+construct_makemanflags () {
+ if test "$1" = "-asciidoc"
+ then
+ echo USE_ASCIIDOCTOR=
+ elif test "$1" = "-asciidoctor"
+ then
+ echo USE_ASCIIDOCTOR=YesPlease
+ fi
+}
+
+from_makemanflags=$(construct_makemanflags "$from_program") &&
+to_makemanflags=$(construct_makemanflags "$to_program") &&
+
+from_dir=$from_oid$from_program$cut_header_footer &&
+to_dir=$to_oid$to_program$cut_header_footer &&
+
# generate_render_makefile <srcdir> <dstdir>
generate_render_makefile () {
find "$1" -type f |
done
}
-# render_tree <committish_oid>
+# render_tree <committish_oid> <directory_name> <makemanflags>
render_tree () {
# Skip install-man entirely if we already have an installed directory.
# We can't rely on make here, since "install-man" unconditionally
# we then can't rely on during the render step). We use "mv" to make
# sure we don't get confused by a previous run that failed partway
# through.
- if ! test -d "$tmp/installed/$1"
+ oid=$1 &&
+ dname=$2 &&
+ makemanflags=$3 &&
+ if ! test -d "$tmp/installed/$dname"
then
- git -C "$tmp/worktree" checkout --detach "$1" &&
+ git -C "$tmp/worktree" checkout --detach "$oid" &&
make -j$parallel -C "$tmp/worktree" \
+ $makemanflags \
GIT_VERSION=omitted \
SOURCE_DATE_EPOCH=0 \
- DESTDIR="$tmp/installed/$1+" \
+ DESTDIR="$tmp/installed/$dname+" \
install-man &&
- mv "$tmp/installed/$1+" "$tmp/installed/$1"
+ mv "$tmp/installed/$dname+" "$tmp/installed/$dname"
fi &&
# As with "installed" above, we skip the render if it's already been
# done. So using make here is primarily just about running in
# parallel.
- if ! test -d "$tmp/rendered/$1"
+ if ! test -d "$tmp/rendered/$dname"
then
- generate_render_makefile "$tmp/installed/$1" "$tmp/rendered/$1+" |
+ generate_render_makefile "$tmp/installed/$dname" \
+ "$tmp/rendered/$dname+" |
make -j$parallel -f - &&
- mv "$tmp/rendered/$1+" "$tmp/rendered/$1"
+ mv "$tmp/rendered/$dname+" "$tmp/rendered/$dname"
+
+ if test "$cut_header_footer" = "-cut-header-footer"
+ then
+ for f in $(find "$tmp/rendered/$dname" -type f)
+ do
+ tail -n +3 "$f" | head -n -2 |
+ sed -e '1{/^$/d}' -e '${/^$/d}' >"$f+" &&
+ mv "$f+" "$f" ||
+ return 1
+ done
+ fi
fi
}
-render_tree $from_oid &&
-render_tree $to_oid &&
-git -C $tmp/rendered diff --no-index "$@" $from_oid $to_oid
+render_tree $from_oid $from_dir $from_makemanflags &&
+render_tree $to_oid $to_dir $to_makemanflags &&
+git -C $tmp/rendered diff --no-index "$@" $from_dir $to_dir
--server-option=<option>::
Transmit the given string to the server when communicating using
protocol version 2. The given string must not contain a NUL or LF
- character.
+ character. The server's handling of server options, including
+ unknown ones, is server-specific.
When multiple `--server-option=<option>` are given, they are all
sent to the other side in the order listed on the command line.
for command-line options).
-CONFIGURATION
--------------
-
-The optional configuration variable `core.excludesFile` indicates a path to a
-file containing patterns of file names to exclude from git-add, similar to
-$GIT_DIR/info/exclude. Patterns in the exclude file are used in addition to
-those in info/exclude. See linkgit:gitignore[5].
-
-
EXAMPLES
--------
am.threeWay configuration variable. For more information,
see am.threeWay in linkgit:git-config[1].
+--rerere-autoupdate::
+--no-rerere-autoupdate::
+ Allow the rerere mechanism to update the index with the
+ result of auto-conflict resolution if possible.
+
--ignore-space-change::
--ignore-whitespace::
--whitespace=<option>::
--------
[verse]
'git branch' [--color[=<when>] | --no-color] [-r | -a]
- [--list] [-v [--abbrev=<length> | --no-abbrev]]
+ [--list] [--show-current] [-v [--abbrev=<length> | --no-abbrev]]
[--column[=<options>] | --no-column] [--sort=<key>]
[(--merged | --no-merged) [<commit>]]
	[--contains [<commit>]] [--no-contains [<commit>]]
branch).
The command's second form creates a new branch head named <branchname>
-which points to the current `HEAD`, or <start-point> if given.
+which points to the current `HEAD`, or <start-point> if given. As a
+special case, for <start-point>, you may use `"A...B"` as a shortcut for
+the merge base of `A` and `B` if there is exactly one merge base. You
+can leave out at most one of `A` and `B`, in which case it defaults to
+`HEAD`.
Note that this will create the new branch, but it will not switch the
working tree to it; use "git checkout <newbranch>" to switch to the
branch --list 'maint-*'`, list only the branches that match
the pattern(s).
+--show-current::
+ Print the name of the current branch. In detached HEAD state,
+ nothing is printed.
+
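For example (the branch name shown is just a sample):

------------------------------------------------
$ git branch --show-current
maint
------------------------------------------------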
-v::
-vv::
--verbose::
+
When checking out paths from the index, this option lets you recreate
the conflicted merge in the specified paths.
++
+When switching branches with `--merge`, staged changes may be lost.
--conflict=<style>::
The same as --merge option above, but changes the way the
This means that you can use `git checkout -p` to selectively discard
edits from your current working tree. See the ``Interactive Mode''
section of linkgit:git-add[1] to learn how to operate the `--patch` mode.
++
+Note that this option uses the no overlay mode by default (see also
+`--[no-]overlay`), and currently doesn't support overlay mode.
--ignore-other-worktrees::
`git checkout` refuses when the wanted ref is already checked
Do not attempt to create a branch if a remote tracking branch
of the same name exists.
+--[no-]overlay::
+ In the default overlay mode, `git checkout` never
+ removes files from the index or the working tree. When
+ specifying `--no-overlay`, files that appear in the index and
+ working tree, but not in <tree-ish> are removed, to make them
+ match <tree-ish> exactly.
+
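A minimal sketch, with a made-up pathspec and tree-ish:

------------------------------------------------
# Make Documentation/ in the index and working tree match HEAD~2
# exactly, removing files that are not present in that tree-ish.
$ git checkout --no-overlay HEAD~2 -- Documentation/
------------------------------------------------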
<branch>::
Branch to checkout; if it refers to a branch (i.e., a name that,
when prepended with "refs/heads/", is a valid ref), then that
<start_point>::
The name of a commit at which to start the new branch; see
linkgit:git-branch[1] for details. Defaults to HEAD.
++
+As a special case, you may use `"A...B"` as a shortcut for the
+merge base of `A` and `B` if there is exactly one merge base. You can
+leave out at most one of `A` and `B`, in which case it defaults to `HEAD`.
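For example, assuming `main` and `topic` have exactly one merge base (both
names are placeholders):

------------------------------------------------
# Start a new branch at the merge base of the two branches.
$ git checkout -b fixup main...topic
------------------------------------------------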
<tree-ish>::
Tree to checkout from (when paths are given). If not specified,
With this option, 'git cherry-pick' will let you edit the commit
message prior to committing.
+--cleanup=<mode>::
+ This option determines how the commit message will be cleaned up before
+ being passed on to the commit machinery. See linkgit:git-commit[1] for more
+ details. In particular, if the '<mode>' is given a value of `scissors`,
+ scissors will be appended to `MERGE_MSG` before being passed on in the case
+ of a conflict.
+
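A hypothetical invocation; `<commit>` stands for whatever is being picked,
and on a conflict a scissors line is appended to `MERGE_MSG` as described
above:

------------------------------------------------
$ git cherry-pick --cleanup=scissors <commit>
------------------------------------------------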
-x::
When recording the commit, append a line that says
"(cherry picked from commit ...)" to the original commit
Pass the merge strategy-specific option through to the
merge strategy. See linkgit:git-merge[1] for details.
+--rerere-autoupdate::
+--no-rerere-autoupdate::
+ Allow the rerere mechanism to update the index with the
+ result of auto-conflict resolution if possible.
+
SEQUENCER SUBCOMMANDS
---------------------
include::sequencer.txt[]
-e <pattern>::
--exclude=<pattern>::
- In addition to those found in .gitignore (per directory) and
- $GIT_DIR/info/exclude, also consider these patterns to be in the
- set of the ignore rules in effect.
+ Use the given exclude pattern in addition to the standard ignore rules
+ (see linkgit:gitignore[5]).
-x::
- Don't use the standard ignore rules read from .gitignore (per
- directory) and $GIT_DIR/info/exclude, but do still use the ignore
- rules given with `-e` options. This allows removing all untracked
+ Don't use the standard ignore rules (see linkgit:gitignore[5]), but
+ still use the ignore rules given with `-e` options from the command
+ line. This allows removing all untracked
files, including build products. This can be used (possibly in
conjunction with 'git reset') to create a pristine
working directory to test a clean build.
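As a sketch, combining `-x` with `-e`; the kept file name is made up:

------------------------------------------------
# Remove untracked and ignored files (e.g. build products), but keep
# anything matching the -e pattern.
$ git clean -x -d -f -e config.local
------------------------------------------------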
is specified. This flag forces progress status even if the
standard error stream is not directed to a terminal.
+--server-option=<option>::
+ Transmit the given string to the server when communicating using
+ protocol version 2. The given string must not contain a NUL or LF
+ character. The server's handling of server options, including
+ unknown ones, is server-specific.
+ When multiple `--server-option=<option>` are given, they are all
+ sent to the other side in the order listed on the command line.
+
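A hedged example (the option strings and URL are made up; protocol version 2
is forced explicitly for this invocation):

------------
$ git -c protocol.version=2 clone \
	--server-option=priority=high --server-option=trace \
	https://git.example.com/repo.git
------------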
--no-checkout::
-n::
No checkout of HEAD is performed after the clone is complete.
emits the new commit object id on stdout. The log message is read
from the standard input, unless `-m` or `-F` options are given.
+The `-m` and `-F` options can be given any number of times, in any
+order. The commit log message will be composed in the order in which
+the options are given.
+
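For example (tree and parent taken from the current `HEAD`), two `-m` options
produce a two-paragraph commit message:

------------
$ git commit-tree "HEAD^{tree}" -p HEAD -m "Subject line" -m "Body paragraph"
------------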
A commit object may have any number of parents. With exactly one
parent, it is an ordinary commit. Having more than one parent makes
the commit a merge between several lines of history. Initial (root)
OPTIONS
-------
<tree>::
- An existing tree object
+ An existing tree object.
-p <parent>::
Each `-p` indicates the id of a parent commit object.
-F <file>::
Read the commit log message from the given file. Use `-` to read
- from the standard input.
+ from the standard input. This can be given more than once and the
+ content of each file becomes its own paragraph.
-S[<keyid>]::
--gpg-sign[=<keyid>]::
--local::
For writing options: write to the repository `.git/config` file.
- This is the default behavior.
+ This is the default behavior.
+
For reading options: read only from the repository `.git/config` rather than
from all available files.
output. The optional `default` parameter is used instead, if
there is no color configured for `name`.
+
-`--type=color [--default=<default>]` is preferred over `--get-color`.
+`--type=color [--default=<default>]` is preferred over `--get-color`
+(but note that `--get-color` will omit the trailing newline printed by
+`--type=color`).
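A minimal comparison (the `color.diff.new` slot is only an example); both
commands print the color's escape sequence, but only the first ends with a
newline:

------------
$ git config --get --type=color --default=red color.diff.new
$ git config --get-color color.diff.new red
------------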
-e::
--edit::
This is sort of "Git root" - if you run 'git daemon' with
'--base-path=/srv/git' on example.com, then if you later try to pull
'git://example.com/hello.git', 'git daemon' will interpret the path
- as '/srv/git/hello.git'.
+ as `/srv/git/hello.git`.
--base-path-relaxed::
If --base-path is enabled and repo lookup fails, with this option
The number of additional commits is the number
of commits which would be displayed by "git log v1.0.4..parent".
-The hash suffix is "-g" + 7-char abbreviation for the tip commit
+The hash suffix is "-g" + an unambiguous abbreviation for the tip commit
of parent (which was `2414721b194453f058079d897d13c4e377f92dc6`).
The "g" prefix stands for "git" and is used to allow describing the version of
a software depending on the SCM the software is managed with. This is useful
--------
[verse]
'git diff-tree' [--stdin] [-m] [-s] [-v] [--no-commit-id] [--pretty]
- [-t] [-r] [-c | --cc] [--root] [<common diff options>]
- <tree-ish> [<tree-ish>] [<path>...]
+ [-t] [-r] [-c | --cc] [--combined-all-paths] [--root]
+ [<common diff options>] <tree-ish> [<tree-ish>] [<path>...]
DESCRIPTION
-----------
itself and the commit log message is not shown, just like in any other
"empty diff" case.
+--combined-all-paths::
+ This flag causes combined diffs (used for merge commits) to
+ list the name of the file from all parents. It thus only has
+ effect when -c or --cc are specified, and is likely only
+ useful if filename changes are detected (i.e. when either
+	rename or copy detection has been requested).
+
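As a sketch (assuming `HEAD` is a merge commit), with rename detection turned
on so changed filenames can actually show up:

------------
$ git diff-tree --cc -M --combined-all-paths HEAD
------------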
--always::
Show the commit itself and the commit log message even
if the diff itself is empty.
include::pretty-formats.txt[]
+
include::diff-format.txt[]
GIT
When 'git-difftool' is invoked with the `-g` or `--gui` option
the default diff tool will be read from the configured
`diff.guitool` variable instead of `diff.tool`. The `--no-gui`
- option can be used to override this setting.
+ option can be used to override this setting. If `diff.guitool`
+	is not set, we will fall back in the order of `merge.guitool`,
+ `diff.tool`, `merge.tool` until a tool is found.
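Roughly, with the settings below (the tool names are only examples) and
`diff.guitool` left unset, `git difftool --gui` would fall back to the
configured `merge.guitool`:

------------
$ git config diff.tool vimdiff
$ git config merge.guitool meld
$ git difftool --gui
------------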
--[no-]trust-exit-code::
'git-difftool' invokes a diff tool individually on each file.
all `filemodify`, `filecopy`, `filerename` and `notemodify` commands in
the same commit, as `filedeleteall` wipes the branch clean (see below).
-The `LF` after the command is optional (it used to be required).
+The `LF` after the command is optional (it used to be required). Note
+that for reasons of backward compatibility, if the commit ends with a
+`data` command (i.e. it has no `from`, `merge`, `filemodify`,
+`filedelete`, `filecopy`, `filerename`, `filedeleteall` or
+`notemodify` commands) then two `LF` commands may appear at the end of
+the command instead of just one.
`author`
^^^^^^^^
'get-mark' SP ':' <idnum> LF
....
-This command can be used anywhere in the stream that comments are
-accepted. In particular, the `get-mark` command can be used in the
-middle of a commit but not in the middle of a `data` command.
-
See ``Responses To Commands'' below for details about how to read
this output safely.
<contents> LF
====
-This command can be used anywhere in the stream that comments are
-accepted. In particular, the `cat-blob` command can be used in the
-middle of a commit but not in the middle of a `data` command.
+This command can be used where a `filemodify` directive can appear,
+allowing it to be used in the middle of a commit. For a `filemodify`
+using an inline directive, it can also appear right before the `data`
+directive.
See ``Responses To Commands'' below for details about how to read
this output safely.
blob or tree from a previous commit for use in the current one (with
`filemodify`).
-The `ls` command can be used anywhere in the stream that comments are
-accepted, including the middle of a commit.
+The `ls` command can also be used where a `filemodify` directive can
+appear, allowing it to be used in the middle of a commit.
Reading from the active commit::
This form can only be used in the middle of a `commit`.
to force recomputation of all deltas can significantly reduce the
final packfile size (30-50% smaller can be quite typical).
+Instead of running `git repack` you can also run `git gc
+--aggressive`, which will also optimize other things after an import
+(e.g. pack loose refs). As noted in the "AGGRESSIVE" section in
+linkgit:git-gc[1] the `--aggressive` option will find new deltas with
+the `-f` option to linkgit:git-repack[1]. For the reasons elaborated
+on above, using `--aggressive` after a fast-import is one of the few
+cases where it's known to be worthwhile.
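For example (the dump file name is made up):

------------
$ git fast-import <project.fe
$ git gc --aggressive
------------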
MEMORY UTILIZATION
------------------
rewriting. When applying a tree filter, the command needs to
temporarily check out the tree to some directory, which may consume
considerable space in case of large projects. By default it
- does this in the '.git-rewrite/' directory but you can override
+ does this in the `.git-rewrite/` directory but you can override
that choice by this parameter.
-f::
with --no-full.
--connectivity-only::
- Check only the connectivity of tags, commits and tree objects. By
- avoiding to unpack blobs, this speeds up the operation, at the
- expense of missing corrupt objects or other problematic issues.
+ Check only the connectivity of reachable objects, making sure
+ that any objects referenced by a reachable tag, commit, or tree
+	are present. This speeds up the operation by avoiding reading
+ blobs entirely (though it does still check that referenced blobs
+ exist). This will detect corruption in commits and trees, but
+ not do any semantic checks (e.g., for format errors). Corruption
+ in blob objects will not be detected at all.
++
+Unreachable tags, commits, and trees will also be accessed to find the
+tips of dangling segments of history. Use `--no-dangling` if you don't
+care about this output and want to speed it up further.
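For example, the fastest connectivity-only check would be:

------------
$ git fsck --connectivity-only --no-dangling
------------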
--strict::
Enable more strict checking, namely to catch a file mode
reflog, rerere metadata or stale working trees. May also update ancillary
indexes such as the commit-graph.
-Users are encouraged to run this task on a regular basis within
-each repository to maintain good disk space utilization and good
-operating performance.
+When common porcelain operations that create objects are run, they
+will check whether the repository has grown substantially since the
+last maintenance, and if so run `git gc` automatically. See `gc.auto`
+below for how to disable this behavior.
-Some git commands may automatically run 'git gc'; see the `--auto` flag
-below for details. If you know what you're doing and all you want is to
-disable this behavior permanently without further considerations, just do:
-
-----------------------
-$ git config --global gc.auto 0
-----------------------
+Running `git gc` manually should only be needed when adding objects to
+a repository without regularly running such porcelain commands, to do
+a one-off repository optimization, or e.g. to clean up a suboptimal
+mass-import. See the "PACKFILE OPTIMIZATION" section in
+linkgit:git-fast-import[1] for more details on the import case.
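For reference, disabling the heuristic entirely amounts to setting `gc.auto`
to 0, e.g.:

------------
$ git config --global gc.auto 0
------------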
OPTIONS
-------
space utilization and performance. This option will cause
'git gc' to more aggressively optimize the repository at the expense
of taking much more time. The effects of this optimization are
- persistent, so this option only needs to be used occasionally; every
- few hundred changesets or so.
+ mostly persistent. See the "AGGRESSIVE" section below for details.
--auto::
With this option, 'git gc' checks whether any housekeeping is
required; if not, it exits without performing any work.
- Some git commands run `git gc --auto` after performing
- operations that could create many loose objects. Housekeeping
- is required if there are too many loose objects or too many
- packs in the repository.
-+
-If the number of loose objects exceeds the value of the `gc.auto`
-configuration variable, then all loose objects are combined into a
-single pack using `git repack -d -l`. Setting the value of `gc.auto`
-to 0 disables automatic packing of loose objects.
+
-If the number of packs exceeds the value of `gc.autoPackLimit`,
-then existing packs (except those marked with a `.keep` file
-or over `gc.bigPackThreshold` limit)
-are consolidated into a single pack by using the `-A` option of
-'git repack'.
-If the amount of memory is estimated not enough for `git repack` to
-run smoothly and `gc.bigPackThreshold` is not set, the largest
-pack will also be excluded (this is the equivalent of running `git gc`
-with `--keep-base-pack`).
-Setting `gc.autoPackLimit` to 0 disables automatic consolidation of
-packs.
+See the `gc.auto` option in the "CONFIGURATION" section below for how
+this heuristic works.
+
-If houskeeping is required due to many loose objects or packs, all
+Once housekeeping is triggered by exceeding the limits of
+configuration options such as `gc.auto` and `gc.autoPackLimit`, all
other housekeeping tasks (e.g. rerere, working trees, reflog...) will
be performed as well.
--prune=<date>::
Prune loose objects older than date (default is 2 weeks ago,
overridable by the config variable `gc.pruneExpire`).
- --prune=all prunes loose objects regardless of their age and
+ --prune=now prunes loose objects regardless of their age and
increases the risk of corruption if another process is writing to
the repository concurrently; see "NOTES" below. --prune is on by
default.
`.keep` files are consolidated into a single pack. When this
option is used, `gc.bigPackThreshold` is ignored.
+AGGRESSIVE
+----------
+
+When the `--aggressive` option is supplied, linkgit:git-repack[1] will
+be invoked with the `-f` flag, which in turn will pass
+`--no-reuse-delta` to linkgit:git-pack-objects[1]. This will throw
+away any existing deltas and re-compute them, at the expense of
+spending much more time on the repacking.
+
+The effects of this are mostly persistent, e.g. when packs and loose
+objects are coalesced into another pack, the existing deltas in
+that pack might get re-used, but there are also various cases where we
+might pick a sub-optimal delta from a newer pack instead.
+
+Furthermore, supplying `--aggressive` will tweak the `--depth` and
+`--window` options passed to linkgit:git-repack[1]. See the
+`gc.aggressiveDepth` and `gc.aggressiveWindow` settings below. By
+using a larger window size we're more likely to find more optimal
+deltas.
+
+It's probably not worth it to use this option on a given repository
+without running tailored performance benchmarks on it. It takes a lot
+more time, and the resulting space/delta optimization may or may not
+be worth it. Not using this at all is the right trade-off for most
+users and their repositories.
+
CONFIGURATION
-------------
-The optional configuration variable `gc.reflogExpire` can be
-set to indicate how long historical entries within each branch's
-reflog should remain available in this repository. The setting is
-expressed as a length of time, for example '90 days' or '3 months'.
-It defaults to '90 days'.
-
-The optional configuration variable `gc.reflogExpireUnreachable`
-can be set to indicate how long historical reflog entries which
-are not part of the current branch should remain available in
-this repository. These types of entries are generally created as
-a result of using `git commit --amend` or `git rebase` and are the
-commits prior to the amend or rebase occurring. Since these changes
-are not part of the current project most users will want to expire
-them sooner. This option defaults to '30 days'.
-
-The above two configuration variables can be given to a pattern. For
-example, this sets non-default expiry values only to remote-tracking
-branches:
-
-------------
-[gc "refs/remotes/*"]
- reflogExpire = never
- reflogExpireUnreachable = 3 days
-------------
-
-The optional configuration variable `gc.rerereResolved` indicates
-how long records of conflicted merge you resolved earlier are
-kept. This defaults to 60 days.
-
-The optional configuration variable `gc.rerereUnresolved` indicates
-how long records of conflicted merge you have not resolved are
-kept. This defaults to 15 days.
-
-The optional configuration variable `gc.packRefs` determines if
-'git gc' runs 'git pack-refs'. This can be set to "notbare" to enable
-it within all non-bare repos or it can be set to a boolean value.
-This defaults to true.
-
-The optional configuration variable `gc.writeCommitGraph` determines if
-'git gc' should run 'git commit-graph write'. This can be set to a
-boolean value. This defaults to false.
-
-The optional configuration variable `gc.aggressiveWindow` controls how
-much time is spent optimizing the delta compression of the objects in
-the repository when the --aggressive option is specified. The larger
-the value, the more time is spent optimizing the delta compression. See
-the documentation for the --window option in linkgit:git-repack[1] for
-more details. This defaults to 250.
-
-Similarly, the optional configuration variable `gc.aggressiveDepth`
-controls --depth option in linkgit:git-repack[1]. This defaults to 50.
-
-The optional configuration variable `gc.pruneExpire` controls how old
-the unreferenced loose objects have to be before they are pruned. The
-default is "2 weeks ago".
-
-Optional configuration variable `gc.worktreePruneExpire` controls how
-old a stale working tree should be before `git worktree prune` deletes
-it. Default is "3 months ago".
+The below documentation is the same as what's found in
+linkgit:git-config[1]:
+include::config/gc.txt[]
NOTES
-----
particular, it will keep not only objects referenced by your current set
of branches and tags, but also objects referenced by the index,
remote-tracking branches, refs saved by 'git filter-branch' in
-refs/original/, or reflogs (which may reference commits in branches
-that were later amended or rewound).
+refs/original/, reflogs (which may reference commits in branches
+that were later amended or rewound), and anything else in the refs/* namespace.
If you are expecting some objects to be deleted and they aren't, check
all of those locations and decide whether it makes sense in your case to
remove those references.
However, these features fall short of a complete solution, so users who
run commands concurrently have to live with some risk of corruption (which
-seems to be low in practice) unless they turn off automatic garbage
-collection with 'git config gc.auto 0'.
+seems to be low in practice).
HOOKS
-----
mechanism. Only useful with `--untracked`.
--exclude-standard::
- Do not pay attention to ignored files specified via the `.gitignore`
+ Do not pay attention to ignored files specified via the `.gitignore`
mechanism. Only useful when searching files in the current directory
with `--no-index`.
already opened konqueror in a new tab if possible.
For consistency, we also try such a trick if 'man.konqueror.path' is
-set to something like 'A_PATH_TO/konqueror'. That means we will try to
-launch 'A_PATH_TO/kfmclient' instead.
+set to something like `A_PATH_TO/konqueror`. That means we will try to
+launch `A_PATH_TO/kfmclient` instead.
If you really want to use 'konqueror', then you can use something like
the following:
Accelerated static Apache 2.x::
Similar to the above, but Apache can be used to return static
- files that are stored on disk. On many systems this may
+ files that are stored on disk. On many systems this may
be more efficient as Apache can ask the kernel to copy the
file contents from the file system directly to the network:
+
NAME
----
-git-interpret-trailers - add or parse structured information in commit messages
+git-interpret-trailers - Add or parse structured information in commit messages
SYNOPSIS
--------
linkgit:git-status[1] `--short` or linkgit:git-diff[1]
`--name-status` for more user-friendly alternatives.
+
+--
This option identifies the file status with the following tags (followed by
a space) at the start of each line:
C:: modified/changed
K:: to be killed
?:: other
+--
-v::
Similar to `-t`, but use lowercase letters for files
displayed.
--refs::
- Do not show peeled tags or pseudorefs like HEAD in the output.
+ Do not show peeled tags or pseudorefs like `HEAD` in the output.
-q::
--quiet::
taken as relative to the current working directory. E.g. when you are
in a directory 'sub' that has a directory 'dir', you can run 'git
ls-tree -r HEAD dir' to list the contents of the tree (that is
- 'sub/dir' in `HEAD`). You don't want to give a tree that is not at the
+ `sub/dir` in `HEAD`). You don't want to give a tree that is not at the
root level (e.g. `git ls-tree -r HEAD:sub dir`) in this case, as that
- would result in asking for 'sub/sub/dir' in the `HEAD` commit.
+ would result in asking for `sub/sub/dir` in the `HEAD` commit.
However, the current working directory can be ignored by passing
--full-tree option.
If `--log` is specified, a shortlog of the commits being merged
will be appended to the specified message.
---[no-]rerere-autoupdate::
+--rerere-autoupdate::
+--no-rerere-autoupdate::
Allow the rerere mechanism to update the index with the
result of auto-conflict resolution if possible.
FUNCTIONS
---------
get_merge_tool::
- returns a merge tool.
+	returns a merge tool. The return code is 1 if we returned a guessed
+	merge tool, else 0. `$GIT_MERGETOOL_GUI` may be set to `true` to
+	search for the appropriate guitool.
get_merge_tool_cmd::
returns the custom command for a merge tool.
--gui::
When 'git-mergetool' is invoked with the `-g` or `--gui` option
the default merge tool will be read from the configured
- `merge.guitool` variable instead of `merge.tool`.
+ `merge.guitool` variable instead of `merge.tool`. If
+ `merge.guitool` is not set, we will fallback to the tool
+ configured under `merge.tool`.
--no-gui::
This overrides a previous `-g` or `--gui` setting and reads the
-C <object>::
--reuse-message=<object>::
- Take the given blob object (for example, another note) as the
+ Take the given blob object (for example, another note) as the
note message. (Use `git notes copy <object>` instead to
copy notes between objects.)
started.
--reset::
- Same as -m, except that unmerged entries are discarded
- instead of failing.
+ Same as -m, except that unmerged entries are discarded instead
+ of failing. When used with `-u`, updates leading to loss of
+ working tree changes will not abort the operation.
-u::
After a successful merge, update the files in the work
Instead of reading tree object(s) into the index, just empty
it.
+-q::
+--quiet::
+ Quiet, suppress feedback messages.
+
<tree-ish#>::
The id of the tree object(s) to be read/merged.
+
See also INCOMPATIBLE OPTIONS below.
+--rerere-autoupdate::
+--no-rerere-autoupdate::
+ Allow the rerere mechanism to update the index with the
+ result of auto-conflict resolution if possible.
+
-S[<keyid>]::
--gpg-sign[=<keyid>]::
GPG-sign commits. The `keyid` argument is optional and
+
By default, or when `no-rebase-cousins` was specified, commits which do not
have `<upstream>` as direct ancestor will keep their original branch point,
-i.e. commits that would be excluded by gitlink:git-log[1]'s
+i.e. commits that would be excluded by linkgit:git-log[1]'s
`--ancestry-path` option will keep their original ancestry by default. If
the `rebase-cousins` mode is turned on, such commits are instead rebased
onto `<upstream>` (or `<onto>`, if specified).
+
-The `--rebase-merges` mode is similar in spirit to `--preserve-merges`, but
-in contrast to that option works well in interactive rebases: commits can be
-reordered, inserted and dropped at will.
+The `--rebase-merges` mode is similar in spirit to the deprecated
+`--preserve-merges`, but in contrast to that option works well in interactive
+rebases: commits can be reordered, inserted and dropped at will.
+
It is currently only possible to recreate the merge commits using the
`recursive` merge strategy; Different merge strategies can be used only via
-p::
--preserve-merges::
- Recreate merge commits instead of flattening the history by replaying
- commits a merge commit introduces. Merge conflict resolutions or manual
- amendments to merge commits are not preserved.
+ [DEPRECATED: use `--rebase-merges` instead] Recreate merge commits
+ instead of flattening the history by replaying commits a merge commit
+ introduces. Merge conflict resolutions or manual amendments to merge
+ commits are not preserved.
+
This uses the `--interactive` machinery internally, but combining it
with the `--interactive` option explicitly is generally not a good
BUGS
----
-The todo list presented by `--preserve-merges --interactive` does not
-represent the topology of the revision graph. Editing commits and
-rewording their commit messages should work fine, but attempts to
-reorder commits tend to produce counterintuitive results. Use
-`--rebase-merges` in such scenarios instead.
+The todo list presented by the deprecated `--preserve-merges --interactive`
+does not represent the topology of the revision graph. Editing commits and
+rewording their commit messages should work fine, but attempts to reorder
+commits tend to produce counterintuitive results. Use `--rebase-merges` in
+such scenarios instead.
For example, an attempt to rearrange
------------
link-level address).
"ext::git-server-alias foo %G/repo% with% spaces %Vfoo"::
- Represents a repository with path '/repo with spaces' accessed
+ Represents a repository with path `/repo with spaces` accessed
using the helper program "git-server-alias foo". The hostname for
the remote server passed in the protocol stream will be "foo"
(this allows multiple virtual Git servers to share a
SEE ALSO
--------
-linkgit:gitremote-helpers[1]
+linkgit:gitremote-helpers[7]
GIT
---
SEE ALSO
--------
-linkgit:gitremote-helpers[1]
+linkgit:gitremote-helpers[7]
GIT
---
git-remote-helpers
==================
-This document has been moved to linkgit:gitremote-helpers[1].
+This document has been moved to linkgit:gitremote-helpers[7].
Please let the owners of the referring site know so that they can update the
link you clicked to get here.
+++ /dev/null
-git-remote-testgit(1)
-=====================
-
-NAME
-----
-git-remote-testgit - Example remote-helper
-
-
-SYNOPSIS
---------
-[verse]
-git clone testgit::<source-repo> [<destination>]
-
-DESCRIPTION
------------
-
-This command is a simple remote-helper, that is used both as a
-testcase for the remote-helper functionality, and as an example to
-show remote-helper authors one possible implementation.
-
-The best way to learn more is to read the comments and source code in
-'git-remote-testgit'.
-
-SEE ALSO
---------
-linkgit:gitremote-helpers[1]
-
-GIT
----
-Part of the linkgit:git[1] suite
hand resolutions to their corresponding automerge results.
[NOTE]
-You need to set the configuration variable rerere.enabled in order to
+You need to set the configuration variable `rerere.enabled` in order to
enable this command.
`reset --merge` is meant to be used when resetting out of a conflicted
merge. Any mergy operation guarantees that the working tree file that is
-involved in the merge does not have local change wrt the index before
-it starts, and that it writes the result out to the working tree. So if
+involved in the merge does not have a local change with respect to the index
+before it starts, and that it writes the result out to the working tree. So if
we see some difference between the index and the target and also
between the index and the working tree, then it means that we are not
resetting out from a state that a mergy operation left after failing
With this option, 'git revert' will not start the commit
message editor.
+--cleanup=<mode>::
+ This option determines how the commit message will be cleaned up before
+ being passed on to the commit machinery. See linkgit:git-commit[1] for more
+ details. In particular, if the '<mode>' is given a value of `scissors`,
+ scissors will be appended to `MERGE_MSG` before being passed on in the case
+ of a conflict.
+
-n::
--no-commit::
Usually the command automatically creates some commits with
Pass the merge strategy-specific option through to the
merge strategy. See linkgit:git-merge[1] for details.
+--rerere-autoupdate::
+--no-rerere-autoupdate::
+ Allow the rerere mechanism to update the index with the
+ result of auto-conflict resolution if possible.
+
SEQUENCER SUBCOMMANDS
---------------------
include::sequencer.txt[]
------------------------------------------------
These three branches all forked from a common commit, [master],
-whose commit message is "Add {apostrophe}git show-branch{apostrophe}".
+whose commit message is "Add \'git show-branch'".
The "fixes" branch adds one commit "Introduce "reset type" flag to
"git reset"". The "mhf" branch adds many other commits.
The current branch is "master".
--------
[verse]
'git stash' list [<options>]
-'git stash' show [<stash>]
+'git stash' show [<options>] [<stash>]
'git stash' drop [-q|--quiet] [<stash>]
'git stash' ( pop | apply ) [--index] [-q|--quiet] [<stash>]
'git stash' branch <branchname> [<stash>]
The command takes options applicable to the 'git log'
command to control what is shown and how. See linkgit:git-log[1].
-show [<stash>]::
+show [<options>] [<stash>]::
Show the changes recorded in the stash entry as a diff between the
stashed contents and the commit back when the stash entry was first
command line arguments. Parsers should ignore headers they
don't recognize.
-### Branch Headers
+Branch Headers
+^^^^^^^^^^^^^^
If `--branch` is given, a series of header lines are printed with
information about the current branch.
------------------------------------------------------------
....
-### Changed Tracked Entries
+Changed Tracked Entries
+^^^^^^^^^^^^^^^^^^^^^^^
Following the headers, a series of lines are printed for tracked
entries. One of three different line formats may be used to describe
--------------------------------------------------------
....
-### Other Items
+Other Items
+^^^^^^^^^^^
Following the tracked entries (and if requested), a series of
lines will be printed for untracked and then ignored items
! <path>
-### Pathname Format Notes and -z
+Pathname Format Notes and -z
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When the `-z` option is given, pathnames are printed as is and
without any quoting and lines are terminated with a NUL (ASCII 0x00)
SYNOPSIS
--------
[verse]
+'git submodule' [--quiet] [--cached]
'git submodule' [--quiet] add [<options>] [--] <repository> [<path>]
'git submodule' [--quiet] status [--cached] [--recursive] [--] [<path>...]
'git submodule' [--quiet] init [--] [<path>...]
'git submodule' [--quiet] deinit [-f|--force] (--all|[--] <path>...)
'git submodule' [--quiet] update [<options>] [--] [<path>...]
+'git submodule' [--quiet] set-branch [<options>] [--] <path>
'git submodule' [--quiet] summary [<options>] [--] [<path>...]
'git submodule' [--quiet] foreach [--recursive] <command>
'git submodule' [--quiet] sync [--recursive] [--] [<path>...]
COMMANDS
--------
+With no arguments, shows the status of existing submodules. Several
+subcommands are available to perform operations on the submodules.
+
add [-b <branch>] [-f|--force] [--name <name>] [--reference <repository>] [--depth <depth>] [--] <repository> [<path>]::
Add the given repository as a submodule at the given path
to the changeset to be committed next to the current
or ../), the location relative to the superproject's default remote
repository (Please note that to specify a repository 'foo.git'
which is located right next to a superproject 'bar.git', you'll
-have to use '../foo.git' instead of './foo.git' - as one might expect
+have to use `../foo.git` instead of `./foo.git` - as one might expect
when following the rules for relative URLs - because the evaluation
of relative URLs in Git is identical to that of relative directories).
+
If `--recursive` is specified, this command will recurse into the
registered submodules, and update any nested submodules within.
--
+set-branch ((-d|--default)|(-b|--branch <branch>)) [--] <path>::
+ Sets the default remote tracking branch for the submodule. The
+ `--branch` option allows the remote branch to be specified. The
+	`--default` option removes the `submodule.<name>.branch` configuration
+ key, which causes the tracking branch to default to 'master'.
+
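A sketch (the submodule path and branch name are made up):

------------
$ git submodule set-branch --branch stable -- vendor/libfoo
$ git submodule set-branch --default -- vendor/libfoo
------------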
summary [--cached|--files] [(-n|--summary-limit) <n>] [commit] [--] [<path>...]::
Show commit summary between the given commit (defaults to HEAD) and
working tree/index. For a submodule in question, a series of commits
This option is only valid for the deinit command. Unregister all
submodules in the working tree.
--b::
---branch::
+-b <branch>::
+--branch <branch>::
Branch of repository to add as submodule.
The name of the branch is recorded as `submodule.<name>.branch` in
`.gitmodules` for `update --remote`. A special value of `.` is used to
indicate that the name of the branch in the submodule should be the
- same name as the current branch in the current repository.
+ same name as the current branch in the current repository. If the
+ option is not specified, it defaults to 'master'.
-f::
--force::
command-line argument.
+
This automatically updates the rev_map if needed (see
-'$GIT_DIR/svn/\*\*/.rev_map.*' in the FILES section below for details).
+'$GIT_DIR/svn/\**/.rev_map.*' in the FILES section below for details).
--localtime;;
Store Git commit times in the local time zone instead of UTC. This
and have no uncommitted changes.
+
This automatically updates the rev_map if needed (see
-'$GIT_DIR/svn/\*\*/.rev_map.*' in the FILES section below for details).
+'$GIT_DIR/svn/\**/.rev_map.*' in the FILES section below for details).
-l;;
--local;;
way to repair the repo is to use 'reset'.
+
Only the rev_map and refs/remotes/git-svn are changed (see
-'$GIT_DIR/svn/\*\*/.rev_map.*' in the FILES section below for details).
+'$GIT_DIR/svn/\**/.rev_map.*' in the FILES section below for details).
Follow 'reset' with a 'fetch' and then 'git reset' or 'git rebase' to
move local branches onto the new tree.
+
This option can only be used for one-shot imports as 'git svn'
will not be able to fetch again without metadata. Additionally,
-if you lose your '$GIT_DIR/svn/\*\*/.rev_map.*' files, 'git svn' will not
+if you lose your '$GIT_DIR/svn/\**/.rev_map.*' files, 'git svn' will not
be able to rebuild them.
+
The 'git svn log' command will not work on repositories using
tags = tags/*/project-a:refs/remotes/project-a/tags/*
------------------------------------------------------------------------
-Keep in mind that the '\*' (asterisk) wildcard of the local ref
-(right of the ':') *must* be the farthest right path component;
+Keep in mind that the `*` (asterisk) wildcard of the local ref
+(right of the `:`) *must* be the farthest right path component;
however the remote wildcard may be anywhere as long as it's an
-independent path component (surrounded by '/' or EOL). This
+independent path component (surrounded by `/` or EOL). This
type of configuration is not automatically created by 'init' and
should be manually entered with a text-editor or using 'git config'.
FILES
-----
-$GIT_DIR/svn/\*\*/.rev_map.*::
+$GIT_DIR/svn/\**/.rev_map.*::
Mapping between Subversion revision numbers and Git commit
names. In a repository where the noMetadata option is not set,
this can be rebuilt from the git-svn-id: lines that are at the
man page on an already opened konqueror in a new tab if possible.
For consistency, we also try such a trick if 'browser.konqueror.path' is
-set to something like 'A_PATH_TO/konqueror'. That means we will try to
-launch 'A_PATH_TO/kfmclient' instead.
+set to something like `A_PATH_TO/konqueror`. That means we will try to
+launch `A_PATH_TO/kfmclient` instead.
If you really want to use 'konqueror', then you can use something like
the following:
In general, all pseudo refs are per working tree and all refs starting
with "refs/" are shared. Pseudo refs are ones like HEAD which are
-directly under GIT_DIR instead of inside GIT_DIR/refs. There are one
+directly under GIT_DIR instead of inside GIT_DIR/refs. There is one
exception to this: refs inside refs/bisect and refs/worktree are not
shared.
The command-line parameters passed to the configured command are
determined by the ssh variant. See `ssh.variant` option in
linkgit:git-config[1] for details.
-
+
`$GIT_SSH_COMMAND` takes precedence over `$GIT_SSH`, and is interpreted
by the shell, which allows additional arguments to be included.
Each line in `gitattributes` file is of form:
- pattern attr1 attr2 ...
+ pattern attr1 attr2 ...
That is, a pattern followed by an attributes list,
separated by whitespaces. Leading and trailing whitespaces are
support will checkout `foo.ps1` as UTF-8 encoded file. This will
typically cause trouble for the users of this file.
+
-If a Git client, that does not support the `working-tree-encoding`
-attribute, adds a new file `bar.ps1`, then `bar.ps1` will be
+If a Git client that does not support the `working-tree-encoding`
+attribute adds a new file `bar.ps1`, then `bar.ps1` will be
stored "as-is" internally (in this example probably as UTF-16).
A client with `working-tree-encoding` support will interpret the
internal contents as UTF-8 and try to convert it to UTF-16 on checkout.
Use the following attributes if your '*.ps1' files are UTF-16 little
endian encoded without BOM and you want Git to use Windows line endings
-in the working directory (use `UTF-16-LE-BOM` instead of `UTF-16LE` if
+in the working directory (use `UTF-16LE-BOM` instead of `UTF-16LE` if
you want UTF-16 little endian with BOM).
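An attributes line matching that description might look like this (the
`*.ps1` pattern follows the example above):

------------
*.ps1		text working-tree-encoding=UTF-16LE eol=CRLF
------------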
Please note, it is highly recommended to
explicitly define the line endings with `eol` if the `working-tree-encoding`
variable `GIT_EDITOR=:` if the command will not bring up an editor
to modify the commit message.
+The default 'pre-commit' hook, when enabled--and with the
+`hooks.allownonascii` config option unset or set to false--prevents
+the use of non-ASCII filenames.
+
prepare-commit-msg
~~~~~~~~~~~~~~~~~~
from standard input. Exiting with non-zero status from this script prevent
`git-p4 submit` from launching. Run `git-p4 submit --help` for details.
+post-index-change
+~~~~~~~~~~~~~~~~~
+
+This hook is invoked when the index is written in read-cache.c
+do_write_locked_index.
+
+The first parameter passed to the hook is an indicator of whether the
+working directory was updated: "1" means the working directory was
+updated, "0" means it was not.
+
+The second parameter is an indicator of whether the index was updated
+and the skip-worktree bits could therefore have changed: "1" means they
+could have been updated, "0" means they were not.
+
+Only one parameter should be set to "1" when the hook runs; the hook
+should never be invoked with both parameters set to "1".
+
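A minimal hook sketch (illustrative only) that just records the two flags:

------------
#!/bin/sh
# .git/hooks/post-index-change
# $1: "1" if the working directory was updated, "0" otherwise
# $2: "1" if the index was updated (skip-worktree bits may have changed)
echo "post-index-change: workdir=$1 index=$2" >&2
------------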
GIT
---
Part of the linkgit:git[1] suite
- Other consecutive asterisks are considered regular asterisks and
will match according to the previous rules.
+CONFIGURATION
+-------------
+
+The optional configuration variable `core.excludesFile` indicates a path to a
+file containing patterns of file names to exclude, similar to
+`$GIT_DIR/info/exclude`. Patterns in the exclude file are used in addition to
+those in `$GIT_DIR/info/exclude`.
+
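For example (the file name is arbitrary):

------------
$ git config --global core.excludesFile ~/.gitignore_global
------------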
NOTES
-----
-----
User configuration and preferences are stored at:
-* '$XDG_CONFIG_HOME/git/gitk' if it exists, otherwise
-* '$HOME/.gitk' if it exists
+* `$XDG_CONFIG_HOME/git/gitk` if it exists, otherwise
+* `$HOME/.gitk` if it exists
-If neither of the above exist then '$XDG_CONFIG_HOME/git/gitk' is created and
+If neither of the above exist then `$XDG_CONFIG_HOME/git/gitk` is created and
used by default. If '$XDG_CONFIG_HOME' is not set it defaults to
-'$HOME/.config' in all cases.
+`$HOME/.config` in all cases.
History
-------
This defines two submodules, `libfoo` and `libbar`. These are expected to
-be checked out in the paths 'include/foo' and 'include/bar', and for both
+be checked out in the paths `include/foo` and `include/bar`, and for both
submodules a URL is specified which can be used for cloning the submodules.
SEE ALSO
-gitremote-helpers(1)
+gitremote-helpers(7)
====================
NAME
'option dry-run' {'true'|'false'}:
If true, pretend the operation completed successfully,
- but don't actually change any repository data. For most
+ but don't actually change any repository data. For most
helpers this only applies to the 'push', if supported.
'option servpath <c-style-quoted-path>'::
linkgit:git-remote-fd[1]
-linkgit:git-remote-testgit[1]
-
linkgit:git-fast-import[1]
GIT
to the object database, not to the repository!) in your
alternates file, but it will not work if you use absolute
paths unless the absolute path in filesystem and web URL
- is the same. See also 'objects/info/http-alternates'.
+ is the same. See also `objects/info/http-alternates`.
objects/info/http-alternates::
This file records URLs to alternate object stores that
* built-in values (some set during build stage),
* common system-wide configuration file (defaults to
- '/etc/gitweb-common.conf'),
+ `/etc/gitweb-common.conf`),
* either per-instance configuration file (defaults to 'gitweb_config.perl'
  in the same directory as the installed gitweb), or if it does not exist
- then fallback system-wide configuration file (defaults to '/etc/gitweb.conf').
+ then fallback system-wide configuration file (defaults to `/etc/gitweb.conf`).
Values obtained in later configuration files override values obtained earlier
in the above sequence.
subroutine. For example, one might want to put gitweb configuration
related to access control for viewing repositories via Gitolite (one
of Git repository management tools) in a separate file, e.g. in
-'/etc/gitweb-gitolite.conf'. To include it, put
+`/etc/gitweb-gitolite.conf`. To include it, put
--------------------------------------------------
read_config_file("/etc/gitweb-gitolite.conf");
http://git.example.com/gitweb.cgi/foo/bar.git
------------------------------------------------
+
-will map to the path '/srv/git/foo/bar.git' on the filesystem.
+will map to the path `/srv/git/foo/bar.git` on the filesystem.
$projects_list::
Name of a plain text file listing projects, or a name of directory
$mimetypes_file::
File to use for (filename extension based) guessing of MIME types before
- trying '/etc/mime.types'. *NOTE* that this path, if relative, is taken
+ trying `/etc/mime.types`. *NOTE* that this path, if relative, is taken
as relative to the current Git repository, not to CGI script. If unset,
- only '/etc/mime.types' is used (if present on filesystem). If no mimetypes
+ only `/etc/mime.types` is used (if present on filesystem). If no mimetypes
file is found, mimetype guessing based on extension of file is disabled.
Unset by default.
+
This list should contain the URI of gitweb's standard stylesheet. The default
URI of gitweb stylesheet can be set at build time using the `GITWEB_CSS`
-makefile variable. Its default value is 'static/gitweb.css'
-(or 'static/gitweb.min.css' if the `CSSMIN` variable is defined,
+makefile variable. Its default value is `static/gitweb.css`
+(or `static/gitweb.min.css` if the `CSSMIN` variable is defined,
i.e. if CSS minifier is used during build).
+
*Note*: there is also a legacy `$stylesheet` configuration variable, which was
is displayed in the top right corner of each gitweb page and used as
a logo for the Atom feed. Relative to the base URI of gitweb (as a path).
Can be adjusted when building gitweb using `GITWEB_LOGO` variable
- By default set to 'static/git-logo.png'.
+ By default set to `static/git-logo.png`.
$favicon::
Points to the location where you put 'git-favicon.png' on your web
may display them in the browser's URL bar and next to the site name in
bookmarks. Relative to the base URI of gitweb. Can be adjusted at
build time using `GITWEB_FAVICON` variable.
- By default set to 'static/git-favicon.png'.
+ By default set to `static/git-favicon.png`.
$javascript::
Points to the location where you put 'gitweb.js' on your web server,
Relative to the base URI of gitweb. Can be set at build time using
the `GITWEB_JS` build-time configuration variable.
+
-The default value is either 'static/gitweb.js', or 'static/gitweb.min.js' if
+The default value is either `static/gitweb.js`, or `static/gitweb.min.js` if
the `JSMIN` build variable was defined, i.e. if JavaScript minifier was used
at build time. *Note* that this single file is generated from multiple
individual JavaScript "modules".
doesn't result in some other type; by default "text/plain".
Gitweb guesses mimetype of a file to display based on extension
of its filename, using `$mimetypes_file` (if set and file exists)
- and '/etc/mime.types' files (see *mime.types*(5) manpage; only
+ and `/etc/mime.types` files (see *mime.types*(5) manpage; only
filename extension rules are supported by gitweb).
$default_text_plain_charset::
(for example one for `git://` protocol, and one for `http://`
protocol).
+
-Note that per repository configuration can be set in '$GIT_DIR/cloneurl'
+Note that per repository configuration can be set in `$GIT_DIR/cloneurl`
file, or as values of multi-value `gitweb.url` configuration variable in
project config. Per-repository configuration takes precedence over value
composed from `@git_base_url_list` elements and project name.
If the server load exceeds this value then gitweb will return
"503 Service Unavailable" error. The server load is taken to be 0
if gitweb cannot determine its value. Currently it works only on Linux,
- where it uses '/proc/loadavg'; the load there is the number of active
+ where it uses `/proc/loadavg`; the load there is the number of active
tasks on the system -- processes that are actually running -- averaged
over the last minute.
+
$per_request_config::
If this is set to code reference, it will be run once for each request.
- You can set parts of configuration that change per session this way.
+ You can set parts of configuration that change per session this way.
For example, one might use the following code in a gitweb configuration
file
+
Only one provider at a time can be selected ('default' is one element list).
If an unknown provider is specified, the feature is disabled.
*Note* that some providers might require extra Perl packages to be
-installed; see 'gitweb/INSTALL' for more details.
+installed; see `gitweb/INSTALL` for more details.
+
This feature can be configured on a per-repository basis via
repository's `gitweb.avatar` configuration variable.
CONFIGURATION
-------------
Various aspects of gitweb's behavior can be controlled through the configuration
-file 'gitweb_config.perl' or '/etc/gitweb.conf'. See the linkgit:gitweb.conf[5]
+file `gitweb_config.perl` or `/etc/gitweb.conf`. See the linkgit:gitweb.conf[5]
for details.
Repositories
our $projectroot = '/path/to/parent/directory';
-----------------------------------------------------------------------
-The default value for `$projectroot` is '/pub/git'. You can change it during
+The default value for `$projectroot` is `/pub/git`. You can change it during
building gitweb via `GITWEB_PROJECTROOT` build configuration variable.
By default all Git repositories under `$projectroot` are visible and available
-------------------------------------------------------------------------------
+
from the template during repository creation, usually installed in
-'/usr/share/git-core/templates/'. You can use the `gitweb.description` repo
+`/usr/share/git-core/templates/`. You can use the `gitweb.description` repo
configuration variable, but the file takes precedence.
category (or `gitweb.category`)::
Apache as CGI
~~~~~~~~~~~~~
Apache must be configured to support CGI scripts in the directory in
-which gitweb is installed. Let's assume that it is '/var/www/cgi-bin'
+which gitweb is installed. Let's assume that it is `/var/www/cgi-bin`
directory.
-----------------------------------------------------------------------
(for mod_perl 1.x) or ModPerl::Registry (for mod_perl 2.x) to enable
this support.
-Assuming that gitweb is installed to '/var/www/perl', the following
+Assuming that gitweb is installed to `/var/www/perl`, the following
Apache configuration (for mod_perl 2.x) is suitable.
-----------------------------------------------------------------------
~~~~~~~~~~~~~~~~~~~
Gitweb works with Apache and FastCGI. First you need to rename, copy
or symlink gitweb.cgi to gitweb.fcgi. Let's assume that gitweb is
-installed in '/usr/share/gitweb' directory. The following Apache
+installed in `/usr/share/gitweb` directory. The following Apache
configuration is suitable (UNTESTED!)
-----------------------------------------------------------------------
-----------------------------------------------------------------------
The above configuration expects your public repositories to live under
-'/pub/git' and will serve them as `http://git.domain.org/dir-under-pub-git`,
+`/pub/git` and will serve them as `http://git.domain.org/dir-under-pub-git`,
both as clonable Git URL and as browseable gitweb interface. If you then
start your linkgit:git-daemon[1] with `--base-path=/pub/git --export-all`
then you can even use the `git://` URL with exactly the same path.
Setting the environment variable `GITWEB_CONFIG` will tell gitweb to use the
-named file (i.e. in this example '/etc/gitweb.conf') as a configuration for
+named file (i.e. in this example `/etc/gitweb.conf`) as a configuration for
gitweb. You don't really need it in above example; it is required only if
your configuration file is in different place than built-in (during
-compiling gitweb) 'gitweb_config.perl' or '/etc/gitweb.conf'. See
+compiling gitweb) 'gitweb_config.perl' or `/etc/gitweb.conf`. See
linkgit:gitweb.conf[5] for details, especially information about precedence
rules.
If you use the rewrite rules from the example you *might* also need
something like the following in your gitweb configuration file
-('/etc/gitweb.conf' following example):
+(`/etc/gitweb.conf` following example):
----------------------------------------------------------------------------
@stylesheets = ("/some/absolute/path/gitweb.css");
$my_uri = "/";
Here actual project root is passed to gitweb via `GITWEB_PROJECT_ROOT`
environment variable from a web server, so you need to put the following
-line in gitweb configuration file ('/etc/gitweb.conf' in above example):
+line in gitweb configuration file (`/etc/gitweb.conf` in above example):
--------------------------------------------------------------------------
$projectroot = $ENV{'GITWEB_PROJECTROOT'} || "/pub/git";
--------------------------------------------------------------------------
These configurations enable two things. First, each unix user (`<user>`) of
the server will be able to browse through gitweb Git repositories found in
-'~/public_git/' with the following url:
+`~/public_git/` with the following url:
http://git.example.org/~<user>/
use the \'~' as first character, just comment or remove the second rewrite
rule, and uncomment one of the following according to what you want.
-Second, repositories found in '/pub/scm/' and '/var/git/' will be accessible
+Second, repositories found in `/pub/scm/` and `/var/git/` will be accessible
through `http://git.example.org/scm/` and `http://git.example.org/var/`.
You can add as many project roots as you want by adding rewrite rules like
the third and the fourth.
http://git.example.com/project.git/shortlog/sometag
i.e. without 'gitweb.cgi' part, by using a configuration such as the
-following. This configuration assumes that '/var/www/gitweb' is the
+following. This configuration assumes that `/var/www/gitweb` is the
DocumentRoot of your webserver, contains the gitweb.cgi script and
complementary static files (stylesheet, favicon, JavaScript):
`@stylesheets`, `$my_uri` and `$home_link`, but you lose "dumb client"
access to your project .git dirs (described in "Single URL for gitweb and
for fetching" section). A possible workaround for the latter is the
-following: in your project root dir (e.g. '/pub/git') have the projects
-named *without* a .git extension (e.g. '/pub/git/project' instead of
-'/pub/git/project.git') and configure Apache as follows:
+following: in your project root dir (e.g. `/pub/git`) have the projects
+named *without* a .git extension (e.g. `/pub/git/project` instead of
+`/pub/git/project.git`) and configure Apache as follows:
----------------------------------------------------------------------------
<VirtualHost *:80>
ServerAlias git.example.com
will provide human-friendly gitweb access.
This solution is not 100% bulletproof, in the sense that if some project has
-a named ref (branch, tag) starting with 'git/', then paths such as
+a named ref (branch, tag) starting with `git/`, then paths such as
http://git.example.com/project/command/abranch..git/abranch
--------
linkgit:gitweb.conf[5], linkgit:git-instaweb[1]
-'gitweb/README', 'gitweb/INSTALL'
+`gitweb/README`, `gitweb/INSTALL`
GIT
---
origin/name-of-upstream-branch, which you can see using
`git branch -r`.
+[[def_overlay]]overlay::
+ Only update and add files to the working directory, but don't
+ delete them, similar to how 'cp -R' would update the contents
+ in the destination directory. This is the default mode in a
+ <<def_checkout,checkout>> when checking out files from the
+ <<def_index,index>> or a <<def_tree-ish,tree-ish>>. In
+ contrast, no-overlay mode also deletes tracked files not
+ present in the source, similar to 'rsync --delete'.
+
[[def_pack]]pack::
A set of objects which have been compressed into one file (to save space
or to transmit them efficiently).
--------------
If you have to access the WebDAV server from behind an HTTP(S) proxy,
-set the variable 'all_proxy' to 'http://proxy-host.com:port', or
-'http://login-on-proxy:passwd-on-proxy@proxy-host.com:port'. See 'man
+set the variable 'all_proxy' to `http://proxy-host.com:port`, or
+`http://login-on-proxy:passwd-on-proxy@proxy-host.com:port`. See 'man
curl' for details.
Perform the merge and commit the result. This option can
be used to override --no-commit.
+
-With --no-commit perform the merge but pretend the merge
-failed and do not autocommit, to give the user a chance to
-inspect and further tweak the merge result before committing.
+With --no-commit perform the merge and stop just before creating
+a merge commit, to give the user a chance to inspect and further
+tweak the merge result before committing.
++
+Note that fast-forward updates do not create a merge commit and
+therefore there is no way to stop those merges with --no-commit.
+Thus, if you want to ensure your branch is not changed or updated
+by the merge command, use --no-ff with --no-commit.
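For example (the branch name is made up), to merge without fast-forwarding
and stop short of creating the merge commit:

------------
$ git merge --no-ff --no-commit topic
# inspect and tweak the result, then conclude the merge
$ git commit
------------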
--edit::
-e::
updated behaviour, the environment variable `GIT_MERGE_AUTOEDIT` can be
set to `no` at the beginning of them.
+--cleanup=<mode>::
+ This option determines how the merge message will be cleaned up before
+	committing. See linkgit:git-commit[1] for more details. In addition, if
+ the '<mode>' is given a value of `scissors`, scissors will be appended
+ to `MERGE_MSG` before being passed on to the commit machinery in the
+ case of a merge conflict.
+
--ff::
When the merge resolves as a fast-forward, only update the branch
pointer, without creating a merge commit. This is the default
+
The placeholders are:
-- '%H': commit hash
-- '%h': abbreviated commit hash
-- '%T': tree hash
-- '%t': abbreviated tree hash
-- '%P': parent hashes
-- '%p': abbreviated parent hashes
-- '%an': author name
-- '%aN': author name (respecting .mailmap, see linkgit:git-shortlog[1]
- or linkgit:git-blame[1])
-- '%ae': author email
-- '%aE': author email (respecting .mailmap, see
- linkgit:git-shortlog[1] or linkgit:git-blame[1])
-- '%ad': author date (format respects --date= option)
-- '%aD': author date, RFC2822 style
-- '%ar': author date, relative
-- '%at': author date, UNIX timestamp
-- '%ai': author date, ISO 8601-like format
-- '%aI': author date, strict ISO 8601 format
-- '%cn': committer name
-- '%cN': committer name (respecting .mailmap, see
- linkgit:git-shortlog[1] or linkgit:git-blame[1])
-- '%ce': committer email
-- '%cE': committer email (respecting .mailmap, see
- linkgit:git-shortlog[1] or linkgit:git-blame[1])
-- '%cd': committer date (format respects --date= option)
-- '%cD': committer date, RFC2822 style
-- '%cr': committer date, relative
-- '%ct': committer date, UNIX timestamp
-- '%ci': committer date, ISO 8601-like format
-- '%cI': committer date, strict ISO 8601 format
-- '%d': ref names, like the --decorate option of linkgit:git-log[1]
-- '%D': ref names without the " (", ")" wrapping.
-- '%S': ref name given on the command line by which the commit was reached
- (like `git log --source`), only works with `git log`
-- '%e': encoding
-- '%s': subject
-- '%f': sanitized subject line, suitable for a filename
-- '%b': body
-- '%B': raw body (unwrapped subject and body)
+- Placeholders that expand to a single literal character:
+'%n':: newline
+'%%':: a raw '%'
+'%x00':: print a byte from a hex code
+
+- Placeholders that affect formatting of later placeholders:
+'%Cred':: switch color to red
+'%Cgreen':: switch color to green
+'%Cblue':: switch color to blue
+'%Creset':: reset color
+'%C(...)':: color specification, as described under Values in the
+ "CONFIGURATION FILE" section of linkgit:git-config[1]. By
+ default, colors are shown only when enabled for log output
+ (by `color.diff`, `color.ui`, or `--color`, and respecting
+ the `auto` settings of the former if we are going to a
+ terminal). `%C(auto,...)` is accepted as a historical
+ synonym for the default (e.g., `%C(auto,red)`). Specifying
+ `%C(always,...)` will show the colors even when color is
+ not otherwise enabled (though consider just using
+ `--color=always` to enable color for the whole output,
+ including this format and anything else git might color).
+ `auto` alone (i.e. `%C(auto)`) will turn on auto coloring
+ on the next placeholders until the color is switched
+ again.
+'%m':: left (`<`), right (`>`) or boundary (`-`) mark
+'%w([<w>[,<i1>[,<i2>]]])':: switch line wrapping, like the -w option of
+ linkgit:git-shortlog[1].
+'%<(<N>[,trunc|ltrunc|mtrunc])':: make the next placeholder take at
+ least N columns, padding spaces on
+ the right if necessary. Optionally
+ truncate at the beginning (ltrunc),
+ the middle (mtrunc) or the end
+ (trunc) if the output is longer than
+ N columns. Note that truncating
+ only works correctly with N >= 2.
+'%<|(<N>)':: make the next placeholder take at least until the Nth
+	     column, padding spaces on the right if necessary
+'%>(<N>)', '%>|(<N>)':: similar to '%<(<N>)', '%<|(<N>)' respectively,
+ but padding spaces on the left
+'%>>(<N>)', '%>>|(<N>)':: similar to '%>(<N>)', '%>|(<N>)'
+ respectively, except that if the next
+ placeholder takes more spaces than given and
+ there are spaces on its left, use those
+ spaces
+'%><(<N>)', '%><|(<N>)':: similar to '%<(<N>)', '%<|(<N>)'
+ respectively, but padding both sides
+ (i.e. the text is centered)
+
+- Placeholders that expand to information extracted from the commit:
+'%H':: commit hash
+'%h':: abbreviated commit hash
+'%T':: tree hash
+'%t':: abbreviated tree hash
+'%P':: parent hashes
+'%p':: abbreviated parent hashes
+'%an':: author name
+'%aN':: author name (respecting .mailmap, see linkgit:git-shortlog[1]
+ or linkgit:git-blame[1])
+'%ae':: author email
+'%aE':: author email (respecting .mailmap, see linkgit:git-shortlog[1]
+ or linkgit:git-blame[1])
+'%ad':: author date (format respects --date= option)
+'%aD':: author date, RFC2822 style
+'%ar':: author date, relative
+'%at':: author date, UNIX timestamp
+'%ai':: author date, ISO 8601-like format
+'%aI':: author date, strict ISO 8601 format
+'%cn':: committer name
+'%cN':: committer name (respecting .mailmap, see
+ linkgit:git-shortlog[1] or linkgit:git-blame[1])
+'%ce':: committer email
+'%cE':: committer email (respecting .mailmap, see
+ linkgit:git-shortlog[1] or linkgit:git-blame[1])
+'%cd':: committer date (format respects --date= option)
+'%cD':: committer date, RFC2822 style
+'%cr':: committer date, relative
+'%ct':: committer date, UNIX timestamp
+'%ci':: committer date, ISO 8601-like format
+'%cI':: committer date, strict ISO 8601 format
+'%d':: ref names, like the --decorate option of linkgit:git-log[1]
+'%D':: ref names without the " (", ")" wrapping.
+'%S':: ref name given on the command line by which the commit was reached
+ (like `git log --source`), only works with `git log`
+'%e':: encoding
+'%s':: subject
+'%f':: sanitized subject line, suitable for a filename
+'%b':: body
+'%B':: raw body (unwrapped subject and body)
ifndef::git-rev-list[]
-- '%N': commit notes
+'%N':: commit notes
endif::git-rev-list[]
-- '%GG': raw verification message from GPG for a signed commit
-- '%G?': show "G" for a good (valid) signature,
- "B" for a bad signature,
- "U" for a good signature with unknown validity,
- "X" for a good signature that has expired,
- "Y" for a good signature made by an expired key,
- "R" for a good signature made by a revoked key,
- "E" if the signature cannot be checked (e.g. missing key)
- and "N" for no signature
-- '%GS': show the name of the signer for a signed commit
-- '%GK': show the key used to sign a signed commit
-- '%GF': show the fingerprint of the key used to sign a signed commit
-- '%GP': show the fingerprint of the primary key whose subkey was used
- to sign a signed commit
-- '%gD': reflog selector, e.g., `refs/stash@{1}` or
- `refs/stash@{2 minutes ago`}; the format follows the rules described
- for the `-g` option. The portion before the `@` is the refname as
- given on the command line (so `git log -g refs/heads/master` would
- yield `refs/heads/master@{0}`).
-- '%gd': shortened reflog selector; same as `%gD`, but the refname
- portion is shortened for human readability (so `refs/heads/master`
- becomes just `master`).
-- '%gn': reflog identity name
-- '%gN': reflog identity name (respecting .mailmap, see
- linkgit:git-shortlog[1] or linkgit:git-blame[1])
-- '%ge': reflog identity email
-- '%gE': reflog identity email (respecting .mailmap, see
- linkgit:git-shortlog[1] or linkgit:git-blame[1])
-- '%gs': reflog subject
-- '%Cred': switch color to red
-- '%Cgreen': switch color to green
-- '%Cblue': switch color to blue
-- '%Creset': reset color
-- '%C(...)': color specification, as described under Values in the
- "CONFIGURATION FILE" section of linkgit:git-config[1].
- By default, colors are shown only when enabled for log output (by
- `color.diff`, `color.ui`, or `--color`, and respecting the `auto`
- settings of the former if we are going to a terminal). `%C(auto,...)`
- is accepted as a historical synonym for the default (e.g.,
- `%C(auto,red)`). Specifying `%C(always,...)` will show the colors
- even when color is not otherwise enabled (though consider
- just using `--color=always` to enable color for the whole output,
- including this format and anything else git might color). `auto`
- alone (i.e. `%C(auto)`) will turn on auto coloring on the next
- placeholders until the color is switched again.
-- '%m': left (`<`), right (`>`) or boundary (`-`) mark
-- '%n': newline
-- '%%': a raw '%'
-- '%x00': print a byte from a hex code
-- '%w([<w>[,<i1>[,<i2>]]])': switch line wrapping, like the -w option of
- linkgit:git-shortlog[1].
-- '%<(<N>[,trunc|ltrunc|mtrunc])': make the next placeholder take at
- least N columns, padding spaces on the right if necessary.
- Optionally truncate at the beginning (ltrunc), the middle (mtrunc)
- or the end (trunc) if the output is longer than N columns.
- Note that truncating only works correctly with N >= 2.
-- '%<|(<N>)': make the next placeholder take at least until Nth
- columns, padding spaces on the right if necessary
-- '%>(<N>)', '%>|(<N>)': similar to '%<(<N>)', '%<|(<N>)'
- respectively, but padding spaces on the left
-- '%>>(<N>)', '%>>|(<N>)': similar to '%>(<N>)', '%>|(<N>)'
- respectively, except that if the next placeholder takes more spaces
- than given and there are spaces on its left, use those spaces
-- '%><(<N>)', '%><|(<N>)': similar to '%<(<N>)', '%<|(<N>)'
- respectively, but padding both sides (i.e. the text is centered)
-- %(trailers[:options]): display the trailers of the body as interpreted
- by linkgit:git-interpret-trailers[1]. The `trailers` string may be
- followed by a colon and zero or more comma-separated options. If the
- `only` option is given, omit non-trailer lines from the trailer block.
- If the `unfold` option is given, behave as if interpret-trailer's
- `--unfold` option was given. E.g., `%(trailers:only,unfold)` to do
- both.
+'%GG':: raw verification message from GPG for a signed commit
+'%G?':: show "G" for a good (valid) signature,
+ "B" for a bad signature,
+ "U" for a good signature with unknown validity,
+ "X" for a good signature that has expired,
+ "Y" for a good signature made by an expired key,
+ "R" for a good signature made by a revoked key,
+ "E" if the signature cannot be checked (e.g. missing key)
+ and "N" for no signature
+'%GS':: show the name of the signer for a signed commit
+'%GK':: show the key used to sign a signed commit
+'%GF':: show the fingerprint of the key used to sign a signed commit
+'%GP':: show the fingerprint of the primary key whose subkey was used
+ to sign a signed commit
+'%gD':: reflog selector, e.g., `refs/stash@{1}` or `refs/stash@{2
+	minutes ago}`; the format follows the rules described for the
+ `-g` option. The portion before the `@` is the refname as
+ given on the command line (so `git log -g refs/heads/master`
+ would yield `refs/heads/master@{0}`).
+'%gd':: shortened reflog selector; same as `%gD`, but the refname
+ portion is shortened for human readability (so
+ `refs/heads/master` becomes just `master`).
+'%gn':: reflog identity name
+'%gN':: reflog identity name (respecting .mailmap, see
+ linkgit:git-shortlog[1] or linkgit:git-blame[1])
+'%ge':: reflog identity email
+'%gE':: reflog identity email (respecting .mailmap, see
+ linkgit:git-shortlog[1] or linkgit:git-blame[1])
+'%gs':: reflog subject
+'%(trailers[:options])':: display the trailers of the body as
+ interpreted by
+ linkgit:git-interpret-trailers[1]. The
+ `trailers` string may be followed by a colon
+ and zero or more comma-separated options:
+** 'key=<K>': only show trailers with the specified key. Matching is
+   done case-insensitively and the trailing colon is optional. If the
+   option is given multiple times, trailer lines matching any of the
+   keys are shown. This option automatically enables the `only` option
+   so that non-trailer lines in the trailer block are hidden. If that
+   is not desired it can be disabled with `only=false`. E.g.,
+   `%(trailers:key=Reviewed-by)` shows trailer lines with key
+   `Reviewed-by`.
+** 'only[=val]': select whether non-trailer lines from the trailer
+   block should be included. The `only` keyword may optionally be
+   followed by an equal sign and one of `true`, `on`, `yes` to omit or
+   `false`, `off`, `no` to show the non-trailer lines. If the option
+   is given without a value, it is enabled. If given multiple times,
+   the last value is used.
+** 'separator=<SEP>': specify a separator inserted between trailer
+   lines. When this option is not given, each trailer line is
+   terminated with a line feed character. The string SEP may contain
+   the literal formatting codes described above. To use a comma as
+   separator one must use `%x2C`, as it would otherwise be parsed as
+   the next option. If the separator option is given multiple times,
+   only the last one is used. E.g., `%(trailers:key=Ticket,separator=%x2C )`
+   shows all trailer lines whose key is "Ticket" separated by a comma
+   and a space.
+** 'unfold[=val]': behave as if interpret-trailer's `--unfold` option
+   was given. In the same way as for `only`, it can be followed by an
+   equal sign and an explicit value. E.g.,
+   `%(trailers:only,unfold=true)` unfolds and shows all trailer lines.
+** 'valueonly[=val]': skip over the key part of the trailer line and
+   only show the value part. This also optionally allows an explicit
+   value.
NOTE: Some placeholders may depend on other options given to the
revision traversal engine. For example, the `%g*` reflog options will
--filter-print-omitted::
Only useful with `--filter=`; prints a list of the objects omitted
- by the filter. Object IDs are prefixed with a ``~'' character.
+ by the filter. Object IDs are prefixed with a ``~'' character.
--missing=<missing-action>::
A debug option to help with future "partial clone" development.
author's). If `-local` is appended to the format (e.g.,
`iso-local`), the user's local time zone is used instead.
+
+--
`--date=relative` shows dates relative to the current time,
e.g. ``2 hours ago''. The `-local` option has no effect for
`--date=relative`.
-+
+
`--date=local` is an alias for `--date=default-local`.
-+
+
`--date=iso` (or `--date=iso8601`) shows timestamps in a ISO 8601-like format.
The differences to the strict ISO 8601 format are:
- a space between time and time zone
- no colon between hours and minutes of the time zone
-+
`--date=iso-strict` (or `--date=iso8601-strict`) shows timestamps in strict
ISO 8601 format.
-+
+
`--date=rfc` (or `--date=rfc2822`) shows timestamps in RFC 2822
format, often found in email messages.
-+
+
`--date=short` shows only the date, but not the time, in `YYYY-MM-DD` format.
-+
+
`--date=raw` shows the date as seconds since the epoch (1970-01-01
00:00:00 UTC), followed by a space, and then the timezone as an offset
from UTC (a `+` or `-` with four digits; the first two are hours, and
Note that the `-local` option does not affect the seconds-since-epoch
value (which is always measured in UTC), but does switch the accompanying
timezone value.
-+
+
`--date=human` shows the timezone if the timezone does not match the
current time-zone, and doesn't print the whole date if that matches
(ie skip printing year for dates that are "this year", but also skip
the whole date itself if it's in the last few days and we can just say
what weekday it was). For older dates the hour and minute is also
omitted.
-+
+
`--date=unix` shows the date as a Unix epoch timestamp (seconds since
1970). As with `--raw`, this is always in UTC and therefore `-local`
has no effect.
-+
+
`--date=format:...` feeds the format `...` to your system `strftime`,
except for %z and %Z, which are handled internally.
Use `--date=format:%c` to show the date in your system locale's
preferred format. See the `strftime` manual for a complete list of
format placeholders. When using `-local`, the correct syntax is
`--date=format-local:...`.
-+
+
`--date=default` is the default format, and is similar to
`--date=rfc2822`, with a few exceptions:
-
+--
- there is no comma after the day-of-week
- the time zone is omitted when the local time zone is used
the parents have only two variants and the merge result picks
one of them without modification.
+--combined-all-paths::
+ This flag causes combined diffs (used for merge commits) to
+ list the name of the file from all parents. It thus only has
+ effect when -c or --cc are specified, and is likely only
+ useful if filename changes are detected (i.e. when either
+	rename or copy detection has been requested).
+
-m::
This flag makes the merge commits show the full diff like
regular commits; for each merge parent, a separate log entry
when you run `git cherry-pick`.
+
Note that any of the 'refs/*' cases above may come either from
-the '$GIT_DIR/refs' directory or from the '$GIT_DIR/packed-refs' file.
+the `$GIT_DIR/refs` directory or from the `$GIT_DIR/packed-refs` file.
While the ref name encoding is unspecified, UTF-8 is preferred as
some output processing may assume ref names in UTF-8.
'@'::
'@' alone is a shortcut for `HEAD`.
-'<refname>@{<date>}', e.g. 'master@\{yesterday\}', 'HEAD@{5 minutes ago}'::
+'[<refname>]@{<date>}', e.g. 'master@\{yesterday\}', 'HEAD@{5 minutes ago}'::
A ref followed by the suffix '@' with a date specification
enclosed in a brace
pair (e.g. '\{yesterday\}', '{1 month 2 weeks 3 days 1 hour 1
The construct '@{-<n>}' means the <n>th branch/commit checked out
before the current one.
-'<branchname>@\{upstream\}', e.g. 'master@\{upstream\}', '@\{u\}'::
+'[<branchname>]@\{upstream\}', e.g. 'master@\{upstream\}', '@\{u\}'::
The suffix '@\{upstream\}' to a branchname (short form '<branchname>@\{u\}')
refers to the branch that the branch specified by branchname is set to build on
top of (configured with `branch.<name>.remote` and
current one. These suffixes are also accepted when spelled in uppercase, and
they mean the same thing no matter the case.
-'<branchname>@\{push\}', e.g. 'master@\{push\}', '@\{push\}'::
+'[<branchname>]@\{push\}', e.g. 'master@\{push\}', '@\{push\}'::
The suffix '@\{push}' reports the branch "where we would push to" if
`git push` were run while `branchname` was checked out (or the current
`HEAD` if no branchname is specified). Since our push destination is
in a remote repository, of course, we report the local tracking branch
- that corresponds to that branch (i.e., something in 'refs/remotes/').
+ that corresponds to that branch (i.e., something in `refs/remotes/`).
+
Here's an example to make it more clear:
+
This suffix is also accepted when spelled in uppercase, and means the same
thing no matter the case.
-'<rev>{caret}', e.g. 'HEAD{caret}, v1.5.1{caret}0'::
+'<rev>{caret}[<n>]', e.g. 'HEAD{caret}', 'v1.5.1{caret}0'::
A suffix '{caret}' to a revision parameter means the first parent of
that commit object. '{caret}<n>' means the <n>th parent (i.e.
'<rev>{caret}'
'<rev>{caret}0' means the commit itself and is used when '<rev>' is the
object name of a tag object that refers to a commit object.
-'<rev>{tilde}<n>', e.g. 'master{tilde}3'::
+'<rev>{tilde}[<n>]', e.g. 'HEAD{tilde}', 'master{tilde}3'::
+ A suffix '{tilde}' to a revision parameter means the first parent of
+ that commit object.
A suffix '{tilde}<n>' to a revision parameter means the commit
object that is the <n>th generation ancestor of the named
commit object, following only the first parents. I.e. '<rev>{tilde}3' is
'<rev>{caret}0'
is a short-hand for '<rev>{caret}\{commit\}'.
+
-'rev{caret}\{object\}' can be used to make sure 'rev' names an
-object that exists, without requiring 'rev' to be a tag, and
-without dereferencing 'rev'; because a tag is already an object,
+'<rev>{caret}\{object\}' can be used to make sure '<rev>' names an
+object that exists, without requiring '<rev>' to be a tag, and
+without dereferencing '<rev>'; because a tag is already an object,
it does not have to be dereferenced even once to get to an object.
+
-'rev{caret}\{tag\}' can be used to ensure that 'rev' identifies an
+'<rev>{caret}\{tag\}' can be used to ensure that '<rev>' identifies an
existing tag object.
'<rev>{caret}{}', e.g. 'v0.99.8{caret}{}'::
Depending on the given text, the shell's word splitting rules might
require additional quoting.
-'<rev>:<path>', e.g. 'HEAD:README', ':README', 'master:./README'::
+'<rev>:<path>', e.g. 'HEAD:README', 'master:./README'::
A suffix ':' followed by a path names the blob or tree
at the given path in the tree-ish object named by the part
before the colon.
- ':path' (with an empty part before the colon)
- is a special case of the syntax described next: content
- recorded in the index at the given path.
A path starting with './' or '../' is relative to the current working directory.
The given path will be converted to be relative to the working tree's root directory.
This is most useful to address a blob or tree from a commit or tree that has
the same tree structure as the working tree.
-':<n>:<path>', e.g. ':0:README', ':README'::
+':[<n>:]<path>', e.g. ':0:README', ':README'::
A colon, optionally followed by a stage number (0 to 3) and a
colon, followed by a path, names a blob object in the
index at the given path. A missing stage number (and the colon
The 'r1{caret}!' notation includes commit 'r1' but excludes all of its parents.
By itself, this notation denotes the single commit 'r1'.
-The '<rev>{caret}-<n>' notation includes '<rev>' but excludes the <n>th
+The '<rev>{caret}-[<n>]' notation includes '<rev>' but excludes the <n>th
parent (i.e. a shorthand for '<rev>{caret}<n>..<rev>'), with '<n>' = 1 if
not given. This is typically useful for merge commits where you
can just pass '<commit>{caret}-' to get all the commits in the branch
--continue::
Continue the operation in progress using the information in
- '.git/sequencer'. Can be used to continue after resolving
+ `.git/sequencer`. Can be used to continue after resolving
conflicts in a failed cherry-pick or revert.
--quit::
config-like files that the caller specifies (i.e., files like `.gitmodules`,
`~/.gitconfig` etc.). For example,
----------------------------------------
+----------------------------------------
struct config_set gm_config;
git_configset_init(&gm_config);
int b;
The filename will be prefixed by passing the filename along with
the prefix argument of `parse_options()` to `prefix_filename()`.
-`OPT_ARGUMENT(long, description)`::
+`OPT_ARGUMENT(long, &int_var, description)`::
Introduce a long-option argument that will be kept in `argv[]`.
+ If this option was seen, `int_var` will be set to one (except
+ if a `NULL` pointer was passed).
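++
+For example, a minimal sketch (the option name, variable, and description
+below are illustrative, not taken from any particular command; it assumes
+`parse-options.h` is included):
++
+----------------------------------------
+int keep_dashdash = 0;
+struct option options[] = {
+	OPT_ARGUMENT("keep-dashdash", &keep_dashdash,
+		     N_("pass --keep-dashdash down to the underlying command")),
+	OPT_END()
+};
+----------------------------------------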
`OPT_NUMBER_CALLBACK(&var, description, func_ptr)`::
Recognize numerical options like -123 and feed the integer as
--- /dev/null
+= Trace2 API
+
+The Trace2 API can be used to print debug, performance, and telemetry
+information to stderr or a file. The Trace2 feature is inactive unless
+explicitly enabled by activating one or more Trace2 Targets.
+
+The Trace2 API is intended to replace the existing (Trace1)
+printf-style tracing provided by the `GIT_TRACE` and
+`GIT_TRACE_PERFORMANCE` facilities. During initial implementation,
+Trace2 and Trace1 may operate in parallel.
+
+The Trace2 API defines a set of high-level messages with known fields,
+such as (`start`: `argv`) and (`exit`: {`exit-code`, `elapsed-time`}).
+
+Trace2 instrumentation throughout the Git code base sends Trace2
+messages to the enabled Trace2 Targets. Targets transform the content
+of these messages into purpose-specific formats and write events to
+their data streams. In this manner, the Trace2 API can drive
+many different types of analysis.
+
+Targets are defined using a VTable allowing easy extension to other
+formats in the future. This might be used to define a binary format,
+for example.
+
+Trace2 is controlled using `trace2.*` config values in the system and
+global config files and `GIT_TR2*` environment variables. Trace2 does
+not read from repo-local or worktree config files, nor does it respect
+`-c` command line config settings.
+
+== Trace2 Targets
+
+Trace2 defines the following set of Trace2 Targets.
+Format details are given in a later section.
+
+=== The Normal Format Target
+
+The normal format target is a traditional printf-style format, similar
+to the GIT_TRACE format. This format is enabled with the `GIT_TR2`
+environment variable or the `trace2.normalTarget` system or global
+config setting.
+
+For example
+
+------------
+$ export GIT_TR2=~/log.normal
+$ git version
+git version 2.20.1.155.g426c96fcdb
+------------
+
+or
+
+------------
+$ git config --global trace2.normalTarget ~/log.normal
+$ git version
+git version 2.20.1.155.g426c96fcdb
+------------
+
+yields
+
+------------
+$ cat ~/log.normal
+12:28:42.620009 common-main.c:38 version 2.20.1.155.g426c96fcdb
+12:28:42.620989 common-main.c:39 start git version
+12:28:42.621101 git.c:432 cmd_name version (version)
+12:28:42.621215 git.c:662 exit elapsed:0.001227 code:0
+12:28:42.621250 trace2/tr2_tgt_normal.c:124 atexit elapsed:0.001265 code:0
+------------
+
+=== The Performance Format Target
+
+The performance format target (PERF) is a column-based format to
+replace GIT_TRACE_PERFORMANCE and is suitable for development and
+testing, possibly to complement tools like gprof. This format is
+enabled with the `GIT_TR2_PERF` environment variable or the
+`trace2.perfTarget` system or global config setting.
+
+For example
+
+------------
+$ export GIT_TR2_PERF=~/log.perf
+$ git version
+git version 2.20.1.155.g426c96fcdb
+------------
+
+or
+
+------------
+$ git config --global trace2.perfTarget ~/log.perf
+$ git version
+git version 2.20.1.155.g426c96fcdb
+------------
+
+yields
+
+------------
+$ cat ~/log.perf
+12:28:42.620675 common-main.c:38 | d0 | main | version | | | | | 2.20.1.155.g426c96fcdb
+12:28:42.621001 common-main.c:39 | d0 | main | start | | 0.001173 | | | git version
+12:28:42.621111 git.c:432 | d0 | main | cmd_name | | | | | version (version)
+12:28:42.621225 git.c:662 | d0 | main | exit | | 0.001227 | | | code:0
+12:28:42.621259 trace2/tr2_tgt_perf.c:211 | d0 | main | atexit | | 0.001265 | | | code:0
+------------
+
+=== The Event Format Target
+
+The event format target is a JSON-based format of event data suitable
+for telemetry analysis. This format is enabled with the `GIT_TR2_EVENT`
+environment variable or the `trace2.eventTarget` system or global config
+setting.
+
+For example
+
+------------
+$ export GIT_TR2_EVENT=~/log.event
+$ git version
+git version 2.20.1.155.g426c96fcdb
+------------
+
+or
+
+------------
+$ git config --global trace2.eventTarget ~/log.event
+$ git version
+git version 2.20.1.155.g426c96fcdb
+------------
+
+yields
+
+------------
+$ cat ~/log.event
+{"event":"version","sid":"sid":"20190408T191610.507018Z-H9b68c35f-P000059a8","thread":"main","time":"2019-01-16T17:28:42.620713Z","file":"common-main.c","line":38,"evt":"1","exe":"2.20.1.155.g426c96fcdb"}
+{"event":"start","sid":"20190408T191610.507018Z-H9b68c35f-P000059a8","thread":"main","time":"2019-01-16T17:28:42.621027Z","file":"common-main.c","line":39,"t_abs":0.001173,"argv":["git","version"]}
+{"event":"cmd_name","sid":"20190408T191610.507018Z-H9b68c35f-P000059a8","thread":"main","time":"2019-01-16T17:28:42.621122Z","file":"git.c","line":432,"name":"version","hierarchy":"version"}
+{"event":"exit","sid":"20190408T191610.507018Z-H9b68c35f-P000059a8","thread":"main","time":"2019-01-16T17:28:42.621236Z","file":"git.c","line":662,"t_abs":0.001227,"code":0}
+{"event":"atexit","sid":"20190408T191610.507018Z-H9b68c35f-P000059a8","thread":"main","time":"2019-01-16T17:28:42.621268Z","file":"trace2/tr2_tgt_event.c","line":163,"t_abs":0.001265,"code":0}
+------------
+
+=== Enabling a Target
+
+To enable a target, set the corresponding environment variable or
+system or global config value to one of the following:
+
+include::../trace2-target-values.txt[]
+
+If the target already exists and is a directory, the traces will be
+written to files (one per process) underneath the given directory. They
+will be named according to the last component of the SID (optionally
+followed by a counter to avoid filename collisions).
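+
+For example (a sketch; the trace file name shown is illustrative, as the
+actual name is derived from the SID at run time):
+
+------------
+$ mkdir ~/traces
+$ export GIT_TR2_EVENT=~/traces
+$ git version
+git version 2.20.1.155.g426c96fcdb
+$ ls ~/traces
+20190408T191610.507018Z-H9b68c35f-P000059a8
+------------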
+
+== Trace2 API
+
+All public Trace2 functions and macros are defined in `trace2.h` and
+`trace2.c`. All public symbols are prefixed with `trace2_`.
+
+There are no public Trace2 data structures.
+
+The Trace2 code also defines a set of private functions and data types
+in the `trace2/` directory. These symbols are prefixed with `tr2_`
+and should only be used by functions in `trace2.c`.
+
+== Conventions for Public Functions and Macros
+
+The functions defined by the Trace2 API are declared and documented
+in `trace2.h`. It defines the API functions and wrapper macros for
+Trace2.
+
+Some functions have a `_fl()` suffix to indicate that they take `file`
+and `line-number` arguments.
+
+Some functions have a `_va_fl()` suffix to indicate that they also
+take a `va_list` argument.
+
+Some functions have a `_printf_fl()` suffix to indicate that they also
+take a varargs argument.
+
+There are CPP wrapper macros and ifdefs to hide most of these details.
+See `trace2.h` for more details. The following discussion will only
+describe the simplified forms.
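+
+For example, the simplified form `trace2_cmd_name()` is (roughly) a
+macro wrapper that supplies the call site automatically:
+
+----------------
+void trace2_cmd_name_fl(const char *file, int line, const char *name);
+
+#define trace2_cmd_name(name) \
+	trace2_cmd_name_fl(__FILE__, __LINE__, (name))
+----------------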
+
+== Public API
+
+All Trace2 API functions send a message to all of the active
+Trace2 Targets. This section describes the set of available
+messages.
+
+It helps to divide these functions into groups for discussion
+purposes.
+
+=== Basic Command Messages
+
+These are concerned with the lifetime of the overall git process.
+
+`void trace2_initialize_clock()`::
+
+ Initialize the Trace2 start clock and nothing else. This should
+ be called at the very top of main() to capture the process start
+ time and reduce startup order dependencies.
+
+`void trace2_initialize()`::
+
+ Determines if any Trace2 Targets should be enabled and
+ initializes the Trace2 facility. This includes setting up the
+ Trace2 thread local storage (TLS).
++
+This function emits a "version" message containing the version of git
+and the Trace2 protocol.
++
+This function should be called from `main()` as early as possible in
+the life of the process after essential process initialization.
+
+`int trace2_is_enabled()`::
+
+ Returns 1 if Trace2 is enabled (at least one target is
+ active).
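++
+This can be used to guard expensive data collection; for example (a
+sketch, where `emit_expensive_diagnostics()` is a hypothetical helper):
++
+----------------
+if (trace2_is_enabled())
+	emit_expensive_diagnostics();
+----------------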
+
+`void trace2_cmd_start(int argc, const char **argv)`::
+
+ Emits a "start" message containing the process command line
+ arguments.
+
+`int trace2_cmd_exit(int exit_code)`::
+
+ Emits an "exit" message containing the process exit-code and
+ elapsed time.
++
+Returns the exit-code.
+
+`void trace2_cmd_error(const char *fmt, va_list ap)`::
+
+ Emits an "error" message containing a formatted error message.
+
+`void trace2_cmd_path(const char *pathname)`::
+
+ Emits a "cmd_path" message with the full pathname of the
+ current process.
+
+=== Command Detail Messages
+
+These are concerned with describing the specific Git command
+after the command line, config, and environment are inspected.
+
+`void trace2_cmd_name(const char *name)`::
+
+ Emits a "cmd_name" message with the canonical name of the
+ command, for example "status" or "checkout".
+
+`void trace2_cmd_mode(const char *mode)`::
+
+ Emits a "cmd_mode" message with a qualifier name to further
+ describe the current git command.
++
+This message is intended to be used with git commands having multiple
+major modes. For example, a "checkout" command can checkout a new
+branch or it can checkout a single file, so the checkout code could
+emit a cmd_mode message of "branch" or "file".
+
+`void trace2_cmd_alias(const char *alias, const char **argv_expansion)`::
+
+ Emits an "alias" message containing the alias used and the
+ argument expansion.
+
+`void trace2_def_param(const char *parameter, const char *value)`::
+
+ Emits a "def_param" message containing a key/value pair.
++
+This message is intended to report some global aspect of the current
+command, such as a configuration setting or command line switch that
+significantly affects program performance or behavior, for example
+`core.abbrev`, `status.showUntrackedFiles`, or `--no-ahead-behind`.
+
+`void trace2_cmd_list_config()`::
+
+	Emits a "def_param" message for each "important" configuration
+	setting.
++
+The environment variable `GIT_TR2_CONFIG_PARAMS` or the `trace2.configParams`
+config value can be set to a
+list of patterns of important configuration settings, for example:
+`core.*,remote.*.url`. This function will iterate over all config
+settings and emit a "def_param" message for each match.
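++
+For example (a sketch):
++
+------------
+$ export GIT_TR2_CONFIG_PARAMS="core.*,remote.*.url"
+------------
++
+or
++
+------------
+$ git config --global trace2.configParams "core.*,remote.*.url"
+------------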
+
+`void trace2_cmd_set_config(const char *key, const char *value)`::
+
+ Emits a "def_param" message for a new or updated key/value
+ pair IF `key` is considered important.
++
+This is used to hook into `git_config_set()` and catch any
+configuration changes and update a value previously reported by
+`trace2_cmd_list_config()`.
+
+`void trace2_def_repo(struct repository *repo)`::
+
+ Registers a repository with the Trace2 layer. Assigns a
+ unique "repo-id" to `repo->trace2_repo_id`.
++
+Emits a "worktree" messages containing the repo-id and the worktree
+pathname.
++
+Region and data messages (described later) may refer to this repo-id.
++
+The main/top-level repository will have repo-id value 1 (aka "r1").
++
+The repo-id field is in anticipation of future in-proc submodule
+repositories.
+
+=== Child Process Messages
+
+These are concerned with the various spawned child processes,
+including shell scripts, git commands, editors, pagers, and hooks.
+
+`void trace2_child_start(struct child_process *cmd)`::
+
+ Emits a "child_start" message containing the "child-id",
+ "child-argv", and "child-classification".
++
+Before calling this, set `cmd->trace2_child_class` to a name
+describing the type of child process, for example "editor".
++
+This function assigns a unique "child-id" to `cmd->trace2_child_id`.
+This field is used later during the "child_exit" message to associate
+it with the "child_start" message.
++
+This function should be called before spawning the child process.
+
+`void trace2_child_exit(struct child_process *cmd, int child_exit_code)`::
+
+ Emits a "child_exit" message containing the "child-id",
+ the child's elapsed time and exit-code.
++
+The reported elapsed time includes the process creation overhead and
+time spent waiting for it to exit, so it may be slightly longer than
+the time reported by the child itself.
++
+This function should be called after reaping the child process.
+
+`int trace2_exec(const char *exe, const char **argv)`::
+
+	Emits an "exec" message containing the "exec-id" and the
+ argv of the new process.
++
+This function should be called before calling one of the `exec()`
+variants, such as `execvp()`.
++
+This function returns a unique "exec-id". This value is used later
+if the `exec()` fails and an "exec_result" message is necessary.
+
+`void trace2_exec_result(int exec_id, int error_code)`::
+
+	Emits an "exec_result" message containing the "exec-id"
+ and the error code.
++
+On Unix-based systems, `exec()` does not return if successful.
+This message is used to indicate that the `exec()` failed and
+that the current program is continuing.
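++
+For example, a sketch of the intended calling pattern around an `exec()`
+variant (error handling and surrounding code omitted):
++
+----------------
+int exec_id = trace2_exec(exe, argv);
+execvp(exe, (char *const *)argv);
+/* only reached if execvp() failed */
+trace2_exec_result(exec_id, errno);
+----------------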
+
+=== Git Thread Messages
+
+These messages are concerned with Git thread usage.
+
+`void trace2_thread_start(const char *thread_name)`::
+
+ Emits a "thread_start" message.
++
+The `thread_name` field should be a descriptive name, such as the
+unique name of the thread-proc. A unique "thread-id" will be added
+to the name to uniquely identify thread instances.
++
+Region and data messages (described later) may refer to this thread
+name.
++
+This function must be called by the thread-proc of the new thread
+(so that TLS data is properly initialized) and not by the caller
+of `pthread_create()`.
+
+`void trace2_thread_exit()`::
+
+ Emits a "thread_exit" message containing the thread name
+ and the thread elapsed time.
++
+This function must be called by the thread-proc before it returns
+(so that the correct TLS data is used and cleaned up). It should
+not be called by the caller of `pthread_join()`.
+
+=== Region and Data Messages
+
+These are concerned with recording performance data
+over regions or spans of code.
+
+`void trace2_region_enter(const char *category, const char *label, const struct repository *repo)`::
+
+`void trace2_region_enter_printf(const char *category, const char *label, const struct repository *repo, const char *fmt, ...)`::
+
+`void trace2_region_enter_printf_va(const char *category, const char *label, const struct repository *repo, const char *fmt, va_list ap)`::
+
+ Emits a thread-relative "region_enter" message with optional
+ printf string.
++
+This function pushes a new region nesting stack level on the current
+thread and starts a clock for the new stack frame.
++
+The `category` field is an arbitrary category name used to classify
+regions by feature area, such as "status" or "index". At this time
+it is only printed along with the rest of the message. It may
+be used in the future to filter messages.
++
+The `label` field is an arbitrary label used to describe the activity
+being started, such as "read_recursive" or "do_read_index".
++
+The `repo` field, if set, will be used to get the "repo-id", so that
+recursive operations can be attributed to the correct repository.
+
+`void trace2_region_leave(const char *category, const char *label, const struct repository *repo)`::
+
+`void trace2_region_leave_printf(const char *category, const char *label, const struct repository *repo, const char *fmt, ...)`::
+
+`void trace2_region_leave_printf_va(const char *category, const char *label, const struct repository *repo, const char *fmt, va_list ap)`::
+
+ Emits a thread-relative "region_leave" message with optional
+ printf string.
++
+This function pops the region nesting stack on the current thread
+and reports the elapsed time of the stack frame.
++
+The `category`, `label`, and `repo` fields are the same as above.
+The `category` and `label` do not need to match the corresponding
+"region_enter" message, but it makes the data stream easier to
+understand.
+
+`void trace2_data_string(const char *category, const struct repository *repo, const char *key, const char *value)`::
+
+`void trace2_data_intmax(const char *category, const struct repository *repo, const char *key, intmax_t value)`::
+
+`void trace2_data_json(const char *category, const struct repository *repo, const char *key, const struct json_writer *jw)`::
+
+ Emits a region- and thread-relative "data" or "data_json" message.
++
+This is a key/value pair message containing information about the
+current thread, region stack, and repository. This could be used
+to print the number of files in a directory during a multi-threaded
+recursive tree walk.
+
+`void trace2_printf(const char *fmt, ...)`::
+
+`void trace2_printf_va(const char *fmt, va_list ap)`::
+
+ Emits a region- and thread-relative "printf" message.
+
+== Trace2 Target Formats
+
+=== NORMAL Format
+
+Events are written as lines of the form:
+
+------------
+[<time> SP <filename>:<line> SP+] <event-name> [[SP] <event-message>] LF
+------------
+
+`<event-name>`::
+
+ is the event name.
+
+`<event-message>`::
+ is a free-form printf message intended for human consumption.
++
+Note that this may contain embedded LF or CRLF characters that are
+not escaped, so the event may spill across multiple lines.
+
+If `GIT_TR2_BRIEF` or `trace2.normalBrief` is true, the `time`, `filename`,
+and `line` fields are omitted.
+
+This target is intended to be more of a summary (like GIT_TRACE) and
+less detailed than the other targets. It ignores thread, region, and
+data messages, for example.
+
+=== PERF Format
+
+Events are written as lines of the form:
+
+------------
+[<time> SP <filename>:<line> SP+
+ BAR SP] d<depth> SP
+ BAR SP <thread-name> SP+
+ BAR SP <event-name> SP+
+ BAR SP [r<repo-id>] SP+
+ BAR SP [<t_abs>] SP+
+ BAR SP [<t_rel>] SP+
+ BAR SP [<category>] SP+
+ BAR SP DOTS* <perf-event-message>
+ LF
+------------
+
+`<depth>`::
+ is the git process depth. This is the number of parent
+ git processes. A top-level git command has depth value "d0".
+ A child of it has depth value "d1". A second level child
+ has depth value "d2" and so on.
+
+`<thread-name>`::
+ is a unique name for the thread. The primary thread
+ is called "main". Other thread names are of the form "th%d:%s"
+ and include a unique number and the name of the thread-proc.
+
+`<event-name>`::
+ is the event name.
+
+`<repo-id>`::
+ when present, is a number indicating the repository
+ in use. A `def_repo` event is emitted when a repository is
+ opened. This defines the repo-id and associated worktree.
+ Subsequent repo-specific events will reference this repo-id.
++
+Currently, this is always "r1" for the main repository.
+This field is in anticipation of in-proc submodules in the future.
+
+`<t_abs>`::
+ when present, is the absolute time in seconds since the
+ program started.
+
+`<t_rel>`::
+ when present, is time in seconds relative to the start of
+ the current region. For a thread-exit event, it is the elapsed
+ time of the thread.
+
+`<category>`::
+ is present on region and data events and is used to
+ indicate a broad category, such as "index" or "status".
+
+`<perf-event-message>`::
+ is a free-form printf message intended for human consumption.
+
+------------
+15:33:33.532712 wt-status.c:2310 | d0 | main | region_enter | r1 | 0.126064 | | status | label:print
+15:33:33.532712 wt-status.c:2331 | d0 | main | region_leave | r1 | 0.127568 | 0.001504 | status | label:print
+------------
+
+If `GIT_TR2_PERF_BRIEF` or `trace2.perfBrief` is true, the `time`, `file`,
+and `line` fields are omitted.
+
+------------
+d0 | main | region_leave | r1 | 0.011717 | 0.009122 | index | label:preload
+------------
+
+The PERF target is intended for interactive performance analysis
+during development and is quite noisy.
+
+=== EVENT Format
+
+Each event is a JSON-object containing multiple key/value pairs
+written as a single line and followed by a LF.
+
+------------
+'{' <key> ':' <value> [',' <key> ':' <value>]* '}' LF
+------------
+
+Some key/value pairs are common to all events and some are
+event-specific.
+
+==== Common Key/Value Pairs
+
+The following key/value pairs are common to all events:
+
+------------
+{
+ "event":"version",
+ "sid":"20190408T191827.272759Z-H9b68c35f-P00003510",
+ "thread":"main",
+ "time":"2019-04-08T19:18:27.282761Z",
+ "file":"common-main.c",
+ "line":42,
+ ...
+}
+------------
+
+`"event":<event>`::
+ is the event name.
+
+`"sid":<sid>`::
+ is the session-id. This is a unique string to identify the
+ process instance to allow all events emitted by a process to
+ be identified. A session-id is used instead of a PID because
+ PIDs are recycled by the OS. For child git processes, the
+ session-id is prepended with the session-id of the parent git
+ process to allow parent-child relationships to be identified
+ during post-processing.
+
+`"thread":<thread>`::
+ is the thread name.
+
+`"time":<time>`::
+ is the UTC time of the event.
+
+`"file":<filename>`::
+	is the source file generating the event.
+
+`"line":<line-number>`::
+ is the integer source line number generating the event.
+
+`"repo":<repo-id>`::
+ when present, is the integer repo-id as described previously.
+
+If `GIT_TR2_EVENT_BRIEF` or `trace2.eventBrief` is true, the `file`
+and `line` fields are omitted from all events and the `time` field is
+only present on the "start" and "atexit" events.
+
+==== Event-Specific Key/Value Pairs
+
+`"version"`::
+ This event gives the version of the executable and the EVENT format.
++
+------------
+{
+ "event":"version",
+ ...
+ "evt":"1", # EVENT format version
+ "exe":"2.20.1.155.g426c96fcdb" # git version
+}
+------------
+
+`"start"`::
+ This event contains the complete argv received by main().
++
+------------
+{
+ "event":"start",
+ ...
+ "t_abs":0.001227, # elapsed time in seconds
+ "argv":["git","version"]
+}
+------------
+
+`"exit"`::
+ This event is emitted when git calls `exit()`.
++
+------------
+{
+ "event":"exit",
+ ...
+ "t_abs":0.001227, # elapsed time in seconds
+ "code":0 # exit code
+}
+------------
+
+`"atexit"`::
+ This event is emitted by the Trace2 `atexit` routine during
+ final shutdown. It should be the last event emitted by the
+ process.
++
+(The elapsed time reported here is greater than the time reported in
+the "exit" event because it runs after all other atexit tasks have
+completed.)
++
+------------
+{
+ "event":"atexit",
+ ...
+ "t_abs":0.001227, # elapsed time in seconds
+ "code":0 # exit code
+}
+------------
+
+`"signal"`::
+ This event is emitted when the program is terminated by a user
+ signal. Depending on the platform, the signal event may
+ prevent the "atexit" event from being generated.
++
+------------
+{
+ "event":"signal",
+ ...
+ "t_abs":0.001227, # elapsed time in seconds
+ "signal":13 # SIGTERM, SIGINT, etc.
+}
+------------
+
+`"error"`::
+ This event is emitted when one of the `error()`, `die()`,
+	or `usage()` functions is called.
++
+------------
+{
+ "event":"error",
+ ...
+ "msg":"invalid option: --cahced", # formatted error message
+ "fmt":"invalid option: %s" # error format string
+}
+------------
++
+The error event may be emitted more than once. The format string
+allows post-processors to group errors by type without worrying
+about specific error arguments.
+
+`"cmd_path"`::
+ This event contains the discovered full path of the git
+ executable (on platforms that are configured to resolve it).
++
+------------
+{
+ "event":"cmd_path",
+ ...
+ "path":"C:/work/gfw/git.exe"
+}
+------------
+
+`"cmd_name"`::
+ This event contains the command name for this git process
+ and the hierarchy of commands from parent git processes.
++
+------------
+{
+ "event":"cmd_name",
+ ...
+ "name":"pack-objects",
+ "hierarchy":"push/pack-objects"
+}
+------------
++
+Normally, the "name" field contains the canonical name of the
+command. When a canonical name is not available, one of
+these special values is used:
++
+------------
+"_query_" # "git --html-path"
+"_run_dashed_" # when "git foo" tries to run "git-foo"
+"_run_shell_alias_" # alias expansion to a shell command
+"_run_git_alias_" # alias expansion to a git command
+"_usage_" # usage error
+------------
+
+`"cmd_mode"`::
+	This event, when present, describes the command variant. This
+ event may be emitted more than once.
++
+------------
+{
+ "event":"cmd_mode",
+ ...
+ "name":"branch"
+}
+------------
++
+The "name" field is an arbitrary string to describe the command mode.
+For example, checkout can check out a branch or an individual file,
+and these variations typically have different performance
+characteristics that are not comparable.
+
+`"alias"`::
+ This event is present when an alias is expanded.
++
+------------
+{
+ "event":"alias",
+ ...
+ "alias":"l", # registered alias
+ "argv":["log","--graph"] # alias expansion
+}
+------------
+
+`"child_start"`::
+ This event describes a child process that is about to be
+ spawned.
++
+------------
+{
+ "event":"child_start",
+ ...
+ "child_id":2,
+ "child_class":"?",
+ "use_shell":false,
+ "argv":["git","rev-list","--objects","--stdin","--not","--all","--quiet"]
+
+ "hook_name":"<hook_name>" # present when child_class is "hook"
+ "cd":"<path>" # present when cd is required
+}
+------------
++
+The "child_id" field can be used to match this child_start with the
+corresponding child_exit event.
++
+The "child_class" field is a rough classification, such as "editor",
+"pager", "transport/*", and "hook". Unclassified children are classified
+with "?".
+
+`"child_exit"`::
+ This event is generated after the current process has returned
+ from the waitpid() and collected the exit information from the
+ child.
++
+------------
+{
+ "event":"child_exit",
+ ...
+ "child_id":2,
+ "pid":14708, # child PID
+ "code":0, # child exit-code
+ "t_rel":0.110605 # observed run-time of child process
+}
+------------
++
+Note that the session-id of the child process is not available to
+the current/spawning process, so the child's PID is reported here as
+a hint for post-processing. (But it is only a hint because the child
+process may be a shell script which doesn't have a session-id.)
++
+Note that the `t_rel` field contains the observed run time in seconds
+for the child process (starting before the fork/exec/spawn and
+stopping after the waitpid(), so it includes OS process creation
+overhead). This time will therefore be slightly larger than the atexit
+time reported by the child process itself.
+
+`"exec"`::
+ This event is generated before git attempts to `exec()`
+ another command rather than starting a child process.
++
+------------
+{
+ "event":"exec",
+ ...
+ "exec_id":0,
+ "exe":"git",
+ "argv":["foo", "bar"]
+}
+------------
++
+The "exec_id" field is a command-unique id and is only useful if the
+`exec()` fails and a corresponding exec_result event is generated.
+
+`"exec_result"`::
+ This event is generated if the `exec()` fails and control
+ returns to the current git command.
++
+------------
+{
+ "event":"exec_result",
+ ...
+ "exec_id":0,
+ "code":1 # error code (errno) from exec()
+}
+------------
+
+`"thread_start"`::
+ This event is generated when a thread is started. It is
+ generated from *within* the new thread's thread-proc (for TLS
+ reasons).
++
+------------
+{
+ "event":"thread_start",
+ ...
+ "thread":"th02:preload_thread" # thread name
+}
+------------
+
+`"thread_exit"`::
+ This event is generated when a thread exits. It is generated
+ from *within* the thread's thread-proc (for TLS reasons).
++
+------------
+{
+ "event":"thread_exit",
+ ...
+ "thread":"th02:preload_thread", # thread name
+ "t_rel":0.007328 # thread elapsed time
+}
+------------
+
+`"def_param"`::
+ This event is generated to log a global parameter.
++
+------------
+{
+ "event":"def_param",
+ ...
+ "param":"core.abbrev",
+ "value":"7"
+}
+------------
+
+`"def_repo"`::
+ This event defines a repo-id and associates it with the root
+ of the worktree.
++
+------------
+{
+ "event":"def_repo",
+ ...
+ "repo":1,
+ "worktree":"/Users/jeffhost/work/gfw"
+}
+------------
++
+As stated earlier, the repo-id is currently always 1, so there will
+only be one def_repo event. Later, if in-proc submodules are
+supported, a def_repo event should be emitted for each submodule
+visited.
+
+`"region_enter"`::
+ This event is generated when entering a region.
++
+------------
+{
+ "event":"region_enter",
+ ...
+ "repo":1, # optional
+ "nesting":1, # current region stack depth
+ "category":"index", # optional
+ "label":"do_read_index", # optional
+ "msg":".git/index" # optional
+}
+------------
++
+The `category` field may be used in a future enhancement to
+do category-based filtering.
++
+`GIT_TR2_EVENT_NESTING` or `trace2.eventNesting` can be used to
+filter deeply nested regions and data events. It defaults to "2".
+
+`"region_leave"`::
+ This event is generated when leaving a region.
++
+------------
+{
+ "event":"region_leave",
+ ...
+ "repo":1, # optional
+ "t_rel":0.002876, # time spent in region in seconds
+ "nesting":1, # region stack depth
+ "category":"index", # optional
+ "label":"do_read_index", # optional
+ "msg":".git/index" # optional
+}
+------------
+
+`"data"`::
+ This event is generated to log a thread- and region-local
+ key/value pair.
++
+------------
+{
+ "event":"data",
+ ...
+ "repo":1, # optional
+ "t_abs":0.024107, # absolute elapsed time
+ "t_rel":0.001031, # elapsed time in region/thread
+ "nesting":2, # region stack depth
+ "category":"index",
+ "key":"read/cache_nr",
+ "value":"3552"
+}
+------------
++
+The "value" field may be an integer or a string.
+
+`"data-json"`::
+ This event is generated to log a pre-formatted JSON string
+ containing structured data.
++
+------------
+{
+ "event":"data_json",
+ ...
+ "repo":1, # optional
+ "t_abs":0.015905,
+ "t_rel":0.015905,
+ "nesting":1,
+ "category":"process",
+ "key":"windows/ancestry",
+ "value":["bash.exe","bash.exe"]
+}
+------------
+
+== Example Trace2 API Usage
+
+Here is a hypothetical example showing the intended usage of the
+Trace2 API (without worrying about the actual Git details).
+
+Initialization::
+
+ Initialization happens in `main()`. Behind the scenes, an
+ `atexit` and `signal` handler are registered.
++
+----------------
+int main(int argc, const char **argv)
+{
+ int exit_code;
+
+ trace2_initialize();
+ trace2_cmd_start(argv);
+
+ exit_code = cmd_main(argc, argv);
+
+ trace2_cmd_exit(exit_code);
+
+ return exit_code;
+}
+----------------
+
+Command Details::
+
+ After the basics are established, additional command
+ information can be sent to Trace2 as it is discovered.
++
+----------------
+int cmd_checkout(int argc, const char **argv)
+{
+ trace2_cmd_name("checkout");
+ trace2_cmd_mode("branch");
+ trace2_def_repo(the_repository);
+
+ // emit "def_param" messages for "interesting" config settings.
+ trace2_cmd_list_config();
+
+ if (do_something())
+ trace2_cmd_error("Path '%s': cannot do something", path);
+
+ return 0;
+}
+----------------
+
+Child Processes::
+
+ Wrap code spawning child processes.
++
+----------------
+void run_child(...)
+{
+ int child_exit_code;
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ ...
+ cmd.trace2_child_class = "editor";
+
+ trace2_child_start(&cmd);
+ child_exit_code = spawn_child_and_wait_for_it();
+ trace2_child_exit(&cmd, child_exit_code);
+}
+----------------
++
+For example, the following fetch command spawned ssh, index-pack,
+rev-list, and gc. This example also shows that fetch took
+5.199 seconds and of that 4.932 was in ssh.
++
+----------------
+$ export GIT_TR2_BRIEF=1
+$ export GIT_TR2=~/log.normal
+$ git fetch origin
+...
+----------------
++
+----------------
+$ cat ~/log.normal
+version 2.20.1.vfs.1.1.47.g534dbe1ad1
+start git fetch origin
+worktree /Users/jeffhost/work/gfw
+cmd_name fetch (fetch)
+child_start[0] ssh git@github.com ...
+child_start[1] git index-pack ...
+... (Trace2 events from child processes omitted)
+child_exit[1] pid:14707 code:0 elapsed:0.076353
+child_exit[0] pid:14706 code:0 elapsed:4.931869
+child_start[2] git rev-list ...
+... (Trace2 events from child process omitted)
+child_exit[2] pid:14708 code:0 elapsed:0.110605
+child_start[3] git gc --auto
+... (Trace2 events from child process omitted)
+child_exit[3] pid:14709 code:0 elapsed:0.006240
+exit elapsed:5.198503 code:0
+atexit elapsed:5.198541 code:0
+----------------
++
+When a git process is a (direct or indirect) child of another
+git process, it inherits Trace2 context information. This
+allows the child to print the command hierarchy. This example
+shows gc as child[3] of fetch. When the gc process reports
+its name as "gc", it also reports the hierarchy as "fetch/gc".
+(In this example, trace2 messages from the child process are
+indented for clarity.)
++
+----------------
+$ export GIT_TR2_BRIEF=1
+$ export GIT_TR2=~/log.normal
+$ git fetch origin
+...
+----------------
++
+----------------
+$ cat ~/log.normal
+version 2.20.1.160.g5676107ecd.dirty
+start git fetch official
+worktree /Users/jeffhost/work/gfw
+cmd_name fetch (fetch)
+...
+child_start[3] git gc --auto
+ version 2.20.1.160.g5676107ecd.dirty
+ start /Users/jeffhost/work/gfw/git gc --auto
+ worktree /Users/jeffhost/work/gfw
+ cmd_name gc (fetch/gc)
+ exit elapsed:0.001959 code:0
+ atexit elapsed:0.001997 code:0
+child_exit[3] pid:20303 code:0 elapsed:0.007564
+exit elapsed:3.868938 code:0
+atexit elapsed:3.868970 code:0
+----------------
+
+Regions::
+
+	Regions can be used to time an interesting section of code.
++
+----------------
+void wt_status_collect(struct wt_status *s)
+{
+ trace2_region_enter("status", "worktrees", s->repo);
+ wt_status_collect_changes_worktree(s);
+ trace2_region_leave("status", "worktrees", s->repo);
+
+ trace2_region_enter("status", "index", s->repo);
+ wt_status_collect_changes_index(s);
+ trace2_region_leave("status", "index", s->repo);
+
+ trace2_region_enter("status", "untracked", s->repo);
+ wt_status_collect_untracked(s);
+ trace2_region_leave("status", "untracked", s->repo);
+}
+
+void wt_status_print(struct wt_status *s)
+{
+ trace2_region_enter("status", "print", s->repo);
+ switch (s->status_format) {
+ ...
+ }
+ trace2_region_leave("status", "print", s->repo);
+}
+----------------
++
+In this example, scanning for untracked files ran from +0.012568 to
++0.027149 (since the process started) and took 0.014581 seconds.
++
+----------------
+$ export GIT_TR2_PERF_BRIEF=1
+$ export GIT_TR2_PERF=~/log.perf
+$ git status
+...
+
+$ cat ~/log.perf
+d0 | main | version | | | | | 2.20.1.160.g5676107ecd.dirty
+d0 | main | start | | 0.001173 | | | git status
+d0 | main | def_repo | r1 | | | | worktree:/Users/jeffhost/work/gfw
+d0 | main | cmd_name | | | | | status (status)
+...
+d0 | main | region_enter | r1 | 0.010988 | | status | label:worktrees
+d0 | main | region_leave | r1 | 0.011236 | 0.000248 | status | label:worktrees
+d0 | main | region_enter | r1 | 0.011260 | | status | label:index
+d0 | main | region_leave | r1 | 0.012542 | 0.001282 | status | label:index
+d0 | main | region_enter | r1 | 0.012568 | | status | label:untracked
+d0 | main | region_leave | r1 | 0.027149 | 0.014581 | status | label:untracked
+d0 | main | region_enter | r1 | 0.027411 | | status | label:print
+d0 | main | region_leave | r1 | 0.028741 | 0.001330 | status | label:print
+d0 | main | exit | | 0.028778 | | | code:0
+d0 | main | atexit | | 0.028809 | | | code:0
+----------------
++
+Regions may be nested. This causes messages to be indented in the
+PERF target, for example.
+Elapsed times are relative to the start of the corresponding nesting
+level as expected. For example, if we add region messages to:
++
+----------------
+static enum path_treatment read_directory_recursive(struct dir_struct *dir,
+ struct index_state *istate, const char *base, int baselen,
+ struct untracked_cache_dir *untracked, int check_only,
+ int stop_at_first_file, const struct pathspec *pathspec)
+{
+ enum path_treatment state, subdir_state, dir_state = path_none;
+
+ trace2_region_enter_printf("dir", "read_recursive", NULL, "%.*s", baselen, base);
+ ...
+ trace2_region_leave_printf("dir", "read_recursive", NULL, "%.*s", baselen, base);
+ return dir_state;
+}
+----------------
++
+We can further investigate the time spent scanning for untracked files.
++
+----------------
+$ export GIT_TR2_PERF_BRIEF=1
+$ export GIT_TR2_PERF=~/log.perf
+$ git status
+...
+$ cat ~/log.perf
+d0 | main | version | | | | | 2.20.1.162.gb4ccea44db.dirty
+d0 | main | start | | 0.001173 | | | git status
+d0 | main | def_repo | r1 | | | | worktree:/Users/jeffhost/work/gfw
+d0 | main | cmd_name | | | | | status (status)
+...
+d0 | main | region_enter | r1 | 0.015047 | | status | label:untracked
+d0 | main | region_enter | | 0.015132 | | dir | ..label:read_recursive
+d0 | main | region_enter | | 0.016341 | | dir | ....label:read_recursive vcs-svn/
+d0 | main | region_leave | | 0.016422 | 0.000081 | dir | ....label:read_recursive vcs-svn/
+d0 | main | region_enter | | 0.016446 | | dir | ....label:read_recursive xdiff/
+d0 | main | region_leave | | 0.016522 | 0.000076 | dir | ....label:read_recursive xdiff/
+d0 | main | region_enter | | 0.016612 | | dir | ....label:read_recursive git-gui/
+d0 | main | region_enter | | 0.016698 | | dir | ......label:read_recursive git-gui/po/
+d0 | main | region_enter | | 0.016810 | | dir | ........label:read_recursive git-gui/po/glossary/
+d0 | main | region_leave | | 0.016863 | 0.000053 | dir | ........label:read_recursive git-gui/po/glossary/
+...
+d0 | main | region_enter | | 0.031876 | | dir | ....label:read_recursive builtin/
+d0 | main | region_leave | | 0.032270 | 0.000394 | dir | ....label:read_recursive builtin/
+d0 | main | region_leave | | 0.032414 | 0.017282 | dir | ..label:read_recursive
+d0 | main | region_leave | r1 | 0.032454 | 0.017407 | status | label:untracked
+...
+d0 | main | exit | | 0.034279 | | | code:0
+d0 | main | atexit | | 0.034322 | | | code:0
+----------------
++
+Trace2 regions are similar to the existing trace_performance_enter()
+and trace_performance_leave() routines, but are thread safe and
+maintain per-thread stacks of timers.
+
+Data Messages::
+
+ Data messages added to a region.
++
+----------------
+int read_index_from(struct index_state *istate, const char *path,
+ const char *gitdir)
+{
+ trace2_region_enter_printf("index", "do_read_index", the_repository, "%s", path);
+
+ ...
+
+ trace2_data_intmax("index", the_repository, "read/version", istate->version);
+ trace2_data_intmax("index", the_repository, "read/cache_nr", istate->cache_nr);
+
+ trace2_region_leave_printf("index", "do_read_index", the_repository, "%s", path);
+}
+----------------
++
+This example shows that the index contained 3552 entries.
++
+----------------
+$ export GIT_TR2_PERF_BRIEF=1
+$ export GIT_TR2_PERF=~/log.perf
+$ git status
+...
+$ cat ~/log.perf
+d0 | main | version | | | | | 2.20.1.156.gf9916ae094.dirty
+d0 | main | start | | 0.001173 | | | git status
+d0 | main | def_repo | r1 | | | | worktree:/Users/jeffhost/work/gfw
+d0 | main | cmd_name | | | | | status (status)
+d0 | main | region_enter | r1 | 0.001791 | | index | label:do_read_index .git/index
+d0 | main | data | r1 | 0.002494 | 0.000703 | index | ..read/version:2
+d0 | main | data | r1 | 0.002520 | 0.000729 | index | ..read/cache_nr:3552
+d0 | main | region_leave | r1 | 0.002539 | 0.000748 | index | label:do_read_index .git/index
+...
+----------------
+
+Thread Events::
+
+ Thread messages added to a thread-proc.
++
+For example, the multithreaded preload-index code can be
+instrumented with a region around the thread pool and then
+per-thread start and exit events within the thread-proc.
++
+----------------
+static void *preload_thread(void *_data)
+{
+ // start the per-thread clock and emit a message.
+ trace2_thread_start("preload_thread");
+
+ // report which chunk of the array this thread was assigned.
+ trace2_data_intmax("index", the_repository, "offset", p->offset);
+ trace2_data_intmax("index", the_repository, "count", nr);
+
+ do {
+ ...
+ } while (--nr > 0);
+ ...
+
+ // report elapsed time taken by this thread.
+ trace2_thread_exit();
+ return NULL;
+}
+
+void preload_index(struct index_state *index,
+ const struct pathspec *pathspec,
+ unsigned int refresh_flags)
+{
+ trace2_region_enter("index", "preload", the_repository);
+
+ for (i = 0; i < threads; i++) {
+ ... /* create thread */
+ }
+
+ for (i = 0; i < threads; i++) {
+ ... /* join thread */
+ }
+
+ trace2_region_leave("index", "preload", the_repository);
+}
+----------------
++
+In this example preload_index() was executed by the `main` thread
+and started the `preload` region. Seven threads, named
+`th01:preload_thread` through `th07:preload_thread`, were started.
+Events from each thread are atomically appended to the shared target
+stream as they occur, so they may appear in random order with respect
+to other threads. Finally, the main thread waits for the threads to
+finish and leaves the region.
++
+Data events are tagged with the active thread name. They are used
+to report the per-thread parameters.
++
+----------------
+$ export GIT_TR2_PERF_BRIEF=1
+$ export GIT_TR2_PERF=~/log.perf
+$ git status
+...
+$ cat ~/log.perf
+...
+d0 | main | region_enter | r1 | 0.002595 | | index | label:preload
+d0 | th01:preload_thread | thread_start | | 0.002699 | | |
+d0 | th02:preload_thread | thread_start | | 0.002721 | | |
+d0 | th01:preload_thread | data | r1 | 0.002736 | 0.000037 | index | offset:0
+d0 | th02:preload_thread | data | r1 | 0.002751 | 0.000030 | index | offset:2032
+d0 | th03:preload_thread | thread_start | | 0.002711 | | |
+d0 | th06:preload_thread | thread_start | | 0.002739 | | |
+d0 | th01:preload_thread | data | r1 | 0.002766 | 0.000067 | index | count:508
+d0 | th06:preload_thread | data | r1 | 0.002856 | 0.000117 | index | offset:2540
+d0 | th03:preload_thread | data | r1 | 0.002824 | 0.000113 | index | offset:1016
+d0 | th04:preload_thread | thread_start | | 0.002710 | | |
+d0 | th02:preload_thread | data | r1 | 0.002779 | 0.000058 | index | count:508
+d0 | th06:preload_thread | data | r1 | 0.002966 | 0.000227 | index | count:508
+d0 | th07:preload_thread | thread_start | | 0.002741 | | |
+d0 | th07:preload_thread | data | r1 | 0.003017 | 0.000276 | index | offset:3048
+d0 | th05:preload_thread | thread_start | | 0.002712 | | |
+d0 | th05:preload_thread | data | r1 | 0.003067 | 0.000355 | index | offset:1524
+d0 | th05:preload_thread | data | r1 | 0.003090 | 0.000378 | index | count:508
+d0 | th07:preload_thread | data | r1 | 0.003037 | 0.000296 | index | count:504
+d0 | th03:preload_thread | data | r1 | 0.002971 | 0.000260 | index | count:508
+d0 | th04:preload_thread | data | r1 | 0.002983 | 0.000273 | index | offset:508
+d0 | th04:preload_thread | data | r1 | 0.007311 | 0.004601 | index | count:508
+d0 | th05:preload_thread | thread_exit | | 0.008781 | 0.006069 | |
+d0 | th01:preload_thread | thread_exit | | 0.009561 | 0.006862 | |
+d0 | th03:preload_thread | thread_exit | | 0.009742 | 0.007031 | |
+d0 | th06:preload_thread | thread_exit | | 0.009820 | 0.007081 | |
+d0 | th02:preload_thread | thread_exit | | 0.010274 | 0.007553 | |
+d0 | th07:preload_thread | thread_exit | | 0.010477 | 0.007736 | |
+d0 | th04:preload_thread | thread_exit | | 0.011657 | 0.008947 | |
+d0 | main | region_leave | r1 | 0.011717 | 0.009122 | index | label:preload
+...
+d0 | main | exit | | 0.029996 | | | code:0
+d0 | main | atexit | | 0.030027 | | | code:0
+----------------
++
+In this example, the preload region took 0.009122 seconds. The 7 threads
+took between 0.006069 and 0.008947 seconds to work on their portion of
+the index. Thread "th01" worked on 508 items at offset 0. Thread "th02"
+worked on 508 items at offset 2032. Thread "th04" worked on 508 items
+at offset 508.
++
+This example also shows that thread names are assigned in a racy manner
+as each thread starts and allocates TLS storage.
+
+== Future Work
+
+=== Relationship to the Existing Trace API (api-trace.txt)
+
+There are a few issues to resolve before we can completely
+switch to Trace2.
+
+* Updating existing tests that assume GIT_TRACE format messages.
+
+* How to best handle custom GIT_TRACE_<key> messages?
+
+** The GIT_TRACE_<key> mechanism allows each <key> to write to a
+different file (in addition to just stderr).
+
+** Do we want to maintain that ability or simply write to the existing
+Trace2 targets (and convert <key> to a "category")?
* one side of history renames x -> z, and the other renames some file to
x/e, causing the need for the merge to do a transitive rename.
- * one side of history renames x -> z, but also renames all files within
- x. For example, x/a -> z/alpha, x/b -> z/bravo, etc.
+ * one side of history renames x -> z, but also renames all files within x.
+ For example, x/a -> z/alpha, x/b -> z/bravo, etc.
* both 'x' and 'y' being merged into a single directory 'z', with a
directory rename being detected for both x->z and y->z.
An example client/server communication might look like this:
----
- S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n
+ S: 006274730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n
S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n
S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n
- S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n
+ S: 003d74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n
S: 0000
- C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n
- C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n
+ C: 00677d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n
+ C: 006874730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n
C: 0000
C: [PACKDATA]
Git Protocol Capabilities
=========================
+NOTE: this document describes capabilities for versions 0 and 1 of the pack
+protocol. For version 2, please refer to the link:protocol-v2.html[protocol-v2]
+doc.
+
Servers SHOULD support all capabilities defined in this document.
On the very first line of the initial server response of either
purposes, and MUST NOT be used to programmatically assume the presence
or absence of particular features.
+symref
+------
+
+This parameterized capability is used to inform the receiver which symbolic ref
+points to which ref; for example, "symref=HEAD:refs/heads/master" tells the
+receiver that HEAD points to master. This capability can be repeated to
+represent multiple symrefs.
+
+Servers SHOULD include this capability for the HEAD symref if it is one of the
+refs being sent.
+
+Clients MAY use the parameters from this capability to select the proper initial
+branch when cloning a repository.
+
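As a rough illustration of the syntax above (the object id placeholder, the agent
value, and the rest of the capability list are made up, and the pkt-line length
prefix is omitted), the first ref advertised by upload-pack might carry the
capability like this:

----
S: <oid-of-master> HEAD\0side-band-64k ofs-delta symref=HEAD:refs/heads/master agent=git/2.22.0\n
----

A client that sees this can point the HEAD of a fresh clone at
refs/heads/master directly instead of guessing from the advertised values.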
shallow
-------
- Git Wire Protocol, Version 2
-==============================
+Git Wire Protocol, Version 2
+============================
This document presents a specification for a version 2 of Git's wire
protocol. Protocol v2 will improve upon v1 in the following ways:
has completed, a client can reuse the connection and request that other
commands be executed.
- Packet-Line Framing
----------------------
+Packet-Line Framing
+-------------------
All communication is done using packet-line framing, just as in v1. See
`Documentation/technical/pack-protocol.txt` and
* '0000' Flush Packet (flush-pkt) - indicates the end of a message
* '0001' Delimiter Packet (delim-pkt) - separates sections of a message
- Initial Client Request
-------------------------
+Initial Client Request
+----------------------
In general a client can request to speak protocol v2 by sending
`version=2` through the respective side-channel for the transport being
found in `pack-protocol.txt` and `http-protocol.txt`. In all cases the
response from the server is the capability advertisement.
- Git Transport
-~~~~~~~~~~~~~~~
+Git Transport
+~~~~~~~~~~~~~
When using the git:// transport, you can request to use protocol v2 by
sending "version=2" as an extra parameter:
003egit-upload-pack /project.git\0host=myserver.com\0\0version=2\0
- SSH and File Transport
-~~~~~~~~~~~~~~~~~~~~~~~~
+SSH and File Transport
+~~~~~~~~~~~~~~~~~~~~~~
When using either the ssh:// or file:// transport, the GIT_PROTOCOL
environment variable must be set explicitly to include "version=2".
- HTTP Transport
-~~~~~~~~~~~~~~~~
+HTTP Transport
+~~~~~~~~~~~~~~
When using the http:// or https:// transport a client makes a "smart"
info/refs request as described in `http-protocol.txt` and requests that
Subsequent requests are then made directly to the service
`$GIT_URL/git-upload-pack`. (This works the same for git-receive-pack).
- Capability Advertisement
---------------------------
+Capability Advertisement
+------------------------
A server which decides to communicate (based on a request from a client)
using protocol version 2 notifies the client by sending a version string
key = 1*(ALPHA | DIGIT | "-_")
value = 1*(ALPHA | DIGIT | " -_.,?\/{}[]()<>!@#$%^&*+=:;")
- Command Request
------------------
+Command Request
+---------------
After receiving the capability advertisement, a client can then issue a
request to select the command it wants with any particular capabilities
optionally send an empty request consisting of just a flush-pkt to
indicate that no more requests will be made.
- Capabilities
---------------
+Capabilities
+------------
There are two different types of capabilities: normal capabilities,
which can be used to convey information or alter the behavior of a
permits simple round-robin load-balancing on the server side, without
needing to worry about state management.
- agent
-~~~~~~~
+agent
+~~~~~
The server can advertise the `agent` capability with a value `X` (in the
form `agent=X`) to notify the client that the server is running version
and debugging purposes, and MUST NOT be used to programmatically assume
the presence or absence of particular features.
- ls-refs
-~~~~~~~~~
+ls-refs
+~~~~~~~
`ls-refs` is the command used to request a reference advertisement in v2.
Unlike the current reference advertisement, ls-refs takes in arguments
symref = "symref-target:" symref-target
peeled = "peeled:" obj-id
- fetch
-~~~~~~~
+fetch
+~~~~~
`fetch` is the command used to fetch a packfile in v2. It can be looked
at as a modified version of the v1 fetch where the ref-advertisement is
2 - progress messages
3 - fatal error message just before stream aborts
- server-option
-~~~~~~~~~~~~~~~
+server-option
+~~~~~~~~~~~~~
+If advertised, indicates that any number of server-specific options can be
included in a request. This is done by sending each option as a
--- /dev/null
+--
+* `0` or `false` - Disables the target.
+* `1` or `true` - Writes to `STDERR`.
+* `[2-9]` - Writes to the already opened file descriptor.
+* `<absolute-pathname>` - Writes to the file in append mode.
+* `af_unix:[<socket_type>:]<absolute-pathname>` - Writes to a
+Unix domain socket (on platforms that support them). Socket
+type can be either `stream` or `dgram`; if omitted, Git will
+try both.
+--
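As a brief sketch of how these values are used (the variable name is taken from
the perf-target examples elsewhere in this document; the paths are illustrative):

----
$ export GIT_TR2_PERF=1                             # write perf events to stderr
$ export GIT_TR2_PERF=/tmp/git-trace.perf           # append to an absolute pathname
$ export GIT_TR2_PERF=af_unix:stream:/tmp/tr2.sock  # stream-type Unix domain socket
$ git status
----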
where <address> may be a path, a server and path, or an arbitrary
URL-like string recognized by the specific remote helper being
-invoked. See linkgit:gitremote-helpers[1] for details.
+invoked. See linkgit:gitremote-helpers[7] for details.
If there are a large number of similarly-named remote repositories and
you want to use a different format for them (such that the URLs you
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v2.21.0
+DEF_VER=v2.22.0-rc0
LF='
'
#
# Define FILENO_IS_A_MACRO if fileno() is a macro, not a real function.
#
+# Define NEED_ACCESS_ROOT_HANDLER if access() under root may succeed for X_OK
+# even if execution permission isn't granted for any user.
+#
# Define PAGER_ENV to a SP separated VAR=VAL pairs to define
# default environment variables to be passed when a pager is spawned, e.g.
#
#
# Define DEVELOPER to enable more compiler warnings. Compiler version
# and family are auto detected, but could be overridden by defining
-# COMPILER_FEATURES (see config.mak.dev)
+# COMPILER_FEATURES (see config.mak.dev). You can still set
+# CFLAGS="..." in combination with what DEVELOPER enables, whether that's
+# for tweaking something unrelated (e.g. optimization level), or for
+# selectively overriding something DEVELOPER or one of the DEVOPTS
+# (see just below) brings in.
#
# When DEVELOPER is set, DEVOPTS can be used to control compiler
# options. This variable contains keywords separated by
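As a minimal sketch of what the DEVELOPER comment above describes (the flag
values are only an example), the extra warnings can be combined with a custom
optimization level on the make command line:

----
$ make DEVELOPER=1 CFLAGS="-O0 -g"
----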
@$(SHELL_PATH) ./GIT-VERSION-GEN
-include GIT-VERSION-FILE
-# CFLAGS and LDFLAGS are for the users to override from the command line.
-
-CFLAGS = -g -O2 -Wall
-LDFLAGS =
-ALL_CFLAGS = $(CPPFLAGS) $(CFLAGS)
-ALL_LDFLAGS = $(LDFLAGS)
-STRIP ?= strip
-
-# Create as necessary, replace existing, make ranlib unneeded.
-ARFLAGS = rcs
-
+# Set our default configuration.
+#
# Among the variables below, these:
# gitexecdir
# template_dir
export prefix bindir sharedir sysconfdir gitwebdir perllibdir localedir
+# Set our default programs
CC = cc
AR = ar
RM = rm -f
XGETTEXT = xgettext
MSGFMT = msgfmt
CURL_CONFIG = curl-config
-PTHREAD_LIBS = -lpthread
-PTHREAD_CFLAGS =
GCOV = gcov
+STRIP = strip
SPATCH = spatch
export TCL_PATH TCLTK_PATH
-# user customisation variable for 'sparse' target
-SPARSE_FLAGS ?=
-# internal/platform customisation variable for 'sparse'
-SP_EXTRA_FLAGS =
-
-SPATCH_FLAGS = --all-includes --patch .
-
-
-
-### --- END CONFIGURATION SECTION ---
-
-# Those must not be GNU-specific; they are shared with perl/ which may
-# be built by a different compiler. (Note that this is an artifact now
-# but it still might be nice to keep that distinction.)
-BASIC_CFLAGS = -I.
-BASIC_LDFLAGS =
+# Set our default LIBS variables
+PTHREAD_LIBS = -lpthread
# Guard against environment variables
BUILTIN_OBJS =
LIB_OBJS =
PROGRAM_OBJS =
PROGRAMS =
+EXCLUDED_PROGRAMS =
SCRIPT_PERL =
SCRIPT_PYTHON =
SCRIPT_SH =
SCRIPT_SH += git-merge-resolve.sh
SCRIPT_SH += git-mergetool.sh
SCRIPT_SH += git-quiltimport.sh
-SCRIPT_SH += git-legacy-rebase.sh
-SCRIPT_SH += git-remote-testgit.sh
+SCRIPT_SH += git-legacy-stash.sh
SCRIPT_SH += git-request-pull.sh
-SCRIPT_SH += git-stash.sh
SCRIPT_SH += git-submodule.sh
SCRIPT_SH += git-web--browse.sh
SCRIPT_PYTHON += git-p4.py
-NO_INSTALL += git-remote-testgit
-
# Generated files for scripts
SCRIPT_SH_GEN = $(patsubst %.sh,%,$(SCRIPT_SH))
SCRIPT_PERL_GEN = $(patsubst %.perl,%,$(SCRIPT_PERL))
SCRIPT_PYTHON_GEN = $(patsubst %.py,%,$(SCRIPT_PYTHON))
-SCRIPT_SH_INS = $(filter-out $(NO_INSTALL),$(SCRIPT_SH_GEN))
-SCRIPT_PERL_INS = $(filter-out $(NO_INSTALL),$(SCRIPT_PERL_GEN))
-SCRIPT_PYTHON_INS = $(filter-out $(NO_INSTALL),$(SCRIPT_PYTHON_GEN))
-
# Individual rules to allow e.g.
# "make -C ../.. SCRIPT_PERL=contrib/foo/bar.perl build-perl-script"
# from subdirectories like contrib/*/
build-python-script: $(SCRIPT_PYTHON_GEN)
.PHONY: install-perl-script install-sh-script install-python-script
-install-sh-script: $(SCRIPT_SH_INS)
+install-sh-script: $(SCRIPT_SH_GEN)
$(INSTALL) $^ '$(DESTDIR_SQ)$(gitexec_instdir_SQ)'
-install-perl-script: $(SCRIPT_PERL_INS)
+install-perl-script: $(SCRIPT_PERL_GEN)
$(INSTALL) $^ '$(DESTDIR_SQ)$(gitexec_instdir_SQ)'
-install-python-script: $(SCRIPT_PYTHON_INS)
+install-python-script: $(SCRIPT_PYTHON_GEN)
$(INSTALL) $^ '$(DESTDIR_SQ)$(gitexec_instdir_SQ)'
.PHONY: clean-perl-script clean-sh-script clean-python-script
clean-python-script:
$(RM) $(SCRIPT_PYTHON_GEN)
-SCRIPTS = $(SCRIPT_SH_INS) \
- $(SCRIPT_PERL_INS) \
- $(SCRIPT_PYTHON_INS) \
+SCRIPTS = $(SCRIPT_SH_GEN) \
+ $(SCRIPT_PERL_GEN) \
+ $(SCRIPT_PYTHON_GEN) \
git-instaweb
ETAGS_TARGET = TAGS
TEST_BUILTINS_OBJS += test-revision-walking.o
TEST_BUILTINS_OBJS += test-run-command.o
TEST_BUILTINS_OBJS += test-scrap-cache-tree.o
+TEST_BUILTINS_OBJS += test-serve-v2.o
TEST_BUILTINS_OBJS += test-sha1.o
TEST_BUILTINS_OBJS += test-sha1-array.o
TEST_BUILTINS_OBJS += test-sha256.o
TEST_BUILTINS_OBJS += test-submodule-config.o
TEST_BUILTINS_OBJS += test-submodule-nested-repo-config.o
TEST_BUILTINS_OBJS += test-subprocess.o
+TEST_BUILTINS_OBJS += test-trace2.o
TEST_BUILTINS_OBJS += test-urlmatch-normalization.o
TEST_BUILTINS_OBJS += test-xml-encode.o
TEST_BUILTINS_OBJS += test-wildmatch.o
GENERATED_H += command-list.h
-LIB_H = $(shell $(FIND) . \
+LIB_H := $(sort $(shell git ls-files '*.h' ':!t/' ':!Documentation/' 2>/dev/null || \
+ $(FIND) . \
-name .git -prune -o \
-name t -prune -o \
-name Documentation -prune -o \
- -name '*.h' -print)
+ -name '*.h' -print))
LIB_OBJS += abspath.o
LIB_OBJS += advice.o
LIB_OBJS += thread-utils.o
LIB_OBJS += tmp-objdir.o
LIB_OBJS += trace.o
+LIB_OBJS += trace2.o
+LIB_OBJS += trace2/tr2_cfg.o
+LIB_OBJS += trace2/tr2_cmd_name.o
+LIB_OBJS += trace2/tr2_dst.o
+LIB_OBJS += trace2/tr2_sid.o
+LIB_OBJS += trace2/tr2_sysenv.o
+LIB_OBJS += trace2/tr2_tbuf.o
+LIB_OBJS += trace2/tr2_tgt_event.o
+LIB_OBJS += trace2/tr2_tgt_normal.o
+LIB_OBJS += trace2/tr2_tgt_perf.o
+LIB_OBJS += trace2/tr2_tls.o
LIB_OBJS += trailer.o
LIB_OBJS += transport.o
LIB_OBJS += transport-helper.o
BUILTIN_OBJS += builtin/range-diff.o
BUILTIN_OBJS += builtin/read-tree.o
BUILTIN_OBJS += builtin/rebase.o
-BUILTIN_OBJS += builtin/rebase--interactive.o
BUILTIN_OBJS += builtin/receive-pack.o
BUILTIN_OBJS += builtin/reflog.o
BUILTIN_OBJS += builtin/remote.o
BUILTIN_OBJS += builtin/revert.o
BUILTIN_OBJS += builtin/rm.o
BUILTIN_OBJS += builtin/send-pack.o
-BUILTIN_OBJS += builtin/serve.o
BUILTIN_OBJS += builtin/shortlog.o
BUILTIN_OBJS += builtin/show-branch.o
BUILTIN_OBJS += builtin/show-index.o
BUILTIN_OBJS += builtin/show-ref.o
+BUILTIN_OBJS += builtin/stash.o
BUILTIN_OBJS += builtin/stripspace.o
BUILTIN_OBJS += builtin/submodule--helper.o
BUILTIN_OBJS += builtin/symbolic-ref.o
DC_SHA1_SUBMODULE = auto
endif
+# Set CFLAGS, LDFLAGS and other *FLAGS variables. These might be
+# tweaked by config.* below as well as the command-line, both of
+# which'll override these defaults.
+CFLAGS = -g -O2 -Wall
+LDFLAGS =
+BASIC_CFLAGS = -I.
+BASIC_LDFLAGS =
+
+# library flags
+ARFLAGS = rcs
+PTHREAD_CFLAGS =
+
+# For the 'sparse' target
+SPARSE_FLAGS ?=
+SP_EXTRA_FLAGS =
+
+# For the 'coccicheck' target; setting SPATCH_BATCH_SIZE higher will
+# usually result in less CPU usage at the cost of higher peak memory.
+# Setting it to 0 will feed all files in a single spatch invocation.
+SPATCH_FLAGS = --all-includes --patch .
+SPATCH_BATCH_SIZE = 1
+
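For illustration, the batching knob described above might be used like this
when running the `coccicheck` target (the batch size is arbitrary):

----
$ make coccicheck SPATCH_BATCH_SIZE=4   # feed 4 files per spatch invocation
$ make coccicheck SPATCH_BATCH_SIZE=0   # feed all files to a single invocation
----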
include config.mak.uname
-include config.mak.autogen
-include config.mak
include config.mak.dev
endif
+ALL_CFLAGS = $(DEVELOPER_CFLAGS) $(CPPFLAGS) $(CFLAGS)
+ALL_LDFLAGS = $(LDFLAGS)
+
comma := ,
empty :=
space := $(empty) $(empty)
BASIC_CFLAGS += -fno-omit-frame-pointer
ifneq ($(filter undefined,$(SANITIZERS)),)
BASIC_CFLAGS += -DNO_UNALIGNED_LOADS
+BASIC_CFLAGS += -DSHA1DC_FORCE_ALIGNED_ACCESS
endif
ifneq ($(filter leak,$(SANITIZERS)),)
BASIC_CFLAGS += -DSUPPRESS_ANNOTATED_LEAKS
REMOTE_CURL_PRIMARY =
REMOTE_CURL_ALIASES =
REMOTE_CURL_NAMES =
+ EXCLUDED_PROGRAMS += git-http-fetch git-http-push
else
ifdef CURLDIR
# Try "-Wl,-rpath=$(CURLDIR)/$(lib)" in such a case.
ifeq "$(curl_check)" "070908"
ifndef NO_EXPAT
PROGRAM_OBJS += http-push.o
+ else
+ EXCLUDED_PROGRAMS += git-http-push
endif
+ else
+ EXCLUDED_PROGRAMS += git-http-push
endif
curl_check := $(shell (echo 072200; $(CURL_CONFIG) --vernum | sed -e '/^70[BC]/s/^/0/') 2>/dev/null | sort -r | sed -ne 2p)
ifeq "$(curl_check)" "072200"
LIB_OBJS += compat/inet_pton.o
BASIC_CFLAGS += -DNO_INET_PTON
endif
-ifndef NO_UNIX_SOCKETS
+ifdef NO_UNIX_SOCKETS
+ BASIC_CFLAGS += -DNO_UNIX_SOCKETS
+ EXCLUDED_PROGRAMS += git-credential-cache git-credential-cache--daemon
+else
LIB_OBJS += unix-socket.o
PROGRAM_OBJS += credential-cache.o
PROGRAM_OBJS += credential-cache--daemon.o
COMPAT_OBJS += compat/fileno.o
endif
+ifdef NEED_ACCESS_ROOT_HANDLER
+ COMPAT_CFLAGS += -DNEED_ACCESS_ROOT_HANDLER
+ COMPAT_OBJS += compat/access.o
+endif
+
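A hypothetical way to enable the handler described in the Makefile header, e.g.
in config.mak (any non-empty value works, since the Makefile only tests `ifdef`;
`YesPlease` is just the conventional spelling):

----
NEED_ACCESS_ROOT_HANDLER = YesPlease
----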
ifeq ($(TCLTK_PATH),)
NO_TCLTK = NoThanks
endif
command-list.h: generate-cmdlist.sh command-list.txt
command-list.h: $(wildcard Documentation/git*.txt) Documentation/*config.txt Documentation/config/*.txt
- $(QUIET_GEN)$(SHELL_PATH) ./generate-cmdlist.sh command-list.txt >$@+ && mv $@+ $@
+ $(QUIET_GEN)$(SHELL_PATH) ./generate-cmdlist.sh \
+ $(patsubst %,--exclude-program %,$(EXCLUDED_PROGRAMS)) \
+ command-list.txt >$@+ && mv $@+ $@
SCRIPT_DEFINES = $(SHELL_PATH_SQ):$(DIFF_SQ):$(GIT_VERSION):\
$(localedir_SQ):$(NO_CURL):$(USE_GETTEXT_SCHEME):$(SANE_TOOL_PATH_SQ):\
# should _not_ be included here, since they are necessary even when
# building an object for the first time.
-$(OBJECTS): $(LIB_H)
+$(OBJECTS): $(LIB_H) $(GENERATED_H)
endif
exec-cmd.sp exec-cmd.s exec-cmd.o: GIT-PREFIX
export DEFAULT_EDITOR DEFAULT_PAGER
+Documentation/GIT-EXCLUDED-PROGRAMS: FORCE
+ @EXCLUDED='EXCLUDED_PROGRAMS := $(EXCLUDED_PROGRAMS)'; \
+ if test x"$$EXCLUDED" != \
+ x"`cat Documentation/GIT-EXCLUDED-PROGRAMS 2>/dev/null`" ; then \
+ echo >&2 " * new documentation flags"; \
+ echo "$$EXCLUDED" >Documentation/GIT-EXCLUDED-PROGRAMS; \
+ fi
+
.PHONY: doc man man-perl html info pdf
doc: man-perl
$(MAKE) -C Documentation all
test_bindir_programs := $(patsubst %,bin-wrappers/%,$(BINDIR_PROGRAMS_NEED_X) $(BINDIR_PROGRAMS_NO_X) $(TEST_PROGRAMS_NEED_X))
all:: $(TEST_PROGRAMS) $(test_bindir_programs)
-all:: $(NO_INSTALL)
bin-wrappers/%: wrap-for-bin.sh
@mkdir -p bin-wrappers
sparse: $(SP_OBJ)
GEN_HDRS := command-list.h unicode-width.h
-EXCEPT_HDRS := $(GEN_HDRS) compat% xdiff%
+EXCEPT_HDRS := $(GEN_HDRS) compat/% xdiff/%
+ifndef GCRYPT_SHA256
+ EXCEPT_HDRS += sha256/gcrypt.h
+endif
CHK_HDRS = $(filter-out $(EXCEPT_HDRS),$(patsubst ./%,%,$(LIB_H)))
HCO = $(patsubst %.h,%.hco,$(CHK_HDRS))
%.cocci.patch: %.cocci $(COCCI_SOURCES)
@echo ' ' SPATCH $<; \
- ret=0; \
- for f in $(COCCI_SOURCES); do \
- $(SPATCH) --sp-file $< $$f $(SPATCH_FLAGS) || \
- { ret=$$?; break; }; \
- done >$@+ 2>$@.log; \
- if test $$ret != 0; \
+ if test $(SPATCH_BATCH_SIZE) = 0; then \
+ limit=; \
+ else \
+ limit='-n $(SPATCH_BATCH_SIZE)'; \
+ fi; \
+ if ! echo $(COCCI_SOURCES) | xargs $$limit \
+ $(SPATCH) --sp-file $< $(SPATCH_FLAGS) \
+ >$@+ 2>$@.log; \
then \
cat $@.log; \
exit 1; \
artifacts-tar:: $(ALL_PROGRAMS) $(SCRIPT_LIB) $(BUILT_INS) $(OTHER_PROGRAMS) \
GIT-BUILD-OPTIONS $(TEST_PROGRAMS) $(test_bindir_programs) \
- $(NO_INSTALL) $(MOFILES)
+ $(MOFILES)
$(QUIET_SUBDIR0)templates $(QUIET_SUBDIR1) \
SHELL_PATH='$(SHELL_PATH_SQ)' PERL_PATH='$(PERL_PATH_SQ)'
test -n "$(ARTIFACTS_DIRECTORY)"
$(RM) $(OBJECTS)
$(RM) $(LIB_FILE) $(XDIFF_LIB) $(VCSSVN_LIB)
$(RM) $(ALL_PROGRAMS) $(SCRIPT_LIB) $(BUILT_INS) git$X
- $(RM) $(TEST_PROGRAMS) $(NO_INSTALL)
+ $(RM) $(TEST_PROGRAMS)
$(RM) $(FUZZ_PROGRAMS)
$(RM) -r bin-wrappers $(dep_dirs)
$(RM) -r po/build/
$(RM) $(GIT_TARNAME).tar.gz git-core_$(GIT_VERSION)-*.tar.gz
$(RM) $(htmldocs).tar.gz $(manpages).tar.gz
$(MAKE) -C Documentation/ clean
+ $(RM) Documentation/GIT-EXCLUDED-PROGRAMS
ifndef NO_PERL
$(MAKE) -C gitweb clean
$(RM) -r perl/build/
.PHONY: check-docs
check-docs::
$(MAKE) -C Documentation lint-docs
- @(for v in $(ALL_COMMANDS); \
+ @(for v in $(patsubst %$X,%,$(ALL_COMMANDS)); \
do \
case "$$v" in \
git-merge-octopus | git-merge-ours | git-merge-recursive | \
git-merge-resolve | git-merge-subtree | \
git-fsck-objects | git-init-db | \
- git-remote-* | git-stage | \
+ git-remote-* | git-stage | git-legacy-* | \
git-?*--?* ) continue ;; \
esac ; \
test -f "Documentation/$$v.txt" || \
( \
sed -e '1,/^### command list/d' \
-e '/^#/d' \
+ -e '/guide$$/d' \
-e 's/[ ].*//' \
-e 's/^/listed /' command-list.txt; \
$(MAKE) -C Documentation print-man1 | \
grep '\.txt$$' | \
- sed -e 's|Documentation/|documented |' \
+ sed -e 's|^|documented |' \
-e 's/\.txt//'; \
) | while read how cmd; \
do \
- case " $(ALL_COMMANDS) " in \
+ case " $(patsubst %$X,%,$(ALL_COMMANDS) $(EXCLUDED_PROGRAMS)) " in \
*" $$cmd "*) ;; \
*) echo "removed but $$how: $$cmd" ;; \
esac; \
-Documentation/RelNotes/2.21.0.txt
\ No newline at end of file
+Documentation/RelNotes/2.22.0.txt
\ No newline at end of file
int advice_waiting_for_editor = 1;
int advice_graft_file_deprecated = 1;
int advice_checkout_ambiguous_remote_branch_name = 1;
+int advice_nested_tag = 1;
static int advice_use_color = -1;
static char advice_colors[][COLOR_MAXLEN] = {
{ "waitingForEditor", &advice_waiting_for_editor },
{ "graftFileDeprecated", &advice_graft_file_deprecated },
{ "checkoutAmbiguousRemoteBranchName", &advice_checkout_ambiguous_remote_branch_name },
+ { "nestedTag", &advice_nested_tag },
/* make this an alias for backward compatibility */
{ "pushNonFastForward", &advice_push_update_rejected }
extern int advice_waiting_for_editor;
extern int advice_graft_file_deprecated;
extern int advice_checkout_ambiguous_remote_branch_name;
+extern int advice_nested_tag;
int git_default_advice_config(const char *var, const char *value);
__attribute__((format (printf, 1, 2)))
void advise(const char *advice, ...);
int error_resolve_conflict(const char *me);
-extern void NORETURN die_resolve_conflict(const char *me);
+void NORETURN die_resolve_conflict(const char *me);
void NORETURN die_conclude_merge(void);
void detach_advice(const char *new_name);
state->ws_error_action = correct_ws_error;
return 0;
}
+ /*
+ * Please update $__git_whitespacelist in git-completion.bash
+ * when you add new options.
+ */
return error(_("unrecognized whitespace option '%s'"), option);
}
static void write_global_extended_header(struct archiver_args *args)
{
- const unsigned char *sha1 = args->commit_sha1;
+ const struct object_id *oid = args->commit_oid;
struct strbuf ext_header = STRBUF_INIT;
struct ustar_header header;
unsigned int mode;
- if (sha1)
+ if (oid)
strbuf_append_ext_header(&ext_header, "comment",
- sha1_to_hex(sha1), 40);
+ oid_to_hex(oid),
+ the_hash_algo->hexsz);
if (args->time > USTAR_MAX_MTIME) {
strbuf_append_ext_header_uint(&ext_header, "mtime",
args->time);
write_or_die(1, &locator64, ZIP64_DIR_TRAILER_LOCATOR_SIZE);
}
-static void write_zip_trailer(const unsigned char *sha1)
+static void write_zip_trailer(const struct object_id *oid)
{
struct zip_dir_trailer trailer;
int clamped = 0;
copy_le16_clamp(trailer.entries, zip_dir_entries, &clamped);
copy_le32(trailer.size, zip_dir.len);
copy_le32_clamp(trailer.offset, zip_offset, &clamped);
- copy_le16(trailer.comment_length, sha1 ? GIT_SHA1_HEXSZ : 0);
+ copy_le16(trailer.comment_length, oid ? the_hash_algo->hexsz : 0);
write_or_die(1, zip_dir.buf, zip_dir.len);
if (clamped)
write_zip64_trailer();
write_or_die(1, &trailer, ZIP_DIR_TRAILER_SIZE);
- if (sha1)
- write_or_die(1, sha1_to_hex(sha1), GIT_SHA1_HEXSZ);
+ if (oid)
+ write_or_die(1, oid_to_hex(oid), the_hash_algo->hexsz);
}
static void dos_time(timestamp_t *timestamp, int *dos_date, int *dos_time)
err = write_archive_entries(args, write_zip_entry);
if (!err)
- write_zip_trailer(args->commit_sha1);
+ write_zip_trailer(args->commit_oid);
strbuf_release(&zip_dir);
int remote)
{
const char *name = argv[0];
- const unsigned char *commit_sha1;
+ const struct object_id *commit_oid;
time_t archive_time;
struct tree *tree;
const struct commit *commit;
commit = lookup_commit_reference_gently(ar_args->repo, &oid, 1);
if (commit) {
- commit_sha1 = commit->object.oid.hash;
+ commit_oid = &commit->object.oid;
archive_time = commit->date;
} else {
- commit_sha1 = NULL;
+ commit_oid = NULL;
archive_time = time(NULL);
}
if (prefix) {
struct object_id tree_oid;
- unsigned int mode;
+ unsigned short mode;
int err;
err = get_tree_entry(&tree->object.oid, prefix, &tree_oid,
tree = parse_tree_indirect(&tree_oid);
}
ar_args->tree = tree;
- ar_args->commit_sha1 = commit_sha1;
+ ar_args->commit_oid = commit_oid;
ar_args->commit = commit;
ar_args->time = archive_time;
}
const char *base;
size_t baselen;
struct tree *tree;
- const unsigned char *commit_sha1;
+ const struct object_id *commit_oid;
const struct commit *commit;
timestamp_t time;
struct pathspec pathspec;
/* main api */
-extern int write_archive(int argc, const char **argv, const char *prefix,
- struct repository *repo,
- const char *name_hint, int remote);
+int write_archive(int argc, const char **argv, const char *prefix,
+ struct repository *repo,
+ const char *name_hint, int remote);
const char *archive_format_from_filename(const char *filename);
unsigned flags;
void *data;
};
-extern void register_archiver(struct archiver *);
+void register_archiver(struct archiver *);
-extern void init_tar_archiver(void);
-extern void init_zip_archiver(void);
-extern void init_archivers(void);
+void init_tar_archiver(void);
+void init_zip_archiver(void);
+void init_archivers(void);
typedef int (*write_archive_entry_fn_t)(struct archiver_args *args,
const struct object_id *oid,
const char *path, size_t pathlen,
unsigned int mode);
-extern int write_archive_entries(struct archiver_args *args, write_archive_entry_fn_t write_entry);
-extern void *object_file_to_archive(const struct archiver_args *args,
- const char *path, const struct object_id *oid,
- unsigned int mode, enum object_type *type,
- unsigned long *sizep);
+int write_archive_entries(struct archiver_args *args, write_archive_entry_fn_t write_entry);
+void *object_file_to_archive(const struct archiver_args *args,
+ const char *path, const struct object_id *oid,
+ unsigned int mode, enum object_type *type,
+ unsigned long *sizep);
#endif /* ARCHIVE_H */
* Like info/exclude and .gitignore, the attribute information can
* come from many places.
*
- * (1) .gitattribute file of the same directory;
- * (2) .gitattribute file of the parent directory if (1) does not have
+ * (1) .gitattributes file of the same directory;
+ * (2) .gitattributes file of the parent directory if (1) does not have
* any match; this goes recursively upwards, just like .gitignore.
* (3) $GIT_DIR/info/attributes, which overrides both of the above.
*
* In the same file, later entries override the earlier match, so in the
* global list, we would have entries from info/attributes the earliest
- * (reading the file from top to bottom), .gitattribute of the root
+ * (reading the file from top to bottom), .gitattributes of the root
* directory (again, reading the file from top to bottom) down to the
* current directory, and then scan the list backwards to find the first match.
* This is exactly the same as what is_excluded() does in dir.c to deal with
* set of attribute definitions, followed by the contents
* of $(prefix)/etc/gitattributes and a file specified by
* core.attributesfile. Then, contents from
- * .gitattribute files from directories closer to the
+ * .gitattributes files from directories closer to the
* root to the ones in deeper directories are pushed
* to the stack. Finally, at the very top of the stack
* we always keep the contents of $GIT_DIR/info/attributes.
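A small made-up illustration of that precedence for a path like `sub/file.txt`:
the deeper `.gitattributes` wins over the one at the root for the attributes it
sets, and `$GIT_DIR/info/attributes` overrides both:

----
# .gitattributes at the repository root
*.txt   text eol=lf
# sub/.gitattributes -- closer to the path, so its eol setting wins for sub/
*.txt   eol=crlf
# $GIT_DIR/info/attributes -- overrides both of the above for the attributes it sets
*.txt   -text
----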
const char *prefix,
struct commit *commit)
{
+ const char *argv[] = {
+ "diff-tree", "--pretty", "--stat", "--summary", "--cc", NULL
+ };
struct rev_info opt;
- /* diff-tree init */
+ git_config(git_diff_ui_config, NULL);
repo_init_revisions(r, &opt, prefix);
- git_config(git_diff_basic_config, NULL); /* no "diff" UI options */
- opt.abbrev = 0;
- opt.diff = 1;
- /* This is what "--pretty" does */
- opt.verbose_header = 1;
- opt.use_terminator = 0;
- opt.commit_format = CMIT_FMT_DEFAULT;
-
- /* diff-tree init */
- if (!opt.diffopt.output_format)
- opt.diffopt.output_format = DIFF_FORMAT_RAW;
-
- setup_revisions(0, NULL, &opt, NULL);
+ setup_revisions(ARRAY_SIZE(argv) - 1, argv, &opt, NULL);
log_tree_commit(&opt, commit);
}
* Otherwise, it will be either all non-SAMETREE commits or the single
* best commit, as chosen by `find_all`.
*/
-extern void find_bisection(struct commit_list **list, int *reaches, int *all,
- int find_all);
+void find_bisection(struct commit_list **list, int *reaches, int *all,
+ int find_all);
-extern struct commit_list *filter_skipped(struct commit_list *list,
- struct commit_list **tried,
- int show_all,
- int *count,
- int *skipped_first);
+struct commit_list *filter_skipped(struct commit_list *list,
+ struct commit_list **tried,
+ int show_all,
+ int *count,
+ int *skipped_first);
#define BISECT_SHOW_ALL (1<<0)
#define REV_LIST_QUIET (1<<1)
const char *header_prefix;
};
-extern int bisect_next_all(struct repository *r,
- const char *prefix,
- int no_checkout);
+int bisect_next_all(struct repository *r,
+ const char *prefix,
+ int no_checkout);
-extern int estimate_bisect_steps(int all);
+int estimate_bisect_steps(int all);
-extern void read_bisect_terms(const char **bad, const char **good);
+void read_bisect_terms(const char **bad, const char **good);
-extern int bisect_clean_state(void);
+int bisect_clean_state(void);
#endif
for (parents = work_tree->parents; parents; parents = parents->next) {
const struct object_id *commit_oid = &parents->item->object.oid;
struct object_id blob_oid;
- unsigned mode;
+ unsigned short mode;
if (!get_tree_entry(commit_oid, path, &blob_oid, &mode) &&
oid_object_info(r, &blob_oid, NULL) == OBJ_BLOB)
origin = make_origin(commit, path);
- ident = fmt_ident("Not Committed Yet", "not.committed.yet", NULL, 0);
+ ident = fmt_ident("Not Committed Yet", "not.committed.yet",
+ WANT_BLANK_IDENT, NULL, 0);
strbuf_addstr(&msg, "tree 0000000000000000000000000000000000000000\n");
for (parent = commit->parents; parent; parent = parent->next)
strbuf_addf(&msg, "parent %s\n",
}
for (i = 0; i < num_sg; i++) {
if (sg_origin[i]) {
- drop_origin_blob(sg_origin[i]);
+ if (!sg_origin[i]->suspects)
+ drop_origin_blob(sg_origin[i]);
blame_origin_decref(sg_origin[i]);
}
}
struct blame_entry *suspects;
mmfile_t file;
struct object_id blob_oid;
- unsigned mode;
+ unsigned short mode;
/* guilty gets set when shipping any suspects to the final
* blame list instead of other commits
*/
long start, long end,
struct blame_origin *o);
-extern struct blame_origin *get_blame_suspects(struct commit *commit);
+struct blame_origin *get_blame_suspects(struct commit *commit);
#endif /* BLAME_H */
#include "refs.h"
#include "refspec.h"
#include "remote.h"
+#include "sequencer.h"
#include "commit.h"
#include "worktree.h"
}
real_ref = NULL;
- if (get_oid(start_name, &oid)) {
+ if (get_oid_mb(start_name, &oid)) {
if (explicit_tracking) {
if (advice_set_upstream_failure) {
error(_(upstream_missing), start_name);
void remove_branch_state(struct repository *r)
{
- unlink(git_path_cherry_pick_head(r));
- unlink(git_path_revert_head(r));
+ sequencer_post_commit_cleanup(r);
unlink(git_path_merge_head(r));
unlink(git_path_merge_rr(r));
unlink(git_path_merge_msg(r));
* Return 1 if the named branch already exists; return 0 otherwise.
* Fill ref with the full refname for the branch.
*/
-extern int validate_branchname(const char *name, struct strbuf *ref);
+int validate_branchname(const char *name, struct strbuf *ref);
/*
* Check if a branch 'name' can be created as a new branch; die otherwise.
* Return 1 if the named branch already exists; return 0 otherwise.
* Fill ref with the full refname for the branch.
*/
-extern int validate_new_branchname(const char *name, struct strbuf *ref, int force);
+int validate_new_branchname(const char *name, struct strbuf *ref, int force);
/*
* Remove information about the state of working on the current
* Returns 0 on success.
*/
#define BRANCH_CONFIG_VERBOSE 01
-extern int install_branch_config(int flag, const char *local, const char *origin, const char *remote);
+int install_branch_config(int flag, const char *local, const char *origin, const char *remote);
/*
* Read branch description
*/
-extern int read_branch_desc(struct strbuf *, const char *branch_name);
+int read_branch_desc(struct strbuf *, const char *branch_name);
/*
* Check if a branch is checked out in the main worktree or any linked
* worktree and die (with a message describing its checkout location) if
* it is.
*/
-extern void die_if_checked_out(const char *branch, int ignore_current_worktree);
+void die_if_checked_out(const char *branch, int ignore_current_worktree);
/*
* Update all per-worktree HEADs pointing at the old ref to point the new ref.
* This will be used when renaming a branch. Returns 0 if successful, non-zero
* otherwise.
*/
-extern int replace_each_worktree_head_symref(const char *oldref, const char *newref,
- const char *logmsg);
+int replace_each_worktree_head_symref(const char *oldref, const char *newref,
+ const char *logmsg);
#endif
#define PRUNE_PACKED_DRY_RUN 01
#define PRUNE_PACKED_VERBOSE 02
-extern void prune_packed_objects(int);
+void prune_packed_objects(int);
struct fmt_merge_msg_opts {
unsigned add_title:1,
int shortlog_len;
};
-extern int fmt_merge_msg(struct strbuf *in, struct strbuf *out,
- struct fmt_merge_msg_opts *);
+int fmt_merge_msg(struct strbuf *in, struct strbuf *out,
+ struct fmt_merge_msg_opts *);
/**
* If a built-in has DELAY_PAGER_CONFIG set, the built-in should call this early
* You should most likely use a default of 0 or 1. "Punt" (-1) could be useful
* to be able to fall back to some historical compatibility name.
*/
-extern void setup_auto_pager(const char *cmd, int def);
+void setup_auto_pager(const char *cmd, int def);
-extern int is_builtin(const char *s);
+int is_builtin(const char *s);
-extern int cmd_add(int argc, const char **argv, const char *prefix);
-extern int cmd_am(int argc, const char **argv, const char *prefix);
-extern int cmd_annotate(int argc, const char **argv, const char *prefix);
-extern int cmd_apply(int argc, const char **argv, const char *prefix);
-extern int cmd_archive(int argc, const char **argv, const char *prefix);
-extern int cmd_bisect__helper(int argc, const char **argv, const char *prefix);
-extern int cmd_blame(int argc, const char **argv, const char *prefix);
-extern int cmd_branch(int argc, const char **argv, const char *prefix);
-extern int cmd_bundle(int argc, const char **argv, const char *prefix);
-extern int cmd_cat_file(int argc, const char **argv, const char *prefix);
-extern int cmd_checkout(int argc, const char **argv, const char *prefix);
-extern int cmd_checkout_index(int argc, const char **argv, const char *prefix);
-extern int cmd_check_attr(int argc, const char **argv, const char *prefix);
-extern int cmd_check_ignore(int argc, const char **argv, const char *prefix);
-extern int cmd_check_mailmap(int argc, const char **argv, const char *prefix);
-extern int cmd_check_ref_format(int argc, const char **argv, const char *prefix);
-extern int cmd_cherry(int argc, const char **argv, const char *prefix);
-extern int cmd_cherry_pick(int argc, const char **argv, const char *prefix);
-extern int cmd_clone(int argc, const char **argv, const char *prefix);
-extern int cmd_clean(int argc, const char **argv, const char *prefix);
-extern int cmd_column(int argc, const char **argv, const char *prefix);
-extern int cmd_commit(int argc, const char **argv, const char *prefix);
-extern int cmd_commit_graph(int argc, const char **argv, const char *prefix);
-extern int cmd_commit_tree(int argc, const char **argv, const char *prefix);
-extern int cmd_config(int argc, const char **argv, const char *prefix);
-extern int cmd_count_objects(int argc, const char **argv, const char *prefix);
-extern int cmd_credential(int argc, const char **argv, const char *prefix);
-extern int cmd_describe(int argc, const char **argv, const char *prefix);
-extern int cmd_diff_files(int argc, const char **argv, const char *prefix);
-extern int cmd_diff_index(int argc, const char **argv, const char *prefix);
-extern int cmd_diff(int argc, const char **argv, const char *prefix);
-extern int cmd_diff_tree(int argc, const char **argv, const char *prefix);
-extern int cmd_difftool(int argc, const char **argv, const char *prefix);
-extern int cmd_fast_export(int argc, const char **argv, const char *prefix);
-extern int cmd_fetch(int argc, const char **argv, const char *prefix);
-extern int cmd_fetch_pack(int argc, const char **argv, const char *prefix);
-extern int cmd_fmt_merge_msg(int argc, const char **argv, const char *prefix);
-extern int cmd_for_each_ref(int argc, const char **argv, const char *prefix);
-extern int cmd_format_patch(int argc, const char **argv, const char *prefix);
-extern int cmd_fsck(int argc, const char **argv, const char *prefix);
-extern int cmd_gc(int argc, const char **argv, const char *prefix);
-extern int cmd_get_tar_commit_id(int argc, const char **argv, const char *prefix);
-extern int cmd_grep(int argc, const char **argv, const char *prefix);
-extern int cmd_hash_object(int argc, const char **argv, const char *prefix);
-extern int cmd_help(int argc, const char **argv, const char *prefix);
-extern int cmd_index_pack(int argc, const char **argv, const char *prefix);
-extern int cmd_init_db(int argc, const char **argv, const char *prefix);
-extern int cmd_interpret_trailers(int argc, const char **argv, const char *prefix);
-extern int cmd_log(int argc, const char **argv, const char *prefix);
-extern int cmd_log_reflog(int argc, const char **argv, const char *prefix);
-extern int cmd_ls_files(int argc, const char **argv, const char *prefix);
-extern int cmd_ls_tree(int argc, const char **argv, const char *prefix);
-extern int cmd_ls_remote(int argc, const char **argv, const char *prefix);
-extern int cmd_mailinfo(int argc, const char **argv, const char *prefix);
-extern int cmd_mailsplit(int argc, const char **argv, const char *prefix);
-extern int cmd_merge(int argc, const char **argv, const char *prefix);
-extern int cmd_merge_base(int argc, const char **argv, const char *prefix);
-extern int cmd_merge_index(int argc, const char **argv, const char *prefix);
-extern int cmd_merge_ours(int argc, const char **argv, const char *prefix);
-extern int cmd_merge_file(int argc, const char **argv, const char *prefix);
-extern int cmd_merge_recursive(int argc, const char **argv, const char *prefix);
-extern int cmd_merge_tree(int argc, const char **argv, const char *prefix);
-extern int cmd_mktag(int argc, const char **argv, const char *prefix);
-extern int cmd_mktree(int argc, const char **argv, const char *prefix);
-extern int cmd_multi_pack_index(int argc, const char **argv, const char *prefix);
-extern int cmd_mv(int argc, const char **argv, const char *prefix);
-extern int cmd_name_rev(int argc, const char **argv, const char *prefix);
-extern int cmd_notes(int argc, const char **argv, const char *prefix);
-extern int cmd_pack_objects(int argc, const char **argv, const char *prefix);
-extern int cmd_pack_redundant(int argc, const char **argv, const char *prefix);
-extern int cmd_patch_id(int argc, const char **argv, const char *prefix);
-extern int cmd_prune(int argc, const char **argv, const char *prefix);
-extern int cmd_prune_packed(int argc, const char **argv, const char *prefix);
-extern int cmd_pull(int argc, const char **argv, const char *prefix);
-extern int cmd_push(int argc, const char **argv, const char *prefix);
-extern int cmd_range_diff(int argc, const char **argv, const char *prefix);
-extern int cmd_read_tree(int argc, const char **argv, const char *prefix);
-extern int cmd_rebase(int argc, const char **argv, const char *prefix);
-extern int cmd_rebase__interactive(int argc, const char **argv, const char *prefix);
-extern int cmd_receive_pack(int argc, const char **argv, const char *prefix);
-extern int cmd_reflog(int argc, const char **argv, const char *prefix);
-extern int cmd_remote(int argc, const char **argv, const char *prefix);
-extern int cmd_remote_ext(int argc, const char **argv, const char *prefix);
-extern int cmd_remote_fd(int argc, const char **argv, const char *prefix);
-extern int cmd_repack(int argc, const char **argv, const char *prefix);
-extern int cmd_rerere(int argc, const char **argv, const char *prefix);
-extern int cmd_reset(int argc, const char **argv, const char *prefix);
-extern int cmd_rev_list(int argc, const char **argv, const char *prefix);
-extern int cmd_rev_parse(int argc, const char **argv, const char *prefix);
-extern int cmd_revert(int argc, const char **argv, const char *prefix);
-extern int cmd_rm(int argc, const char **argv, const char *prefix);
-extern int cmd_send_pack(int argc, const char **argv, const char *prefix);
-extern int cmd_serve(int argc, const char **argv, const char *prefix);
-extern int cmd_shortlog(int argc, const char **argv, const char *prefix);
-extern int cmd_show(int argc, const char **argv, const char *prefix);
-extern int cmd_show_branch(int argc, const char **argv, const char *prefix);
-extern int cmd_show_index(int argc, const char **argv, const char *prefix);
-extern int cmd_status(int argc, const char **argv, const char *prefix);
-extern int cmd_stripspace(int argc, const char **argv, const char *prefix);
-extern int cmd_submodule__helper(int argc, const char **argv, const char *prefix);
-extern int cmd_symbolic_ref(int argc, const char **argv, const char *prefix);
-extern int cmd_tag(int argc, const char **argv, const char *prefix);
-extern int cmd_tar_tree(int argc, const char **argv, const char *prefix);
-extern int cmd_unpack_file(int argc, const char **argv, const char *prefix);
-extern int cmd_unpack_objects(int argc, const char **argv, const char *prefix);
-extern int cmd_update_index(int argc, const char **argv, const char *prefix);
-extern int cmd_update_ref(int argc, const char **argv, const char *prefix);
-extern int cmd_update_server_info(int argc, const char **argv, const char *prefix);
-extern int cmd_upload_archive(int argc, const char **argv, const char *prefix);
-extern int cmd_upload_archive_writer(int argc, const char **argv, const char *prefix);
-extern int cmd_upload_pack(int argc, const char **argv, const char *prefix);
-extern int cmd_var(int argc, const char **argv, const char *prefix);
-extern int cmd_verify_commit(int argc, const char **argv, const char *prefix);
-extern int cmd_verify_tag(int argc, const char **argv, const char *prefix);
-extern int cmd_version(int argc, const char **argv, const char *prefix);
-extern int cmd_whatchanged(int argc, const char **argv, const char *prefix);
-extern int cmd_worktree(int argc, const char **argv, const char *prefix);
-extern int cmd_write_tree(int argc, const char **argv, const char *prefix);
-extern int cmd_verify_pack(int argc, const char **argv, const char *prefix);
-extern int cmd_show_ref(int argc, const char **argv, const char *prefix);
-extern int cmd_pack_refs(int argc, const char **argv, const char *prefix);
-extern int cmd_replace(int argc, const char **argv, const char *prefix);
+int cmd_add(int argc, const char **argv, const char *prefix);
+int cmd_am(int argc, const char **argv, const char *prefix);
+int cmd_annotate(int argc, const char **argv, const char *prefix);
+int cmd_apply(int argc, const char **argv, const char *prefix);
+int cmd_archive(int argc, const char **argv, const char *prefix);
+int cmd_bisect__helper(int argc, const char **argv, const char *prefix);
+int cmd_blame(int argc, const char **argv, const char *prefix);
+int cmd_branch(int argc, const char **argv, const char *prefix);
+int cmd_bundle(int argc, const char **argv, const char *prefix);
+int cmd_cat_file(int argc, const char **argv, const char *prefix);
+int cmd_checkout(int argc, const char **argv, const char *prefix);
+int cmd_checkout_index(int argc, const char **argv, const char *prefix);
+int cmd_check_attr(int argc, const char **argv, const char *prefix);
+int cmd_check_ignore(int argc, const char **argv, const char *prefix);
+int cmd_check_mailmap(int argc, const char **argv, const char *prefix);
+int cmd_check_ref_format(int argc, const char **argv, const char *prefix);
+int cmd_cherry(int argc, const char **argv, const char *prefix);
+int cmd_cherry_pick(int argc, const char **argv, const char *prefix);
+int cmd_clone(int argc, const char **argv, const char *prefix);
+int cmd_clean(int argc, const char **argv, const char *prefix);
+int cmd_column(int argc, const char **argv, const char *prefix);
+int cmd_commit(int argc, const char **argv, const char *prefix);
+int cmd_commit_graph(int argc, const char **argv, const char *prefix);
+int cmd_commit_tree(int argc, const char **argv, const char *prefix);
+int cmd_config(int argc, const char **argv, const char *prefix);
+int cmd_count_objects(int argc, const char **argv, const char *prefix);
+int cmd_credential(int argc, const char **argv, const char *prefix);
+int cmd_describe(int argc, const char **argv, const char *prefix);
+int cmd_diff_files(int argc, const char **argv, const char *prefix);
+int cmd_diff_index(int argc, const char **argv, const char *prefix);
+int cmd_diff(int argc, const char **argv, const char *prefix);
+int cmd_diff_tree(int argc, const char **argv, const char *prefix);
+int cmd_difftool(int argc, const char **argv, const char *prefix);
+int cmd_fast_export(int argc, const char **argv, const char *prefix);
+int cmd_fetch(int argc, const char **argv, const char *prefix);
+int cmd_fetch_pack(int argc, const char **argv, const char *prefix);
+int cmd_fmt_merge_msg(int argc, const char **argv, const char *prefix);
+int cmd_for_each_ref(int argc, const char **argv, const char *prefix);
+int cmd_format_patch(int argc, const char **argv, const char *prefix);
+int cmd_fsck(int argc, const char **argv, const char *prefix);
+int cmd_gc(int argc, const char **argv, const char *prefix);
+int cmd_get_tar_commit_id(int argc, const char **argv, const char *prefix);
+int cmd_grep(int argc, const char **argv, const char *prefix);
+int cmd_hash_object(int argc, const char **argv, const char *prefix);
+int cmd_help(int argc, const char **argv, const char *prefix);
+int cmd_index_pack(int argc, const char **argv, const char *prefix);
+int cmd_init_db(int argc, const char **argv, const char *prefix);
+int cmd_interpret_trailers(int argc, const char **argv, const char *prefix);
+int cmd_log(int argc, const char **argv, const char *prefix);
+int cmd_log_reflog(int argc, const char **argv, const char *prefix);
+int cmd_ls_files(int argc, const char **argv, const char *prefix);
+int cmd_ls_tree(int argc, const char **argv, const char *prefix);
+int cmd_ls_remote(int argc, const char **argv, const char *prefix);
+int cmd_mailinfo(int argc, const char **argv, const char *prefix);
+int cmd_mailsplit(int argc, const char **argv, const char *prefix);
+int cmd_merge(int argc, const char **argv, const char *prefix);
+int cmd_merge_base(int argc, const char **argv, const char *prefix);
+int cmd_merge_index(int argc, const char **argv, const char *prefix);
+int cmd_merge_ours(int argc, const char **argv, const char *prefix);
+int cmd_merge_file(int argc, const char **argv, const char *prefix);
+int cmd_merge_recursive(int argc, const char **argv, const char *prefix);
+int cmd_merge_tree(int argc, const char **argv, const char *prefix);
+int cmd_mktag(int argc, const char **argv, const char *prefix);
+int cmd_mktree(int argc, const char **argv, const char *prefix);
+int cmd_multi_pack_index(int argc, const char **argv, const char *prefix);
+int cmd_mv(int argc, const char **argv, const char *prefix);
+int cmd_name_rev(int argc, const char **argv, const char *prefix);
+int cmd_notes(int argc, const char **argv, const char *prefix);
+int cmd_pack_objects(int argc, const char **argv, const char *prefix);
+int cmd_pack_redundant(int argc, const char **argv, const char *prefix);
+int cmd_patch_id(int argc, const char **argv, const char *prefix);
+int cmd_prune(int argc, const char **argv, const char *prefix);
+int cmd_prune_packed(int argc, const char **argv, const char *prefix);
+int cmd_pull(int argc, const char **argv, const char *prefix);
+int cmd_push(int argc, const char **argv, const char *prefix);
+int cmd_range_diff(int argc, const char **argv, const char *prefix);
+int cmd_read_tree(int argc, const char **argv, const char *prefix);
+int cmd_rebase(int argc, const char **argv, const char *prefix);
+int cmd_rebase__interactive(int argc, const char **argv, const char *prefix);
+int cmd_receive_pack(int argc, const char **argv, const char *prefix);
+int cmd_reflog(int argc, const char **argv, const char *prefix);
+int cmd_remote(int argc, const char **argv, const char *prefix);
+int cmd_remote_ext(int argc, const char **argv, const char *prefix);
+int cmd_remote_fd(int argc, const char **argv, const char *prefix);
+int cmd_repack(int argc, const char **argv, const char *prefix);
+int cmd_rerere(int argc, const char **argv, const char *prefix);
+int cmd_reset(int argc, const char **argv, const char *prefix);
+int cmd_rev_list(int argc, const char **argv, const char *prefix);
+int cmd_rev_parse(int argc, const char **argv, const char *prefix);
+int cmd_revert(int argc, const char **argv, const char *prefix);
+int cmd_rm(int argc, const char **argv, const char *prefix);
+int cmd_send_pack(int argc, const char **argv, const char *prefix);
+int cmd_shortlog(int argc, const char **argv, const char *prefix);
+int cmd_show(int argc, const char **argv, const char *prefix);
+int cmd_show_branch(int argc, const char **argv, const char *prefix);
+int cmd_show_index(int argc, const char **argv, const char *prefix);
+int cmd_status(int argc, const char **argv, const char *prefix);
+int cmd_stash(int argc, const char **argv, const char *prefix);
+int cmd_stripspace(int argc, const char **argv, const char *prefix);
+int cmd_submodule__helper(int argc, const char **argv, const char *prefix);
+int cmd_symbolic_ref(int argc, const char **argv, const char *prefix);
+int cmd_tag(int argc, const char **argv, const char *prefix);
+int cmd_tar_tree(int argc, const char **argv, const char *prefix);
+int cmd_unpack_file(int argc, const char **argv, const char *prefix);
+int cmd_unpack_objects(int argc, const char **argv, const char *prefix);
+int cmd_update_index(int argc, const char **argv, const char *prefix);
+int cmd_update_ref(int argc, const char **argv, const char *prefix);
+int cmd_update_server_info(int argc, const char **argv, const char *prefix);
+int cmd_upload_archive(int argc, const char **argv, const char *prefix);
+int cmd_upload_archive_writer(int argc, const char **argv, const char *prefix);
+int cmd_upload_pack(int argc, const char **argv, const char *prefix);
+int cmd_var(int argc, const char **argv, const char *prefix);
+int cmd_verify_commit(int argc, const char **argv, const char *prefix);
+int cmd_verify_tag(int argc, const char **argv, const char *prefix);
+int cmd_version(int argc, const char **argv, const char *prefix);
+int cmd_whatchanged(int argc, const char **argv, const char *prefix);
+int cmd_worktree(int argc, const char **argv, const char *prefix);
+int cmd_write_tree(int argc, const char **argv, const char *prefix);
+int cmd_verify_pack(int argc, const char **argv, const char *prefix);
+int cmd_show_ref(int argc, const char **argv, const char *prefix);
+int cmd_pack_refs(int argc, const char **argv, const char *prefix);
+int cmd_replace(int argc, const char **argv, const char *prefix);
#endif
}
for (i = 0; i < dir->nr; i++) {
- check_embedded_repo(dir->entries[i]->name);
if (add_file_to_index(&the_index, dir->entries[i]->name, flags)) {
if (!ignore_add_errors)
die(_("adding files failed"));
exit_status = 1;
+ } else {
+ check_embedded_repo(dir->entries[i]->name);
}
}
return exit_status;
cp.in = xopen(am_path(state, "rewritten"), O_RDONLY);
cp.stdout_to_stderr = 1;
+ cp.trace2_hook_name = "post-rewrite";
ret = run_command(&cp);
while (!strbuf_getline_lf(&sb, fp)) {
struct object_id from_obj, to_obj;
+ const char *p;
- if (sb.len != GIT_SHA1_HEXSZ * 2 + 1) {
+ if (sb.len != the_hash_algo->hexsz * 2 + 1) {
ret = error(invalid_line, sb.buf);
goto finish;
}
- if (get_oid_hex(sb.buf, &from_obj)) {
+ if (parse_oid_hex(sb.buf, &from_obj, &p)) {
ret = error(invalid_line, sb.buf);
goto finish;
}
- if (sb.buf[GIT_SHA1_HEXSZ] != ' ') {
+ if (*p != ' ') {
ret = error(invalid_line, sb.buf);
goto finish;
}
- if (get_oid_hex(sb.buf + GIT_SHA1_HEXSZ + 1, &to_obj)) {
+ if (get_oid_hex(p + 1, &to_obj)) {
ret = error(invalid_line, sb.buf);
goto finish;
}
* review them with extra care to spot mismerges.
*/
struct rev_info rev_info;
- const char *diff_filter_str = "--diff-filter=AM";
repo_init_revisions(the_repository, &rev_info, NULL);
rev_info.diffopt.output_format = DIFF_FORMAT_NAME_STATUS;
- diff_opt_parse(&rev_info.diffopt, &diff_filter_str, 1, rev_info.prefix);
+ rev_info.diffopt.filter |= diff_filter_bit('A');
+ rev_info.diffopt.filter |= diff_filter_bit('M');
add_pending_oid(&rev_info, "HEAD", &our_tree, 0);
diff_setup_done(&rev_info.diffopt);
run_diff_index(&rev_info, 1);
}
author = fmt_ident(state->author_name, state->author_email,
+ WANT_AUTHOR_IDENT,
state->ignore_date ? NULL : state->author_date,
IDENT_STRICT);
*opt_value = PATCH_FORMAT_HG;
else if (!strcmp(arg, "mboxrd"))
*opt_value = PATCH_FORMAT_MBOXRD;
+ /*
+ * Please update $__git_patchformat in git-completion.bash
+ * when you add new options
+ */
else
return error(_("Invalid value for --patch-format: %s"), arg);
return 0;
#include "object-store.h"
#include "blame.h"
#include "string-list.h"
+#include "refs.h"
static char blame_usage[] = N_("git blame [<options>] [<rev-opts>] [<rev>] [--] <file>");
* and are only included here to get included in the "-h"
* output:
*/
- { OPTION_LOWLEVEL_CALLBACK, 0, "indent-heuristic", NULL, NULL, N_("Use an experimental heuristic to improve diffs"), PARSE_OPT_NOARG, parse_opt_unknown_cb },
+ { OPTION_LOWLEVEL_CALLBACK, 0, "indent-heuristic", NULL, NULL, N_("Use an experimental heuristic to improve diffs"), PARSE_OPT_NOARG, NULL, 0, parse_opt_unknown_cb },
OPT_BIT(0, "minimal", &xdl_opts, N_("Spend extra cycles to find better match"), XDF_NEED_MINIMAL),
OPT_STRING('S', NULL, &revs_file, N_("file"), N_("Use revisions from <file> instead of calling git-rev-list")),
revs.disable_stdin = 1;
setup_revisions(argc, argv, &revs, NULL);
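+	/*
+	 * In a bare repository there is no working tree copy of the file to
+	 * blame, so when the command line named no revision, default to
+	 * blaming the file as of HEAD.
+	 */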
+ if (!revs.pending.nr && is_bare_repository()) {
+ struct commit *head_commit;
+ struct object_id head_oid;
+
+ if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING,
+ &head_oid, NULL) ||
+ !(head_commit = lookup_commit_reference_gently(revs.repo,
+ &head_oid, 1)))
+ die("no such ref: HEAD");
+
+ add_pending_object(&revs, &head_commit->object, "HEAD");
+ }
init_scoreboard(&sb);
sb.revs = &revs;
free(to_free);
}
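+/*
+ * Implementation of "git branch --show-current": print the short name of
+ * the branch HEAD is on, print nothing if HEAD is detached, and die if
+ * HEAD points at a ref outside of refs/heads/.
+ */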
+static void print_current_branch_name(void)
+{
+ int flags;
+ const char *refname = resolve_ref_unsafe("HEAD", 0, NULL, &flags);
+ const char *shortname;
+ if (!refname)
+ die(_("could not resolve HEAD"));
+ else if (!(flags & REF_ISSYMREF))
+ return;
+ else if (skip_prefix(refname, "refs/heads/", &shortname))
+ puts(shortname);
+ else
+ die(_("HEAD (%s) points outside of refs/heads/"), refname);
+}
+
static void reject_rebase_or_bisect_branch(const char *target)
{
struct worktree **worktrees = get_worktrees(0);
int cmd_branch(int argc, const char **argv, const char *prefix)
{
int delete = 0, rename = 0, copy = 0, force = 0, list = 0;
+ int show_current = 0;
int reflog = 0, edit_description = 0;
int quiet = 0, unset_upstream = 0;
const char *new_upstream = NULL;
OPT_BIT('c', "copy", ©, N_("copy a branch and its reflog"), 1),
OPT_BIT('C', NULL, ©, N_("copy a branch, even if target exists"), 2),
OPT_BOOL('l', "list", &list, N_("list branch names")),
+ OPT_BOOL(0, "show-current", &show_current, N_("show current branch name")),
OPT_BOOL(0, "create-reflog", &reflog, N_("create the branch's reflog")),
OPT_BOOL(0, "edit-description", &edit_description,
N_("edit the description for the branch")),
OPT_MERGED(&filter, N_("print only branches that are merged")),
OPT_NO_MERGED(&filter, N_("print only branches that are not merged")),
OPT_COLUMN(0, "column", &colopts, N_("list branches in columns")),
- OPT_CALLBACK(0 , "sort", sorting_tail, N_("key"),
- N_("field name to sort on"), &parse_opt_ref_sorting),
+ OPT_REF_SORT(sorting_tail),
{
OPTION_CALLBACK, 0, "points-at", &filter.points_at, N_("object"),
N_("print only branches of the object"), 0, parse_opt_object_name
argc = parse_options(argc, argv, prefix, options, builtin_branch_usage,
0);
- if (!delete && !rename && !copy && !edit_description && !new_upstream && !unset_upstream && argc == 0)
+ if (!delete && !rename && !copy && !edit_description && !new_upstream &&
+ !show_current && !unset_upstream && argc == 0)
list = 1;
if (filter.with_commit || filter.merge != REF_FILTER_MERGED_NONE || filter.points_at.nr ||
filter.no_commit)
list = 1;
- if (!!delete + !!rename + !!copy + !!new_upstream +
+ if (!!delete + !!rename + !!copy + !!new_upstream + !!show_current +
list + unset_upstream > 1)
usage_with_options(builtin_branch_usage, options);
if (!argc)
die(_("branch name required"));
return delete_branches(argc, argv, delete > 1, filter.kind, quiet);
+ } else if (show_current) {
+ print_current_branch_name();
+ return 0;
} else if (list) {
/* git branch --local also shows HEAD when it is detached */
if ((filter.kind & FILTER_REFS_BRANCHES) && filter.detached)
int ignore_other_worktrees;
int show_progress;
int count_checkout_paths;
+ int overlay_mode;
/*
* If new checkout options are added, skip_merge_working_tree
* should be updated accordingly.
return pos;
}
-static int check_stage(int stage, const struct cache_entry *ce, int pos)
+static int check_stage(int stage, const struct cache_entry *ce, int pos,
+ int overlay_mode)
{
while (pos < active_nr &&
!strcmp(active_cache[pos]->name, ce->name)) {
return 0;
pos++;
}
+ if (!overlay_mode)
+ return 0;
if (stage == 2)
return error(_("path '%s' does not have our version"), ce->name);
else
}
static int checkout_stage(int stage, const struct cache_entry *ce, int pos,
- const struct checkout *state, int *nr_checkouts)
+ const struct checkout *state, int *nr_checkouts,
+ int overlay_mode)
{
while (pos < active_nr &&
!strcmp(active_cache[pos]->name, ce->name)) {
NULL, nr_checkouts);
pos++;
}
+ if (!overlay_mode) {
+ unlink_entry(ce);
+ return 0;
+ }
if (stage == 2)
return error(_("path '%s' does not have our version"), ce->name);
else
return status;
}
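+/*
+ * Decide which index entries participate in "git checkout [<tree-ish>] --
+ * <pathspec>".  In overlay mode, entries that are absent from the given
+ * tree-ish are left alone; in no-overlay mode, matching entries that are
+ * absent from the tree-ish are flagged for removal from the index and the
+ * working tree.
+ */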
+static void mark_ce_for_checkout_overlay(struct cache_entry *ce,
+ char *ps_matched,
+ const struct checkout_opts *opts)
+{
+ ce->ce_flags &= ~CE_MATCHED;
+ if (!opts->ignore_skipworktree && ce_skip_worktree(ce))
+ return;
+ if (opts->source_tree && !(ce->ce_flags & CE_UPDATE))
+ /*
+ * "git checkout tree-ish -- path", but this entry
+ * is in the original index but is not in tree-ish
+ * or does not match the pathspec; it will not be
+ * checked out to the working tree. We will not do
+ * anything to this entry at all.
+ */
+ return;
+ /*
+ * Either this entry came from the tree-ish we are
+ * checking the paths out of, or we are checking out
+ * of the index.
+ *
+ * If it comes from the tree-ish, we already know it
+ * matches the pathspec and could just stamp
+ * CE_MATCHED to it from update_some(). But we still
+ * need ps_matched and read_tree_recursive (and
+ * eventually tree_entry_interesting) cannot fill
+ * ps_matched yet. Once it can, we can avoid calling
+ * match_pathspec() for _all_ entries when
+ * opts->source_tree != NULL.
+ */
+ if (ce_path_match(&the_index, ce, &opts->pathspec, ps_matched))
+ ce->ce_flags |= CE_MATCHED;
+}
+
+static void mark_ce_for_checkout_no_overlay(struct cache_entry *ce,
+ char *ps_matched,
+ const struct checkout_opts *opts)
+{
+ ce->ce_flags &= ~CE_MATCHED;
+ if (!opts->ignore_skipworktree && ce_skip_worktree(ce))
+ return;
+ if (ce_path_match(&the_index, ce, &opts->pathspec, ps_matched)) {
+ ce->ce_flags |= CE_MATCHED;
+ if (opts->source_tree && !(ce->ce_flags & CE_UPDATE))
+			/*
+			 * Not in overlay mode: the path matches the
+			 * pathspec but is not in the tree-ish, so it
+			 * should be removed from both the index and
+			 * the working tree.
+			 */
+ ce->ce_flags |= CE_REMOVE | CE_WT_REMOVE;
+ }
+}
+
static int checkout_paths(const struct checkout_opts *opts,
const char *revision)
{
struct lock_file lock_file = LOCK_INIT;
int nr_checkouts = 0, nr_unmerged = 0;
+ trace2_cmd_mode(opts->patch_mode ? "patch" : "path");
+
if (opts->track != BRANCH_TRACK_UNSPECIFIED)
die(_("'%s' cannot be used with updating paths"), "--track");
* Make sure all pathspecs participated in locating the paths
* to be checked out.
*/
- for (pos = 0; pos < active_nr; pos++) {
- struct cache_entry *ce = active_cache[pos];
- ce->ce_flags &= ~CE_MATCHED;
- if (!opts->ignore_skipworktree && ce_skip_worktree(ce))
- continue;
- if (opts->source_tree && !(ce->ce_flags & CE_UPDATE))
- /*
- * "git checkout tree-ish -- path", but this entry
- * is in the original index; it will not be checked
- * out to the working tree and it does not matter
- * if pathspec matched this entry. We will not do
- * anything to this entry at all.
- */
- continue;
- /*
- * Either this entry came from the tree-ish we are
- * checking the paths out of, or we are checking out
- * of the index.
- *
- * If it comes from the tree-ish, we already know it
- * matches the pathspec and could just stamp
- * CE_MATCHED to it from update_some(). But we still
- * need ps_matched and read_tree_recursive (and
- * eventually tree_entry_interesting) cannot fill
- * ps_matched yet. Once it can, we can avoid calling
- * match_pathspec() for _all_ entries when
- * opts->source_tree != NULL.
- */
- if (ce_path_match(&the_index, ce, &opts->pathspec, ps_matched))
- ce->ce_flags |= CE_MATCHED;
- }
+ for (pos = 0; pos < active_nr; pos++)
+ if (opts->overlay_mode)
+ mark_ce_for_checkout_overlay(active_cache[pos],
+ ps_matched,
+ opts);
+ else
+ mark_ce_for_checkout_no_overlay(active_cache[pos],
+ ps_matched,
+ opts);
- if (report_path_error(ps_matched, &opts->pathspec, opts->prefix)) {
+ if (report_path_error(ps_matched, &opts->pathspec)) {
free(ps_matched);
return 1;
}
if (opts->force) {
warning(_("path '%s' is unmerged"), ce->name);
} else if (opts->writeout_stage) {
- errs |= check_stage(opts->writeout_stage, ce, pos);
+ errs |= check_stage(opts->writeout_stage, ce, pos, opts->overlay_mode);
} else if (opts->merge) {
errs |= check_stages((1<<2) | (1<<3), ce, pos);
} else {
if (opts->writeout_stage)
errs |= checkout_stage(opts->writeout_stage,
ce, pos,
- &state, &nr_checkouts);
+ &state,
+ &nr_checkouts, opts->overlay_mode);
else if (opts->merge)
errs |= checkout_merged(pos, &state,
&nr_unmerged);
pos = skip_same_name(ce, pos) - 1;
}
}
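+	/*
+	 * In no-overlay mode, entries that were flagged CE_REMOVE are now
+	 * dropped from the index, and any directories left empty after
+	 * removing their files are cleaned up as well.
+	 */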
+ remove_marked_cache_entries(&the_index, 1);
+ remove_scheduled_dirs();
errs |= finish_delayed_checkout(&state, &nr_checkouts);
if (opts->count_checkout_paths) {
* opts->show_progress only impacts output so doesn't require a merge
*/
+ /*
+ * opts->overlay_mode cannot be used with switching branches so is
+ * not tested here
+ */
+
/*
* If we aren't creating a new branch any changes or updates will
* happen in the existing branch. Since that could only be updating
topts.initial_checkout = is_cache_unborn();
topts.update = 1;
topts.merge = 1;
- topts.gently = opts->merge && old_branch_info->commit;
+ topts.quiet = opts->merge && old_branch_info->commit;
topts.verbose_update = opts->show_progress;
topts.fn = twoway_merge;
if (opts->overwrite_ignore) {
*/
struct tree *result;
struct tree *work;
+ struct tree *old_tree;
struct merge_options o;
+ struct strbuf sb = STRBUF_INIT;
+
if (!opts->merge)
return 1;
*/
if (!old_branch_info->commit)
return 1;
+ old_tree = get_commit_tree(old_branch_info->commit);
+
+ if (repo_index_has_changes(the_repository, old_tree, &sb))
+ die(_("cannot continue with staged changes in "
+ "the following files:\n%s"), sb.buf);
+ strbuf_release(&sb);
+
+ if (repo_index_has_changes(the_repository,
+ get_commit_tree(old_branch_info->commit),
+ &sb))
+ warning(_("staged changes in the following files may be lost: %s"),
+ sb.buf);
+ strbuf_release(&sb);
/* Do more real merge */
ret = merge_trees(&o,
get_commit_tree(new_branch_info->commit),
work,
- get_commit_tree(old_branch_info->commit),
+ old_tree,
&result);
if (ret < 0)
exit(128);
void *path_to_free;
struct object_id rev;
int flag, writeout_error = 0;
+
+ trace2_cmd_mode("branch");
+
memset(&old_branch_info, 0, sizeof(old_branch_info));
old_branch_info.path = path_to_free = resolve_refdup("HEAD", 0, &rev, &flag);
if (old_branch_info.path)
int status;
struct strbuf branch_ref = STRBUF_INIT;
+ trace2_cmd_mode("unborn");
+
if (!opts->new_branch)
die(_("You are on a branch yet to be born"));
strbuf_addf(&branch_ref, "refs/heads/%s", opts->new_branch);
die(_("'%s' cannot be used with switching branches"),
"--patch");
+ if (!opts->overlay_mode)
+ die(_("'%s' cannot be used with switching branches"),
+ "--no-overlay");
+
if (opts->writeout_stage)
die(_("'%s' cannot be used with switching branches"),
"--ours/--theirs");
"checkout", "control recursive updating of submodules",
PARSE_OPT_OPTARG, option_parse_recurse_submodules_worktree_updater },
OPT_BOOL(0, "progress", &opts.show_progress, N_("force progress reporting")),
+ OPT_BOOL(0, "overlay", &opts.overlay_mode, N_("use overlay mode (default)")),
OPT_END(),
};
opts.overwrite_ignore = 1;
opts.prefix = prefix;
opts.show_progress = -1;
+ opts.overlay_mode = -1;
git_config(git_checkout_config, &opts);
if ((!!opts.new_branch + !!opts.new_branch_force + !!opts.new_orphan_branch) > 1)
die(_("-b, -B and --orphan are mutually exclusive"));
+ if (opts.overlay_mode == 1 && opts.patch_mode)
+ die(_("-p and --overlay are mutually exclusive"));
+
/*
* From here on, new_branch will contain the branch to be checked out,
* and new_branch_force and new_orphan_branch will tell us which one of
static int max_jobs = -1;
static struct string_list option_recurse_submodules = STRING_LIST_INIT_NODUP;
static struct list_objects_filter_options filter_options;
+static struct string_list server_options = STRING_LIST_INIT_NODUP;
static int recurse_submodules_cb(const struct option *opt,
const char *arg, int unset)
N_("don't use local hardlinks, always copy")),
OPT_BOOL('s', "shared", &option_shared,
N_("setup as shared repository")),
- { OPTION_CALLBACK, 0, "recursive", &option_recurse_submodules,
- N_("pathspec"), N_("initialize submodules in the clone"),
- PARSE_OPT_OPTARG | PARSE_OPT_HIDDEN, recurse_submodules_cb,
- (intptr_t)"." },
+ OPT_ALIAS(0, "recursive", "recurse-submodules"),
{ OPTION_CALLBACK, 0, "recurse-submodules", &option_recurse_submodules,
N_("pathspec"), N_("initialize submodules in the clone"),
PARSE_OPT_OPTARG, recurse_submodules_cb, (intptr_t)"." },
N_("separate git dir from working tree")),
OPT_STRING_LIST('c', "config", &option_config, N_("key=value"),
N_("set config inside the new repository")),
+ OPT_STRING_LIST(0, "server-option", &server_options,
+ N_("server-specific"), N_("option to transmit")),
OPT_SET_INT('4', "ipv4", &family, N_("use IPv4 addresses only"),
TRANSPORT_FAMILY_IPV4),
OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
const char *branch_top,
const char *msg,
struct transport *transport,
- int check_connectivity)
+ int check_connectivity,
+ int check_refs_only)
{
const struct ref *rm = mapped_refs;
opt.transport = transport;
opt.progress = transport->progress;
+ opt.check_refs_only = !!check_refs_only;
if (check_connected(iterate_ref_map, &rm, &opt))
die(_("remote did not send all necessary objects"));
transport_set_option(transport, TRANS_OPT_UPLOADPACK,
option_upload_pack);
+ if (server_options.nr)
+ transport->server_options = &server_options;
+
if (filter_options.choice) {
struct strbuf expanded_filter_spec = STRBUF_INIT;
expand_list_objects_filter_spec(&filter_options,
update_remote_refs(refs, mapped_refs, remote_head_points_at,
branch_top.buf, reflog_msg.buf, transport,
- !is_local);
+ !is_local, filter_options.choice);
update_head(our_head_points_at, remote_head, reflog_msg.buf);
{
struct commit_graph *graph = NULL;
char *graph_name;
+ int open_ok;
+ int fd;
+ struct stat st;
static struct option builtin_commit_graph_verify_options[] = {
OPT_STRING(0, "object-dir", &opts.obj_dir,
opts.obj_dir = get_object_directory();
graph_name = get_commit_graph_filename(opts.obj_dir);
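+	/*
+	 * A commit-graph file that does not exist is not an error for
+	 * "verify" (there is nothing to check); failing to open it for any
+	 * other reason, or failing to parse it, is.
+	 */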
- graph = load_commit_graph_one(graph_name);
+ open_ok = open_commit_graph(graph_name, &fd, &st);
+ if (!open_ok && errno == ENOENT)
+ return 0;
+ if (!open_ok)
+ die_errno(_("Could not open commit-graph '%s'"), graph_name);
+ graph = load_commit_graph_one_fd_st(fd, &st);
FREE_AND_NULL(graph_name);
if (!graph)
- return 0;
+ return 1;
UNLEAK(graph);
return verify_commit_graph(the_repository, graph);
{
struct commit_graph *graph = NULL;
char *graph_name;
+ int open_ok;
+ int fd;
+ struct stat st;
static struct option builtin_commit_graph_read_options[] = {
OPT_STRING(0, "object-dir", &opts.obj_dir,
opts.obj_dir = get_object_directory();
graph_name = get_commit_graph_filename(opts.obj_dir);
- graph = load_commit_graph_one(graph_name);
+ open_ok = open_commit_graph(graph_name, &fd, &st);
+ if (!open_ok)
+ die_errno(_("Could not open commit-graph '%s'"), graph_name);
+
+ graph = load_commit_graph_one_fd_st(fd, &st);
if (!graph)
- die("graph file %s does not exist", graph_name);
+ return 1;
FREE_AND_NULL(graph_name);
#include "builtin.h"
#include "utf8.h"
#include "gpg-interface.h"
+#include "parse-options.h"
-static const char commit_tree_usage[] = "git commit-tree [(-p <sha1>)...] [-S[<keyid>]] [-m <message>] [-F <file>] <sha1>";
+static const char * const commit_tree_usage[] = {
+ N_("git commit-tree [(-p <parent>)...] [-S[<keyid>]] [(-m <message>)...] "
+ "[(-F <file>)...] <tree>"),
+ NULL
+};
static const char *sign_commit;
struct commit_list *parents;
for (parents = *parents_p; parents; parents = parents->next) {
if (parents->item == parent) {
- error("duplicate parent %s ignored", oid_to_hex(oid));
+ error(_("duplicate parent %s ignored"), oid_to_hex(oid));
return;
}
parents_p = &parents->next;
return git_default_config(var, value, cb);
}
+static int parse_parent_arg_callback(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct object_id oid;
+ struct commit_list **parents = opt->value;
+
+ BUG_ON_OPT_NEG_NOARG(unset, arg);
+
+ if (get_oid_commit(arg, &oid))
+ die(_("not a valid object name %s"), arg);
+
+ assert_oid_type(&oid, OBJ_COMMIT);
+ new_parent(lookup_commit(the_repository, &oid), parents);
+ return 0;
+}
+
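+/*
+ * -m and -F accumulate into the same message buffer in command-line
+ * order; a separating newline is added between successive values, so the
+ * two options can be repeated and mixed.
+ */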
+static int parse_message_arg_callback(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct strbuf *buf = opt->value;
+
+ BUG_ON_OPT_NEG_NOARG(unset, arg);
+
+ if (buf->len)
+ strbuf_addch(buf, '\n');
+ strbuf_addstr(buf, arg);
+ strbuf_complete_line(buf);
+
+ return 0;
+}
+
+static int parse_file_arg_callback(const struct option *opt,
+ const char *arg, int unset)
+{
+ int fd;
+ struct strbuf *buf = opt->value;
+
+ BUG_ON_OPT_NEG_NOARG(unset, arg);
+
+ if (buf->len)
+ strbuf_addch(buf, '\n');
+ if (!strcmp(arg, "-"))
+ fd = 0;
+ else {
+ fd = open(arg, O_RDONLY);
+ if (fd < 0)
+ die_errno(_("git commit-tree: failed to open '%s'"), arg);
+ }
+ if (strbuf_read(buf, fd, 0) < 0)
+ die_errno(_("git commit-tree: failed to read '%s'"), arg);
+ if (fd && close(fd))
+ die_errno(_("git commit-tree: failed to close '%s'"), arg);
+
+ return 0;
+}
+
int cmd_commit_tree(int argc, const char **argv, const char *prefix)
{
- int i, got_tree = 0;
+ static struct strbuf buffer = STRBUF_INIT;
struct commit_list *parents = NULL;
struct object_id tree_oid;
struct object_id commit_oid;
- struct strbuf buffer = STRBUF_INIT;
+
+ struct option options[] = {
+ { OPTION_CALLBACK, 'p', NULL, &parents, N_("parent"),
+ N_("id of a parent commit object"), PARSE_OPT_NONEG,
+ parse_parent_arg_callback },
+ { OPTION_CALLBACK, 'm', NULL, &buffer, N_("message"),
+ N_("commit message"), PARSE_OPT_NONEG,
+ parse_message_arg_callback },
+ { OPTION_CALLBACK, 'F', NULL, &buffer, N_("file"),
+ N_("read commit log message from file"), PARSE_OPT_NONEG,
+ parse_file_arg_callback },
+ { OPTION_STRING, 'S', "gpg-sign", &sign_commit, N_("key-id"),
+ N_("GPG sign commit"), PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
+ OPT_END()
+ };
git_config(commit_tree_config, NULL);
if (argc < 2 || !strcmp(argv[1], "-h"))
- usage(commit_tree_usage);
-
- for (i = 1; i < argc; i++) {
- const char *arg = argv[i];
- if (!strcmp(arg, "-p")) {
- struct object_id oid;
- if (argc <= ++i)
- usage(commit_tree_usage);
- if (get_oid_commit(argv[i], &oid))
- die("Not a valid object name %s", argv[i]);
- assert_oid_type(&oid, OBJ_COMMIT);
- new_parent(lookup_commit(the_repository, &oid),
- &parents);
- continue;
- }
+ usage_with_options(commit_tree_usage, options);
- if (!strcmp(arg, "--gpg-sign")) {
- sign_commit = "";
- continue;
- }
+ argc = parse_options(argc, argv, prefix, options, commit_tree_usage, 0);
- if (skip_prefix(arg, "-S", &sign_commit) ||
- skip_prefix(arg, "--gpg-sign=", &sign_commit))
- continue;
+ if (argc != 1)
+ die(_("must give exactly one tree"));
- if (!strcmp(arg, "--no-gpg-sign")) {
- sign_commit = NULL;
- continue;
- }
-
- if (!strcmp(arg, "-m")) {
- if (argc <= ++i)
- usage(commit_tree_usage);
- if (buffer.len)
- strbuf_addch(&buffer, '\n');
- strbuf_addstr(&buffer, argv[i]);
- strbuf_complete_line(&buffer);
- continue;
- }
-
- if (!strcmp(arg, "-F")) {
- int fd;
-
- if (argc <= ++i)
- usage(commit_tree_usage);
- if (buffer.len)
- strbuf_addch(&buffer, '\n');
- if (!strcmp(argv[i], "-"))
- fd = 0;
- else {
- fd = open(argv[i], O_RDONLY);
- if (fd < 0)
- die_errno("git commit-tree: failed to open '%s'",
- argv[i]);
- }
- if (strbuf_read(&buffer, fd, 0) < 0)
- die_errno("git commit-tree: failed to read '%s'",
- argv[i]);
- if (fd && close(fd))
- die_errno("git commit-tree: failed to close '%s'",
- argv[i]);
- continue;
- }
-
- if (get_oid_tree(arg, &tree_oid))
- die("Not a valid object name %s", arg);
- if (got_tree)
- die("Cannot give more than one trees");
- got_tree = 1;
- }
+ if (get_oid_tree(argv[0], &tree_oid))
+ die(_("not a valid object name %s"), argv[0]);
if (!buffer.len) {
if (strbuf_read(&buffer, 0, 0) < 0)
- die_errno("git commit-tree: failed to read");
+ die_errno(_("git commit-tree: failed to read"));
}
if (commit_tree(buffer.buf, buffer.len, &tree_oid, parents, &commit_oid,
* and return the paths that match the given pattern in list.
*/
static int list_paths(struct string_list *list, const char *with_tree,
- const char *prefix, const struct pathspec *pattern)
+ const struct pathspec *pattern)
{
int i, ret;
char *m;
item->util = item; /* better a valid pointer than a fake one */
}
- ret = report_path_error(m, pattern, prefix);
+ ret = report_path_error(m, pattern);
free(m);
return ret;
}
die(_("cannot do a partial commit during a cherry-pick."));
}
- if (list_paths(&partial, !current_head ? NULL : "HEAD", prefix, &pathspec))
+ if (list_paths(&partial, !current_head ? NULL : "HEAD", &pathspec))
exit(1);
discard_cache();
set_ident_var(&date, strbuf_detach(&date_buf, NULL));
}
- strbuf_addstr(author_ident, fmt_ident(name, email, date, IDENT_STRICT));
+ strbuf_addstr(author_ident, fmt_ident(name, email, WANT_AUTHOR_IDENT, date,
+ IDENT_STRICT));
assert_split_ident(&author, author_ident);
export_one("GIT_AUTHOR_NAME", author.name_begin, author.name_end, 0);
export_one("GIT_AUTHOR_EMAIL", author.mail_begin, author.mail_end, 0);
const char *hook_arg2 = NULL;
int clean_message_contents = (cleanup_mode != COMMIT_MSG_CLEANUP_NONE);
int old_display_comment_prefix;
+ int merge_contains_scissors = 0;
/* This checks and barfs if author is badly specified */
determine_author_info(author_ident);
strbuf_addbuf(&sb, &message);
hook_arg1 = "message";
} else if (!stat(git_path_merge_msg(the_repository), &statbuf)) {
+ size_t merge_msg_start;
+
/*
* prepend SQUASH_MSG here if it exists and a
* "merge --squash" was originally performed
hook_arg1 = "squash";
} else
hook_arg1 = "merge";
+
+ merge_msg_start = sb.len;
if (strbuf_read_file(&sb, git_path_merge_msg(the_repository), 0) < 0)
die_errno(_("could not read MERGE_MSG"));
+
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS &&
+ wt_status_locate_end(sb.buf + merge_msg_start,
+ sb.len - merge_msg_start) <
+ sb.len - merge_msg_start)
+ merge_contains_scissors = 1;
} else if (!stat(git_path_squash_msg(the_repository), &statbuf)) {
if (strbuf_read_file(&sb, git_path_squash_msg(the_repository), 0) < 0)
die_errno(_("could not read SQUASH_MSG"));
struct ident_split ci, ai;
if (whence != FROM_COMMIT) {
- if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS)
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS &&
+ !merge_contains_scissors)
wt_status_add_cut_line(s->fp);
status_printf_ln(s, GIT_COLOR_NORMAL,
whence == FROM_MERGE
_("Please enter the commit message for your changes."
" Lines starting\nwith '%c' will be ignored, and an empty"
" message aborts the commit.\n"), comment_line_char);
- else if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS &&
- whence == FROM_COMMIT)
- wt_status_add_cut_line(s->fp);
- else /* COMMIT_MSG_CLEANUP_SPACE, that is. */
+ else if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS) {
+ if (whence == FROM_COMMIT && !merge_contains_scissors)
+ wt_status_add_cut_line(s->fp);
+ } else /* COMMIT_MSG_CLEANUP_SPACE, that is. */
status_printf(s, GIT_COLOR_NORMAL,
_("Please enter the commit message for your changes."
" Lines starting\n"
s->show_untracked_files = SHOW_NORMAL_UNTRACKED_FILES;
else if (!strcmp(untracked_files_arg, "all"))
s->show_untracked_files = SHOW_ALL_UNTRACKED_FILES;
+ /*
+ * Please update $__git_untracked_file_modes in
+ * git-completion.bash when you add new options
+ */
else
die(_("Invalid untracked files mode '%s'"), untracked_files_arg);
}
die(_("Only one of --include/--only/--all/--interactive/--patch can be used."));
if (argc == 0 && (also || (only && !amend && !allow_empty)))
die(_("No paths with --include/--only does not make sense."));
- if (!cleanup_arg || !strcmp(cleanup_arg, "default"))
- cleanup_mode = use_editor ? COMMIT_MSG_CLEANUP_ALL :
- COMMIT_MSG_CLEANUP_SPACE;
- else if (!strcmp(cleanup_arg, "verbatim"))
- cleanup_mode = COMMIT_MSG_CLEANUP_NONE;
- else if (!strcmp(cleanup_arg, "whitespace"))
- cleanup_mode = COMMIT_MSG_CLEANUP_SPACE;
- else if (!strcmp(cleanup_arg, "strip"))
- cleanup_mode = COMMIT_MSG_CLEANUP_ALL;
- else if (!strcmp(cleanup_arg, "scissors"))
- cleanup_mode = use_editor ? COMMIT_MSG_CLEANUP_SCISSORS :
- COMMIT_MSG_CLEANUP_SPACE;
- else
- die(_("Invalid cleanup mode %s"), cleanup_arg);
+ cleanup_mode = get_cleanup_mode(cleanup_arg, use_editor);
handle_untracked_files_arg(s);
if (all && argc > 0)
- die(_("Paths with -a does not make sense."));
+ die(_("paths '%s ...' with -a does not make sense"),
+ argv[0]);
if (status_format != STATUS_FORMAT_NONE)
dry_run = 1;
OPT_BOOL('s', "signoff", &signoff, N_("add Signed-off-by:")),
OPT_FILENAME('t', "template", &template_file, N_("use specified template file")),
OPT_BOOL('e', "edit", &edit_flag, N_("force edit of commit")),
- OPT_STRING(0, "cleanup", &cleanup_arg, N_("default"), N_("how to strip spaces and #comments from message")),
+ OPT_CLEANUP(&cleanup_arg),
OPT_BOOL(0, "status", &include_status, N_("include status in commit message template")),
{ OPTION_STRING, 'S', "gpg-sign", &sign_commit, N_("key-id"),
N_("GPG sign commit"), PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
die(_("could not read commit message: %s"), strerror(saved_errno));
}
- if (verbose || /* Truncate the message just before the diff, if any. */
- cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS)
- strbuf_setlen(&sb, wt_status_locate_end(sb.buf, sb.len));
- if (cleanup_mode != COMMIT_MSG_CLEANUP_NONE)
- strbuf_stripspace(&sb, cleanup_mode == COMMIT_MSG_CLEANUP_ALL);
+ cleanup_message(&sb, cleanup_mode, verbose);
if (message_is_empty(&sb, cleanup_mode) && !allow_empty_message) {
rollback_index_files();
die("%s", err.buf);
}
- unlink(git_path_cherry_pick_head(the_repository));
- unlink(git_path_revert_head(the_repository));
+ sequencer_post_commit_cleanup(the_repository);
unlink(git_path_merge_head(the_repository));
unlink(git_path_merge_msg(the_repository));
unlink(git_path_merge_mode(the_repository));
}
static const char diff_tree_usage[] =
-"git diff-tree [--stdin] [-m] [-c] [--cc] [-s] [-v] [--pretty] [-t] [-r] [--root] "
+"git diff-tree [--stdin] [-m] [-c | --cc] [-s] [-v] [--pretty] [-t] [-r] [--root] "
"[<common-diff-options>] <tree-ish> [<tree-ish>] [<path>...]\n"
" -r diff recursively\n"
+" -c show combined diff for merge commits\n"
+" --cc show combined diff for merge commits removing uninteresting hunks\n"
+" --combined-all-paths\n"
+" show name of file in all parents for combined diffs\n"
" --root include the initial commit as diff against /dev/null\n"
COMMON_DIFF_OPTIONS_HELP;
repo_init_revisions(the_repository, &rev, prefix);
- if (no_index && argc != i + 2) {
- if (no_index == DIFF_NO_INDEX_IMPLICIT) {
- /*
- * There was no --no-index and there were not two
- * paths. It is possible that the user intended
- * to do an inside-repository operation.
- */
- fprintf(stderr, "Not a git repository\n");
- fprintf(stderr,
- "To compare two paths outside a working tree:\n");
- }
- /* Give the usage message for non-repository usage and exit. */
- usagef("git diff %s <path> <path>",
- no_index == DIFF_NO_INDEX_EXPLICIT ?
- "--no-index" : "[--no-index]");
-
- }
- if (no_index)
- /* If this is a no-index diff, just run it and exit there. */
- diff_no_index(the_repository, &rev, argc, argv);
-
- /* Otherwise, we are doing the usual "git" diff */
- rev.diffopt.skip_stat_unmatch = !!diff_auto_refresh_index;
-
- /* Scale to real terminal size and respect statGraphWidth config */
+ /* Set up defaults that will apply to both no-index and regular diffs. */
rev.diffopt.stat_width = -1;
rev.diffopt.stat_graph_width = -1;
-
- /* Default to let external and textconv be used */
rev.diffopt.flags.allow_external = 1;
rev.diffopt.flags.allow_textconv = 1;
+ /* If this is a no-index diff, just run it and exit there. */
+ if (no_index)
+ exit(diff_no_index(&rev, no_index == DIFF_NO_INDEX_IMPLICIT,
+ argc, argv));
+
+ /*
+ * Otherwise, we are doing the usual "git" diff; set up any
+ * further defaults that apply to regular diffs.
+ */
+ rev.diffopt.skip_stat_unmatch = !!diff_auto_refresh_index;
+
/*
* Default to intent-to-add entries invisible in the
* index. This makes them show up as new files in diff-files
#include "object-store.h"
#include "dir.h"
-static char *diff_gui_tool;
static int trust_exit_code;
static const char *const builtin_difftool_usage[] = {
static int difftool_config(const char *var, const char *value, void *cb)
{
- if (!strcmp(var, "diff.guitool")) {
- diff_gui_tool = xstrdup(value);
- return 0;
- }
-
if (!strcmp(var, "difftool.trustexitcode")) {
trust_exit_code = git_config_bool(var, value);
return 0;
*mode2 = (int)strtol(p + 1, &p, 8);
if (*p != ' ')
return error("expected ' ', got '%c'", *p);
- if (get_oid_hex(++p, oid1))
- return error("expected object ID, got '%s'", p + 1);
- p += GIT_SHA1_HEXSZ;
+ if (parse_oid_hex(++p, oid1, (const char **)&p))
+ return error("expected object ID, got '%s'", p);
if (*p != ' ')
return error("expected ' ', got '%c'", *p);
- if (get_oid_hex(++p, oid2))
- return error("expected object ID, got '%s'", p + 1);
- p += GIT_SHA1_HEXSZ;
+ if (parse_oid_hex(++p, oid2, (const char **)&p))
+ return error("expected object ID, got '%s'", p);
if (*p != ' ')
return error("expected ' ', got '%c'", *p);
*status = *++p;
int cmd_difftool(int argc, const char **argv, const char *prefix)
{
int use_gui_tool = 0, dir_diff = 0, prompt = -1, symlinks = 0,
- tool_help = 0;
+ tool_help = 0, no_index = 0;
static char *difftool_cmd = NULL, *extcmd = NULL;
struct option builtin_difftool_options[] = {
OPT_BOOL('g', "gui", &use_gui_tool,
"tool returns a non - zero exit code")),
OPT_STRING('x', "extcmd", &extcmd, N_("command"),
N_("specify a custom command for viewing diffs")),
+ OPT_ARGUMENT("no-index", &no_index, N_("passed to `diff`")),
OPT_END()
};
if (tool_help)
return print_tool_help();
- /* NEEDSWORK: once we no longer spawn anything, remove this */
- setenv(GIT_DIR_ENVIRONMENT, absolute_path(get_git_dir()), 1);
- setenv(GIT_WORK_TREE_ENVIRONMENT, absolute_path(get_git_work_tree()), 1);
+ if (!no_index && !startup_info->have_repository)
+ die(_("difftool requires worktree or --no-index"));
+
+	if (!no_index) {
+ setup_work_tree();
+ setenv(GIT_DIR_ENVIRONMENT, absolute_path(get_git_dir()), 1);
+ setenv(GIT_WORK_TREE_ENVIRONMENT, absolute_path(get_git_work_tree()), 1);
+ }
+
+ if (use_gui_tool + !!difftool_cmd + !!extcmd > 1)
+ die(_("--gui, --tool and --extcmd are mutually exclusive"));
- if (use_gui_tool && diff_gui_tool && *diff_gui_tool)
- setenv("GIT_DIFF_TOOL", diff_gui_tool, 1);
+ if (use_gui_tool)
+ setenv("GIT_MERGETOOL_GUI", "true", 1);
else if (difftool_cmd) {
if (*difftool_cmd)
setenv("GIT_DIFF_TOOL", difftool_cmd, 1);
BUG("unknown protocol version");
}
- ref = fetch_pack(&args, fd, conn, ref, dest, sought, nr_sought,
+ ref = fetch_pack(&args, fd, ref, sought, nr_sought,
&shallow, pack_lockfile_ptr, version);
if (pack_lockfile) {
printf("lock %s\n", pack_lockfile);
sigchain_push_common(unlock_pack_on_signal);
atexit(unlock_pack);
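+	/*
+	 * Ignore SIGPIPE while talking to the remote so that an early
+	 * hang-up by the other side is reported as an ordinary error
+	 * instead of killing the whole process.
+	 */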
+ sigchain_push(SIGPIPE, SIG_IGN);
exit_code = do_fetch(gtransport, &rs);
+ sigchain_pop(SIGPIPE);
refspec_clear(&rs);
transport_disconnect(gtransport);
gtransport = NULL;
OPT_INTEGER( 0 , "count", &maxcount, N_("show only <n> matched refs")),
OPT_STRING( 0 , "format", &format.format, N_("format"), N_("format to use for the output")),
OPT__COLOR(&format.use_color, N_("respect format colors")),
- OPT_CALLBACK(0 , "sort", sorting_tail, N_("key"),
- N_("field name to sort on"), &parse_opt_ref_sorting),
+ OPT_REF_SORT(sorting_tail),
OPT_CALLBACK(0, "points-at", &filter.points_at,
N_("object"), N_("print only refs which points at the given object"),
parse_opt_object_name),
return 0;
}
+static void mark_unreachable_referents(const struct object_id *oid)
+{
+ struct fsck_options options = FSCK_OPTIONS_DEFAULT;
+ struct object *obj = lookup_object(the_repository, oid->hash);
+
+ if (!obj || !(obj->flags & HAS_OBJ))
+ return; /* not part of our original set */
+ if (obj->flags & REACHABLE)
+ return; /* reachable objects already traversed */
+
+ /*
+ * Avoid passing OBJ_NONE to fsck_walk, which will parse the object
+ * (and we want to avoid parsing blobs).
+ */
+ if (obj->type == OBJ_NONE) {
+ enum object_type type = oid_object_info(the_repository,
+ &obj->oid, NULL);
+ if (type > 0)
+ object_as_type(the_repository, obj, type, 0);
+ }
+
+ options.walk = mark_used;
+ fsck_walk(obj, NULL, &options);
+}
+
+static int mark_loose_unreachable_referents(const struct object_id *oid,
+ const char *path,
+ void *data)
+{
+ mark_unreachable_referents(oid);
+ return 0;
+}
+
+static int mark_packed_unreachable_referents(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *data)
+{
+ mark_unreachable_referents(oid);
+ return 0;
+}
+
/*
* Check a single reachable object
*/
/* Traverse the pending reachable objects */
traverse_reachable();
+ /*
+ * With --connectivity-only, we won't have actually opened and marked
+ * unreachable objects with USED. Do that now to make --dangling, etc
+ * accurate.
+ */
+ if (connectivity_only && (show_dangling || write_lost_and_found)) {
+ /*
+ * Even though we already have a "struct object" for each of
+ * these in memory, we must not iterate over the internal
+ * object hash as we do below. Our loop would potentially
+ * resize the hash, making our iteration invalid.
+ *
+ * Instead, we'll just go back to the source list of objects,
+ * and ignore any that weren't present in our earlier
+ * traversal.
+ */
+ for_each_loose_object(mark_loose_unreachable_referents, NULL, 0);
+ for_each_packed_object(mark_packed_unreachable_referents, NULL, 0);
+ }
+
/* Look up all the requirements, warn about missing objects.. */
max = get_max_object_index();
if (verbose)
raise(signo);
}
+static int gc_config_is_timestamp_never(const char *var)
+{
+ const char *value;
+ timestamp_t expire;
+
+ if (!git_config_get_value(var, &value) && value) {
+ if (parse_expiry_date(value, &expire))
+ die(_("failed to parse '%s' value '%s'"), var, value);
+ return expire == 0;
+ }
+ return 0;
+}
+
static void gc_config(void)
{
const char *value;
pack_refs = git_config_bool("gc.packrefs", value);
}
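+	/*
+	 * If both gc.reflogExpire and gc.reflogExpireUnreachable are set to
+	 * "never" (an expiry time of 0), "git reflog expire" would be a
+	 * no-op, so skip spawning it altogether.
+	 */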
+ if (gc_config_is_timestamp_never("gc.reflogexpire") &&
+ gc_config_is_timestamp_never("gc.reflogexpireunreachable"))
+ prune_reflogs = 0;
+
git_config_get_int("gc.aggressivewindow", &aggressive_window);
git_config_get_int("gc.aggressivedepth", &aggressive_depth);
git_config_get_int("gc.auto", &gc_auto_threshold);
int auto_threshold;
int num_loose = 0;
int needed = 0;
-
- if (gc_auto_threshold <= 0)
- return 0;
+ const unsigned hexsz_loose = the_hash_algo->hexsz - 2;
dir = opendir(git_path("objects/17"));
if (!dir)
auto_threshold = DIV_ROUND_UP(gc_auto_threshold, 256);
while ((ent = readdir(dir)) != NULL) {
- if (strspn(ent->d_name, "0123456789abcdef") != 38 ||
- ent->d_name[38] != '\0')
+ if (strspn(ent->d_name, "0123456789abcdef") != hexsz_loose ||
+ ent->d_name[hexsz_loose] != '\0')
continue;
if (++num_loose > auto_threshold) {
needed = 1;
static void gc_before_repack(void)
{
+ /*
+ * We may be called twice, as both the pre- and
+ * post-daemonized phases will call us, but running these
+ * commands more than once is pointless and wasteful.
+ */
+ static int done = 0;
+ if (done++)
+ return;
+
if (pack_refs && run_command_v_opt(pack_refs_cmd.argv, RUN_GIT_CMD))
die(FAILED_RUN, pack_refs_cmd.argv[0]);
if (prune_reflogs && run_command_v_opt(reflog.argv, RUN_GIT_CMD))
die(FAILED_RUN, reflog.argv[0]);
-
- pack_refs = 0;
- prune_reflogs = 0;
}
int cmd_gc(int argc, const char **argv, const char *prefix)
char *content = buffer + RECORDSIZE;
const char *comment;
ssize_t n;
+ long len;
+ char *end;
if (argc != 1)
usage(builtin_get_tar_commit_id_usage);
die_errno("git get-tar-commit-id: EOF before reading tar header");
if (header->typeflag[0] != 'g')
return 1;
- if (!skip_prefix(content, "52 comment=", &comment))
+
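+	/*
+	 * The extended-header record is "<len> comment=<hex-oid>\n".  For
+	 * SHA-1 that is "52 comment=" plus 40 hex digits and a newline:
+	 * after subtracting the prefix, len is 41, which is odd, and
+	 * (41 - 1) / 2 = 20 matches a known hash length.
+	 */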
+ len = strtol(content, &end, 10);
+ if (errno == ERANGE || end == content || len < 0)
+ return 1;
+ if (!skip_prefix(end, " comment=", &comment))
+ return 1;
+ len -= comment - content;
+ if (len < 1 || !(len % 2) ||
+ hash_algo_by_length((len - 1) / 2) == GIT_HASH_UNKNOWN)
return 1;
- if (write_in_full(1, comment, 41) < 0)
+ if (write_in_full(1, comment, len) < 0)
die_errno("git get-tar-commit-id: write error");
return 0;
return HELP_FORMAT_INFO;
if (!strcmp(format, "web") || !strcmp(format, "html"))
return HELP_FORMAT_WEB;
+ /*
+ * Please update _git_config() in git-completion.bash when you
+ * add new help formats.
+ */
die(_("unrecognized help format '%s'"), format);
}
unsigned i, max, foreign_nr = 0;
max = get_max_object_index();
- for (i = 0; i < max; i++)
+
+ if (verbose)
+ progress = start_delayed_progress(_("Checking objects"), max);
+
+ for (i = 0; i < max; i++) {
foreign_nr += check_object(get_indexed_object(i));
+ display_progress(progress, i + 1);
+ }
+
+ stop_progress(&progress);
return foreign_nr;
}
struct strbuf path = STRBUF_INIT;
struct strbuf template_path = STRBUF_INIT;
size_t template_len;
- struct repository_format template_format;
+ struct repository_format template_format = REPOSITORY_FORMAT_INIT;
struct strbuf err = STRBUF_INIT;
DIR *dir;
char *to_free = NULL;
free(to_free);
strbuf_release(&path);
strbuf_release(&template_path);
+ clear_repository_format(&template_format);
}
static int git_init_db_config(const char *k, const char *v, void *cb)
if (!strcmp(k, "init.templatedir"))
return git_config_pathname(&init_db_template_dir, k, v);
+ if (starts_with(k, "core."))
+ return platform_core_config(k, v, cb);
+
return 0;
}
struct strbuf err = STRBUF_INIT;
/* Just look for `init.templatedir` */
+ init_db_template_dir = NULL; /* re-set in case it was set before */
git_config(git_init_db_config, NULL);
/*
}
startup_info->have_repository = 1;
+ /* Just look for `core.hidedotfiles` */
+ git_config(git_init_db_config, NULL);
+
safe_create_dir(git_dir, 0);
init_is_bare_repository = is_bare_repository();
return DECORATE_SHORT_REFS;
else if (!strcmp(value, "auto"))
return auto_decoration_style();
+ /*
+ * Please update _git_log() in git-completion.bash when you
+ * add new decoration styles.
+ */
return -1;
}
* This gives a rough estimate for how many commits we
* will print out in the list.
*/
-static int estimate_commit_count(struct rev_info *rev, struct commit_list *list)
+static int estimate_commit_count(struct commit_list *list)
{
int n = 0;
switch (simplify_commit(revs, commit)) {
case commit_show:
if (show_header) {
- int n = estimate_commit_count(revs, list);
+ int n = estimate_commit_count(list);
show_early_header(revs, "incomplete", n);
show_header = 0;
}
show_early_output = log_show_early;
}
-static void setup_early_output(struct rev_info *rev)
+static void setup_early_output(void)
{
struct sigaction sa;
static void finish_early_output(struct rev_info *rev)
{
- int n = estimate_commit_count(rev, rev->commits);
+ int n = estimate_commit_count(rev->commits);
signal(SIGALRM, SIG_IGN);
show_early_header(rev, "done", n);
}
int saved_dcctc = 0, close_file = rev->diffopt.close_file;
if (rev->early_output)
- setup_early_output(rev);
+ setup_early_output();
if (prepare_revision_walk(rev))
die(_("revision walk setup failed"));
return cmd_log_walk(&rev);
}
-static void show_tagger(char *buf, int len, struct rev_info *rev)
+static void show_tagger(const char *buf, struct rev_info *rev)
{
struct strbuf out = STRBUF_INIT;
struct pretty_print_context pp = {0};
if (get_oid_with_context(the_repository, obj_name,
GET_OID_RECORD_PATH,
&oidc, &obj_context))
- die(_("Not a valid object name %s"), obj_name);
+ die(_("not a valid object name %s"), obj_name);
if (!obj_context.path ||
!textconv_object(the_repository, obj_context.path,
obj_context.mode, &oidc, 1, &buf, &size)) {
int offset = 0;
if (!buf)
- return error(_("Could not read object %s"), oid_to_hex(oid));
+ return error(_("could not read object %s"), oid_to_hex(oid));
assert(type == OBJ_TAG);
while (offset < size && buf[offset] != '\n') {
int new_offset = offset + 1;
+ const char *ident;
while (new_offset < size && buf[new_offset++] != '\n')
; /* do nothing */
- if (starts_with(buf + offset, "tagger "))
- show_tagger(buf + offset + 7,
- new_offset - offset - 7, rev);
+ if (skip_prefix(buf + offset, "tagger ", &ident))
+ show_tagger(ident, rev);
offset = new_offset;
}
break;
o = parse_object(the_repository, &t->tagged->oid);
if (!o)
- ret = error(_("Could not read object %s"),
+ ret = error(_("could not read object %s"),
oid_to_hex(&t->tagged->oid));
objects[i].item = o;
i--;
ret = cmd_log_walk(&rev);
break;
default:
- ret = error(_("Unknown type: %d"), o->type);
+ ret = error(_("unknown type: %d"), o->type);
}
}
free(objects);
printf("%s\n", filename.buf + outdir_offset);
if ((rev->diffopt.file = fopen(filename.buf, "w")) == NULL) {
- error_errno(_("Cannot open patch file %s"), filename.buf);
+ error_errno(_("cannot open patch file %s"), filename.buf);
strbuf_release(&filename);
return -1;
}
unsigned flags1, flags2;
if (rev->pending.nr != 2)
- die(_("Need exactly one range."));
+ die(_("need exactly one range"));
o1 = rev->pending.objects[0].item;
o2 = rev->pending.objects[1].item;
c2 = lookup_commit_reference(the_repository, &o2->oid);
if ((flags1 & UNINTERESTING) == (flags2 & UNINTERESTING))
- die(_("Not a range."));
+ die(_("not a range"));
init_patch_ids(the_repository, ids);
struct commit *head = list[0];
if (!cmit_fmt_is_mail(rev->commit_format))
- die(_("Cover letter needs email format"));
+ die(_("cover letter needs email format"));
committer = git_committer_info(0);
if (!use_stdout &&
open_next_file(NULL, rev->numbered_files ? NULL : "cover-letter", rev, quiet))
- return;
+ die(_("failed to create cover-letter file"));
log_write_email_headers(rev, head, &pp.after_subject, &need_8bit_cte, 0);
const char **dir = (const char **)opt->value;
BUG_ON_OPT_NEG(unset);
if (*dir)
- die(_("Two output directories?"));
+ die(_("two output directories?"));
*dir = arg;
return 0;
}
*thread = THREAD_SHALLOW;
else if (!strcmp(arg, "deep"))
*thread = THREAD_DEEP;
+ /*
+ * Please update _git_formatpatch() in git-completion.bash
+ * when you add new options.
+ */
else
return 1;
return 0;
if (base_commit && strcmp(base_commit, "auto")) {
base = lookup_commit_reference_by_name(base_commit);
if (!base)
- die(_("Unknown commit %s"), base_commit);
+ die(_("unknown commit %s"), base_commit);
} else if ((base_commit && !strcmp(base_commit, "auto")) || base_auto) {
struct branch *curr_branch = branch_get(NULL);
const char *upstream = branch_get_upstream(curr_branch, NULL);
struct object_id oid;
if (get_oid(upstream, &oid))
- die(_("Failed to resolve '%s' as a valid ref."), upstream);
+ die(_("failed to resolve '%s' as a valid ref"), upstream);
commit = lookup_commit_or_die(&oid, "upstream base");
base_list = get_merge_bases_many(commit, total, list);
/* There should be one and only one merge base. */
if (!base_list || base_list->next)
- die(_("Could not find exact merge base."));
+ die(_("could not find exact merge base"));
base = base_list->item;
free_commit_list(base_list);
} else {
- die(_("Failed to get upstream, if you want to record base commit automatically,\n"
+ die(_("failed to get upstream, if you want to record base commit automatically,\n"
"please use git branch --set-upstream-to to track a remote branch.\n"
- "Or you could specify base commit by --base=<base-commit-id> manually."));
+ "Or you could specify base commit by --base=<base-commit-id> manually"));
}
}
struct commit_list *merge_base;
merge_base = get_merge_bases(rev[2 * i], rev[2 * i + 1]);
if (!merge_base || merge_base->next)
- die(_("Failed to find exact merge base"));
+ die(_("failed to find exact merge base"));
rev[i] = merge_base->item;
}
if (use_stdout)
die(_("standard output, or directory, which one?"));
if (mkdir(output_directory, 0777) < 0 && errno != EEXIST)
- die_errno(_("Could not create directory '%s'"),
+ die_errno(_("could not create directory '%s'"),
output_directory);
}
if (!use_stdout &&
open_next_file(rev.numbered_files ? NULL : commit, NULL, &rev, quiet))
- die(_("Failed to create output files"));
+ die(_("failed to create output files"));
shown = log_tree_commit(&rev, commit);
free_commit_buffer(the_repository->parsed_objects,
commit);
revs.max_parents = 1;
if (add_pending_commit(head, &revs, 0))
- die(_("Unknown commit %s"), head);
+ die(_("unknown commit %s"), head);
if (add_pending_commit(upstream, &revs, UNINTERESTING))
- die(_("Unknown commit %s"), upstream);
+ die(_("unknown commit %s"), upstream);
/* Don't say anything if head and upstream are the same. */
if (revs.pending.nr == 2) {
get_patch_ids(&revs, &ids);
if (limit && add_pending_commit(limit, &revs, UNINTERESTING))
- die(_("Unknown commit %s"), limit);
+ die(_("unknown commit %s"), limit);
/* reverse the list of commits */
if (prepare_revision_walk(&revs))
if (debug_mode) {
const struct stat_data *sd = &ce->ce_stat_data;
- printf(" ctime: %d:%d\n", sd->sd_ctime.sec, sd->sd_ctime.nsec);
- printf(" mtime: %d:%d\n", sd->sd_mtime.sec, sd->sd_mtime.nsec);
- printf(" dev: %d\tino: %d\n", sd->sd_dev, sd->sd_ino);
- printf(" uid: %d\tgid: %d\n", sd->sd_uid, sd->sd_gid);
- printf(" size: %d\tflags: %x\n", sd->sd_size, ce->ce_flags);
+ printf(" ctime: %u:%u\n", sd->sd_ctime.sec, sd->sd_ctime.nsec);
+ printf(" mtime: %u:%u\n", sd->sd_mtime.sec, sd->sd_mtime.nsec);
+ printf(" dev: %u\tino: %u\n", sd->sd_dev, sd->sd_ino);
+ printf(" uid: %u\tgid: %u\n", sd->sd_uid, sd->sd_gid);
+ printf(" size: %u\tflags: %x\n", sd->sd_size, ce->ce_flags);
}
}
if (ps_matched) {
int bad;
- bad = report_path_error(ps_matched, &pathspec, prefix);
+ bad = report_path_error(ps_matched, &pathspec);
if (bad)
fprintf(stderr, "Did you forget to 'git add'?\n");
OPT_BIT(0, "refs", &flags, N_("do not show peeled tags"), REF_NORMAL),
OPT_BOOL(0, "get-url", &get_url,
N_("take url.<base>.insteadOf into account")),
- OPT_CALLBACK(0 , "sort", sorting_tail, N_("key"),
- N_("field name to sort on"), &parse_opt_ref_sorting),
+ OPT_REF_SORT(sorting_tail),
OPT_SET_INT_F(0, "exit-code", &status,
N_("exit with exit code 2 if no matching refs are found"),
2, PARSE_OPT_NOCOMPLETE),
#include "tag.h"
#include "alias.h"
#include "commit-reach.h"
+#include "wt-status.h"
#define DEFAULT_TWOHEAD (1<<0)
#define DEFAULT_OCTOPUS (1<<1)
static enum ff_type fast_forward = FF_ALLOW;
+static const char *cleanup_arg;
+static enum commit_msg_cleanup_mode cleanup_mode;
+
static int option_parse_message(const struct option *opt,
const char *arg, int unset)
{
return 0;
}
-static int option_read_message(struct parse_opt_ctx_t *ctx,
- const struct option *opt, int unset)
+static enum parse_opt_result option_read_message(struct parse_opt_ctx_t *ctx,
+ const struct option *opt,
+ const char *arg_not_used,
+ int unset)
{
struct strbuf *buf = opt->value;
const char *arg;
+ BUG_ON_OPT_ARG(arg_not_used);
if (unset)
BUG("-F cannot be negated");
N_("perform a commit if the merge succeeds (default)")),
OPT_BOOL('e', "edit", &option_edit,
N_("edit message before committing")),
+ OPT_CLEANUP(&cleanup_arg),
OPT_SET_INT(0, "ff", &fast_forward, N_("allow fast-forward (default)"), FF_ALLOW),
OPT_SET_INT_F(0, "ff-only", &fast_forward,
N_("abort if fast-forward is not possible"),
option_parse_message),
{ OPTION_LOWLEVEL_CALLBACK, 'F', "file", &merge_msg, N_("path"),
N_("read message from file"), PARSE_OPT_NONEG,
- (parse_opt_cb *) option_read_message },
+ NULL, 0, option_read_message },
OPT__VERBOSITY(&verbosity),
OPT_BOOL(0, "abort", &abort_current_merge,
N_("abort the current in-progress merge")),
return git_config_string(&pull_twohead, k, v);
else if (!strcmp(k, "pull.octopus"))
return git_config_string(&pull_octopus, k, v);
+ else if (!strcmp(k, "commit.cleanup"))
+ return git_config_string(&cleanup_arg, k, v);
else if (!strcmp(k, "merge.renormalize"))
option_renormalize = git_config_bool(k, v);
else if (!strcmp(k, "merge.ff")) {
static const char merge_editor_comment[] =
N_("Please enter a commit message to explain why this merge is necessary,\n"
"especially if it merges an updated upstream into a topic branch.\n"
- "\n"
- "Lines starting with '%c' will be ignored, and an empty message aborts\n"
+ "\n");
+
+static const char scissors_editor_comment[] =
+N_("An empty message aborts the commit.\n");
+
+static const char no_scissors_editor_comment[] =
+N_("Lines starting with '%c' will be ignored, and an empty message aborts\n"
"the commit.\n");
static void write_merge_heads(struct commit_list *);
{
struct strbuf msg = STRBUF_INIT;
strbuf_addbuf(&msg, &merge_msg);
- strbuf_addch(&msg, '\n');
if (squash)
BUG("the control must not reach here under --squash");
- if (0 < option_edit)
- strbuf_commented_addf(&msg, _(merge_editor_comment), comment_line_char);
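+	/*
+	 * Under a "scissors" cleanup everything below the cut line is
+	 * discarded, so append the cut line first and use the shorter hint
+	 * that omits the "lines starting with '%c'" sentence.
+	 */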
+ if (0 < option_edit) {
+ strbuf_addch(&msg, '\n');
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS) {
+ wt_status_append_cut_line(&msg);
+ strbuf_commented_addf(&msg, "\n");
+ }
+ strbuf_commented_addf(&msg, _(merge_editor_comment));
+ strbuf_commented_addf(&msg, _(cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS ?
+ scissors_editor_comment :
+ no_scissors_editor_comment), comment_line_char);
+ }
if (signoff)
append_signoff(&msg, ignore_non_trailer(msg.buf, msg.len), 0);
write_merge_heads(remoteheads);
abort_commit(remoteheads, NULL);
read_merge_msg(&msg);
- strbuf_stripspace(&msg, 0 < option_edit);
+ cleanup_message(&msg, cleanup_mode, 0);
if (!msg.len)
abort_commit(remoteheads, _("Empty commit message."));
strbuf_release(&merge_msg);
parents = remoteheads;
if (!head_subsumed || fast_forward == FF_NO)
commit_list_insert(head, &parents);
- strbuf_addch(&merge_msg, '\n');
prepare_to_commit(remoteheads);
if (commit_tree(merge_msg.buf, merge_msg.len, result_tree, parents,
&result_commit, NULL, sign_commit))
filename = git_path_merge_msg(the_repository);
fp = xfopen(filename, "a");
- append_conflicts_hint(&the_index, &msgbuf);
+ /*
+ * We can't use cleanup_mode because if we're not using the editor,
+ * get_cleanup_mode will return COMMIT_MSG_CLEANUP_SPACE instead, even
+ * though the message is meant to be processed later by git-commit.
+ * Thus, we will get the cleanup mode which is returned when we _are_
+ * using an editor.
+ */
+ append_conflicts_hint(&the_index, &msgbuf,
+ get_cleanup_mode(cleanup_arg, 1));
fputs(msgbuf.buf, fp);
strbuf_release(&msgbuf);
fclose(fp);
}
resolve_undo_clear();
+ if (option_edit < 0)
+ option_edit = default_edit_option();
+
+ cleanup_mode = get_cleanup_mode(cleanup_arg, 0 < option_edit);
+
if (verbosity < 0)
show_diffstat = 0;
fast_forward = FF_NO;
}
- if (option_edit < 0)
- option_edit = default_edit_option();
-
if (!use_strategies) {
if (!remoteheads)
; /* already up-to-date */
#include "config.h"
#include "parse-options.h"
#include "midx.h"
+#include "trace2.h"
static char const * const builtin_multi_pack_index_usage[] = {
N_("git multi-pack-index [--object-dir=<dir>] (write|verify)"),
return 1;
}
+ trace2_cmd_mode(argv[0]);
+
if (!strcmp(argv[0], "write"))
return write_midx_file(opts.object_dir);
if (!strcmp(argv[0], "verify"))
- return verify_midx_file(opts.object_dir);
+ return verify_midx_file(the_repository, opts.object_dir);
die(_("unrecognized verb: %s"), argv[0]);
}
static void name_rev_line(char *p, struct name_ref_data *data)
{
struct strbuf buf = STRBUF_INIT;
- int forty = 0;
+ int counter = 0;
char *p_start;
+ const unsigned hexsz = the_hash_algo->hexsz;
+
for (p_start = p; *p; p++) {
#define ishex(x) (isdigit((x)) || ((x) >= 'a' && (x) <= 'f'))
if (!ishex(*p))
- forty = 0;
- else if (++forty == GIT_SHA1_HEXSZ &&
+ counter = 0;
+ else if (++counter == hexsz &&
!ishex(*(p+1))) {
struct object_id oid;
const char *name = NULL;
char c = *(p+1);
int p_len = p - p_start + 1;
- forty = 0;
+ counter = 0;
*(p+1) = 0;
- if (!get_oid(p - (GIT_SHA1_HEXSZ - 1), &oid)) {
+ if (!get_oid(p - (hexsz - 1), &oid)) {
struct object *o =
lookup_object(the_repository,
oid.hash);
continue;
if (data->name_only)
- printf("%.*s%s", p_len - GIT_SHA1_HEXSZ, p_start, name);
+ printf("%.*s%s", p_len - hexsz, p_start, name);
else
printf("%.*s (%s)", p_len, p_start, name);
p_start = p + 1;
#include "object-store.h"
#include "dir.h"
#include "midx.h"
+#include "trace2.h"
#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
#define SIZE(obj) oe_size(&to_pack, obj)
static int use_bitmap_index_default = 1;
static int use_bitmap_index = -1;
static int write_bitmap_index;
-static uint16_t write_bitmap_options;
+static uint16_t write_bitmap_options = BITMAP_OPT_HASH_CACHE;
static int exclude_promisor_objects;
if (written != nr_result)
die(_("wrote %"PRIu32" objects while expecting %"PRIu32),
written, nr_result);
+ trace2_data_intmax("pack-objects", the_repository,
+ "write_pack_file/wrote", nr_result);
}
static int no_try_delta(const char *path)
for (m = get_multi_pack_index(the_repository); m; m = m->next) {
struct pack_entry e;
- if (fill_midx_entry(oid, &e, m)) {
+ if (fill_midx_entry(the_repository, oid, &e, m)) {
struct packed_git *p = e.p;
off_t offset;
struct object_entry **base_out)
{
struct object_entry *base;
+ struct object_id base_oid;
if (!base_sha1)
return 0;
* even if it was buried too deep in history to make it into the
* packing list.
*/
- if (thin && bitmap_has_sha1_in_uninteresting(bitmap_git, base_sha1)) {
+ oidread(&base_oid, base_sha1);
+ if (thin && bitmap_has_oid_in_uninteresting(bitmap_git, &base_oid)) {
if (use_delta_islands) {
- struct object_id base_oid;
- hashcpy(base_oid.hash, base_sha1);
if (!in_same_island(&delta->idx.oid, &base_oid))
return 0;
}
}
}
+ trace2_region_enter("pack-objects", "enumerate-objects",
+ the_repository);
prepare_packing_data(the_repository, &to_pack);
if (progress)
if (include_tag && nr_result)
for_each_ref(add_ref_tag, NULL);
stop_progress(&progress_state);
+ trace2_region_leave("pack-objects", "enumerate-objects",
+ the_repository);
if (non_empty && !nr_result)
return 0;
- if (nr_result)
+ if (nr_result) {
+ trace2_region_enter("pack-objects", "prepare-pack",
+ the_repository);
prepare_pack(window, depth);
+ trace2_region_leave("pack-objects", "prepare-pack",
+ the_repository);
+ }
+
+ trace2_region_enter("pack-objects", "write-pack-file", the_repository);
write_pack_file();
+ trace2_region_leave("pack-objects", "write-pack-file", the_repository);
+
if (progress)
fprintf_ln(stderr,
_("Total %"PRIu32" (delta %"PRIu32"),"
struct pack_list *next;
struct packed_git *pack;
struct llist *unique_objects;
- struct llist *all_objects;
+ struct llist *remaining_objects;
+ size_t all_objects_size;
} *local_packs = NULL, *altodb_packs = NULL;
-struct pll {
- struct pll *next;
- struct pack_list *pl;
-};
-
static struct llist_item *free_nodes;
static inline void llist_item_put(struct llist_item *item)
return new_item;
}
-static void llist_free(struct llist *list)
-{
- while ((list->back = list->front)) {
- list->front = list->front->next;
- llist_item_put(list->back);
- }
- free(list);
-}
-
static inline void llist_init(struct llist **list)
{
*list = xmalloc(sizeof(struct llist));
struct llist_item *p1_hint = NULL, *p2_hint = NULL;
const unsigned int hashsz = the_hash_algo->rawsz;
+ if (!p1->unique_objects)
+ p1->unique_objects = llist_copy(p1->remaining_objects);
+ if (!p2->unique_objects)
+ p2->unique_objects = llist_copy(p2->remaining_objects);
+
p1_base = p1->pack->index_data;
p2_base = p2->pack->index_data;
p1_base += 256 * 4 + ((p1->pack->index_version < 2) ? 4 : 8);
}
}
-static void pll_free(struct pll *l)
-{
- struct pll *old;
- struct pack_list *opl;
-
- while (l) {
- old = l;
- while (l->pl) {
- opl = l->pl;
- l->pl = opl->next;
- free(opl);
- }
- l = l->next;
- free(old);
- }
-}
-
-/* all the permutations have to be free()d at the same time,
- * since they refer to each other
- */
-static struct pll * get_permutations(struct pack_list *list, int n)
-{
- struct pll *subset, *ret = NULL, *new_pll = NULL;
-
- if (list == NULL || pack_list_size(list) < n || n == 0)
- return NULL;
-
- if (n == 1) {
- while (list) {
- new_pll = xmalloc(sizeof(*new_pll));
- new_pll->pl = NULL;
- pack_list_insert(&new_pll->pl, list);
- new_pll->next = ret;
- ret = new_pll;
- list = list->next;
- }
- return ret;
- }
-
- while (list->next) {
- subset = get_permutations(list->next, n - 1);
- while (subset) {
- new_pll = xmalloc(sizeof(*new_pll));
- new_pll->pl = subset->pl;
- pack_list_insert(&new_pll->pl, list);
- new_pll->next = ret;
- ret = new_pll;
- subset = subset->next;
- }
- list = list->next;
- }
- return ret;
-}
-
-static int is_superset(struct pack_list *pl, struct llist *list)
-{
- struct llist *diff;
-
- diff = llist_copy(list);
-
- while (pl) {
- llist_sorted_difference_inplace(diff, pl->all_objects);
- if (diff->size == 0) { /* we're done */
- llist_free(diff);
- return 1;
- }
- pl = pl->next;
- }
- llist_free(diff);
- return 0;
-}
-
static size_t sizeof_union(struct packed_git *p1, struct packed_git *p2)
{
size_t ret = 0;
return ret;
}
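+/*
+ * QSORT comparison for sort_pack_list(): packs with more remaining
+ * objects sort first; ties are broken by the original pack size
+ * (all_objects_size), bigger pack first.
+ */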
+static int cmp_remaining_objects(const void *a, const void *b)
+{
+ struct pack_list *pl_a = *((struct pack_list **)a);
+ struct pack_list *pl_b = *((struct pack_list **)b);
+
+ if (pl_a->remaining_objects->size == pl_b->remaining_objects->size) {
+ /* have the same remaining_objects, big pack first */
+ if (pl_a->all_objects_size == pl_b->all_objects_size)
+ return 0;
+ else if (pl_a->all_objects_size < pl_b->all_objects_size)
+ return 1;
+ else
+ return -1;
+ } else if (pl_a->remaining_objects->size < pl_b->remaining_objects->size) {
+ /* sort by remaining objects, more objects first */
+ return 1;
+ } else {
+ return -1;
+ }
+}
+
+/* Sort pack_list, greater size of remaining_objects first */
+static void sort_pack_list(struct pack_list **pl)
+{
+ struct pack_list **ary, *p;
+ int i;
+ size_t n = pack_list_size(*pl);
+
+ if (n < 2)
+ return;
+
+ /* prepare an array of packed_list for easier sorting */
+ ary = xcalloc(n, sizeof(struct pack_list *));
+ for (n = 0, p = *pl; p; p = p->next)
+ ary[n++] = p;
+
+ QSORT(ary, n, cmp_remaining_objects);
+
+ /* link them back again */
+ for (i = 0; i < n - 1; i++)
+ ary[i]->next = ary[i + 1];
+ ary[n - 1]->next = NULL;
+ *pl = ary[0];
+
+ free(ary);
+}
+
+
static void minimize(struct pack_list **min)
{
- struct pack_list *pl, *unique = NULL,
- *non_unique = NULL, *min_perm = NULL;
- struct pll *perm, *perm_all, *perm_ok = NULL, *new_perm;
- struct llist *missing;
- off_t min_perm_size = 0, perm_size;
- int n;
+ struct pack_list *pl, *unique = NULL, *non_unique = NULL;
+ struct llist *missing, *unique_pack_objects;
pl = local_packs;
while (pl) {
missing = llist_copy(all_objects);
pl = unique;
while (pl) {
- llist_sorted_difference_inplace(missing, pl->all_objects);
+ llist_sorted_difference_inplace(missing, pl->remaining_objects);
pl = pl->next;
}
+ *min = unique;
+
/* return if there are no objects missing from the unique set */
if (missing->size == 0) {
- *min = unique;
free(missing);
return;
}
- /* find the permutations which contain all missing objects */
- for (n = 1; n <= pack_list_size(non_unique) && !perm_ok; n++) {
- perm_all = perm = get_permutations(non_unique, n);
- while (perm) {
- if (is_superset(perm->pl, missing)) {
- new_perm = xmalloc(sizeof(struct pll));
- memcpy(new_perm, perm, sizeof(struct pll));
- new_perm->next = perm_ok;
- perm_ok = new_perm;
- }
- perm = perm->next;
- }
- if (perm_ok)
- break;
- pll_free(perm_all);
- }
- if (perm_ok == NULL)
- die("Internal error: No complete sets found!");
-
- /* find the permutation with the smallest size */
- perm = perm_ok;
- while (perm) {
- perm_size = pack_set_bytecount(perm->pl);
- if (!min_perm_size || min_perm_size > perm_size) {
- min_perm_size = perm_size;
- min_perm = perm->pl;
- }
- perm = perm->next;
- }
- *min = min_perm;
- /* add the unique packs to the list */
- pl = unique;
+ unique_pack_objects = llist_copy(all_objects);
+ llist_sorted_difference_inplace(unique_pack_objects, missing);
+
+ /* remove unique pack objects from the non_unique packs */
+ pl = non_unique;
while (pl) {
- pack_list_insert(min, pl);
+ llist_sorted_difference_inplace(pl->remaining_objects, unique_pack_objects);
pl = pl->next;
}
+
+ while (non_unique) {
+ /* sort the non_unique packs, greater size of remaining_objects first */
+ sort_pack_list(&non_unique);
+ if (non_unique->remaining_objects->size == 0)
+ break;
+
+ pack_list_insert(min, non_unique);
+
+ for (pl = non_unique->next; pl && pl->remaining_objects->size > 0; pl = pl->next)
+ llist_sorted_difference_inplace(pl->remaining_objects, non_unique->remaining_objects);
+
+ non_unique = non_unique->next;
+ }
}
static void load_all_objects(void)
while (pl) {
hint = NULL;
- l = pl->all_objects->front;
+ l = pl->remaining_objects->front;
while (l) {
hint = llist_insert_sorted_unique(all_objects,
l->oid, hint);
/* remove objects present in remote packs */
pl = altodb_packs;
while (pl) {
- llist_sorted_difference_inplace(all_objects, pl->all_objects);
+ llist_sorted_difference_inplace(all_objects, pl->remaining_objects);
pl = pl->next;
}
}
while (alt) {
local = local_packs;
while (local) {
- llist_sorted_difference_inplace(local->unique_objects,
- alt->all_objects);
+ llist_sorted_difference_inplace(local->remaining_objects,
+ alt->remaining_objects);
local = local->next;
}
- llist_sorted_difference_inplace(all_objects, alt->all_objects);
alt = alt->next;
}
}
return NULL;
l.pack = p;
- llist_init(&l.all_objects);
+ llist_init(&l.remaining_objects);
if (open_pack_index(p))
return NULL;
base += 256 * 4 + ((p->index_version < 2) ? 4 : 8);
step = the_hash_algo->rawsz + ((p->index_version < 2) ? 4 : 0);
while (off < p->num_objects * step) {
- llist_insert_back(l.all_objects, (const struct object_id *)(base + off));
+ llist_insert_back(l.remaining_objects, (const struct object_id *)(base + off));
off += step;
}
- /* this list will be pruned in cmp_two_packs later */
- l.unique_objects = llist_copy(l.all_objects);
+ l.all_objects_size = l.remaining_objects->size;
+ l.unique_objects = NULL;
if (p->pack_local)
return pack_list_insert(&local_packs, &l);
else
int cmd_pack_redundant(int argc, const char **argv, const char *prefix)
{
int i;
- struct pack_list *min, *red, *pl;
+ struct pack_list *min = NULL, *red, *pl;
struct llist *ignore;
struct object_id *oid;
char buf[GIT_MAX_HEXSZ + 2]; /* hex hash + \n + \0 */
load_all_objects();
- cmp_local_packs();
if (alt_odb)
scan_alt_odb_packs();
llist_sorted_difference_inplace(all_objects, ignore);
pl = local_packs;
while (pl) {
- llist_sorted_difference_inplace(pl->unique_objects, ignore);
+ llist_sorted_difference_inplace(pl->remaining_objects, ignore);
pl = pl->next;
}
+ cmp_local_packs();
+
minimize(&min);
if (verbose) {
pl = red = pack_list_difference(local_packs, min);
while (pl) {
printf("%s\n%s\n",
- sha1_pack_index_name(pl->pack->sha1),
+ sha1_pack_index_name(pl->pack->hash),
pl->pack->pack_name);
pl = pl->next;
}
#include "builtin.h"
+#include "config.h"
#include "parse-options.h"
#include "refs.h"
#include "repository.h"
OPT_BIT(0, "prune", &flags, N_("prune loose refs (default)"), PACK_REFS_PRUNE),
OPT_END(),
};
+ git_config(git_default_config, NULL);
if (parse_options(argc, argv, prefix, opts, pack_refs_usage, 0))
usage_with_options(pack_refs_usage, opts);
return refs_pack_refs(get_main_ref_store(the_repository), flags);
argc = parse_options(argc, argv, prefix, prune_packed_options,
prune_packed_usage, 0);
+ if (argc > 0)
+ usage_msg_opt(_("too many arguments"),
+ prune_packed_usage,
+ prune_packed_options);
+
prune_packed_objects(opts);
return 0;
}
return 0;
}
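+/*
+ * Run the reachability traversal at most once per process; it marks
+ * every reachable object with the SEEN flag for later lookups.
+ */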
+static void perform_reachability_traversal(struct rev_info *revs)
+{
+ static int initialized;
+ struct progress *progress = NULL;
+
+ if (initialized)
+ return;
+
+ if (show_progress)
+ progress = start_delayed_progress(_("Checking connectivity"), 0);
+ mark_reachable_objects(revs, 1, expire, progress);
+ stop_progress(&progress);
+ initialized = 1;
+}
+
+static int is_object_reachable(const struct object_id *oid,
+ struct rev_info *revs)
+{
+ struct object *obj;
+
+ perform_reachability_traversal(revs);
+
+ obj = lookup_object(the_repository, oid->hash);
+ return obj && (obj->flags & SEEN);
+}
+
static int prune_object(const struct object_id *oid, const char *fullpath,
void *data)
{
+ struct rev_info *revs = data;
struct stat st;
- /*
- * Do we know about this object?
- * It must have been reachable
- */
- if (lookup_object(the_repository, oid->hash))
+ if (is_object_reachable(oid, revs))
return 0;
if (lstat(fullpath, &st)) {
int cmd_prune(int argc, const char **argv, const char *prefix)
{
struct rev_info revs;
- struct progress *progress = NULL;
int exclude_promisor_objects = 0;
const struct option options[] = {
OPT__DRY_RUN(&show_only, N_("do not remove, show only")),
if (show_progress == -1)
show_progress = isatty(2);
- if (show_progress)
- progress = start_delayed_progress(_("Checking connectivity"), 0);
if (exclude_promisor_objects) {
fetch_if_missing = 0;
revs.exclude_promisor_objects = 1;
}
- mark_reachable_objects(&revs, 1, expire, progress);
- stop_progress(&progress);
for_each_loose_file_in_objdir(get_object_directory(), prune_object,
- prune_cruft, prune_subdir, NULL);
+ prune_cruft, prune_subdir, &revs);
prune_packed_objects(show_only ? PRUNE_PACKED_DRY_RUN : 0);
remove_temporary_files(get_object_directory());
remove_temporary_files(s);
free(s);
- if (is_repository_shallow(the_repository))
+ if (is_repository_shallow(the_repository)) {
+ perform_reachability_traversal(&revs);
prune_shallow(show_only ? PRUNE_SHOW_ONLY : 0);
+ }
return 0;
}
#include "lockfile.h"
#include "wt-status.h"
#include "commit-reach.h"
+#include "sequencer.h"
enum rebase_type {
REBASE_INVALID = -1,
return REBASE_MERGES;
else if (!strcmp(value, "interactive") || !strcmp(value, "i"))
return REBASE_INTERACTIVE;
+ /*
+ * Please update _git_config() in git-completion.bash when you
+ * add new rebase modes.
+ */
if (fatal)
die(_("Invalid value for %s: %s"), key, value);
static char *opt_squash;
static char *opt_commit;
static char *opt_edit;
+static char *cleanup_arg;
static char *opt_ff;
static char *opt_verify_signatures;
static int opt_autostash = -1;
OPT_PASSTHRU(0, "edit", &opt_edit, NULL,
N_("edit message before committing"),
PARSE_OPT_NOARG),
+ OPT_CLEANUP(&cleanup_arg),
OPT_PASSTHRU(0, "ff", &opt_ff, NULL,
N_("allow fast-forward"),
PARSE_OPT_NOARG),
fp = xfopen(filename, "r");
while (strbuf_getline_lf(&sb, fp) != EOF) {
- if (get_oid_hex(sb.buf, &oid))
- continue; /* invalid line: does not start with SHA1 */
- if (starts_with(sb.buf + GIT_SHA1_HEXSZ, "\tnot-for-merge\t"))
+ const char *p;
+ if (parse_oid_hex(sb.buf, &oid, &p))
+ continue; /* invalid line: does not start with object ID */
+ if (starts_with(p, "\tnot-for-merge\t"))
continue; /* ref is not-for-merge */
oid_array_append(merge_heads, &oid);
}
argv_array_push(&args, opt_commit);
if (opt_edit)
argv_array_push(&args, opt_edit);
+ if (cleanup_arg)
+ argv_array_pushf(&args, "--cleanup=%s", cleanup_arg);
if (opt_ff)
argv_array_push(&args, opt_ff);
if (opt_verify_signatures)
cp.no_stderr = 1;
cp.git_cmd = 1;
- ret = capture_command(&cp, &sb, GIT_SHA1_HEXSZ);
+ ret = capture_command(&cp, &sb, GIT_MAX_HEXSZ);
if (ret)
goto cleanup;
}
/**
- * Given the current HEAD SHA1, the merge head returned from git-fetch and the
+ * Given the current HEAD oid, the merge head returned from git-fetch and the
* fork point calculated by get_rebase_fork_point(), runs git-rebase with the
* appropriate arguments and returns its exit status.
*/
argc = parse_options(argc, argv, prefix, pull_options, pull_usage, 0);
+ if (cleanup_arg)
+ /*
+ * this only checks the validity of cleanup_arg; we don't need
+ * a valid value for use_editor
+ */
+ get_cleanup_mode(cleanup_arg, 0);
+
parse_repo_refspecs(argc, argv, &repo, &refspecs);
if (!opt_ff)
int creation_factor = RANGE_DIFF_CREATION_FACTOR_DEFAULT;
struct diff_options diffopt = { NULL };
int simple_color = -1;
- struct option options[] = {
+ struct option range_diff_options[] = {
OPT_INTEGER(0, "creation-factor", &creation_factor,
N_("Percentage by which creation is weighted")),
OPT_BOOL(0, "no-dual-color", &simple_color,
N_("use simple diff colors")),
OPT_END()
};
- int i, j, res = 0;
+ struct option *options;
+ int res = 0;
struct strbuf range1 = STRBUF_INIT, range2 = STRBUF_INIT;
git_config(git_diff_ui_config, NULL);
repo_diff_setup(the_repository, &diffopt);
+ options = parse_options_concat(range_diff_options, diffopt.parseopts);
argc = parse_options(argc, argv, NULL, options,
- builtin_range_diff_usage, PARSE_OPT_KEEP_UNKNOWN |
- PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0);
-
- for (i = j = 1; i < argc && strcmp("--", argv[i]); ) {
- int c = diff_opt_parse(&diffopt, argv + i, argc - i, prefix);
+ builtin_range_diff_usage, 0);
- if (!c)
- argv[j++] = argv[i++];
- else
- i += c;
- }
- while (i < argc)
- argv[j++] = argv[i++];
- argc = j;
diff_setup_done(&diffopt);
- /* Make sure that there are no unparsed options */
- argc = parse_options(argc, argv, NULL,
- options + ARRAY_SIZE(options) - 1, /* OPT_END */
- builtin_range_diff_usage, 0);
-
/* force color when --dual-color was used */
if (!simple_color)
diffopt.use_color = 1;
error(_("need two commit ranges"));
usage_with_options(builtin_range_diff_usage, options);
}
+ FREE_AND_NULL(options);
res = show_range_diff(range1.buf, range2.buf, creation_factor,
simple_color < 1, &diffopt);
{ OPTION_CALLBACK, 0, "recurse-submodules", NULL,
"checkout", "control recursive updating of submodules",
PARSE_OPT_OPTARG, option_parse_recurse_submodules_worktree_updater },
+ OPT__QUIET(&opts.quiet, N_("suppress feedback messages")),
OPT_END()
};
+++ /dev/null
-#define USE_THE_INDEX_COMPATIBILITY_MACROS
-#include "builtin.h"
-#include "cache.h"
-#include "config.h"
-#include "parse-options.h"
-#include "sequencer.h"
-#include "rebase-interactive.h"
-#include "argv-array.h"
-#include "refs.h"
-#include "rerere.h"
-#include "run-command.h"
-
-static GIT_PATH_FUNC(path_state_dir, "rebase-merge/")
-static GIT_PATH_FUNC(path_squash_onto, "rebase-merge/squash-onto")
-static GIT_PATH_FUNC(path_interactive, "rebase-merge/interactive")
-
-static int get_revision_ranges(const char *upstream, const char *onto,
- const char **head_hash,
- char **revisions, char **shortrevisions)
-{
- const char *base_rev = upstream ? upstream : onto, *shorthead;
- struct object_id orig_head;
-
- if (get_oid("HEAD", &orig_head))
- return error(_("no HEAD?"));
-
- *head_hash = find_unique_abbrev(&orig_head, GIT_MAX_HEXSZ);
- *revisions = xstrfmt("%s...%s", base_rev, *head_hash);
-
- shorthead = find_unique_abbrev(&orig_head, DEFAULT_ABBREV);
-
- if (upstream) {
- const char *shortrev;
- struct object_id rev_oid;
-
- get_oid(base_rev, &rev_oid);
- shortrev = find_unique_abbrev(&rev_oid, DEFAULT_ABBREV);
-
- *shortrevisions = xstrfmt("%s..%s", shortrev, shorthead);
- } else
- *shortrevisions = xstrdup(shorthead);
-
- return 0;
-}
-
-static int init_basic_state(struct replay_opts *opts, const char *head_name,
- const char *onto, const char *orig_head)
-{
- FILE *interactive;
-
- if (!is_directory(path_state_dir()) && mkdir_in_gitdir(path_state_dir()))
- return error_errno(_("could not create temporary %s"), path_state_dir());
-
- delete_reflog("REBASE_HEAD");
-
- interactive = fopen(path_interactive(), "w");
- if (!interactive)
- return error_errno(_("could not mark as interactive"));
- fclose(interactive);
-
- return write_basic_state(opts, head_name, onto, orig_head);
-}
-
-static int do_interactive_rebase(struct replay_opts *opts, unsigned flags,
- const char *switch_to, const char *upstream,
- const char *onto, const char *onto_name,
- const char *squash_onto, const char *head_name,
- const char *restrict_revision, char *raw_strategies,
- const char *cmd, unsigned autosquash)
-{
- int ret;
- const char *head_hash = NULL;
- char *revisions = NULL, *shortrevisions = NULL;
- struct argv_array make_script_args = ARGV_ARRAY_INIT;
- FILE *todo_list;
-
- if (prepare_branch_to_be_rebased(opts, switch_to))
- return -1;
-
- if (get_revision_ranges(upstream, onto, &head_hash,
- &revisions, &shortrevisions))
- return -1;
-
- if (raw_strategies)
- parse_strategy_opts(opts, raw_strategies);
-
- if (init_basic_state(opts, head_name, onto, head_hash)) {
- free(revisions);
- free(shortrevisions);
-
- return -1;
- }
-
- if (!upstream && squash_onto)
- write_file(path_squash_onto(), "%s\n", squash_onto);
-
- todo_list = fopen(rebase_path_todo(), "w");
- if (!todo_list) {
- free(revisions);
- free(shortrevisions);
-
- return error_errno(_("could not open %s"), rebase_path_todo());
- }
-
- argv_array_pushl(&make_script_args, "", revisions, NULL);
- if (restrict_revision)
- argv_array_push(&make_script_args, restrict_revision);
-
- ret = sequencer_make_script(the_repository, todo_list,
- make_script_args.argc, make_script_args.argv,
- flags);
- fclose(todo_list);
-
- if (ret)
- error(_("could not generate todo list"));
- else {
- discard_cache();
- ret = complete_action(the_repository, opts, flags,
- shortrevisions, onto_name, onto,
- head_hash, cmd, autosquash);
- }
-
- free(revisions);
- free(shortrevisions);
- argv_array_clear(&make_script_args);
-
- return ret;
-}
-
-static const char * const builtin_rebase_interactive_usage[] = {
- N_("git rebase--interactive [<options>]"),
- NULL
-};
-
-int cmd_rebase__interactive(int argc, const char **argv, const char *prefix)
-{
- struct replay_opts opts = REPLAY_OPTS_INIT;
- unsigned flags = 0, keep_empty = 0, rebase_merges = 0, autosquash = 0;
- int abbreviate_commands = 0, rebase_cousins = -1, ret = 0;
- const char *onto = NULL, *onto_name = NULL, *restrict_revision = NULL,
- *squash_onto = NULL, *upstream = NULL, *head_name = NULL,
- *switch_to = NULL, *cmd = NULL;
- char *raw_strategies = NULL;
- enum {
- NONE = 0, CONTINUE, SKIP, EDIT_TODO, SHOW_CURRENT_PATCH,
- SHORTEN_OIDS, EXPAND_OIDS, CHECK_TODO_LIST, REARRANGE_SQUASH, ADD_EXEC
- } command = 0;
- struct option options[] = {
- OPT_BOOL(0, "ff", &opts.allow_ff, N_("allow fast-forward")),
- OPT_BOOL(0, "keep-empty", &keep_empty, N_("keep empty commits")),
- OPT_BOOL(0, "allow-empty-message", &opts.allow_empty_message,
- N_("allow commits with empty messages")),
- OPT_BOOL(0, "rebase-merges", &rebase_merges, N_("rebase merge commits")),
- OPT_BOOL(0, "rebase-cousins", &rebase_cousins,
- N_("keep original branch points of cousins")),
- OPT_BOOL(0, "autosquash", &autosquash,
- N_("move commits that begin with squash!/fixup!")),
- OPT_BOOL(0, "signoff", &opts.signoff, N_("sign commits")),
- OPT__VERBOSE(&opts.verbose, N_("be verbose")),
- OPT_CMDMODE(0, "continue", &command, N_("continue rebase"),
- CONTINUE),
- OPT_CMDMODE(0, "skip", &command, N_("skip commit"), SKIP),
- OPT_CMDMODE(0, "edit-todo", &command, N_("edit the todo list"),
- EDIT_TODO),
- OPT_CMDMODE(0, "show-current-patch", &command, N_("show the current patch"),
- SHOW_CURRENT_PATCH),
- OPT_CMDMODE(0, "shorten-ids", &command,
- N_("shorten commit ids in the todo list"), SHORTEN_OIDS),
- OPT_CMDMODE(0, "expand-ids", &command,
- N_("expand commit ids in the todo list"), EXPAND_OIDS),
- OPT_CMDMODE(0, "check-todo-list", &command,
- N_("check the todo list"), CHECK_TODO_LIST),
- OPT_CMDMODE(0, "rearrange-squash", &command,
- N_("rearrange fixup/squash lines"), REARRANGE_SQUASH),
- OPT_CMDMODE(0, "add-exec-commands", &command,
- N_("insert exec commands in todo list"), ADD_EXEC),
- OPT_STRING(0, "onto", &onto, N_("onto"), N_("onto")),
- OPT_STRING(0, "restrict-revision", &restrict_revision,
- N_("restrict-revision"), N_("restrict revision")),
- OPT_STRING(0, "squash-onto", &squash_onto, N_("squash-onto"),
- N_("squash onto")),
- OPT_STRING(0, "upstream", &upstream, N_("upstream"),
- N_("the upstream commit")),
- OPT_STRING(0, "head-name", &head_name, N_("head-name"), N_("head name")),
- { OPTION_STRING, 'S', "gpg-sign", &opts.gpg_sign, N_("key-id"),
- N_("GPG-sign commits"),
- PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
- OPT_STRING(0, "strategy", &opts.strategy, N_("strategy"),
- N_("rebase strategy")),
- OPT_STRING(0, "strategy-opts", &raw_strategies, N_("strategy-opts"),
- N_("strategy options")),
- OPT_STRING(0, "switch-to", &switch_to, N_("switch-to"),
- N_("the branch or commit to checkout")),
- OPT_STRING(0, "onto-name", &onto_name, N_("onto-name"), N_("onto name")),
- OPT_STRING(0, "cmd", &cmd, N_("cmd"), N_("the command to run")),
- OPT_RERERE_AUTOUPDATE(&opts.allow_rerere_auto),
- OPT_BOOL(0, "reschedule-failed-exec", &opts.reschedule_failed_exec,
- N_("automatically re-schedule any `exec` that fails")),
- OPT_END()
- };
-
- sequencer_init_config(&opts);
- git_config_get_bool("rebase.abbreviatecommands", &abbreviate_commands);
-
- opts.action = REPLAY_INTERACTIVE_REBASE;
- opts.allow_ff = 1;
- opts.allow_empty = 1;
-
- if (argc == 1)
- usage_with_options(builtin_rebase_interactive_usage, options);
-
- argc = parse_options(argc, argv, NULL, options,
- builtin_rebase_interactive_usage, PARSE_OPT_KEEP_ARGV0);
-
- opts.gpg_sign = xstrdup_or_null(opts.gpg_sign);
-
- flags |= keep_empty ? TODO_LIST_KEEP_EMPTY : 0;
- flags |= abbreviate_commands ? TODO_LIST_ABBREVIATE_CMDS : 0;
- flags |= rebase_merges ? TODO_LIST_REBASE_MERGES : 0;
- flags |= rebase_cousins > 0 ? TODO_LIST_REBASE_COUSINS : 0;
- flags |= command == SHORTEN_OIDS ? TODO_LIST_SHORTEN_IDS : 0;
-
- if (rebase_cousins >= 0 && !rebase_merges)
- warning(_("--[no-]rebase-cousins has no effect without "
- "--rebase-merges"));
-
- switch (command) {
- case NONE:
- if (!onto && !upstream)
- die(_("a base commit must be provided with --upstream or --onto"));
-
- ret = do_interactive_rebase(&opts, flags, switch_to, upstream, onto,
- onto_name, squash_onto, head_name, restrict_revision,
- raw_strategies, cmd, autosquash);
- break;
- case SKIP: {
- struct string_list merge_rr = STRING_LIST_INIT_DUP;
-
- rerere_clear(the_repository, &merge_rr);
- /* fallthrough */
- case CONTINUE:
- ret = sequencer_continue(the_repository, &opts);
- break;
- }
- case EDIT_TODO:
- ret = edit_todo_list(the_repository, flags);
- break;
- case SHOW_CURRENT_PATCH: {
- struct child_process cmd = CHILD_PROCESS_INIT;
-
- cmd.git_cmd = 1;
- argv_array_pushl(&cmd.args, "show", "REBASE_HEAD", "--", NULL);
- ret = run_command(&cmd);
-
- break;
- }
- case SHORTEN_OIDS:
- case EXPAND_OIDS:
- ret = transform_todos(the_repository, flags);
- break;
- case CHECK_TODO_LIST:
- ret = check_todo_list(the_repository);
- break;
- case REARRANGE_SQUASH:
- ret = rearrange_squash(the_repository);
- break;
- case ADD_EXEC:
- ret = sequencer_add_exec_commands(the_repository, cmd);
- break;
- default:
- BUG("invalid command '%d'", command);
- }
-
- return !!ret;
-}
#include "commit-reach.h"
#include "rerere.h"
#include "branch.h"
+#include "sequencer.h"
+#include "rebase-interactive.h"
static char const * const builtin_rebase_usage[] = {
N_("git rebase [-i] [options] [--exec <cmd>] [--onto <newbase>] "
NULL
};
+static GIT_PATH_FUNC(path_squash_onto, "rebase-merge/squash-onto")
+static GIT_PATH_FUNC(path_interactive, "rebase-merge/interactive")
static GIT_PATH_FUNC(apply_dir, "rebase-apply")
static GIT_PATH_FUNC(merge_dir, "rebase-merge")
REBASE_PRESERVE_MERGES
};
-static int use_builtin_rebase(void)
-{
- struct child_process cp = CHILD_PROCESS_INIT;
- struct strbuf out = STRBUF_INIT;
- int ret, env = git_env_bool("GIT_TEST_REBASE_USE_BUILTIN", -1);
-
- if (env != -1)
- return env;
-
- argv_array_pushl(&cp.args,
- "config", "--bool", "rebase.usebuiltin", NULL);
- cp.git_cmd = 1;
- if (capture_command(&cp, &out, 6)) {
- strbuf_release(&out);
- return 1;
- }
-
- strbuf_trim(&out);
- ret = !strcmp("true", out.buf);
- strbuf_release(&out);
- return ret;
-}
-
struct rebase_options {
enum rebase_type type;
const char *state_dir;
char *strategy, *strategy_opts;
struct strbuf git_format_patch_opt;
int reschedule_failed_exec;
+ int use_legacy_rebase;
+};
+
+#define REBASE_OPTIONS_INIT { \
+ .type = REBASE_UNSPECIFIED, \
+ .flags = REBASE_NO_QUIET, \
+ .git_am_opts = ARGV_ARRAY_INIT, \
+ .git_format_patch_opt = STRBUF_INIT \
+ }
+
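+/* Translate the rebase options into the sequencer's replay_opts. */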
+static struct replay_opts get_replay_opts(const struct rebase_options *opts)
+{
+ struct replay_opts replay = REPLAY_OPTS_INIT;
+
+ replay.action = REPLAY_INTERACTIVE_REBASE;
+ sequencer_init_config(&replay);
+
+ replay.signoff = opts->signoff;
+ replay.allow_ff = !(opts->flags & REBASE_FORCE);
+ if (opts->allow_rerere_autoupdate)
+ replay.allow_rerere_auto = opts->allow_rerere_autoupdate;
+ replay.allow_empty = 1;
+ replay.allow_empty_message = opts->allow_empty_message;
+ replay.verbose = opts->flags & REBASE_VERBOSE;
+ replay.reschedule_failed_exec = opts->reschedule_failed_exec;
+ replay.gpg_sign = xstrdup_or_null(opts->gpg_sign_opt);
+ replay.strategy = opts->strategy;
+ if (opts->strategy_opts)
+ parse_strategy_opts(&replay, opts->strategy_opts);
+
+ return replay;
+}
+
+enum action {
+ ACTION_NONE = 0,
+ ACTION_CONTINUE,
+ ACTION_SKIP,
+ ACTION_ABORT,
+ ACTION_QUIT,
+ ACTION_EDIT_TODO,
+ ACTION_SHOW_CURRENT_PATCH,
+ ACTION_SHORTEN_OIDS,
+ ACTION_EXPAND_OIDS,
+ ACTION_CHECK_TODO_LIST,
+ ACTION_REARRANGE_SQUASH,
+ ACTION_ADD_EXEC
};
+static const char *action_names[] = { "undefined",
+ "continue",
+ "skip",
+ "abort",
+ "quit",
+ "edit_todo",
+ "show_current_patch" };
+
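+/*
+ * Read the todo list from disk, insert the given exec commands and
+ * write it back.
+ */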
+static int add_exec_commands(struct string_list *commands)
+{
+ const char *todo_file = rebase_path_todo();
+ struct todo_list todo_list = TODO_LIST_INIT;
+ int res;
+
+ if (strbuf_read_file(&todo_list.buf, todo_file, 0) < 0)
+ return error_errno(_("could not read '%s'."), todo_file);
+
+ if (todo_list_parse_insn_buffer(the_repository, todo_list.buf.buf,
+ &todo_list)) {
+ todo_list_release(&todo_list);
+ return error(_("unusable todo list: '%s'"), todo_file);
+ }
+
+ todo_list_add_exec_commands(&todo_list, commands);
+ res = todo_list_write_to_file(the_repository, &todo_list,
+ todo_file, NULL, NULL, -1, 0);
+ todo_list_release(&todo_list);
+
+ if (res)
+ return error_errno(_("could not write '%s'."), todo_file);
+ return 0;
+}
+
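+/*
+ * Read the todo list from disk, move fixup!/squash! lines next to the
+ * commits they modify and write it back.
+ */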
+static int rearrange_squash_in_todo_file(void)
+{
+ const char *todo_file = rebase_path_todo();
+ struct todo_list todo_list = TODO_LIST_INIT;
+ int res = 0;
+
+ if (strbuf_read_file(&todo_list.buf, todo_file, 0) < 0)
+ return error_errno(_("could not read '%s'."), todo_file);
+ if (todo_list_parse_insn_buffer(the_repository, todo_list.buf.buf,
+ &todo_list)) {
+ todo_list_release(&todo_list);
+ return error(_("unusable todo list: '%s'"), todo_file);
+ }
+
+ res = todo_list_rearrange_squash(&todo_list);
+ if (!res)
+ res = todo_list_write_to_file(the_repository, &todo_list,
+ todo_file, NULL, NULL, -1, 0);
+
+ todo_list_release(&todo_list);
+
+ if (res)
+ return error_errno(_("could not write '%s'."), todo_file);
+ return 0;
+}
+
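+/*
+ * Re-write the todo list using the given flags, e.g. to shorten or
+ * expand the commit ids it contains.
+ */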
+static int transform_todo_file(unsigned flags)
+{
+ const char *todo_file = rebase_path_todo();
+ struct todo_list todo_list = TODO_LIST_INIT;
+ int res;
+
+ if (strbuf_read_file(&todo_list.buf, todo_file, 0) < 0)
+ return error_errno(_("could not read '%s'."), todo_file);
+
+ if (todo_list_parse_insn_buffer(the_repository, todo_list.buf.buf,
+ &todo_list)) {
+ todo_list_release(&todo_list);
+ return error(_("unusable todo list: '%s'"), todo_file);
+ }
+
+ res = todo_list_write_to_file(the_repository, &todo_list, todo_file,
+ NULL, NULL, -1, flags);
+ todo_list_release(&todo_list);
+
+ if (res)
+ return error_errno(_("could not write '%s'."), todo_file);
+ return 0;
+}
+
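+/*
+ * Let the user edit the todo list in the configured editor and write
+ * the result back to the todo file.
+ */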
+static int edit_todo_file(unsigned flags)
+{
+ const char *todo_file = rebase_path_todo();
+ struct todo_list todo_list = TODO_LIST_INIT,
+ new_todo = TODO_LIST_INIT;
+ int res = 0;
+
+ if (strbuf_read_file(&todo_list.buf, todo_file, 0) < 0)
+ return error_errno(_("could not read '%s'."), todo_file);
+
+ strbuf_stripspace(&todo_list.buf, 1);
+ res = edit_todo_list(the_repository, &todo_list, &new_todo, NULL, NULL, flags);
+ if (!res && todo_list_write_to_file(the_repository, &new_todo, todo_file,
+ NULL, NULL, -1, flags & ~(TODO_LIST_SHORTEN_IDS)))
+ res = error_errno(_("could not write '%s'"), todo_file);
+
+ todo_list_release(&todo_list);
+ todo_list_release(&new_todo);
+
+ return res;
+}
+
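+/* Compute the <base>...HEAD revision range (and a short form) to rebase. */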
+static int get_revision_ranges(struct commit *upstream, struct commit *onto,
+ const char **head_hash,
+ char **revisions, char **shortrevisions)
+{
+ struct commit *base_rev = upstream ? upstream : onto;
+ const char *shorthead;
+ struct object_id orig_head;
+
+ if (get_oid("HEAD", &orig_head))
+ return error(_("no HEAD?"));
+
+ *head_hash = find_unique_abbrev(&orig_head, GIT_MAX_HEXSZ);
+ *revisions = xstrfmt("%s...%s", oid_to_hex(&base_rev->object.oid),
+ *head_hash);
+
+ shorthead = find_unique_abbrev(&orig_head, DEFAULT_ABBREV);
+
+ if (upstream) {
+ const char *shortrev;
+
+ shortrev = find_unique_abbrev(&base_rev->object.oid,
+ DEFAULT_ABBREV);
+
+ *shortrevisions = xstrfmt("%s..%s", shortrev, shorthead);
+ } else
+ *shortrevisions = xstrdup(shorthead);
+
+ return 0;
+}
+
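+/*
+ * Create the rebase-merge state directory, mark the rebase as
+ * interactive and write the shared state files.
+ */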
+static int init_basic_state(struct replay_opts *opts, const char *head_name,
+ struct commit *onto, const char *orig_head)
+{
+ FILE *interactive;
+
+ if (!is_directory(merge_dir()) && mkdir_in_gitdir(merge_dir()))
+ return error_errno(_("could not create temporary %s"), merge_dir());
+
+ delete_reflog("REBASE_HEAD");
+
+ interactive = fopen(path_interactive(), "w");
+ if (!interactive)
+ return error_errno(_("could not mark as interactive"));
+ fclose(interactive);
+
+ return write_basic_state(opts, head_name, onto, orig_head);
+}
+
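+/* Split the newline-separated list of --exec commands into a string_list. */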
+static void split_exec_commands(const char *cmd, struct string_list *commands)
+{
+ if (cmd && *cmd) {
+ string_list_split(commands, cmd, '\n', -1);
+
+ /*
+ * rebase.c adds a new line to cmd after every command,
+ * so here the last command is always empty.
+ */
+ string_list_remove_empty_items(commands, 0);
+ }
+}
+
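+/* Generate the todo list and start the interactive rebase via the sequencer. */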
+static int do_interactive_rebase(struct rebase_options *opts, unsigned flags)
+{
+ int ret;
+ const char *head_hash = NULL;
+ char *revisions = NULL, *shortrevisions = NULL;
+ struct argv_array make_script_args = ARGV_ARRAY_INIT;
+ struct todo_list todo_list = TODO_LIST_INIT;
+ struct replay_opts replay = get_replay_opts(opts);
+ struct string_list commands = STRING_LIST_INIT_DUP;
+
+ if (prepare_branch_to_be_rebased(the_repository, &replay,
+ opts->switch_to))
+ return -1;
+
+ if (get_revision_ranges(opts->upstream, opts->onto, &head_hash,
+ &revisions, &shortrevisions))
+ return -1;
+
+ if (init_basic_state(&replay,
+ opts->head_name ? opts->head_name : "detached HEAD",
+ opts->onto, head_hash)) {
+ free(revisions);
+ free(shortrevisions);
+
+ return -1;
+ }
+
+ if (!opts->upstream && opts->squash_onto)
+ write_file(path_squash_onto(), "%s\n",
+ oid_to_hex(opts->squash_onto));
+
+ argv_array_pushl(&make_script_args, "", revisions, NULL);
+ if (opts->restrict_revision)
+ argv_array_push(&make_script_args,
+ oid_to_hex(&opts->restrict_revision->object.oid));
+
+ ret = sequencer_make_script(the_repository, &todo_list.buf,
+ make_script_args.argc, make_script_args.argv,
+ flags);
+
+ if (ret)
+ error(_("could not generate todo list"));
+ else {
+ discard_cache();
+ if (todo_list_parse_insn_buffer(the_repository, todo_list.buf.buf,
+ &todo_list))
+ BUG("unusable todo list");
+
+ split_exec_commands(opts->cmd, &commands);
+ ret = complete_action(the_repository, &replay, flags,
+ shortrevisions, opts->onto_name, opts->onto, head_hash,
+ &commands, opts->autosquash, &todo_list);
+ }
+
+ string_list_clear(&commands, 0);
+ free(revisions);
+ free(shortrevisions);
+ todo_list_release(&todo_list);
+ argv_array_clear(&make_script_args);
+
+ return ret;
+}
+
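+/*
+ * Dispatch the rebase--interactive sub-commands (continue, skip,
+ * edit-todo, ...) using the in-process sequencer machinery.
+ */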
+static int run_rebase_interactive(struct rebase_options *opts,
+ enum action command)
+{
+ unsigned flags = 0;
+ int abbreviate_commands = 0, ret = 0;
+
+ git_config_get_bool("rebase.abbreviatecommands", &abbreviate_commands);
+
+ flags |= opts->keep_empty ? TODO_LIST_KEEP_EMPTY : 0;
+ flags |= abbreviate_commands ? TODO_LIST_ABBREVIATE_CMDS : 0;
+ flags |= opts->rebase_merges ? TODO_LIST_REBASE_MERGES : 0;
+ flags |= opts->rebase_cousins > 0 ? TODO_LIST_REBASE_COUSINS : 0;
+ flags |= command == ACTION_SHORTEN_OIDS ? TODO_LIST_SHORTEN_IDS : 0;
+
+ switch (command) {
+ case ACTION_NONE: {
+ if (!opts->onto && !opts->upstream)
+ die(_("a base commit must be provided with --upstream or --onto"));
+
+ ret = do_interactive_rebase(opts, flags);
+ break;
+ }
+ case ACTION_SKIP: {
+ struct string_list merge_rr = STRING_LIST_INIT_DUP;
+
+ rerere_clear(the_repository, &merge_rr);
+ }
+ /* fallthrough */
+ case ACTION_CONTINUE: {
+ struct replay_opts replay_opts = get_replay_opts(opts);
+
+ ret = sequencer_continue(the_repository, &replay_opts);
+ break;
+ }
+ case ACTION_EDIT_TODO:
+ ret = edit_todo_file(flags);
+ break;
+ case ACTION_SHOW_CURRENT_PATCH: {
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ cmd.git_cmd = 1;
+ argv_array_pushl(&cmd.args, "show", "REBASE_HEAD", "--", NULL);
+ ret = run_command(&cmd);
+
+ break;
+ }
+ case ACTION_SHORTEN_OIDS:
+ case ACTION_EXPAND_OIDS:
+ ret = transform_todo_file(flags);
+ break;
+ case ACTION_CHECK_TODO_LIST:
+ ret = check_todo_list_from_file(the_repository);
+ break;
+ case ACTION_REARRANGE_SQUASH:
+ ret = rearrange_squash_in_todo_file();
+ break;
+ case ACTION_ADD_EXEC: {
+ struct string_list commands = STRING_LIST_INIT_DUP;
+
+ split_exec_commands(opts->cmd, &commands);
+ ret = add_exec_commands(&commands);
+ string_list_clear(&commands, 0);
+ break;
+ }
+ default:
+ BUG("invalid command '%d'", command);
+ }
+
+ return ret;
+}
+
+static const char * const builtin_rebase_interactive_usage[] = {
+ N_("git rebase--interactive [<options>]"),
+ NULL
+};
+
+int cmd_rebase__interactive(int argc, const char **argv, const char *prefix)
+{
+ struct rebase_options opts = REBASE_OPTIONS_INIT;
+ struct object_id squash_onto = null_oid;
+ enum action command = ACTION_NONE;
+ struct option options[] = {
+ OPT_NEGBIT(0, "ff", &opts.flags, N_("allow fast-forward"),
+ REBASE_FORCE),
+ OPT_BOOL(0, "keep-empty", &opts.keep_empty, N_("keep empty commits")),
+ OPT_BOOL(0, "allow-empty-message", &opts.allow_empty_message,
+ N_("allow commits with empty messages")),
+ OPT_BOOL(0, "rebase-merges", &opts.rebase_merges, N_("rebase merge commits")),
+ OPT_BOOL(0, "rebase-cousins", &opts.rebase_cousins,
+ N_("keep original branch points of cousins")),
+ OPT_BOOL(0, "autosquash", &opts.autosquash,
+ N_("move commits that begin with squash!/fixup!")),
+ OPT_BOOL(0, "signoff", &opts.signoff, N_("sign commits")),
+ OPT_BIT('v', "verbose", &opts.flags,
+ N_("display a diffstat of what changed upstream"),
+ REBASE_NO_QUIET | REBASE_VERBOSE | REBASE_DIFFSTAT),
+ OPT_CMDMODE(0, "continue", &command, N_("continue rebase"),
+ ACTION_CONTINUE),
+ OPT_CMDMODE(0, "skip", &command, N_("skip commit"), ACTION_SKIP),
+ OPT_CMDMODE(0, "edit-todo", &command, N_("edit the todo list"),
+ ACTION_EDIT_TODO),
+ OPT_CMDMODE(0, "show-current-patch", &command, N_("show the current patch"),
+ ACTION_SHOW_CURRENT_PATCH),
+ OPT_CMDMODE(0, "shorten-ids", &command,
+ N_("shorten commit ids in the todo list"), ACTION_SHORTEN_OIDS),
+ OPT_CMDMODE(0, "expand-ids", &command,
+ N_("expand commit ids in the todo list"), ACTION_EXPAND_OIDS),
+ OPT_CMDMODE(0, "check-todo-list", &command,
+ N_("check the todo list"), ACTION_CHECK_TODO_LIST),
+ OPT_CMDMODE(0, "rearrange-squash", &command,
+ N_("rearrange fixup/squash lines"), ACTION_REARRANGE_SQUASH),
+ OPT_CMDMODE(0, "add-exec-commands", &command,
+ N_("insert exec commands in todo list"), ACTION_ADD_EXEC),
+ { OPTION_CALLBACK, 0, "onto", &opts.onto, N_("onto"), N_("onto"),
+ PARSE_OPT_NONEG, parse_opt_commit, 0 },
+ { OPTION_CALLBACK, 0, "restrict-revision", &opts.restrict_revision,
+ N_("restrict-revision"), N_("restrict revision"),
+ PARSE_OPT_NONEG, parse_opt_commit, 0 },
+ { OPTION_CALLBACK, 0, "squash-onto", &squash_onto, N_("squash-onto"),
+ N_("squash onto"), PARSE_OPT_NONEG, parse_opt_object_id, 0 },
+ { OPTION_CALLBACK, 0, "upstream", &opts.upstream, N_("upstream"),
+ N_("the upstream commit"), PARSE_OPT_NONEG, parse_opt_commit,
+ 0 },
+ OPT_STRING(0, "head-name", &opts.head_name, N_("head-name"), N_("head name")),
+ { OPTION_STRING, 'S', "gpg-sign", &opts.gpg_sign_opt, N_("key-id"),
+ N_("GPG-sign commits"),
+ PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
+ OPT_STRING(0, "strategy", &opts.strategy, N_("strategy"),
+ N_("rebase strategy")),
+ OPT_STRING(0, "strategy-opts", &opts.strategy_opts, N_("strategy-opts"),
+ N_("strategy options")),
+ OPT_STRING(0, "switch-to", &opts.switch_to, N_("switch-to"),
+ N_("the branch or commit to checkout")),
+ OPT_STRING(0, "onto-name", &opts.onto_name, N_("onto-name"), N_("onto name")),
+ OPT_STRING(0, "cmd", &opts.cmd, N_("cmd"), N_("the command to run")),
+ OPT_RERERE_AUTOUPDATE(&opts.allow_rerere_autoupdate),
+ OPT_BOOL(0, "reschedule-failed-exec", &opts.reschedule_failed_exec,
+ N_("automatically re-schedule any `exec` that fails")),
+ OPT_END()
+ };
+
+ opts.rebase_cousins = -1;
+
+ if (argc == 1)
+ usage_with_options(builtin_rebase_interactive_usage, options);
+
+ argc = parse_options(argc, argv, NULL, options,
+ builtin_rebase_interactive_usage, PARSE_OPT_KEEP_ARGV0);
+
+ if (!is_null_oid(&squash_onto))
+ opts.squash_onto = &squash_onto;
+
+ if (opts.rebase_cousins >= 0 && !opts.rebase_merges)
+ warning(_("--[no-]rebase-cousins has no effect without "
+ "--rebase-merges"));
+
+ return !!run_rebase_interactive(&opts, command);
+}
+
static int is_interactive(struct rebase_options *opts)
{
return opts->type == REBASE_INTERACTIVE ||
&buf))
return -1;
if (!strcmp(buf.buf, "--rerere-autoupdate"))
- opts->allow_rerere_autoupdate = 1;
+ opts->allow_rerere_autoupdate = RERERE_AUTOUPDATE;
else if (!strcmp(buf.buf, "--no-rerere-autoupdate"))
- opts->allow_rerere_autoupdate = 0;
+ opts->allow_rerere_autoupdate = RERERE_NOAUTOUPDATE;
else
warning(_("ignoring invalid allow_rerere_autoupdate: "
"'%s'"), buf.buf);
- } else
- opts->allow_rerere_autoupdate = -1;
+ }
if (file_exists(state_dir_path("gpg_sign_opt", opts))) {
strbuf_reset(&buf);
return 0;
}
-static int write_basic_state(struct rebase_options *opts)
+static int rebase_write_basic_state(struct rebase_options *opts)
{
write_file(state_dir_path("head-name", opts), "%s",
opts->head_name ? opts->head_name : "detached HEAD");
if (opts->strategy_opts)
write_file(state_dir_path("strategy_opts", opts), "%s",
opts->strategy_opts);
- if (opts->allow_rerere_autoupdate >= 0)
+ if (opts->allow_rerere_autoupdate > 0)
write_file(state_dir_path("allow_rerere_autoupdate", opts),
"-%s-rerere-autoupdate",
- opts->allow_rerere_autoupdate ? "" : "-no");
+ opts->allow_rerere_autoupdate == RERERE_AUTOUPDATE ?
+ "" : "-no");
if (opts->gpg_sign_opt)
write_file(state_dir_path("gpg_sign_opt", opts), "%s",
opts->gpg_sign_opt);
#define RESET_HEAD_HARD (1<<1)
#define RESET_HEAD_RUN_POST_CHECKOUT_HOOK (1<<2)
#define RESET_HEAD_REFS_ONLY (1<<3)
+#define RESET_ORIG_HEAD (1<<4)
static int reset_head(struct object_id *oid, const char *action,
const char *switch_to_branch, unsigned flags,
unsigned reset_hard = flags & RESET_HEAD_HARD;
unsigned run_hook = flags & RESET_HEAD_RUN_POST_CHECKOUT_HOOK;
unsigned refs_only = flags & RESET_HEAD_REFS_ONLY;
+ unsigned update_orig_head = flags & RESET_ORIG_HEAD;
struct object_id head_oid;
struct tree_desc desc[2] = { { NULL }, { NULL } };
struct lock_file lock = LOCK_INIT;
strbuf_addf(&msg, "%s: ", reflog_action ? reflog_action : "rebase");
prefix_len = msg.len;
- if (!get_oid("ORIG_HEAD", &oid_old_orig))
- old_orig = &oid_old_orig;
- if (!get_oid("HEAD", &oid_orig)) {
- orig = &oid_orig;
- if (!reflog_orig_head) {
- strbuf_addstr(&msg, "updating ORIG_HEAD");
- reflog_orig_head = msg.buf;
- }
- update_ref(reflog_orig_head, "ORIG_HEAD", orig, old_orig, 0,
- UPDATE_REFS_MSG_ON_ERR);
- } else if (old_orig)
- delete_ref(NULL, "ORIG_HEAD", old_orig, 0);
+ if (update_orig_head) {
+ if (!get_oid("ORIG_HEAD", &oid_old_orig))
+ old_orig = &oid_old_orig;
+ if (!get_oid("HEAD", &oid_orig)) {
+ orig = &oid_orig;
+ if (!reflog_orig_head) {
+ strbuf_addstr(&msg, "updating ORIG_HEAD");
+ reflog_orig_head = msg.buf;
+ }
+ update_ref(reflog_orig_head, "ORIG_HEAD", orig,
+ old_orig, 0, UPDATE_REFS_MSG_ON_ERR);
+ } else if (old_orig)
+ delete_ref(NULL, "ORIG_HEAD", old_orig, 0);
+ }
+
if (!reflog_head) {
strbuf_setlen(&msg, prefix_len);
strbuf_addstr(&msg, "updating HEAD");
detach_head ? REF_NO_DEREF : 0,
UPDATE_REFS_MSG_ON_ERR);
else {
- ret = update_ref(reflog_orig_head, switch_to_branch, oid,
+ ret = update_ref(reflog_head, switch_to_branch, oid,
NULL, 0, UPDATE_REFS_MSG_ON_ERR);
if (!ret)
ret = create_symref("HEAD", switch_to_branch,
argv_array_push(&am.args, "--rebasing");
argv_array_pushf(&am.args, "--resolvemsg=%s", resolvemsg);
argv_array_push(&am.args, "--patch-format=mboxrd");
- if (opts->allow_rerere_autoupdate > 0)
+ if (opts->allow_rerere_autoupdate == RERERE_AUTOUPDATE)
argv_array_push(&am.args, "--rerere-autoupdate");
- else if (opts->allow_rerere_autoupdate == 0)
+ else if (opts->allow_rerere_autoupdate == RERERE_NOAUTOUPDATE)
argv_array_push(&am.args, "--no-rerere-autoupdate");
if (opts->gpg_sign_opt)
argv_array_push(&am.args, opts->gpg_sign_opt);
}
if (is_directory(opts->state_dir))
- write_basic_state(opts);
+ rebase_write_basic_state(opts);
return status;
}
-static int run_specific_rebase(struct rebase_options *opts)
+static int run_specific_rebase(struct rebase_options *opts, enum action action)
{
const char *argv[] = { NULL, NULL };
struct strbuf script_snippet = STRBUF_INIT, buf = STRBUF_INIT;
if (opts->type == REBASE_INTERACTIVE) {
/* Run builtin interactive rebase */
- struct child_process child = CHILD_PROCESS_INIT;
-
- argv_array_pushf(&child.env_array, "GIT_CHERRY_PICK_HELP=%s",
- resolvemsg);
+ setenv("GIT_CHERRY_PICK_HELP", resolvemsg, 1);
if (!(opts->flags & REBASE_INTERACTIVE_EXPLICIT)) {
- argv_array_push(&child.env_array,
- "GIT_SEQUENCE_EDITOR=:");
+ setenv("GIT_SEQUENCE_EDITOR", ":", 1);
opts->autosquash = 0;
}
+ if (opts->gpg_sign_opt) {
+ /* remove the leading "-S" */
+ char *tmp = xstrdup(opts->gpg_sign_opt + 2);
+ free(opts->gpg_sign_opt);
+ opts->gpg_sign_opt = tmp;
+ }
- child.git_cmd = 1;
- argv_array_push(&child.args, "rebase--interactive");
-
- if (opts->action)
- argv_array_pushf(&child.args, "--%s", opts->action);
- if (opts->keep_empty)
- argv_array_push(&child.args, "--keep-empty");
- if (opts->rebase_merges)
- argv_array_push(&child.args, "--rebase-merges");
- if (opts->rebase_cousins)
- argv_array_push(&child.args, "--rebase-cousins");
- if (opts->autosquash)
- argv_array_push(&child.args, "--autosquash");
- if (opts->flags & REBASE_VERBOSE)
- argv_array_push(&child.args, "--verbose");
- if (opts->flags & REBASE_FORCE)
- argv_array_push(&child.args, "--no-ff");
- if (opts->restrict_revision)
- argv_array_pushf(&child.args,
- "--restrict-revision=^%s",
- oid_to_hex(&opts->restrict_revision->object.oid));
- if (opts->upstream)
- argv_array_pushf(&child.args, "--upstream=%s",
- oid_to_hex(&opts->upstream->object.oid));
- if (opts->onto)
- argv_array_pushf(&child.args, "--onto=%s",
- oid_to_hex(&opts->onto->object.oid));
- if (opts->squash_onto)
- argv_array_pushf(&child.args, "--squash-onto=%s",
- oid_to_hex(opts->squash_onto));
- if (opts->onto_name)
- argv_array_pushf(&child.args, "--onto-name=%s",
- opts->onto_name);
- argv_array_pushf(&child.args, "--head-name=%s",
- opts->head_name ?
- opts->head_name : "detached HEAD");
- if (opts->strategy)
- argv_array_pushf(&child.args, "--strategy=%s",
- opts->strategy);
- if (opts->strategy_opts)
- argv_array_pushf(&child.args, "--strategy-opts=%s",
- opts->strategy_opts);
- if (opts->switch_to)
- argv_array_pushf(&child.args, "--switch-to=%s",
- opts->switch_to);
- if (opts->cmd)
- argv_array_pushf(&child.args, "--cmd=%s", opts->cmd);
- if (opts->allow_empty_message)
- argv_array_push(&child.args, "--allow-empty-message");
- if (opts->allow_rerere_autoupdate > 0)
- argv_array_push(&child.args, "--rerere-autoupdate");
- else if (opts->allow_rerere_autoupdate == 0)
- argv_array_push(&child.args, "--no-rerere-autoupdate");
- if (opts->gpg_sign_opt)
- argv_array_push(&child.args, opts->gpg_sign_opt);
- if (opts->signoff)
- argv_array_push(&child.args, "--signoff");
- if (opts->reschedule_failed_exec)
- argv_array_push(&child.args, "--reschedule-failed-exec");
-
- status = run_command(&child);
+ status = run_rebase_interactive(opts, action);
goto finished_rebase;
}
add_var(&script_snippet, "action", opts->action ? opts->action : "");
add_var(&script_snippet, "signoff", opts->signoff ? "--signoff" : "");
add_var(&script_snippet, "allow_rerere_autoupdate",
- opts->allow_rerere_autoupdate < 0 ? "" :
opts->allow_rerere_autoupdate ?
- "--rerere-autoupdate" : "--no-rerere-autoupdate");
+ opts->allow_rerere_autoupdate == RERERE_AUTOUPDATE ?
+ "--rerere-autoupdate" : "--no-rerere-autoupdate" : "");
add_var(&script_snippet, "keep_empty", opts->keep_empty ? "yes" : "");
add_var(&script_snippet, "autosquash", opts->autosquash ? "t" : "");
add_var(&script_snippet, "gpg_sign_opt", opts->gpg_sign_opt);
return 0;
}
+ if (!strcmp(var, "rebase.usebuiltin")) {
+ opts->use_legacy_rebase = !git_config_bool(var, value);
+ return 0;
+ }
+
return git_default_config(var, value, data);
}
int cmd_rebase(int argc, const char **argv, const char *prefix)
{
- struct rebase_options options = {
- .type = REBASE_UNSPECIFIED,
- .flags = REBASE_NO_QUIET,
- .git_am_opts = ARGV_ARRAY_INIT,
- .allow_rerere_autoupdate = -1,
- .allow_empty_message = 1,
- .git_format_patch_opt = STRBUF_INIT,
- };
+ struct rebase_options options = REBASE_OPTIONS_INIT;
const char *branch_name;
int ret, flags, total_argc, in_progress = 0;
int ok_to_skip_pre_rebase = 0;
struct strbuf revisions = STRBUF_INIT;
struct strbuf buf = STRBUF_INIT;
struct object_id merge_base;
- enum {
- NO_ACTION,
- ACTION_CONTINUE,
- ACTION_SKIP,
- ACTION_ABORT,
- ACTION_QUIT,
- ACTION_EDIT_TODO,
- ACTION_SHOW_CURRENT_PATCH,
- } action = NO_ACTION;
+ enum action action = ACTION_NONE;
const char *gpg_sign = NULL;
struct string_list exec = STRING_LIST_INIT_NODUP;
const char *rebase_merges = NULL;
PARSE_OPT_NOARG | PARSE_OPT_NONEG,
parse_opt_interactive },
OPT_SET_INT('p', "preserve-merges", &options.type,
- N_("try to recreate merges instead of ignoring "
- "them"), REBASE_PRESERVE_MERGES),
- OPT_BOOL(0, "rerere-autoupdate",
- &options.allow_rerere_autoupdate,
- N_("allow rerere to update index with resolved "
- "conflict")),
+ N_("(DEPRECATED) try to recreate merges instead of "
+ "ignoring them"), REBASE_PRESERVE_MERGES),
+ OPT_RERERE_AUTOUPDATE(&options.allow_rerere_autoupdate),
OPT_BOOL('k', "keep-empty", &options.keep_empty,
N_("preserve empty commits during rebase")),
OPT_BOOL(0, "autosquash", &options.autosquash,
};
int i;
- /*
- * NEEDSWORK: Once the builtin rebase has been tested enough
- * and git-legacy-rebase.sh is retired to contrib/, this preamble
- * can be removed.
- */
-
- if (!use_builtin_rebase()) {
- const char *path = mkpath("%s/git-legacy-rebase",
- git_exec_path());
-
- if (sane_execvp(path, (char **)argv) < 0)
- die_errno(_("could not exec %s"), path);
- else
- BUG("sane_execvp() returned???");
- }
-
if (argc == 2 && !strcmp(argv[1], "-h"))
usage_with_options(builtin_rebase_usage,
builtin_rebase_options);
trace_repo_setup(prefix);
setup_work_tree();
+ options.allow_empty_message = 1;
git_config(rebase_config, &options);
+ if (options.use_legacy_rebase ||
+ !git_env_bool("GIT_TEST_REBASE_USE_BUILTIN", -1))
+ warning(_("the rebase.useBuiltin support has been removed!\n"
+ "See its entry in 'git help config' for details."));
+
strbuf_reset(&buf);
strbuf_addf(&buf, "%s/applying", apply_dir());
if(file_exists(buf.buf))
builtin_rebase_options,
builtin_rebase_usage, 0);
- if (action != NO_ACTION && total_argc != 2) {
+ if (action != ACTION_NONE && total_argc != 2) {
usage_with_options(builtin_rebase_usage,
builtin_rebase_options);
}
usage_with_options(builtin_rebase_usage,
builtin_rebase_options);
- if (action != NO_ACTION && !in_progress)
+ if (options.type == REBASE_PRESERVE_MERGES)
+ warning(_("git rebase --preserve-merges is deprecated. "
+ "Use --rebase-merges instead."));
+
+ if (action != ACTION_NONE && !in_progress)
die(_("No rebase in progress?"));
setenv(GIT_REFLOG_ACTION_ENVIRONMENT, "rebase", 0);
die(_("The --edit-todo action can only be used during "
"interactive rebase."));
+ if (trace2_is_enabled()) {
+ if (is_interactive(&options))
+ trace2_cmd_mode("interactive");
+ else if (exec.nr)
+ trace2_cmd_mode("interactive-exec");
+ else
+ trace2_cmd_mode(action_names[action]);
+ }
+
switch (action) {
case ACTION_CONTINUE: {
struct object_id head;
options.action = "show-current-patch";
options.dont_finish_rebase = 1;
goto run_rebase;
- case NO_ACTION:
+ case ACTION_NONE:
break;
default:
BUG("action: %d", action);
branch_name = options.head_name;
} else {
- free(options.head_name);
- options.head_name = NULL;
+ FREE_AND_NULL(options.head_name);
branch_name = "HEAD";
}
if (get_oid("HEAD", &options.orig_head))
strbuf_addf(&msg, "%s: checkout %s",
getenv(GIT_REFLOG_ACTION_ENVIRONMENT), options.onto_name);
if (reset_head(&options.onto->object.oid, "checkout", NULL,
- RESET_HEAD_DETACH | RESET_HEAD_RUN_POST_CHECKOUT_HOOK,
+ RESET_HEAD_DETACH | RESET_ORIG_HEAD |
+ RESET_HEAD_RUN_POST_CHECKOUT_HOOK,
NULL, msg.buf))
die(_("Could not detach HEAD"));
strbuf_release(&msg);
* we just fast-forwarded.
*/
strbuf_reset(&msg);
- if (!oidcmp(&merge_base, &options.orig_head)) {
+ if (oideq(&merge_base, &options.orig_head)) {
printf(_("Fast-forwarded %s to %s.\n"),
branch_name, options.onto_name);
strbuf_addf(&msg, "rebase finished: %s onto %s",
options.head_name ? options.head_name : "detached HEAD",
oid_to_hex(&options.onto->object.oid));
- reset_head(NULL, "Fast-forwarded", options.head_name, 0,
- "HEAD", msg.buf);
+ reset_head(NULL, "Fast-forwarded", options.head_name,
+ RESET_HEAD_REFS_ONLY, "HEAD", msg.buf);
strbuf_release(&msg);
ret = !!finish_rebase(&options);
goto cleanup;
options.revisions = revisions.buf;
run_rebase:
- ret = !!run_specific_rebase(&options);
+ ret = !!run_specific_rebase(&options, action);
cleanup:
strbuf_release(&revisions);
proc.argv = argv;
proc.in = -1;
proc.stdout_to_stderr = 1;
+ proc.trace2_hook_name = hook_name;
+
if (feed_state->push_options) {
int i;
for (i = 0; i < feed_state->push_options->nr; i++)
proc.stdout_to_stderr = 1;
proc.err = use_sideband ? -1 : 0;
proc.argv = argv;
+ proc.trace2_hook_name = "update";
code = start_command(&proc);
if (code)
proc.no_stdin = 1;
proc.stdout_to_stderr = 1;
proc.err = use_sideband ? -1 : 0;
+ proc.trace2_hook_name = "post-update";
if (!start_command(&proc)) {
if (use_sideband)
}
}
-static void check_aliased_update(struct command *cmd, struct string_list *list)
+static void check_aliased_update_internal(struct command *cmd,
+ struct string_list *list,
+ const char *dst_name, int flag)
{
- struct strbuf buf = STRBUF_INIT;
- const char *dst_name;
struct string_list_item *item;
struct command *dst_cmd;
- int flag;
-
- strbuf_addf(&buf, "%s%s", get_git_namespace(), cmd->ref_name);
- dst_name = resolve_ref_unsafe(buf.buf, 0, NULL, &flag);
- strbuf_release(&buf);
if (!(flag & REF_ISSYMREF))
return;
"inconsistent aliased update";
}
+static void check_aliased_update(struct command *cmd, struct string_list *list)
+{
+ struct strbuf buf = STRBUF_INIT;
+ const char *dst_name;
+ int flag;
+
+ strbuf_addf(&buf, "%s%s", get_git_namespace(), cmd->ref_name);
+ dst_name = resolve_ref_unsafe(buf.buf, 0, NULL, &flag);
+ check_aliased_update_internal(cmd, list, dst_name, flag);
+ strbuf_release(&buf);
+}
+
static void check_aliased_updates(struct command *commands)
{
struct command *cmd;
static int delta_base_offset = 1;
static int pack_kept_objects = -1;
-static int write_bitmaps;
+static int write_bitmaps = -1;
static int use_delta_islands;
static char *packdir, *packtmp;
(unpack_unreachable || (pack_everything & LOOSEN_UNREACHABLE)))
die(_("--keep-unreachable and -A are incompatible"));
+ if (write_bitmaps < 0)
+ write_bitmaps = (pack_everything & ALL_INTO_ONE) &&
+ is_bare_repository();
if (pack_kept_objects < 0)
pack_kept_objects = write_bitmaps;
data.format = REPLACE_FORMAT_MEDIUM;
else if (!strcmp(format, "long"))
data.format = REPLACE_FORMAT_LONG;
+ /*
+ * Please update _git_replace() in git-completion.bash when
+ * you add a new format.
+ */
else
return error(_("invalid replace format '%s'\n"
"valid formats are 'short', 'medium' and 'long'"),
/* prepare new parents */
for (i = 0; i < argc; i++) {
struct object_id oid;
+ struct commit *commit;
+
if (get_oid(argv[i], &oid) < 0) {
strbuf_release(&new_parents);
return error(_("not a valid object name: '%s'"),
argv[i]);
}
- if (!lookup_commit_reference(the_repository, &oid)) {
+ commit = lookup_commit_reference(the_repository, &oid);
+ if (!commit) {
strbuf_release(&new_parents);
- return error(_("could not parse %s"), argv[i]);
+ return error(_("could not parse %s as a commit"), argv[i]);
}
- strbuf_addf(&new_parents, "parent %s\n", oid_to_hex(&oid));
+ strbuf_addf(&new_parents, "parent %s\n", oid_to_hex(&commit->object.oid));
}
/* replace existing parents with new ones */
strbuf_release(&buf);
- if (oideq(&old_oid, &new_oid)) {
+ if (oideq(&commit->object.oid, &new_oid)) {
if (gentle) {
- warning(_("graft for '%s' unnecessary"), oid_to_hex(&old_oid));
+ warning(_("graft for '%s' unnecessary"),
+ oid_to_hex(&commit->object.oid));
return 0;
}
- return error(_("new commit is the same as the old one: '%s'"), oid_to_hex(&old_oid));
+ return error(_("new commit is the same as the old one: '%s'"),
+ oid_to_hex(&commit->object.oid));
}
- return replace_object_oid(old_ref, &old_oid, "replacement", &new_oid, force);
+ return replace_object_oid(old_ref, &commit->object.oid,
+ "replacement", &new_oid, force);
}
static int convert_graft_file(int force)
if (patch_mode) {
if (reset_type != NONE)
die(_("--patch is incompatible with --{hard,mixed,soft}"));
+ trace2_cmd_mode("patch-interactive");
return run_add_interactive(rev, "--patch=reset", &pathspec);
}
if (reset_type == NONE)
reset_type = MIXED; /* by default */
+ if (pathspec.nr)
+ trace2_cmd_mode("path");
+ else
+ trace2_cmd_mode(reset_type_names[reset_type]);
+
if (reset_type != SOFT && (reset_type != MIXED || get_git_work_tree()))
setup_work_tree();
int flags = quiet ? REFRESH_QUIET : REFRESH_IN_PORCELAIN;
if (read_from_tree(&pathspec, &oid, intent_to_add))
return 1;
+ the_index.updated_skipworktree = 1;
if (!quiet && get_git_work_tree()) {
uint64_t t_begin, t_delta_in_ms;
static int finish_object(struct object *obj, const char *name, void *cb_data)
{
struct rev_list_info *info = cb_data;
- if (!has_object_file(&obj->oid)) {
+ if (oid_object_info_extended(the_repository, &obj->oid, NULL, 0) < 0) {
finish_object__ma(obj);
return 1;
}
repo_init_revisions(the_repository, &revs, prefix);
revs.abbrev = DEFAULT_ABBREV;
revs.commit_format = CMIT_FMT_UNSPECIFIED;
- revs.do_not_die_on_missing_tree = 1;
/*
* Scan the argument list before invoking setup_revisions(), so that we
}
}
+ if (arg_missing_action)
+ revs.do_not_die_on_missing_tree = 1;
+
argc = setup_revisions(argc, argv, &revs, &s_r_opt);
memset(&info, 0, sizeof(info));
{
const char * const * usage_str = revert_or_cherry_pick_usage(opts);
const char *me = action_name(opts);
+ const char *cleanup_arg = NULL;
int cmd = 0;
struct option base_options[] = {
OPT_CMDMODE(0, "quit", &cmd, N_("end revert or cherry-pick sequence"), 'q'),
OPT_CMDMODE(0, "continue", &cmd, N_("resume revert or cherry-pick sequence"), 'c'),
OPT_CMDMODE(0, "abort", &cmd, N_("cancel revert or cherry-pick sequence"), 'a'),
+ OPT_CLEANUP(&cleanup_arg),
OPT_BOOL('n', "no-commit", &opts->no_commit, N_("don't automatically commit")),
OPT_BOOL('e', "edit", &opts->edit, N_("edit the commit message")),
OPT_NOOP_NOARG('r', NULL),
if (opts->keep_redundant_commits)
opts->allow_empty = 1;
+ if (cleanup_arg) {
+ opts->default_msg_cleanup = get_cleanup_mode(cleanup_arg, 1);
+ opts->explicit_cleanup = 1;
+ }
+
/* Check for incompatible command line arguments */
if (cmd) {
char *this_operation;
const struct cache_entry *ce;
const char *name = list.entry[i].name;
struct object_id oid;
- unsigned mode;
+ unsigned short mode;
int local_changes = 0;
int staged_changes = 0;
+++ /dev/null
-#include "cache.h"
-#include "builtin.h"
-#include "parse-options.h"
-#include "serve.h"
-
-static char const * const serve_usage[] = {
- N_("git serve [<options>]"),
- NULL
-};
-
-int cmd_serve(int argc, const char **argv, const char *prefix)
-{
- struct serve_options opts = SERVE_OPTIONS_INIT;
-
- struct option options[] = {
- OPT_BOOL(0, "stateless-rpc", &opts.stateless_rpc,
- N_("quit after a single request/response exchange")),
- OPT_BOOL(0, "advertise-capabilities", &opts.advertise_capabilities,
- N_("exit immediately after advertising capabilities")),
- OPT_END()
- };
-
- /* ignore all unknown cmdline switches for now */
- argc = parse_options(argc, argv, prefix, options, serve_usage,
- PARSE_OPT_KEEP_DASHDASH |
- PARSE_OPT_KEEP_UNKNOWN);
- serve(&opts);
-
- return 0;
-}
/* Ah, that is a date spec... */
timestamp_t at;
at = approxidate(reflog_base);
- read_ref_at(ref, flags, at, -1, &oid, NULL,
+ read_ref_at(get_main_ref_store(the_repository),
+ ref, flags, at, -1, &oid, NULL,
NULL, NULL, &base);
}
}
timestamp_t timestamp;
int tz;
- if (read_ref_at(ref, flags, 0, base + i, &oid, &logmsg,
+ if (read_ref_at(get_main_ref_store(the_repository),
+ ref, flags, 0, base + i, &oid, &logmsg,
×tamp, &tz, NULL)) {
reflog = i;
break;
#include "builtin.h"
#include "cache.h"
+#include "config.h"
#include "refs.h"
#include "object-store.h"
#include "object.h"
int cmd_show_ref(int argc, const char **argv, const char *prefix)
{
+ git_config(git_default_config, NULL);
+
argc = parse_options(argc, argv, prefix, show_ref_options,
show_ref_usage, 0);
--- /dev/null
+#define USE_THE_INDEX_COMPATIBILITY_MACROS
+#include "builtin.h"
+#include "config.h"
+#include "parse-options.h"
+#include "refs.h"
+#include "lockfile.h"
+#include "cache-tree.h"
+#include "unpack-trees.h"
+#include "merge-recursive.h"
+#include "argv-array.h"
+#include "run-command.h"
+#include "dir.h"
+#include "rerere.h"
+#include "revision.h"
+#include "log-tree.h"
+#include "diffcore.h"
+#include "exec-cmd.h"
+
+#define INCLUDE_ALL_FILES 2
+
+static const char * const git_stash_usage[] = {
+ N_("git stash list [<options>]"),
+ N_("git stash show [<options>] [<stash>]"),
+ N_("git stash drop [-q|--quiet] [<stash>]"),
+ N_("git stash ( pop | apply ) [--index] [-q|--quiet] [<stash>]"),
+ N_("git stash branch <branchname> [<stash>]"),
+ N_("git stash clear"),
+ N_("git stash [push [-p|--patch] [-k|--[no-]keep-index] [-q|--quiet]\n"
+ " [-u|--include-untracked] [-a|--all] [-m|--message <message>]\n"
+ " [--] [<pathspec>...]]"),
+ N_("git stash save [-p|--patch] [-k|--[no-]keep-index] [-q|--quiet]\n"
+ " [-u|--include-untracked] [-a|--all] [<message>]"),
+ NULL
+};
+
+static const char * const git_stash_list_usage[] = {
+ N_("git stash list [<options>]"),
+ NULL
+};
+
+static const char * const git_stash_show_usage[] = {
+ N_("git stash show [<options>] [<stash>]"),
+ NULL
+};
+
+static const char * const git_stash_drop_usage[] = {
+ N_("git stash drop [-q|--quiet] [<stash>]"),
+ NULL
+};
+
+static const char * const git_stash_pop_usage[] = {
+ N_("git stash pop [--index] [-q|--quiet] [<stash>]"),
+ NULL
+};
+
+static const char * const git_stash_apply_usage[] = {
+ N_("git stash apply [--index] [-q|--quiet] [<stash>]"),
+ NULL
+};
+
+static const char * const git_stash_branch_usage[] = {
+ N_("git stash branch <branchname> [<stash>]"),
+ NULL
+};
+
+static const char * const git_stash_clear_usage[] = {
+ N_("git stash clear"),
+ NULL
+};
+
+static const char * const git_stash_store_usage[] = {
+ N_("git stash store [-m|--message <message>] [-q|--quiet] <commit>"),
+ NULL
+};
+
+static const char * const git_stash_push_usage[] = {
+ N_("git stash [push [-p|--patch] [-k|--[no-]keep-index] [-q|--quiet]\n"
+ " [-u|--include-untracked] [-a|--all] [-m|--message <message>]\n"
+ " [--] [<pathspec>...]]"),
+ NULL
+};
+
+static const char * const git_stash_save_usage[] = {
+ N_("git stash save [-p|--patch] [-k|--[no-]keep-index] [-q|--quiet]\n"
+ " [-u|--include-untracked] [-a|--all] [<message>]"),
+ NULL
+};
+
+static const char *ref_stash = "refs/stash";
+static struct strbuf stash_index_path = STRBUF_INIT;
+
+/*
+ * w_commit is set to the commit containing the working tree
+ * b_commit is set to the base commit
+ * i_commit is set to the commit containing the index tree
+ * u_commit is set to the commit containing the untracked files tree
+ * w_tree is set to the working tree
+ * b_tree is set to the base tree
+ * i_tree is set to the index tree
+ * u_tree is set to the untracked files tree
+ */
+struct stash_info {
+ struct object_id w_commit;
+ struct object_id b_commit;
+ struct object_id i_commit;
+ struct object_id u_commit;
+ struct object_id w_tree;
+ struct object_id b_tree;
+ struct object_id i_tree;
+ struct object_id u_tree;
+ struct strbuf revision;
+ int is_stash_ref;
+ int has_u;
+};
+
+static void free_stash_info(struct stash_info *info)
+{
+ strbuf_release(&info->revision);
+}
+
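+/*
+ * A stash-like commit has the pre-stash HEAD as its first parent and the
+ * commit recording the index state as its second; verify that the given
+ * revision and the trees derived from it can all be resolved that way.
+ */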
+static void assert_stash_like(struct stash_info *info, const char *revision)
+{
+ if (get_oidf(&info->b_commit, "%s^1", revision) ||
+ get_oidf(&info->w_tree, "%s:", revision) ||
+ get_oidf(&info->b_tree, "%s^1:", revision) ||
+ get_oidf(&info->i_tree, "%s^2:", revision))
+ die(_("'%s' is not a stash-like commit"), revision);
+}
+
+static int get_stash_info(struct stash_info *info, int argc, const char **argv)
+{
+ int ret;
+ char *end_of_rev;
+ char *expanded_ref;
+ const char *revision;
+ const char *commit = NULL;
+ struct object_id dummy;
+ struct strbuf symbolic = STRBUF_INIT;
+
+ if (argc > 1) {
+ int i;
+ struct strbuf refs_msg = STRBUF_INIT;
+
+ for (i = 0; i < argc; i++)
+ strbuf_addf(&refs_msg, " '%s'", argv[i]);
+
+ fprintf_ln(stderr, _("Too many revisions specified:%s"),
+ refs_msg.buf);
+ strbuf_release(&refs_msg);
+
+ return -1;
+ }
+
+ if (argc == 1)
+ commit = argv[0];
+
+ strbuf_init(&info->revision, 0);
+ if (!commit) {
+ if (!ref_exists(ref_stash)) {
+ free_stash_info(info);
+ fprintf_ln(stderr, _("No stash entries found."));
+ return -1;
+ }
+
+ strbuf_addf(&info->revision, "%s@{0}", ref_stash);
+ } else if (strspn(commit, "0123456789") == strlen(commit)) {
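+		/* A bare number N is shorthand for the reflog entry stash@{N}. */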
+ strbuf_addf(&info->revision, "%s@{%s}", ref_stash, commit);
+ } else {
+ strbuf_addstr(&info->revision, commit);
+ }
+
+ revision = info->revision.buf;
+
+ if (get_oid(revision, &info->w_commit)) {
+ error(_("%s is not a valid reference"), revision);
+ free_stash_info(info);
+ return -1;
+ }
+
+ assert_stash_like(info, revision);
+
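+	/* An optional third parent ("%s^3") records the untracked files, if any. */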
+ info->has_u = !get_oidf(&info->u_tree, "%s^3:", revision);
+
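+	/* Strip any "@{...}" suffix before checking whether this is refs/stash. */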
+ end_of_rev = strchrnul(revision, '@');
+ strbuf_add(&symbolic, revision, end_of_rev - revision);
+
+ ret = dwim_ref(symbolic.buf, symbolic.len, &dummy, &expanded_ref);
+ strbuf_release(&symbolic);
+ switch (ret) {
+ case 0: /* Not found, but valid ref */
+ info->is_stash_ref = 0;
+ break;
+ case 1:
+ info->is_stash_ref = !strcmp(expanded_ref, ref_stash);
+ break;
+ default: /* Invalid or ambiguous */
+ free_stash_info(info);
+ }
+
+ free(expanded_ref);
+ return !(ret == 0 || ret == 1);
+}
+
+static int do_clear_stash(void)
+{
+ struct object_id obj;
+ if (get_oid(ref_stash, &obj))
+ return 0;
+
+ return delete_ref(NULL, ref_stash, &obj, 0);
+}
+
+static int clear_stash(int argc, const char **argv, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_clear_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (argc)
+ return error(_("git stash clear with parameters is "
+ "unimplemented"));
+
+ return do_clear_stash();
+}
+
+static int reset_tree(struct object_id *i_tree, int update, int reset)
+{
+ int nr_trees = 1;
+ struct unpack_trees_options opts;
+ struct tree_desc t[MAX_UNPACK_TREES];
+ struct tree *tree;
+ struct lock_file lock_file = LOCK_INIT;
+
+ read_cache_preload(NULL);
+ if (refresh_cache(REFRESH_QUIET))
+ return -1;
+
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
+
+ memset(&opts, 0, sizeof(opts));
+
+ tree = parse_tree_indirect(i_tree);
+ if (parse_tree(tree))
+ return -1;
+
+ init_tree_desc(t, tree->buffer, tree->size);
+
+ opts.head_idx = 1;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
+ opts.merge = 1;
+ opts.reset = reset;
+ opts.update = update;
+ opts.fn = oneway_merge;
+
+ if (unpack_trees(nr_trees, t, &opts))
+ return -1;
+
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
+ return error(_("unable to write new index file"));
+
+ return 0;
+}
+
+static int diff_tree_binary(struct strbuf *out, struct object_id *w_commit)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ const char *w_commit_hex = oid_to_hex(w_commit);
+
+ /*
+	 * Diff-tree would not be very hard to replace with a native function,
+	 * but it should be done together with apply_cached.
+ */
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "diff-tree", "--binary", NULL);
+ argv_array_pushf(&cp.args, "%s^2^..%s^2", w_commit_hex, w_commit_hex);
+
+ return pipe_command(&cp, NULL, 0, out, 0, NULL, 0);
+}
+
+static int apply_cached(struct strbuf *out)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+	 * Apply currently only reads from either stdin or a file, so
+ * apply_all_patches would have to be updated to optionally take a
+ * buffer.
+ */
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "apply", "--cached", NULL);
+ return pipe_command(&cp, out->buf, out->len, NULL, 0, NULL, 0);
+}
+
+static int reset_head(void)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+	 * Reset is overall quite simple, but there is no current public
+ * API for resetting.
+ */
+ cp.git_cmd = 1;
+ argv_array_push(&cp.args, "reset");
+
+ return run_command(&cp);
+}
+
+static void add_diff_to_buf(struct diff_queue_struct *q,
+ struct diff_options *options,
+ void *data)
+{
+ int i;
+
+ for (i = 0; i < q->nr; i++) {
+ strbuf_addstr(data, q->queue[i]->one->path);
+
+ /* NUL-terminate: will be fed to update-index -z */
+ strbuf_addch(data, '\0');
+ }
+}
+
+static int get_newly_staged(struct strbuf *out, struct object_id *c_tree)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ const char *c_tree_hex = oid_to_hex(c_tree);
+
+ /*
+ * diff-index is very similar to diff-tree above, and should be
+ * converted together with update_index.
+ */
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "diff-index", "--cached", "--name-only",
+ "--diff-filter=A", NULL);
+ argv_array_push(&cp.args, c_tree_hex);
+ return pipe_command(&cp, NULL, 0, out, 0, NULL, 0);
+}
+
+static int update_index(struct strbuf *out)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+ * Update-index is very complicated and may need to have a public
+ * function exposed in order to remove this forking.
+ */
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "update-index", "--add", "--stdin", NULL);
+ return pipe_command(&cp, out->buf, out->len, NULL, 0, NULL, 0);
+}
+
+static int restore_untracked(struct object_id *u_tree)
+{
+ int res;
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+	 * We need to restore the files from a given index, but without
+ * affecting the current index, so we use GIT_INDEX_FILE with
+ * run_command to fork processes that will not interfere.
+ */
+ cp.git_cmd = 1;
+ argv_array_push(&cp.args, "read-tree");
+ argv_array_push(&cp.args, oid_to_hex(u_tree));
+ argv_array_pushf(&cp.env_array, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+ if (run_command(&cp)) {
+ remove_path(stash_index_path.buf);
+ return -1;
+ }
+
+ child_process_init(&cp);
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "checkout-index", "--all", NULL);
+ argv_array_pushf(&cp.env_array, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+
+ res = run_command(&cp);
+ remove_path(stash_index_path.buf);
+ return res;
+}
+
+static int do_apply_stash(const char *prefix, struct stash_info *info,
+ int index, int quiet)
+{
+ int ret;
+ int has_index = index;
+ struct merge_options o;
+ struct object_id c_tree;
+ struct object_id index_tree;
+ struct commit *result;
+ const struct object_id *bases[1];
+
+ read_cache_preload(NULL);
+ if (refresh_cache(REFRESH_QUIET))
+ return -1;
+
+ if (write_cache_as_tree(&c_tree, 0, NULL))
+ return error(_("cannot apply a stash in the middle of a merge"));
+
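+	/*
+	 * With --index, replay the recorded index changes (the diff leading to
+	 * the stash's second parent) on top of the current index; skip this
+	 * when the stashed index tree matches either the base or the current
+	 * index tree.
+	 */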
+ if (index) {
+ if (oideq(&info->b_tree, &info->i_tree) ||
+ oideq(&c_tree, &info->i_tree)) {
+ has_index = 0;
+ } else {
+ struct strbuf out = STRBUF_INIT;
+
+ if (diff_tree_binary(&out, &info->w_commit)) {
+ strbuf_release(&out);
+ return error(_("could not generate diff %s^!."),
+ oid_to_hex(&info->w_commit));
+ }
+
+ ret = apply_cached(&out);
+ strbuf_release(&out);
+ if (ret)
+				return error(_("conflicts in index. "
+					       "Try without --index."));
+
+ discard_cache();
+ read_cache();
+ if (write_cache_as_tree(&index_tree, 0, NULL))
+ return error(_("could not save index tree"));
+
+ reset_head();
+ }
+ }
+
+ if (info->has_u && restore_untracked(&info->u_tree))
+ return error(_("could not restore untracked files from stash"));
+
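+	/*
+	 * Restore the stashed worktree changes with a recursive merge of the
+	 * current index tree and the stash's worktree tree, using the commit
+	 * the stash was based on as the merge base.
+	 */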
+ init_merge_options(&o, the_repository);
+
+ o.branch1 = "Updated upstream";
+ o.branch2 = "Stashed changes";
+
+ if (oideq(&info->b_tree, &c_tree))
+ o.branch1 = "Version stash was based on";
+
+ if (quiet)
+ o.verbosity = 0;
+
+ if (o.verbosity >= 3)
+ printf_ln(_("Merging %s with %s"), o.branch1, o.branch2);
+
+ bases[0] = &info->b_tree;
+
+ ret = merge_recursive_generic(&o, &c_tree, &info->w_tree, 1, bases,
+ &result);
+ if (ret) {
+ rerere(0);
+
+ if (index)
+ fprintf_ln(stderr, _("Index was not unstashed."));
+
+ return ret;
+ }
+
+ if (has_index) {
+ if (reset_tree(&index_tree, 0, 0))
+ return -1;
+ } else {
+ struct strbuf out = STRBUF_INIT;
+
+ if (get_newly_staged(&out, &c_tree)) {
+ strbuf_release(&out);
+ return -1;
+ }
+
+ if (reset_tree(&c_tree, 0, 1)) {
+ strbuf_release(&out);
+ return -1;
+ }
+
+ ret = update_index(&out);
+ strbuf_release(&out);
+ if (ret)
+ return -1;
+
+ discard_cache();
+ }
+
+ if (quiet) {
+ if (refresh_cache(REFRESH_QUIET))
+ warning("could not refresh index");
+ } else {
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+ * Status is quite simple and could be replaced with calls to
+ * wt_status in the future, but it adds complexities which may
+ * require more tests.
+ */
+ cp.git_cmd = 1;
+ cp.dir = prefix;
+ argv_array_push(&cp.args, "status");
+ run_command(&cp);
+ }
+
+ return 0;
+}
+
+static int apply_stash(int argc, const char **argv, const char *prefix)
+{
+ int ret;
+ int quiet = 0;
+ int index = 0;
+ struct stash_info info;
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("be quiet, only report errors")),
+ OPT_BOOL(0, "index", &index,
+ N_("attempt to recreate the index")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_apply_usage, 0);
+
+ if (get_stash_info(&info, argc, argv))
+ return -1;
+
+ ret = do_apply_stash(prefix, &info, index, quiet);
+ free_stash_info(&info);
+ return ret;
+}
+
+static int do_drop_stash(struct stash_info *info, int quiet)
+{
+ int ret;
+ struct child_process cp_reflog = CHILD_PROCESS_INIT;
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ /*
+ * reflog does not provide a simple function for deleting refs. One will
+	 * need to be added to avoid implementing too much reflog code here.
+ */
+
+ cp_reflog.git_cmd = 1;
+ argv_array_pushl(&cp_reflog.args, "reflog", "delete", "--updateref",
+ "--rewrite", NULL);
+ argv_array_push(&cp_reflog.args, info->revision.buf);
+ ret = run_command(&cp_reflog);
+ if (!ret) {
+ if (!quiet)
+ printf_ln(_("Dropped %s (%s)"), info->revision.buf,
+ oid_to_hex(&info->w_commit));
+ } else {
+ return error(_("%s: Could not drop stash entry"),
+ info->revision.buf);
+ }
+
+ /*
+ * This could easily be replaced by get_oid, but currently it will throw
+ * a fatal error when a reflog is empty, which we can not recover from.
+	 * a fatal error when a reflog is empty, which we cannot recover from.
+ cp.git_cmd = 1;
+ /* Even though --quiet is specified, rev-parse still outputs the hash */
+ cp.no_stdout = 1;
+ argv_array_pushl(&cp.args, "rev-parse", "--verify", "--quiet", NULL);
+ argv_array_pushf(&cp.args, "%s@{0}", ref_stash);
+ ret = run_command(&cp);
+
+ /* do_clear_stash if we just dropped the last stash entry */
+ if (ret)
+ do_clear_stash();
+
+ return 0;
+}
+
+static void assert_stash_ref(struct stash_info *info)
+{
+ if (!info->is_stash_ref) {
+ error(_("'%s' is not a stash reference"), info->revision.buf);
+ free_stash_info(info);
+ exit(1);
+ }
+}
+
+static int drop_stash(int argc, const char **argv, const char *prefix)
+{
+ int ret;
+ int quiet = 0;
+ struct stash_info info;
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("be quiet, only report errors")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_drop_usage, 0);
+
+ if (get_stash_info(&info, argc, argv))
+ return -1;
+
+ assert_stash_ref(&info);
+
+ ret = do_drop_stash(&info, quiet);
+ free_stash_info(&info);
+ return ret;
+}
+
+static int pop_stash(int argc, const char **argv, const char *prefix)
+{
+ int ret;
+ int index = 0;
+ int quiet = 0;
+ struct stash_info info;
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("be quiet, only report errors")),
+ OPT_BOOL(0, "index", &index,
+ N_("attempt to recreate the index")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_pop_usage, 0);
+
+ if (get_stash_info(&info, argc, argv))
+ return -1;
+
+ assert_stash_ref(&info);
+ if ((ret = do_apply_stash(prefix, &info, index, quiet)))
+ printf_ln(_("The stash entry is kept in case "
+ "you need it again."));
+ else
+ ret = do_drop_stash(&info, quiet);
+
+ free_stash_info(&info);
+ return ret;
+}
+
+static int branch_stash(int argc, const char **argv, const char *prefix)
+{
+ int ret;
+ const char *branch = NULL;
+ struct stash_info info;
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct option options[] = {
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_branch_usage, 0);
+
+ if (!argc) {
+ fprintf_ln(stderr, _("No branch name specified"));
+ return -1;
+ }
+
+ branch = argv[0];
+
+ if (get_stash_info(&info, argc - 1, argv + 1))
+ return -1;
+
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "checkout", "-b", NULL);
+ argv_array_push(&cp.args, branch);
+ argv_array_push(&cp.args, oid_to_hex(&info.b_commit));
+ ret = run_command(&cp);
+ if (!ret)
+ ret = do_apply_stash(prefix, &info, 1, 0);
+ if (!ret && info.is_stash_ref)
+ ret = do_drop_stash(&info, 0);
+
+ free_stash_info(&info);
+
+ return ret;
+}
+
+static int list_stash(int argc, const char **argv, const char *prefix)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct option options[] = {
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_list_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+
+ if (!ref_exists(ref_stash))
+ return 0;
+
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "log", "--format=%gd: %gs", "-g",
+ "--first-parent", "-m", NULL);
+ argv_array_pushv(&cp.args, argv);
+ argv_array_push(&cp.args, ref_stash);
+ argv_array_push(&cp.args, "--");
+ return run_command(&cp);
+}
+
+static int show_stat = 1;
+static int show_patch;
+
+static int git_stash_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "stash.showstat")) {
+ show_stat = git_config_bool(var, value);
+ return 0;
+ }
+ if (!strcmp(var, "stash.showpatch")) {
+ show_patch = git_config_bool(var, value);
+ return 0;
+ }
+ return git_default_config(var, value, cb);
+}
+
+static int show_stash(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ int opts = 0;
+ int ret = 0;
+ struct stash_info info;
+ struct rev_info rev;
+ struct argv_array stash_args = ARGV_ARRAY_INIT;
+ struct option options[] = {
+ OPT_END()
+ };
+
+ init_diff_ui_defaults();
+ git_config(git_diff_ui_config, NULL);
+ init_revisions(&rev, prefix);
+
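+	/*
+	 * Split the arguments: anything that does not look like an option is
+	 * taken as a stash revision; the remaining flags are passed on to the
+	 * diff machinery via setup_revisions() below.
+	 */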
+ for (i = 1; i < argc; i++) {
+ if (argv[i][0] != '-')
+ argv_array_push(&stash_args, argv[i]);
+ else
+ opts++;
+ }
+
+ ret = get_stash_info(&info, stash_args.argc, stash_args.argv);
+ argv_array_clear(&stash_args);
+ if (ret)
+ return -1;
+
+ /*
+	 * The config settings are applied only if no options were passed.
+ */
+ if (!opts) {
+ git_config(git_stash_config, NULL);
+ if (show_stat)
+ rev.diffopt.output_format = DIFF_FORMAT_DIFFSTAT;
+
+ if (show_patch)
+ rev.diffopt.output_format |= DIFF_FORMAT_PATCH;
+
+ if (!show_stat && !show_patch) {
+ free_stash_info(&info);
+ return 0;
+ }
+ }
+
+ argc = setup_revisions(argc, argv, &rev, NULL);
+ if (argc > 1) {
+ free_stash_info(&info);
+ usage_with_options(git_stash_show_usage, options);
+ }
+ if (!rev.diffopt.output_format) {
+ rev.diffopt.output_format = DIFF_FORMAT_PATCH;
+ diff_setup_done(&rev.diffopt);
+ }
+
+ rev.diffopt.flags.recursive = 1;
+ setup_diff_pager(&rev.diffopt);
+ diff_tree_oid(&info.b_commit, &info.w_commit, "", &rev.diffopt);
+ log_tree_diff_flush(&rev);
+
+ free_stash_info(&info);
+ return diff_result_code(&rev.diffopt, 0);
+}
+
+static int do_store_stash(const struct object_id *w_commit, const char *stash_msg,
+ int quiet)
+{
+ if (!stash_msg)
+ stash_msg = "Created via \"git stash store\".";
+
+ if (update_ref(stash_msg, ref_stash, w_commit, NULL,
+ REF_FORCE_CREATE_REFLOG,
+ quiet ? UPDATE_REFS_QUIET_ON_ERR :
+ UPDATE_REFS_MSG_ON_ERR)) {
+ if (!quiet) {
+ fprintf_ln(stderr, _("Cannot update %s with %s"),
+ ref_stash, oid_to_hex(w_commit));
+ }
+ return -1;
+ }
+
+ return 0;
+}
+
+static int store_stash(int argc, const char **argv, const char *prefix)
+{
+ int quiet = 0;
+ const char *stash_msg = NULL;
+ struct object_id obj;
+ struct object_context dummy;
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("be quiet")),
+ OPT_STRING('m', "message", &stash_msg, "message",
+ N_("stash message")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_store_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+
+ if (argc != 1) {
+ if (!quiet)
+ fprintf_ln(stderr, _("\"git stash store\" requires one "
+ "<commit> argument"));
+ return -1;
+ }
+
+ if (get_oid_with_context(the_repository,
+ argv[0], quiet ? GET_OID_QUIETLY : 0, &obj,
+ &dummy)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot update %s with %s"),
+ ref_stash, argv[0]);
+ return -1;
+ }
+
+ return do_store_stash(&obj, stash_msg, quiet);
+}
+
+static void add_pathspecs(struct argv_array *args,
+ const struct pathspec *ps) {
+ int i;
+
+ for (i = 0; i < ps->nr; i++)
+ argv_array_push(args, ps->items[i].original);
+}
+
+/*
+ * `untracked_files` will be filled with the names of untracked files.
+ * The return value is:
+ *
+ * = 0 if there are no untracked files
+ * > 0 if there are untracked files
+ */
+static int get_untracked_files(const struct pathspec *ps, int include_untracked,
+ struct strbuf *untracked_files)
+{
+ int i;
+ int max_len;
+ int found = 0;
+ char *seen;
+ struct dir_struct dir;
+
+ memset(&dir, 0, sizeof(dir));
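+	/*
+	 * With --all (INCLUDE_ALL_FILES), ignored files are stashed too, so
+	 * the standard excludes must not be set up.
+	 */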
+ if (include_untracked != INCLUDE_ALL_FILES)
+ setup_standard_excludes(&dir);
+
+ seen = xcalloc(ps->nr, 1);
+
+ max_len = fill_directory(&dir, the_repository->index, ps);
+ for (i = 0; i < dir.nr; i++) {
+ struct dir_entry *ent = dir.entries[i];
+ if (dir_path_match(&the_index, ent, ps, max_len, seen)) {
+ found++;
+ strbuf_addstr(untracked_files, ent->name);
+ /* NUL-terminate: will be fed to update-index -z */
+ strbuf_addch(untracked_files, '\0');
+ }
+ free(ent);
+ }
+
+ free(seen);
+ free(dir.entries);
+ free(dir.ignored);
+ clear_directory(&dir);
+ return found;
+}
+
+/*
+ * The return value of `check_changes_tracked_files()` can be:
+ *
+ * < 0 if there was an error.
+ * = 0 if there are no changes.
+ * > 0 if there are changes.
+ */
+static int check_changes_tracked_files(const struct pathspec *ps)
+{
+ int result;
+ struct rev_info rev;
+ struct object_id dummy;
+ int ret = 0;
+
+ /* No initial commit. */
+ if (get_oid("HEAD", &dummy))
+ return -1;
+
+ if (read_cache() < 0)
+ return -1;
+
+ init_revisions(&rev, NULL);
+ copy_pathspec(&rev.prune_data, ps);
+
+ rev.diffopt.flags.quick = 1;
+ rev.diffopt.flags.ignore_submodules = 1;
+ rev.abbrev = 0;
+
+ add_head_to_pending(&rev);
+ diff_setup_done(&rev.diffopt);
+
+ result = run_diff_index(&rev, 1);
+ if (diff_result_code(&rev.diffopt, result)) {
+ ret = 1;
+ goto done;
+ }
+
+ object_array_clear(&rev.pending);
+ result = run_diff_files(&rev, 0);
+ if (diff_result_code(&rev.diffopt, result)) {
+ ret = 1;
+ goto done;
+ }
+
+done:
+ clear_pathspec(&rev.prune_data);
+ return ret;
+}
+
+/*
+ * The function will fill `untracked_files` with the names of untracked files.
+ * It will return 1 if there were any changes and 0 if there were not.
+ */
+static int check_changes(const struct pathspec *ps, int include_untracked,
+ struct strbuf *untracked_files)
+{
+ int ret = 0;
+ if (check_changes_tracked_files(ps))
+ ret = 1;
+
+ if (include_untracked && get_untracked_files(ps, include_untracked,
+ untracked_files))
+ ret = 1;
+
+ return ret;
+}
+
+static int save_untracked_files(struct stash_info *info, struct strbuf *msg,
+ struct strbuf files)
+{
+ int ret = 0;
+ struct strbuf untracked_msg = STRBUF_INIT;
+ struct child_process cp_upd_index = CHILD_PROCESS_INIT;
+ struct index_state istate = { NULL };
+
+ cp_upd_index.git_cmd = 1;
+ argv_array_pushl(&cp_upd_index.args, "update-index", "-z", "--add",
+ "--remove", "--stdin", NULL);
+ argv_array_pushf(&cp_upd_index.env_array, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+
+ strbuf_addf(&untracked_msg, "untracked files on %s\n", msg->buf);
+ if (pipe_command(&cp_upd_index, files.buf, files.len, NULL, 0,
+ NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (write_index_as_tree(&info->u_tree, &istate, stash_index_path.buf, 0,
+ NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (commit_tree(untracked_msg.buf, untracked_msg.len,
+ &info->u_tree, NULL, &info->u_commit, NULL, NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+done:
+ discard_index(&istate);
+ strbuf_release(&untracked_msg);
+ remove_path(stash_index_path.buf);
+ return ret;
+}
+
+static int stash_patch(struct stash_info *info, const struct pathspec *ps,
+ struct strbuf *out_patch, int quiet)
+{
+ int ret = 0;
+ struct child_process cp_read_tree = CHILD_PROCESS_INIT;
+ struct child_process cp_add_i = CHILD_PROCESS_INIT;
+ struct child_process cp_diff_tree = CHILD_PROCESS_INIT;
+ struct index_state istate = { NULL };
+
+ remove_path(stash_index_path.buf);
+
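+	/*
+	 * Prime a temporary index with HEAD; the interactive hunk selection
+	 * below operates on that index instead of the real one.
+	 */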
+ cp_read_tree.git_cmd = 1;
+ argv_array_pushl(&cp_read_tree.args, "read-tree", "HEAD", NULL);
+ argv_array_pushf(&cp_read_tree.env_array, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+ if (run_command(&cp_read_tree)) {
+ ret = -1;
+ goto done;
+ }
+
+ /* Find out what the user wants. */
+ cp_add_i.git_cmd = 1;
+ argv_array_pushl(&cp_add_i.args, "add--interactive", "--patch=stash",
+ "--", NULL);
+ add_pathspecs(&cp_add_i.args, ps);
+ argv_array_pushf(&cp_add_i.env_array, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+ if (run_command(&cp_add_i)) {
+ ret = -1;
+ goto done;
+ }
+
+ /* State of the working tree. */
+ if (write_index_as_tree(&info->w_tree, &istate, stash_index_path.buf, 0,
+ NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_diff_tree.git_cmd = 1;
+ argv_array_pushl(&cp_diff_tree.args, "diff-tree", "-p", "HEAD",
+ oid_to_hex(&info->w_tree), "--", NULL);
+ if (pipe_command(&cp_diff_tree, NULL, 0, out_patch, 0, NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (!out_patch->len) {
+ if (!quiet)
+ fprintf_ln(stderr, _("No changes selected"));
+ ret = 1;
+ }
+
+done:
+ discard_index(&istate);
+ remove_path(stash_index_path.buf);
+ return ret;
+}
+
+static int stash_working_tree(struct stash_info *info, const struct pathspec *ps)
+{
+ int ret = 0;
+ struct rev_info rev;
+ struct child_process cp_upd_index = CHILD_PROCESS_INIT;
+ struct strbuf diff_output = STRBUF_INIT;
+ struct index_state istate = { NULL };
+
+ init_revisions(&rev, NULL);
+ copy_pathspec(&rev.prune_data, ps);
+
+ set_alternate_index_output(stash_index_path.buf);
+ if (reset_tree(&info->i_tree, 0, 0)) {
+ ret = -1;
+ goto done;
+ }
+ set_alternate_index_output(NULL);
+
+ rev.diffopt.output_format = DIFF_FORMAT_CALLBACK;
+ rev.diffopt.format_callback = add_diff_to_buf;
+ rev.diffopt.format_callback_data = &diff_output;
+
+ if (read_cache_preload(&rev.diffopt.pathspec) < 0) {
+ ret = -1;
+ goto done;
+ }
+
+ add_pending_object(&rev, parse_object(the_repository, &info->b_commit),
+ "");
+ if (run_diff_index(&rev, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_upd_index.git_cmd = 1;
+ argv_array_pushl(&cp_upd_index.args, "update-index", "-z", "--add",
+ "--remove", "--stdin", NULL);
+ argv_array_pushf(&cp_upd_index.env_array, "GIT_INDEX_FILE=%s",
+ stash_index_path.buf);
+
+ if (pipe_command(&cp_upd_index, diff_output.buf, diff_output.len,
+ NULL, 0, NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (write_index_as_tree(&info->w_tree, &istate, stash_index_path.buf, 0,
+ NULL)) {
+ ret = -1;
+ goto done;
+ }
+
+done:
+ discard_index(&istate);
+ UNLEAK(rev);
+ object_array_clear(&rev.pending);
+ clear_pathspec(&rev.prune_data);
+ strbuf_release(&diff_output);
+ remove_path(stash_index_path.buf);
+ return ret;
+}
+
+static int do_create_stash(const struct pathspec *ps, struct strbuf *stash_msg_buf,
+ int include_untracked, int patch_mode,
+ struct stash_info *info, struct strbuf *patch,
+ int quiet)
+{
+ int ret = 0;
+ int flags = 0;
+ int untracked_commit_option = 0;
+ const char *head_short_sha1 = NULL;
+ const char *branch_ref = NULL;
+ const char *branch_name = "(no branch)";
+ struct commit *head_commit = NULL;
+ struct commit_list *parents = NULL;
+ struct strbuf msg = STRBUF_INIT;
+ struct strbuf commit_tree_label = STRBUF_INIT;
+ struct strbuf untracked_files = STRBUF_INIT;
+
+ prepare_fallback_ident("git stash", "git@stash");
+
+ read_cache_preload(NULL);
+ refresh_cache(REFRESH_QUIET);
+
+ if (get_oid("HEAD", &info->b_commit)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("You do not have "
+ "the initial commit yet"));
+ ret = -1;
+ goto done;
+ } else {
+ head_commit = lookup_commit(the_repository, &info->b_commit);
+ }
+
+ if (!check_changes(ps, include_untracked, &untracked_files)) {
+ ret = 1;
+ goto done;
+ }
+
+ branch_ref = resolve_ref_unsafe("HEAD", 0, NULL, &flags);
+ if (flags & REF_ISSYMREF)
+ branch_name = strrchr(branch_ref, '/') + 1;
+ head_short_sha1 = find_unique_abbrev(&head_commit->object.oid,
+ DEFAULT_ABBREV);
+ strbuf_addf(&msg, "%s: %s ", branch_name, head_short_sha1);
+ pp_commit_easy(CMIT_FMT_ONELINE, head_commit, &msg);
+
+ strbuf_addf(&commit_tree_label, "index on %s\n", msg.buf);
+ commit_list_insert(head_commit, &parents);
+ if (write_cache_as_tree(&info->i_tree, 0, NULL) ||
+ commit_tree(commit_tree_label.buf, commit_tree_label.len,
+ &info->i_tree, parents, &info->i_commit, NULL, NULL)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save the current "
+ "index state"));
+ ret = -1;
+ goto done;
+ }
+
+ if (include_untracked) {
+ if (save_untracked_files(info, &msg, untracked_files)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save "
+ "the untracked files"));
+ ret = -1;
+ goto done;
+ }
+ untracked_commit_option = 1;
+ }
+ if (patch_mode) {
+ ret = stash_patch(info, ps, patch, quiet);
+ if (ret < 0) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save the current "
+ "worktree state"));
+ goto done;
+ } else if (ret > 0) {
+ goto done;
+ }
+ } else {
+ if (stash_working_tree(info, ps)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save the current "
+ "worktree state"));
+ ret = -1;
+ goto done;
+ }
+ }
+
+ if (!stash_msg_buf->len)
+ strbuf_addf(stash_msg_buf, "WIP on %s", msg.buf);
+ else
+ strbuf_insertf(stash_msg_buf, 0, "On %s: ", branch_name);
+
+ /*
+ * `parents` will be empty after calling `commit_tree()`, so there is
+	 * no need to call `free_commit_list()`.
+ */
+ parents = NULL;
+ if (untracked_commit_option)
+ commit_list_insert(lookup_commit(the_repository,
+ &info->u_commit),
+ &parents);
+ commit_list_insert(lookup_commit(the_repository, &info->i_commit),
+ &parents);
+ commit_list_insert(head_commit, &parents);
+
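+	/*
+	 * The stash commit records the worktree tree; its first parent is
+	 * HEAD, its second the index commit, and (with --include-untracked)
+	 * its third the untracked-files commit.
+	 */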
+ if (commit_tree(stash_msg_buf->buf, stash_msg_buf->len, &info->w_tree,
+ parents, &info->w_commit, NULL, NULL)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot record "
+ "working tree state"));
+ ret = -1;
+ goto done;
+ }
+
+done:
+ strbuf_release(&commit_tree_label);
+ strbuf_release(&msg);
+ strbuf_release(&untracked_files);
+ return ret;
+}
+
+static int create_stash(int argc, const char **argv, const char *prefix)
+{
+ int ret = 0;
+ struct strbuf stash_msg_buf = STRBUF_INIT;
+ struct stash_info info;
+ struct pathspec ps;
+
+ /* Starting with argv[1], since argv[0] is "create" */
+ strbuf_join_argv(&stash_msg_buf, argc - 1, ++argv, ' ');
+
+ memset(&ps, 0, sizeof(ps));
+ if (!check_changes_tracked_files(&ps))
+ return 0;
+
+ ret = do_create_stash(&ps, &stash_msg_buf, 0, 0, &info,
+ NULL, 0);
+ if (!ret)
+ printf_ln("%s", oid_to_hex(&info.w_commit));
+
+ strbuf_release(&stash_msg_buf);
+ return ret;
+}
+
+static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int quiet,
+ int keep_index, int patch_mode, int include_untracked)
+{
+ int ret = 0;
+ struct stash_info info;
+ struct strbuf patch = STRBUF_INIT;
+ struct strbuf stash_msg_buf = STRBUF_INIT;
+ struct strbuf untracked_files = STRBUF_INIT;
+
+ if (patch_mode && keep_index == -1)
+ keep_index = 1;
+
+ if (patch_mode && include_untracked) {
+ fprintf_ln(stderr, _("Can't use --patch and --include-untracked"
+ " or --all at the same time"));
+ ret = -1;
+ goto done;
+ }
+
+ read_cache_preload(NULL);
+ if (!include_untracked && ps->nr) {
+ int i;
+ char *ps_matched = xcalloc(ps->nr, 1);
+
+ for (i = 0; i < active_nr; i++)
+ ce_path_match(&the_index, active_cache[i], ps,
+ ps_matched);
+
+ if (report_path_error(ps_matched, ps)) {
+ fprintf_ln(stderr, _("Did you forget to 'git add'?"));
+ ret = -1;
+ free(ps_matched);
+ goto done;
+ }
+ free(ps_matched);
+ }
+
+ if (refresh_cache(REFRESH_QUIET)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (!check_changes(ps, include_untracked, &untracked_files)) {
+ if (!quiet)
+ printf_ln(_("No local changes to save"));
+ goto done;
+ }
+
+ if (!reflog_exists(ref_stash) && do_clear_stash()) {
+ ret = -1;
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot initialize stash"));
+ goto done;
+ }
+
+ if (stash_msg)
+ strbuf_addstr(&stash_msg_buf, stash_msg);
+ if (do_create_stash(ps, &stash_msg_buf, include_untracked, patch_mode,
+ &info, &patch, quiet)) {
+ ret = -1;
+ goto done;
+ }
+
+ if (do_store_stash(&info.w_commit, stash_msg_buf.buf, 1)) {
+ ret = -1;
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot save the current status"));
+ goto done;
+ }
+
+ if (!quiet)
+ printf_ln(_("Saved working directory and index state %s"),
+ stash_msg_buf.buf);
+
+ if (!patch_mode) {
+ if (include_untracked && !ps->nr) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "clean", "--force",
+ "--quiet", "-d", NULL);
+ if (include_untracked == INCLUDE_ALL_FILES)
+ argv_array_push(&cp.args, "-x");
+ if (run_command(&cp)) {
+ ret = -1;
+ goto done;
+ }
+ }
+ discard_cache();
+ if (ps->nr) {
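+			/*
+			 * For a pathspec-limited stash, stage the matching
+			 * changes, then reverse-apply the staged diff so those
+			 * paths are rolled back in both the index and the
+			 * working tree.
+			 */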
+ struct child_process cp_add = CHILD_PROCESS_INIT;
+ struct child_process cp_diff = CHILD_PROCESS_INIT;
+ struct child_process cp_apply = CHILD_PROCESS_INIT;
+ struct strbuf out = STRBUF_INIT;
+
+ cp_add.git_cmd = 1;
+ argv_array_push(&cp_add.args, "add");
+ if (!include_untracked)
+ argv_array_push(&cp_add.args, "-u");
+ if (include_untracked == INCLUDE_ALL_FILES)
+ argv_array_push(&cp_add.args, "--force");
+ argv_array_push(&cp_add.args, "--");
+ add_pathspecs(&cp_add.args, ps);
+ if (run_command(&cp_add)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_diff.git_cmd = 1;
+ argv_array_pushl(&cp_diff.args, "diff-index", "-p",
+ "--cached", "--binary", "HEAD", "--",
+ NULL);
+ add_pathspecs(&cp_diff.args, ps);
+ if (pipe_command(&cp_diff, NULL, 0, &out, 0, NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_apply.git_cmd = 1;
+ argv_array_pushl(&cp_apply.args, "apply", "--index",
+ "-R", NULL);
+ if (pipe_command(&cp_apply, out.buf, out.len, NULL, 0,
+ NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+ } else {
+ struct child_process cp = CHILD_PROCESS_INIT;
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "reset", "--hard", "-q",
+ NULL);
+ if (run_command(&cp)) {
+ ret = -1;
+ goto done;
+ }
+ }
+
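+		/*
+		 * With --keep-index, restore the stashed index tree and check
+		 * its modified paths back out so that staged changes stay in
+		 * the working tree.
+		 */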
+ if (keep_index == 1 && !is_null_oid(&info.i_tree)) {
+ struct child_process cp_ls = CHILD_PROCESS_INIT;
+ struct child_process cp_checkout = CHILD_PROCESS_INIT;
+ struct strbuf out = STRBUF_INIT;
+
+ if (reset_tree(&info.i_tree, 0, 1)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_ls.git_cmd = 1;
+ argv_array_pushl(&cp_ls.args, "ls-files", "-z",
+ "--modified", "--", NULL);
+
+ add_pathspecs(&cp_ls.args, ps);
+ if (pipe_command(&cp_ls, NULL, 0, &out, 0, NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+
+ cp_checkout.git_cmd = 1;
+ argv_array_pushl(&cp_checkout.args, "checkout-index",
+ "-z", "--force", "--stdin", NULL);
+ if (pipe_command(&cp_checkout, out.buf, out.len, NULL,
+ 0, NULL, 0)) {
+ ret = -1;
+ goto done;
+ }
+ }
+ goto done;
+ } else {
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "apply", "-R", NULL);
+
+ if (pipe_command(&cp, patch.buf, patch.len, NULL, 0, NULL, 0)) {
+ if (!quiet)
+ fprintf_ln(stderr, _("Cannot remove "
+ "worktree changes"));
+ ret = -1;
+ goto done;
+ }
+
+ if (keep_index < 1) {
+ struct child_process cp = CHILD_PROCESS_INIT;
+
+ cp.git_cmd = 1;
+ argv_array_pushl(&cp.args, "reset", "-q", "--", NULL);
+ add_pathspecs(&cp.args, ps);
+ if (run_command(&cp)) {
+ ret = -1;
+ goto done;
+ }
+ }
+ goto done;
+ }
+
+done:
+ strbuf_release(&stash_msg_buf);
+ return ret;
+}
+
+static int push_stash(int argc, const char **argv, const char *prefix)
+{
+ int keep_index = -1;
+ int patch_mode = 0;
+ int include_untracked = 0;
+ int quiet = 0;
+ const char *stash_msg = NULL;
+ struct pathspec ps;
+ struct option options[] = {
+ OPT_BOOL('k', "keep-index", &keep_index,
+ N_("keep index")),
+ OPT_BOOL('p', "patch", &patch_mode,
+ N_("stash in patch mode")),
+ OPT__QUIET(&quiet, N_("quiet mode")),
+ OPT_BOOL('u', "include-untracked", &include_untracked,
+ N_("include untracked files in stash")),
+ OPT_SET_INT('a', "all", &include_untracked,
+ N_("include ignore files"), 2),
+ OPT_STRING('m', "message", &stash_msg, N_("message"),
+ N_("stash message")),
+ OPT_END()
+ };
+
+ if (argc)
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_push_usage,
+ 0);
+
+ parse_pathspec(&ps, 0, PATHSPEC_PREFER_FULL | PATHSPEC_PREFIX_ORIGIN,
+ prefix, argv);
+ return do_push_stash(&ps, stash_msg, quiet, keep_index, patch_mode,
+ include_untracked);
+}
+
+static int save_stash(int argc, const char **argv, const char *prefix)
+{
+ int keep_index = -1;
+ int patch_mode = 0;
+ int include_untracked = 0;
+ int quiet = 0;
+ int ret = 0;
+ const char *stash_msg = NULL;
+ struct pathspec ps;
+ struct strbuf stash_msg_buf = STRBUF_INIT;
+ struct option options[] = {
+ OPT_BOOL('k', "keep-index", &keep_index,
+ N_("keep index")),
+ OPT_BOOL('p', "patch", &patch_mode,
+ N_("stash in patch mode")),
+ OPT__QUIET(&quiet, N_("quiet mode")),
+ OPT_BOOL('u', "include-untracked", &include_untracked,
+ N_("include untracked files in stash")),
+ OPT_SET_INT('a', "all", &include_untracked,
+ N_("include ignore files"), 2),
+ OPT_STRING('m', "message", &stash_msg, "message",
+ N_("stash message")),
+ OPT_END()
+ };
+
+ argc = parse_options(argc, argv, prefix, options,
+ git_stash_save_usage,
+ PARSE_OPT_KEEP_DASHDASH);
+
+ if (argc)
+ stash_msg = strbuf_join_argv(&stash_msg_buf, argc, argv, ' ');
+
+ memset(&ps, 0, sizeof(ps));
+ ret = do_push_stash(&ps, stash_msg, quiet, keep_index,
+ patch_mode, include_untracked);
+
+ strbuf_release(&stash_msg_buf);
+ return ret;
+}
+
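+/*
+ * The built-in stash is the default; it can be disabled by setting
+ * stash.usebuiltin=false (or via the GIT_TEST_STASH_USE_BUILTIN environment
+ * variable), in which case the legacy git-legacy-stash script is executed
+ * instead.
+ */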
+static int use_builtin_stash(void)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct strbuf out = STRBUF_INIT;
+ int ret, env = git_env_bool("GIT_TEST_STASH_USE_BUILTIN", -1);
+
+ if (env != -1)
+ return env;
+
+ argv_array_pushl(&cp.args,
+ "config", "--bool", "stash.usebuiltin", NULL);
+ cp.git_cmd = 1;
+ if (capture_command(&cp, &out, 6)) {
+ strbuf_release(&out);
+ return 1;
+ }
+
+ strbuf_trim(&out);
+ ret = !strcmp("true", out.buf);
+ strbuf_release(&out);
+ return ret;
+}
+
+int cmd_stash(int argc, const char **argv, const char *prefix)
+{
+ int i = -1;
+ pid_t pid = getpid();
+ const char *index_file;
+ struct argv_array args = ARGV_ARRAY_INIT;
+
+ struct option options[] = {
+ OPT_END()
+ };
+
+ if (!use_builtin_stash()) {
+ const char *path = mkpath("%s/git-legacy-stash",
+ git_exec_path());
+
+ if (sane_execvp(path, (char **)argv) < 0)
+ die_errno(_("could not exec %s"), path);
+ else
+ BUG("sane_execvp() returned???");
+ }
+
+ prefix = setup_git_directory();
+ trace_repo_setup(prefix);
+ setup_work_tree();
+
+ git_config(git_diff_basic_config, NULL);
+
+ argc = parse_options(argc, argv, prefix, options, git_stash_usage,
+ PARSE_OPT_KEEP_UNKNOWN | PARSE_OPT_KEEP_DASHDASH);
+
+ index_file = get_index_file();
+ strbuf_addf(&stash_index_path, "%s.stash.%" PRIuMAX, index_file,
+ (uintmax_t)pid);
+
+ if (!argc)
+ return !!push_stash(0, NULL, prefix);
+ else if (!strcmp(argv[0], "apply"))
+ return !!apply_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "clear"))
+ return !!clear_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "drop"))
+ return !!drop_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "pop"))
+ return !!pop_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "branch"))
+ return !!branch_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "list"))
+ return !!list_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "show"))
+ return !!show_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "store"))
+ return !!store_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "create"))
+ return !!create_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "push"))
+ return !!push_stash(argc, argv, prefix);
+ else if (!strcmp(argv[0], "save"))
+ return !!save_stash(argc, argv, prefix);
+ else if (*argv[0] != '-')
+ usage_msg_opt(xstrfmt(_("unknown subcommand: %s"), argv[0]),
+ git_stash_usage, options);
+
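+	/*
+	 * Everything else is an implicit "push". Unless the invocation starts
+	 * with "-p", check that only options "push" understands were given
+	 * before forwarding the arguments.
+	 */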
+ if (strcmp(argv[0], "-p")) {
+ while (++i < argc && strcmp(argv[i], "--")) {
+ /*
+ * `akpqu` is a string which contains all short options,
+ * except `-m` which is verified separately.
+ */
+ if ((strlen(argv[i]) == 2) && *argv[i] == '-' &&
+ strchr("akpqu", argv[i][1]))
+ continue;
+
+ if (!strcmp(argv[i], "--all") ||
+ !strcmp(argv[i], "--keep-index") ||
+ !strcmp(argv[i], "--no-keep-index") ||
+ !strcmp(argv[i], "--patch") ||
+ !strcmp(argv[i], "--quiet") ||
+ !strcmp(argv[i], "--include-untracked"))
+ continue;
+
+ /*
+ * `-m` and `--message=` are verified separately because
+ * they need to be immediately followed by a string
+			 * (e.g. `-m"foobar"` or `--message="foobar"`).
+ */
+ if (starts_with(argv[i], "-m") ||
+ starts_with(argv[i], "--message="))
+ continue;
+
+ usage_with_options(git_stash_usage, options);
+ }
+ }
+
+ argv_array_push(&args, "push");
+ argv_array_pushv(&args, argv);
+ return !!push_stash(args.argc, args.argv, prefix);
+}
i++;
}
- if (ps_matched && report_path_error(ps_matched, pathspec, prefix))
+ if (ps_matched && report_path_error(ps_matched, pathspec))
result = -1;
free(ps_matched);
};
const char *const git_submodule_helper_usage[] = {
- N_("git submodule--helper foreach [--quiet] [--recursive] <command>"),
+ N_("git submodule--helper foreach [--quiet] [--recursive] [--] <command>"),
NULL
};
argc = parse_options(argc, argv, prefix, module_foreach_options,
- git_submodule_helper_usage, PARSE_OPT_KEEP_UNKNOWN);
+ git_submodule_helper_usage, 0);
if (module_list_compute(0, NULL, prefix, &pathspec, &list) < 0)
return 1;
};
const char *const git_submodule_helper_usage[] = {
- N_("git submodule--helper init [<path>]"),
+ N_("git submodule--helper init [<options>] [<path>]"),
NULL
};
die(_("submodule '%s' cannot add alternate: %s"),
sas->submodule_name, err.buf);
case SUBMODULE_ALTERNATE_ERROR_INFO:
- fprintf(stderr, _("submodule '%s' cannot add alternate: %s"),
+ fprintf_ln(stderr, _("submodule '%s' cannot add alternate: %s"),
sas->submodule_name, err.buf);
case SUBMODULE_ALTERNATE_ERROR_IGNORE:
; /* nothing */
{
int i;
- run_processes_parallel(suc->max_jobs,
- update_clone_get_next_task,
- update_clone_start_failure,
- update_clone_task_finished,
- suc);
+ run_processes_parallel_tr2(suc->max_jobs, update_clone_get_next_task,
+ update_clone_start_failure,
+ update_clone_task_finished, suc, "submodule",
+ "parallel/update");
/*
* We saved the output and put it out all at once now.
};
const char *const git_submodule_helper_usage[] = {
- N_("git submodule--helper embed-git-dir [<path>...]"),
+ N_("git submodule--helper absorb-git-dirs [<options>] [<path>...]"),
NULL
};
static int module_config(int argc, const char **argv, const char *prefix)
{
enum {
- CHECK_WRITEABLE = 1
+ CHECK_WRITEABLE = 1,
+ DO_UNSET = 2
} command = 0;
struct option module_config_options[] = {
OPT_CMDMODE(0, "check-writeable", &command,
N_("check if it is safe to write to the .gitmodules file"),
CHECK_WRITEABLE),
+ OPT_CMDMODE(0, "unset", &command,
+ N_("unset the config in the .gitmodules file"),
+ DO_UNSET),
OPT_END()
};
const char *const git_submodule_helper_usage[] = {
- N_("git submodule--helper config name [value]"),
+ N_("git submodule--helper config <name> [<value>]"),
+ N_("git submodule--helper config --unset <name>"),
N_("git submodule--helper config --check-writeable"),
NULL
};
return is_writing_gitmodules_ok() ? 0 : -1;
/* Equivalent to ACTION_GET in builtin/config.c */
- if (argc == 2)
+ if (argc == 2 && command != DO_UNSET)
return print_config_from_gitmodules(the_repository, argv[1]);
/* Equivalent to ACTION_SET in builtin/config.c */
- if (argc == 3) {
+ if (argc == 3 || (argc == 2 && command == DO_UNSET)) {
+ const char *value = (argc == 3) ? argv[2] : NULL;
+
if (!is_writing_gitmodules_ok())
die(_("please make sure that the .gitmodules file is in the working tree"));
- return config_set_in_gitmodules_file_gently(argv[1], argv[2]);
+ return config_set_in_gitmodules_file_gently(argv[1], value);
}
usage_with_options(git_submodule_helper_usage, module_config_options);
#include "ref-filter.h"
static const char * const git_tag_usage[] = {
- N_("git tag [-a | -s | -u <key-id>] [-f] [-m <msg> | -F <file>] <tagname> [<head>]"),
+ N_("git tag [-a | -s | -u <key-id>] [-f] [-m <msg> | -F <file>]\n"
+ "\t\t<tagname> [<head>]"),
N_("git tag -d <tagname>..."),
- N_("git tag -l [-n[<num>]] [--contains <commit>] [--no-contains <commit>] [--points-at <object>]"
- "\n\t\t[--format=<format>] [--[no-]merged [<commit>]] [<pattern>...]"),
+ N_("git tag -l [-n[<num>]] [--contains <commit>] [--no-contains <commit>] [--points-at <object>]\n"
+ "\t\t[--format=<format>] [--[no-]merged [<commit>]] [<pattern>...]"),
N_("git tag -v [--format=<format>] <tagname>..."),
NULL
};
} cleanup_mode;
};
-static void create_tag(const struct object_id *object, const char *tag,
+static const char message_advice_nested_tag[] =
+ N_("You have created a nested tag. The object referred to by your new tag is\n"
+ "already a tag. If you meant to tag the object that it points to, use:\n"
+ "\n"
+ "\tgit tag -f %s %s^{}");
+
+static void create_tag(const struct object_id *object, const char *object_ref,
+ const char *tag,
struct strbuf *buf, struct create_tag_options *opt,
struct object_id *prev, struct object_id *result)
{
type = oid_object_info(the_repository, object, NULL);
if (type <= OBJ_NONE)
- die(_("bad object type."));
+ die(_("bad object type."));
+
+ if (type == OBJ_TAG && advice_nested_tag)
+ advise(_(message_advice_nested_tag), tag, object_ref);
strbuf_addf(&header,
"object %s\n"
OPT_FILENAME('F', "file", &msgfile, N_("read message from file")),
OPT_BOOL('e', "edit", &edit_flag, N_("force edit of tag message")),
OPT_BOOL('s', "sign", &opt.sign, N_("annotated and GPG-signed tag")),
- OPT_STRING(0, "cleanup", &cleanup_arg, N_("mode"),
- N_("how to strip spaces and #comments from message")),
+ OPT_CLEANUP(&cleanup_arg),
OPT_STRING('u', "local-user", &keyid, N_("key-id"),
N_("use another key to sign the tag")),
OPT__FORCE(&force, N_("replace the tag if exists"), 0),
OPT_WITHOUT(&filter.no_commit, N_("print only tags that don't contain the commit")),
OPT_MERGED(&filter, N_("print only tags that are merged")),
OPT_NO_MERGED(&filter, N_("print only tags that are not merged")),
- OPT_CALLBACK(0 , "sort", sorting_tail, N_("key"),
- N_("field name to sort on"), &parse_opt_ref_sorting),
+ OPT_REF_SORT(sorting_tail),
{
OPTION_CALLBACK, 0, "points-at", &filter.points_at, N_("object"),
N_("print only tags of the object"), PARSE_OPT_LASTARG_DEFAULT,
if (create_tag_object) {
if (force_sign_annotate && !annotate)
opt.sign = 1;
- create_tag(&object, tag, &buf, &opt, &prev, &object);
+ create_tag(&object, object_ref, tag, &buf, &opt, &prev, &object);
}
transaction = ref_transaction_begin(&err);
struct object_id *ent, const char *path,
int namelen, int stage)
{
- unsigned mode;
+ unsigned short mode;
struct object_id oid;
struct cache_entry *ce;
}
static int do_reupdate(int ac, const char **av,
- const char *prefix, int prefix_length)
+ const char *prefix)
{
/* Read HEAD and run update-index on paths that are
* merged and already different between index and HEAD.
return 0;
}
-static int cacheinfo_callback(struct parse_opt_ctx_t *ctx,
- const struct option *opt, int unset)
+static enum parse_opt_result cacheinfo_callback(
+ struct parse_opt_ctx_t *ctx, const struct option *opt,
+ const char *arg, int unset)
{
struct object_id oid;
unsigned int mode;
const char *path;
BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
if (!parse_new_style_cacheinfo(ctx->argv[1], &mode, &oid, &path)) {
if (add_cacheinfo(mode, &oid, path, 0))
return 0;
}
-static int stdin_cacheinfo_callback(struct parse_opt_ctx_t *ctx,
- const struct option *opt, int unset)
+static enum parse_opt_result stdin_cacheinfo_callback(
+ struct parse_opt_ctx_t *ctx, const struct option *opt,
+ const char *arg, int unset)
{
int *nul_term_line = opt->value;
BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
if (ctx->argc != 1)
return error("option '%s' must be the last argument", opt->long_name);
return 0;
}
-static int stdin_callback(struct parse_opt_ctx_t *ctx,
- const struct option *opt, int unset)
+static enum parse_opt_result stdin_callback(
+ struct parse_opt_ctx_t *ctx, const struct option *opt,
+ const char *arg, int unset)
{
int *read_from_stdin = opt->value;
BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
if (ctx->argc != 1)
return error("option '%s' must be the last argument", opt->long_name);
return 0;
}
-static int unresolve_callback(struct parse_opt_ctx_t *ctx,
- const struct option *opt, int unset)
+static enum parse_opt_result unresolve_callback(
+ struct parse_opt_ctx_t *ctx, const struct option *opt,
+ const char *arg, int unset)
{
int *has_errors = opt->value;
const char *prefix = startup_info->prefix;
BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
/* consume remaining arguments. */
*has_errors = do_unresolve(ctx->argc, ctx->argv,
return 0;
}
-static int reupdate_callback(struct parse_opt_ctx_t *ctx,
- const struct option *opt, int unset)
+static enum parse_opt_result reupdate_callback(
+ struct parse_opt_ctx_t *ctx, const struct option *opt,
+ const char *arg, int unset)
{
int *has_errors = opt->value;
const char *prefix = startup_info->prefix;
BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
/* consume remaining arguments. */
setup_work_tree();
- *has_errors = do_reupdate(ctx->argc, ctx->argv,
- prefix, prefix ? strlen(prefix) : 0);
+ *has_errors = do_reupdate(ctx->argc, ctx->argv, prefix);
if (*has_errors)
active_cache_changed = 0;
N_("add the specified entry to the index"),
PARSE_OPT_NOARG | /* disallow --cacheinfo=<mode> form */
PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP,
- (parse_opt_cb *) cacheinfo_callback},
+ NULL, 0,
+ cacheinfo_callback},
{OPTION_CALLBACK, 0, "chmod", &set_executable_bit, "(+|-)x",
N_("override the executable bit of the listed files"),
PARSE_OPT_NONEG,
{OPTION_LOWLEVEL_CALLBACK, 0, "stdin", &read_from_stdin, NULL,
N_("read list of paths to be updated from standard input"),
PARSE_OPT_NONEG | PARSE_OPT_NOARG,
- (parse_opt_cb *) stdin_callback},
+ NULL, 0, stdin_callback},
{OPTION_LOWLEVEL_CALLBACK, 0, "index-info", &nul_term_line, NULL,
N_("add entries from standard input to the index"),
PARSE_OPT_NONEG | PARSE_OPT_NOARG,
- (parse_opt_cb *) stdin_cacheinfo_callback},
+ NULL, 0, stdin_cacheinfo_callback},
{OPTION_LOWLEVEL_CALLBACK, 0, "unresolve", &has_errors, NULL,
N_("repopulate stages #2 and #3 for the listed paths"),
PARSE_OPT_NONEG | PARSE_OPT_NOARG,
- (parse_opt_cb *) unresolve_callback},
+ NULL, 0, unresolve_callback},
{OPTION_LOWLEVEL_CALLBACK, 'g', "again", &has_errors, NULL,
N_("only update entries that differ from HEAD"),
PARSE_OPT_NONEG | PARSE_OPT_NOARG,
- (parse_opt_cb *) reupdate_callback},
+ NULL, 0, reupdate_callback},
OPT_BIT(0, "ignore-missing", &refresh_args.flags,
N_("ignore files missing from worktree"),
REFRESH_IGNORE_MISSING),
if (entries < 0)
die("cache corrupted");
+ the_index.updated_skipworktree = 1;
+
/*
* Custom copy of parse_options() because we want to handle
* filename arguments as they come.
struct strbuf sb_git = STRBUF_INIT, sb_repo = STRBUF_INIT;
struct strbuf sb = STRBUF_INIT;
const char *name;
- struct stat st;
struct child_process cp = CHILD_PROCESS_INIT;
struct argv_array child_env = ARGV_ARRAY_INIT;
- int counter = 0, len, ret;
+ unsigned int counter = 0;
+ int len, ret;
struct strbuf symref = STRBUF_INIT;
struct commit *commit = NULL;
int is_branch = 0;
if (safe_create_leading_directories_const(sb_repo.buf))
die_errno(_("could not create leading directories of '%s'"),
sb_repo.buf);
- while (!stat(sb_repo.buf, &st)) {
+
+ while (mkdir(sb_repo.buf, 0777)) {
counter++;
+ if ((errno != EEXIST) || !counter /* overflow */)
+ die_errno(_("could not create directory of '%s'"),
+ sb_repo.buf);
strbuf_setlen(&sb_repo, len);
strbuf_addf(&sb_repo, "%d", counter);
}
atexit(remove_junk);
sigchain_push_common(remove_junk_on_signal);
- if (mkdir(sb_repo.buf, 0777))
- die_errno(_("could not create directory of '%s'"), sb_repo.buf);
junk_git_dir = xstrdup(sb_repo.buf);
is_junk = 1;
cp.dir = path;
cp.env = env;
cp.argv = NULL;
+ cp.trace2_hook_name = "post-checkout";
argv_array_pushl(&cp.args, absolute_path(hook),
oid_to_hex(&null_oid),
oid_to_hex(&commit->object.oid),
#include "cache.h"
-extern int index_bulk_checkin(struct object_id *oid,
- int fd, size_t size, enum object_type type,
- const char *path, unsigned flags);
+int index_bulk_checkin(struct object_id *oid,
+ int fd, size_t size, enum object_type type,
+ const char *path, unsigned flags);
-extern void plug_bulk_checkin(void);
-extern void unplug_bulk_checkin(void);
+void plug_bulk_checkin(void);
+void unplug_bulk_checkin(void);
#endif
#include "gettext.h"
#include "convert.h"
#include "trace.h"
+#include "trace2.h"
#include "string-list.h"
#include "pack-revindex.h"
#include "hash.h"
struct cache_time timestamp;
unsigned name_hash_initialized : 1,
initialized : 1,
- drop_cache_tree : 1;
+ drop_cache_tree : 1,
+ updated_workdir : 1,
+ updated_skipworktree : 1;
struct hashmap name_hash;
struct hashmap dir_hash;
struct object_id oid;
};
/* Name hashing */
-extern int test_lazy_init_name_hash(struct index_state *istate, int try_threaded);
-extern void add_name_hash(struct index_state *istate, struct cache_entry *ce);
-extern void remove_name_hash(struct index_state *istate, struct cache_entry *ce);
-extern void free_name_hash(struct index_state *istate);
+int test_lazy_init_name_hash(struct index_state *istate, int try_threaded);
+void add_name_hash(struct index_state *istate, struct cache_entry *ce);
+void remove_name_hash(struct index_state *istate, struct cache_entry *ce);
+void free_name_hash(struct index_state *istate);
/* Cache entry creation and cleanup */
*/
extern const char * const local_repo_env[];
-extern void setup_git_env(const char *git_dir);
+void setup_git_env(const char *git_dir);
/*
* Returns true iff we have a configured git repository (either via
int have_git_dir(void);
extern int is_bare_repository_cfg;
-extern int is_bare_repository(void);
-extern int is_inside_git_dir(void);
+int is_bare_repository(void);
+int is_inside_git_dir(void);
extern char *git_work_tree_cfg;
-extern int is_inside_work_tree(void);
-extern const char *get_git_dir(void);
-extern const char *get_git_common_dir(void);
-extern char *get_object_directory(void);
-extern char *get_index_file(void);
-extern char *get_graft_file(struct repository *r);
-extern void set_git_dir(const char *path);
-extern int get_common_dir_noenv(struct strbuf *sb, const char *gitdir);
-extern int get_common_dir(struct strbuf *sb, const char *gitdir);
-extern const char *get_git_namespace(void);
-extern const char *strip_namespace(const char *namespaced_ref);
-extern const char *get_super_prefix(void);
-extern const char *get_git_work_tree(void);
+int is_inside_work_tree(void);
+const char *get_git_dir(void);
+const char *get_git_common_dir(void);
+char *get_object_directory(void);
+char *get_index_file(void);
+char *get_graft_file(struct repository *r);
+void set_git_dir(const char *path);
+int get_common_dir_noenv(struct strbuf *sb, const char *gitdir);
+int get_common_dir(struct strbuf *sb, const char *gitdir);
+const char *get_git_namespace(void);
+const char *strip_namespace(const char *namespaced_ref);
+const char *get_super_prefix(void);
+const char *get_git_work_tree(void);
/*
* Return true if the given path is a git directory; note that this _just_
* looks at the directory itself. If you want to know whether "foo/.git"
* is a repository, you must feed that path, not just "foo".
*/
-extern int is_git_directory(const char *path);
+int is_git_directory(const char *path);
/*
* Return 1 if the given path is the root of a git repository or
* as we usually consider sub-repos precious, and would prefer to err on the
* side of not disrupting or deleting them.
*/
-extern int is_nonbare_repository_dir(struct strbuf *path);
+int is_nonbare_repository_dir(struct strbuf *path);
#define READ_GITFILE_ERR_STAT_FAILED 1
#define READ_GITFILE_ERR_NOT_A_FILE 2
#define READ_GITFILE_ERR_NO_PATH 6
#define READ_GITFILE_ERR_NOT_A_REPO 7
#define READ_GITFILE_ERR_TOO_LARGE 8
-extern void read_gitfile_error_die(int error_code, const char *path, const char *dir);
-extern const char *read_gitfile_gently(const char *path, int *return_error_code);
+void read_gitfile_error_die(int error_code, const char *path, const char *dir);
+const char *read_gitfile_gently(const char *path, int *return_error_code);
#define read_gitfile(path) read_gitfile_gently((path), NULL)
-extern const char *resolve_gitdir_gently(const char *suspect, int *return_error_code);
+const char *resolve_gitdir_gently(const char *suspect, int *return_error_code);
#define resolve_gitdir(path) resolve_gitdir_gently((path), NULL)
-extern void set_git_work_tree(const char *tree);
+void set_git_work_tree(const char *tree);
#define ALTERNATE_DB_ENVIRONMENT "GIT_ALTERNATE_OBJECT_DIRECTORIES"
-extern void setup_work_tree(void);
+void setup_work_tree(void);
/*
* Find the commondir and gitdir of the repository that contains the current
* working directory, without changing the working directory or other global
* both have the same result appended to the buffer. The return value is
 * either 0 upon success or non-zero if no repository was found.
*/
-extern int discover_git_directory(struct strbuf *commondir,
- struct strbuf *gitdir);
-extern const char *setup_git_directory_gently(int *);
-extern const char *setup_git_directory(void);
-extern char *prefix_path(const char *prefix, int len, const char *path);
-extern char *prefix_path_gently(const char *prefix, int len, int *remaining, const char *path);
+int discover_git_directory(struct strbuf *commondir,
+ struct strbuf *gitdir);
+const char *setup_git_directory_gently(int *);
+const char *setup_git_directory(void);
+char *prefix_path(const char *prefix, int len, const char *path);
+char *prefix_path_gently(const char *prefix, int len, int *remaining, const char *path);
/*
* Concatenate "prefix" (if len is non-zero) and "path", with no
* The return value is always a newly allocated string (even if the
* prefix was empty).
*/
-extern char *prefix_filename(const char *prefix, const char *path);
+char *prefix_filename(const char *prefix, const char *path);
-extern int check_filename(const char *prefix, const char *name);
-extern void verify_filename(const char *prefix,
- const char *name,
- int diagnose_misspelt_rev);
-extern void verify_non_filename(const char *prefix, const char *name);
-extern int path_inside_repo(const char *prefix, const char *path);
+int check_filename(const char *prefix, const char *name);
+void verify_filename(const char *prefix,
+ const char *name,
+ int diagnose_misspelt_rev);
+void verify_non_filename(const char *prefix, const char *name);
+int path_inside_repo(const char *prefix, const char *path);
#define INIT_DB_QUIET 0x0001
#define INIT_DB_EXIST_OK 0x0002
-extern int init_db(const char *git_dir, const char *real_git_dir,
- const char *template_dir, unsigned int flags);
+int init_db(const char *git_dir, const char *real_git_dir,
+ const char *template_dir, unsigned int flags);
-extern void sanitize_stdfds(void);
-extern int daemonize(void);
+void sanitize_stdfds(void);
+int daemonize(void);
#define alloc_nr(x) (((x)+16)*3/2)
/* Initialize and use the cache information */
struct lock_file;
-extern void preload_index(struct index_state *index,
- const struct pathspec *pathspec,
- unsigned int refresh_flags);
-extern int do_read_index(struct index_state *istate, const char *path,
- int must_exist); /* for testting only! */
-extern int read_index_from(struct index_state *, const char *path,
- const char *gitdir);
-extern int is_index_unborn(struct index_state *);
+void preload_index(struct index_state *index,
+ const struct pathspec *pathspec,
+ unsigned int refresh_flags);
+int do_read_index(struct index_state *istate, const char *path,
+		 int must_exist); /* for testing only! */
+int read_index_from(struct index_state *, const char *path,
+ const char *gitdir);
+int is_index_unborn(struct index_state *);
/* For use with `write_locked_index()`. */
#define COMMIT_LOCK (1 << 0)
* If `SKIP_IF_UNCHANGED` is given and the index is unchanged, nothing
* is written (and the lock is rolled back if `COMMIT_LOCK` is given).
*/
-extern int write_locked_index(struct index_state *, struct lock_file *lock, unsigned flags);
+int write_locked_index(struct index_state *, struct lock_file *lock, unsigned flags);
-extern int discard_index(struct index_state *);
-extern void move_index_extensions(struct index_state *dst, struct index_state *src);
-extern int unmerged_index(const struct index_state *);
+int discard_index(struct index_state *);
+void move_index_extensions(struct index_state *dst, struct index_state *src);
+int unmerged_index(const struct index_state *);
/**
* Returns 1 if istate differs from tree, 0 otherwise. If tree is NULL,
* provided, the space-separated list of files that differ will be appended
* to it.
*/
-extern int repo_index_has_changes(struct repository *repo,
- struct tree *tree,
- struct strbuf *sb);
+int repo_index_has_changes(struct repository *repo,
+ struct tree *tree,
+ struct strbuf *sb);
-extern int verify_path(const char *path, unsigned mode);
-extern int strcmp_offset(const char *s1, const char *s2, size_t *first_change);
-extern int index_dir_exists(struct index_state *istate, const char *name, int namelen);
-extern void adjust_dirname_case(struct index_state *istate, char *name);
-extern struct cache_entry *index_file_exists(struct index_state *istate, const char *name, int namelen, int igncase);
+int verify_path(const char *path, unsigned mode);
+int strcmp_offset(const char *s1, const char *s2, size_t *first_change);
+int index_dir_exists(struct index_state *istate, const char *name, int namelen);
+void adjust_dirname_case(struct index_state *istate, char *name);
+struct cache_entry *index_file_exists(struct index_state *istate, const char *name, int namelen, int igncase);
/*
* Searches for an entry defined by name and namelen in the given index.
* index_name_pos(&index, "f", 1) -> -3
* index_name_pos(&index, "g", 1) -> -5
*/
-extern int index_name_pos(const struct index_state *, const char *name, int namelen);
+int index_name_pos(const struct index_state *, const char *name, int namelen);
#define ADD_CACHE_OK_TO_ADD 1 /* Ok to add */
#define ADD_CACHE_OK_TO_REPLACE 2 /* Ok to replace file/directory */
#define ADD_CACHE_NEW_ONLY 16 /* Do not replace existing ones */
#define ADD_CACHE_KEEP_CACHE_TREE 32 /* Do not invalidate cache-tree */
#define ADD_CACHE_RENORMALIZE 64 /* Pass along HASH_RENORMALIZE */
-extern int add_index_entry(struct index_state *, struct cache_entry *ce, int option);
-extern void rename_index_entry_at(struct index_state *, int pos, const char *new_name);
+int add_index_entry(struct index_state *, struct cache_entry *ce, int option);
+void rename_index_entry_at(struct index_state *, int pos, const char *new_name);
/* Remove entry, return true if there are more entries to go. */
-extern int remove_index_entry_at(struct index_state *, int pos);
+int remove_index_entry_at(struct index_state *, int pos);
-extern void remove_marked_cache_entries(struct index_state *istate);
-extern int remove_file_from_index(struct index_state *, const char *path);
+void remove_marked_cache_entries(struct index_state *istate, int invalidate);
+int remove_file_from_index(struct index_state *, const char *path);
#define ADD_CACHE_VERBOSE 1
#define ADD_CACHE_PRETEND 2
#define ADD_CACHE_IGNORE_ERRORS 4
* the latter will do necessary lstat(2) internally before
* calling the former.
*/
-extern int add_to_index(struct index_state *, const char *path, struct stat *, int flags);
-extern int add_file_to_index(struct index_state *, const char *path, int flags);
+int add_to_index(struct index_state *, const char *path, struct stat *, int flags);
+int add_file_to_index(struct index_state *, const char *path, int flags);
-extern int chmod_index_entry(struct index_state *, struct cache_entry *ce, char flip);
-extern int ce_same_name(const struct cache_entry *a, const struct cache_entry *b);
-extern void set_object_name_for_intent_to_add_entry(struct cache_entry *ce);
-extern int index_name_is_other(const struct index_state *, const char *, int);
-extern void *read_blob_data_from_index(const struct index_state *, const char *, unsigned long *);
+int chmod_index_entry(struct index_state *, struct cache_entry *ce, char flip);
+int ce_same_name(const struct cache_entry *a, const struct cache_entry *b);
+void set_object_name_for_intent_to_add_entry(struct cache_entry *ce);
+int index_name_is_other(const struct index_state *, const char *, int);
+void *read_blob_data_from_index(const struct index_state *, const char *, unsigned long *);
/* do stat comparison even if CE_VALID is true */
#define CE_MATCH_IGNORE_VALID 01
#define CE_MATCH_REFRESH 0x10
/* don't refresh_fsmonitor state or do stat comparison even if CE_FSMONITOR_VALID is true */
#define CE_MATCH_IGNORE_FSMONITOR 0X20
-extern int is_racy_timestamp(const struct index_state *istate,
- const struct cache_entry *ce);
-extern int ie_match_stat(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
-extern int ie_modified(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
+int is_racy_timestamp(const struct index_state *istate,
+ const struct cache_entry *ce);
+int ie_match_stat(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
+int ie_modified(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
#define HASH_WRITE_OBJECT 1
#define HASH_FORMAT_CHECK 2
#define HASH_RENORMALIZE 4
-extern int index_fd(struct index_state *istate, struct object_id *oid, int fd, struct stat *st, enum object_type type, const char *path, unsigned flags);
-extern int index_path(struct index_state *istate, struct object_id *oid, const char *path, struct stat *st, unsigned flags);
+int index_fd(struct index_state *istate, struct object_id *oid, int fd, struct stat *st, enum object_type type, const char *path, unsigned flags);
+int index_path(struct index_state *istate, struct object_id *oid, const char *path, struct stat *st, unsigned flags);
/*
* Record to sd the data from st that we use to check whether a file
* might have changed.
*/
-extern void fill_stat_data(struct stat_data *sd, struct stat *st);
+void fill_stat_data(struct stat_data *sd, struct stat *st);
/*
* Return 0 if st is consistent with a file not having been changed
* combination of MTIME_CHANGED, CTIME_CHANGED, OWNER_CHANGED,
* INODE_CHANGED, and DATA_CHANGED.
*/
-extern int match_stat_data(const struct stat_data *sd, struct stat *st);
-extern int match_stat_data_racy(const struct index_state *istate,
- const struct stat_data *sd, struct stat *st);
+int match_stat_data(const struct stat_data *sd, struct stat *st);
+int match_stat_data_racy(const struct index_state *istate,
+ const struct stat_data *sd, struct stat *st);
-extern void fill_stat_cache_info(struct cache_entry *ce, struct stat *st);
+void fill_stat_cache_info(struct cache_entry *ce, struct stat *st);
#define REFRESH_REALLY 0x0001 /* ignore_valid */
#define REFRESH_UNMERGED 0x0002 /* allow unmerged */
#define REFRESH_IGNORE_SUBMODULES 0x0010 /* ignore submodules */
#define REFRESH_IN_PORCELAIN 0x0020 /* user friendly output, not "needs update" */
#define REFRESH_PROGRESS 0x0040 /* show progress bar if stderr is tty */
-extern int refresh_index(struct index_state *, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg);
-extern struct cache_entry *refresh_cache_entry(struct index_state *, struct cache_entry *, unsigned int);
+int refresh_index(struct index_state *, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg);
+struct cache_entry *refresh_cache_entry(struct index_state *, struct cache_entry *, unsigned int);
-extern void set_alternate_index_output(const char *);
+void set_alternate_index_output(const char *);
extern int verify_index_checksum;
extern int verify_ce_order;
extern const char *core_partial_clone_filter_default;
extern int repository_format_worktree_config;
+/*
+ * You _have_ to initialize a `struct repository_format` using
+ * `= REPOSITORY_FORMAT_INIT` before calling `read_repository_format()`.
+ */
struct repository_format {
int version;
int precious_objects;
struct string_list unknown_extensions;
};
+/*
+ * Always use this to initialize a `struct repository_format`
+ * to a well-defined, default state before calling
+ * `read_repository_format()`.
+ */
+#define REPOSITORY_FORMAT_INIT \
+{ \
+ .version = -1, \
+ .is_bare = -1, \
+ .hash_algo = GIT_HASH_SHA1, \
+ .unknown_extensions = STRING_LIST_INIT_DUP, \
+}
+
/*
* Read the repository format characteristics from the config file "path" into
- * "format" struct. Returns the numeric version. On error, -1 is returned,
- * format->version is set to -1, and all other fields in the struct are
- * undefined.
+ * "format" struct. Returns the numeric version. On error, or if no version is
+ * found in the configuration, -1 is returned, format->version is set to -1,
+ * and all other fields in the struct are set to the default configuration
+ * (REPOSITORY_FORMAT_INIT). Always initialize the struct using
+ * REPOSITORY_FORMAT_INIT before calling this function.
*/
int read_repository_format(struct repository_format *format, const char *path);
+/*
+ * Free the memory held onto by `format`, but not the struct itself.
+ * (No need to use this after `read_repository_format()` fails.)
+ */
+void clear_repository_format(struct repository_format *format);
+
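For reference, a minimal usage sketch of the new initializer and cleanup helper (the config path and the version check are illustrative, not part of the patch):

	struct repository_format format = REPOSITORY_FORMAT_INIT;

	/* "path" would point at the repository's config file */
	if (read_repository_format(&format, path) >= 0) {
		if (format.version > 1)
			warning("unhandled repository version %d", format.version);
		clear_repository_format(&format); /* frees unknown_extensions etc. */
	}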
/*
* Verify that the repository described by repository_format is something we
* can read. If it is, return 0. Otherwise, return -1, and "err" will describe
* set_git_dir() before calling this, and use it only for "are we in a valid
* repo?".
*/
-extern void check_repository_format(void);
+void check_repository_format(void);
#define MTIME_CHANGED 0x0001
#define CTIME_CHANGED 0x0002
* Note that while this version avoids the static buffer, it is not fully
* reentrant, as it calls into other non-reentrant git code.
*/
-extern const char *find_unique_abbrev(const struct object_id *oid, int len);
-extern int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len);
+const char *repo_find_unique_abbrev(struct repository *r, const struct object_id *oid, int len);
+#define find_unique_abbrev(oid, len) repo_find_unique_abbrev(the_repository, oid, len)
+int repo_find_unique_abbrev_r(struct repository *r, char *hex, const struct object_id *oid, int len);
+#define find_unique_abbrev_r(hex, oid, len) repo_find_unique_abbrev_r(the_repository, hex, oid, len)
extern const unsigned char null_sha1[GIT_MAX_RAWSZ];
extern const struct object_id null_oid;
int raceproof_create_file(const char *path, create_file_fn fn, void *cb);
int mkdir_in_gitdir(const char *path);
-extern char *expand_user_path(const char *path, int real_home);
+char *expand_user_path(const char *path, int real_home);
const char *enter_repo(const char *path, int strict);
static inline int is_absolute_path(const char *path)
{
* "$XDG_CONFIG_HOME/git/$filename" if $XDG_CONFIG_HOME is non-empty, otherwise
* "$HOME/.config/git/$filename". Return NULL upon error.
*/
-extern char *xdg_config_home(const char *filename);
+char *xdg_config_home(const char *filename);
/**
* Return a newly allocated string with the evaluation of
* "$XDG_CACHE_HOME/git/$filename" if $XDG_CACHE_HOME is non-empty, otherwise
* "$HOME/.cache/git/$filename". Return NULL upon error.
*/
-extern char *xdg_cache_home(const char *filename);
+char *xdg_cache_home(const char *filename);
-extern int git_open_cloexec(const char *name, int flags);
+int git_open_cloexec(const char *name, int flags);
#define git_open(name) git_open_cloexec(name, O_RDONLY)
-extern int unpack_loose_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz);
-extern int parse_loose_header(const char *hdr, unsigned long *sizep);
+int unpack_loose_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz);
+int parse_loose_header(const char *hdr, unsigned long *sizep);
-extern int check_object_signature(const struct object_id *oid, void *buf, unsigned long size, const char *type);
+int check_object_signature(const struct object_id *oid, void *buf, unsigned long size, const char *type);
-extern int finalize_object_file(const char *tmpfile, const char *filename);
+int finalize_object_file(const char *tmpfile, const char *filename);
/* Helper to check and "touch" a file */
-extern int check_and_freshen_file(const char *fn, int freshen);
+int check_and_freshen_file(const char *fn, int freshen);
extern const signed char hexval_table[256];
static inline unsigned int hexval(unsigned char c)
#define FALLBACK_DEFAULT_ABBREV 7
struct object_context {
- unsigned mode;
+ unsigned short mode;
/*
* symlink_path is only used by get_tree_entry_follow_symlinks,
* and only for symlinks that point outside the repository.
*/
};
-extern int get_oid(const char *str, struct object_id *oid);
-extern int get_oid_commit(const char *str, struct object_id *oid);
-extern int get_oid_committish(const char *str, struct object_id *oid);
-extern int get_oid_tree(const char *str, struct object_id *oid);
-extern int get_oid_treeish(const char *str, struct object_id *oid);
-extern int get_oid_blob(const char *str, struct object_id *oid);
-extern void maybe_die_on_misspelt_object_name(const char *name, const char *prefix);
-extern enum get_oid_result get_oid_with_context(struct repository *repo, const char *str,
- unsigned flags, struct object_id *oid,
- struct object_context *oc);
+int repo_get_oid(struct repository *r, const char *str, struct object_id *oid);
+int get_oidf(struct object_id *oid, const char *fmt, ...);
+int repo_get_oid_commit(struct repository *r, const char *str, struct object_id *oid);
+int repo_get_oid_committish(struct repository *r, const char *str, struct object_id *oid);
+int repo_get_oid_tree(struct repository *r, const char *str, struct object_id *oid);
+int repo_get_oid_treeish(struct repository *r, const char *str, struct object_id *oid);
+int repo_get_oid_blob(struct repository *r, const char *str, struct object_id *oid);
+int repo_get_oid_mb(struct repository *r, const char *str, struct object_id *oid);
+void maybe_die_on_misspelt_object_name(struct repository *repo,
+ const char *name,
+ const char *prefix);
+enum get_oid_result get_oid_with_context(struct repository *repo, const char *str,
+ unsigned flags, struct object_id *oid,
+ struct object_context *oc);
+
+#define get_oid(str, oid) repo_get_oid(the_repository, str, oid)
+#define get_oid_commit(str, oid) repo_get_oid_commit(the_repository, str, oid)
+#define get_oid_committish(str, oid) repo_get_oid_committish(the_repository, str, oid)
+#define get_oid_tree(str, oid) repo_get_oid_tree(the_repository, str, oid)
+#define get_oid_treeish(str, oid) repo_get_oid_treeish(the_repository, str, oid)
+#define get_oid_blob(str, oid) repo_get_oid_blob(the_repository, str, oid)
+#define get_oid_mb(str, oid) repo_get_oid_mb(the_repository, str, oid)
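
The pattern here is the same as elsewhere in this series: each helper gains a repo_*() variant taking an explicit struct repository, and the old name survives as a compatibility macro bound to the_repository. A minimal sketch (resolving "HEAD" is just an illustrative example):

	struct object_id oid;

	if (repo_get_oid(the_repository, "HEAD", &oid))
		die("could not resolve HEAD");

	/* identical, via the compatibility macro */
	if (get_oid("HEAD", &oid))
		die("could not resolve HEAD");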
typedef int each_abbrev_fn(const struct object_id *oid, void *);
-extern int for_each_abbrev(const char *prefix, each_abbrev_fn, void *);
+int repo_for_each_abbrev(struct repository *r, const char *prefix, each_abbrev_fn, void *);
+#define for_each_abbrev(prefix, fn, data) repo_for_each_abbrev(the_repository, prefix, fn, data)
-extern int set_disambiguate_hint_config(const char *var, const char *value);
+int set_disambiguate_hint_config(const char *var, const char *value);
/*
* Try to read a SHA1 in hexadecimal format from the 40 characters
* input, so it is safe to pass this function an arbitrary
* null-terminated string.
*/
-extern int get_sha1_hex(const char *hex, unsigned char *sha1);
-extern int get_oid_hex(const char *hex, struct object_id *sha1);
+int get_sha1_hex(const char *hex, unsigned char *sha1);
+int get_oid_hex(const char *hex, struct object_id *sha1);
/*
* Read `len` pairs of hexadecimal digits from `hex` and write the
* values to `binary` as `len` bytes. Return 0 on success, or -1 if
 * the input does not consist of hex digits.
*/
-extern int hex_to_bytes(unsigned char *binary, const char *hex, size_t len);
+int hex_to_bytes(unsigned char *binary, const char *hex, size_t len);
/*
* Convert a binary hash to its hex equivalent. The `_r` variant is reentrant,
* other invalid character. end is only updated on success; otherwise, it is
* unmodified.
*/
-extern int parse_oid_hex(const char *hex, struct object_id *oid, const char **end);
+int parse_oid_hex(const char *hex, struct object_id *oid, const char **end);
/*
* This reads short-hand syntax that not only evaluates to a commit
#define INTERPRET_BRANCH_LOCAL (1<<0)
#define INTERPRET_BRANCH_REMOTE (1<<1)
#define INTERPRET_BRANCH_HEAD (1<<2)
-extern int interpret_branch_name(const char *str, int len, struct strbuf *,
- unsigned allowed);
-extern int get_oid_mb(const char *str, struct object_id *oid);
-
-extern int validate_headref(const char *ref);
-
-extern int base_name_compare(const char *name1, int len1, int mode1, const char *name2, int len2, int mode2);
-extern int df_name_compare(const char *name1, int len1, int mode1, const char *name2, int len2, int mode2);
-extern int name_compare(const char *name1, size_t len1, const char *name2, size_t len2);
-extern int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2);
-
-extern void *read_object_with_reference(const struct object_id *oid,
- const char *required_type,
- unsigned long *size,
- struct object_id *oid_ret);
-
-extern struct object *peel_to_type(const char *name, int namelen,
- struct object *o, enum object_type);
+int repo_interpret_branch_name(struct repository *r,
+ const char *str, int len,
+ struct strbuf *buf,
+ unsigned allowed);
+#define interpret_branch_name(str, len, buf, allowed) \
+ repo_interpret_branch_name(the_repository, str, len, buf, allowed)
+
+int validate_headref(const char *ref);
+
+int base_name_compare(const char *name1, int len1, int mode1, const char *name2, int len2, int mode2);
+int df_name_compare(const char *name1, int len1, int mode1, const char *name2, int len2, int mode2);
+int name_compare(const char *name1, size_t len1, const char *name2, size_t len2);
+int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2);
+
+void *read_object_with_reference(const struct object_id *oid,
+ const char *required_type,
+ unsigned long *size,
+ struct object_id *oid_ret);
+
+struct object *repo_peel_to_type(struct repository *r,
+ const char *name, int namelen,
+ struct object *o, enum object_type);
+#define peel_to_type(name, namelen, obj, type) \
+ repo_peel_to_type(the_repository, name, namelen, obj, type)
enum date_mode_type {
DATE_NORMAL = 0,
#define IDENT_STRICT 1
#define IDENT_NO_DATE 2
#define IDENT_NO_NAME 4
-extern const char *git_author_info(int);
-extern const char *git_committer_info(int);
-extern const char *fmt_ident(const char *name, const char *email, const char *date_str, int);
-extern const char *fmt_name(const char *name, const char *email);
-extern const char *ident_default_name(void);
-extern const char *ident_default_email(void);
-extern const char *git_editor(void);
-extern const char *git_sequence_editor(void);
-extern const char *git_pager(int stdout_is_tty);
-extern int is_terminal_dumb(void);
-extern int git_ident_config(const char *, const char *, void *);
-extern void reset_ident_date(void);
+
+enum want_ident {
+ WANT_BLANK_IDENT,
+ WANT_AUTHOR_IDENT,
+ WANT_COMMITTER_IDENT
+};
+
+const char *git_author_info(int);
+const char *git_committer_info(int);
+const char *fmt_ident(const char *name, const char *email,
+ enum want_ident whose_ident,
+ const char *date_str, int);
+const char *fmt_name(enum want_ident);
+const char *ident_default_name(void);
+const char *ident_default_email(void);
+const char *git_editor(void);
+const char *git_sequence_editor(void);
+const char *git_pager(int stdout_is_tty);
+int is_terminal_dumb(void);
+int git_ident_config(const char *, const char *, void *);
+/*
+ * Prepare an ident to fall back on if the user didn't configure it.
+ */
+void prepare_fallback_ident(const char *name, const char *email);
+void reset_ident_date(void);
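
With the new enum want_ident, callers state whether author or committer defaults should back up the supplied values. A minimal sketch (the literal name/email strings are illustrative only):

	/* explicit values, validated as an author ident */
	const char *author = fmt_ident("A U Thor", "author@example.com",
				       WANT_AUTHOR_IDENT, NULL, IDENT_STRICT);

	/* committer ident derived from config/environment */
	const char *committer = fmt_name(WANT_COMMITTER_IDENT);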
struct ident_split {
const char *name_begin;
 * Signals success with 0, but the time part of the result may be NULL
* if the input lacks timestamp and zone
*/
-extern int split_ident_line(struct ident_split *, const char *, int);
+int split_ident_line(struct ident_split *, const char *, int);
/*
* Like show_date, but pull the timestamp and tz parameters from
* Because there are two fields, we must choose one as the primary key; we
* currently arbitrarily pick the email.
*/
-extern int ident_cmp(const struct ident_split *, const struct ident_split *);
+int ident_cmp(const struct ident_split *, const struct ident_split *);
struct checkout {
struct index_state *istate;
#define CHECKOUT_INIT { NULL, "" }
#define TEMPORARY_FILENAME_LENGTH 25
-extern int checkout_entry(struct cache_entry *ce, const struct checkout *state, char *topath, int *nr_checkouts);
-extern void enable_delayed_checkout(struct checkout *state);
-extern int finish_delayed_checkout(struct checkout *state, int *nr_checkouts);
+int checkout_entry(struct cache_entry *ce, const struct checkout *state, char *topath, int *nr_checkouts);
+void enable_delayed_checkout(struct checkout *state);
+int finish_delayed_checkout(struct checkout *state, int *nr_checkouts);
+/*
+ * Unlink the last component and schedule the leading directories for
+ * removal, such that empty directories get removed.
+ */
+void unlink_entry(const struct cache_entry *ce);
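
As the comment above says, unlink_entry() only queues the emptied leading directories; a later remove_scheduled_dirs() call (declared further down in this header) actually removes them. A minimal sketch of that pairing, with a hypothetical should_remove() predicate standing in for real removal logic:

	for (i = 0; i < istate->cache_nr; i++) {
		const struct cache_entry *ce = istate->cache[i];
		if (should_remove(ce))          /* hypothetical predicate */
			unlink_entry(ce);       /* unlink file, queue parent dirs */
	}
	remove_scheduled_dirs();                /* rmdir directories left empty */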
struct cache_def {
struct strbuf path;
strbuf_release(&cache->path);
}
-extern int has_symlink_leading_path(const char *name, int len);
-extern int threaded_has_symlink_leading_path(struct cache_def *, const char *, int);
-extern int check_leading_path(const char *name, int len);
-extern int has_dirs_only_path(const char *name, int len, int prefix_len);
-extern void schedule_dir_for_removal(const char *name, int len);
-extern void remove_scheduled_dirs(void);
+int has_symlink_leading_path(const char *name, int len);
+int threaded_has_symlink_leading_path(struct cache_def *, const char *, int);
+int check_leading_path(const char *name, int len);
+int has_dirs_only_path(const char *name, int len, int prefix_len);
+void schedule_dir_for_removal(const char *name, int len);
+void remove_scheduled_dirs(void);
struct pack_window {
struct pack_window *next;
* usual "XXXXXX" trailer, and the resulting filename is written into the
* "template" buffer. Returns the open descriptor.
*/
-extern int odb_mkstemp(struct strbuf *temp_filename, const char *pattern);
+int odb_mkstemp(struct strbuf *temp_filename, const char *pattern);
/*
* Create a pack .keep file named "name" (which should generally be the output
* of odb_pack_name). Returns a file descriptor opened for writing, or -1 on
* error.
*/
-extern int odb_pack_keep(const char *name);
+int odb_pack_keep(const char *name);
/*
* Set this to 0 to prevent oid_object_info_extended() from fetching missing
extern int fetch_if_missing;
/* Dumb servers support */
-extern int update_server_info(int);
+int update_server_info(int);
-extern const char *get_log_output_encoding(void);
-extern const char *get_commit_output_encoding(void);
+const char *get_log_output_encoding(void);
+const char *get_commit_output_encoding(void);
/*
* This is a hack for test programs like test-dump-untracked-cache to
*/
extern int ignore_untracked_cache_config;
-extern int committer_ident_sufficiently_given(void);
-extern int author_ident_sufficiently_given(void);
+int committer_ident_sufficiently_given(void);
+int author_ident_sufficiently_given(void);
extern const char *git_commit_encoding;
extern const char *git_log_output_encoding;
extern const char *git_mailmap_blob;
/* IO helper functions */
-extern void maybe_flush_or_die(FILE *, const char *);
+void maybe_flush_or_die(FILE *, const char *);
__attribute__((format (printf, 2, 3)))
-extern void fprintf_or_die(FILE *, const char *fmt, ...);
+void fprintf_or_die(FILE *, const char *fmt, ...);
#define COPY_READ_ERROR (-2)
#define COPY_WRITE_ERROR (-3)
-extern int copy_fd(int ifd, int ofd);
-extern int copy_file(const char *dst, const char *src, int mode);
-extern int copy_file_with_time(const char *dst, const char *src, int mode);
+int copy_fd(int ifd, int ofd);
+int copy_file(const char *dst, const char *src, int mode);
+int copy_file_with_time(const char *dst, const char *src, int mode);
-extern void write_or_die(int fd, const void *buf, size_t count);
-extern void fsync_or_die(int fd, const char *);
+void write_or_die(int fd, const void *buf, size_t count);
+void fsync_or_die(int fd, const char *);
-extern ssize_t read_in_full(int fd, void *buf, size_t count);
-extern ssize_t write_in_full(int fd, const void *buf, size_t count);
-extern ssize_t pread_in_full(int fd, void *buf, size_t count, off_t offset);
+ssize_t read_in_full(int fd, void *buf, size_t count);
+ssize_t write_in_full(int fd, const void *buf, size_t count);
+ssize_t pread_in_full(int fd, void *buf, size_t count, off_t offset);
static inline ssize_t write_str_in_full(int fd, const char *str)
{
* Open (and truncate) the file at path, write the contents of buf to it,
* and close it. Dies if any errors are encountered.
*/
-extern void write_file_buf(const char *path, const char *buf, size_t len);
+void write_file_buf(const char *path, const char *buf, size_t len);
/**
* Like write_file_buf(), but format the contents into a buffer first.
* write_file(path, "counter: %d", ctr);
*/
__attribute__((format (printf, 2, 3)))
-extern void write_file(const char *path, const char *fmt, ...);
+void write_file(const char *path, const char *fmt, ...);
/* pager.c */
-extern void setup_pager(void);
-extern int pager_in_use(void);
+void setup_pager(void);
+int pager_in_use(void);
extern int pager_use_color;
-extern int term_columns(void);
-extern int decimal_width(uintmax_t);
-extern int check_pager_config(const char *cmd);
-extern void prepare_pager_args(struct child_process *, const char *pager);
+int term_columns(void);
+int decimal_width(uintmax_t);
+int check_pager_config(const char *cmd);
+void prepare_pager_args(struct child_process *, const char *pager);
extern const char *editor_program;
extern const char *askpass_program;
/* All WS_* -- when extended, adapt diff.c emit_symbol */
#define WS_RULE_MASK 07777
extern unsigned whitespace_rule_cfg;
-extern unsigned whitespace_rule(struct index_state *, const char *);
-extern unsigned parse_whitespace_rule(const char *);
-extern unsigned ws_check(const char *line, int len, unsigned ws_rule);
-extern void ws_check_emit(const char *line, int len, unsigned ws_rule, FILE *stream, const char *set, const char *reset, const char *ws);
-extern char *whitespace_error_string(unsigned ws);
-extern void ws_fix_copy(struct strbuf *, const char *, int, unsigned, int *);
-extern int ws_blank_line(const char *line, int len, unsigned ws_rule);
+unsigned whitespace_rule(struct index_state *, const char *);
+unsigned parse_whitespace_rule(const char *);
+unsigned ws_check(const char *line, int len, unsigned ws_rule);
+void ws_check_emit(const char *line, int len, unsigned ws_rule, FILE *stream, const char *set, const char *reset, const char *ws);
+char *whitespace_error_string(unsigned ws);
+void ws_fix_copy(struct strbuf *, const char *, int, unsigned, int *);
+int ws_blank_line(const char *line, int len, unsigned ws_rule);
#define ws_tab_width(rule) ((rule) & WS_TAB_WIDTH_MASK)
/* ls-files */
* Should we print an ellipsis after an abbreviated SHA-1 value
* when doing diff-raw output or indicating a detached HEAD?
*/
-extern int print_sha1_ellipsis(void);
+int print_sha1_ellipsis(void);
/* Return 1 if the file is empty or does not exist, 0 otherwise. */
-extern int is_empty_or_missing_file(const char *filename);
+int is_empty_or_missing_file(const char *filename);
#endif /* CACHE_H */
+++ /dev/null
-#include "cache.h"
-
-int main(int ac, char **av)
-{
- int i;
- int dirty, clean, racy;
-
- dirty = clean = racy = 0;
- read_cache();
- for (i = 0; i < active_nr; i++) {
- struct cache_entry *ce = active_cache[i];
- struct stat st;
-
- if (lstat(ce->name, &st)) {
- error_errno("lstat(%s)", ce->name);
- continue;
- }
-
- if (ce_match_stat(ce, &st, 0))
- dirty++;
- else if (ce_match_stat(ce, &st, CE_MATCH_RACY_IS_DIRTY))
- racy++;
- else
- clean++;
- }
- printf("dirty %d, clean %d, racy %d\n", dirty, clean, racy);
- return 0;
-}
* tracking branch. Return the name of the remote if such a branch
* exists, NULL otherwise.
*/
-extern const char *unique_tracking_name(const char *name,
- struct object_id *oid,
- int *dwim_remotes_matched);
+const char *unique_tracking_name(const char *name,
+ struct object_id *oid,
+ int *dwim_remotes_matched);
#endif /* CHECKOUT_H */
Documentation)
sudo apt-get -q update
sudo apt-get -q -y install asciidoc xmlto
+
+ test -n "$ALREADY_HAVE_ASCIIDOCTOR" ||
+ gem install --version 1.5.8 asciidoctor
;;
esac
+++ /dev/null
-#!/usr/bin/env bash
-#
-# Script to trigger the Git for Windows build and test run.
-# Set the $GFW_CI_TOKEN as environment variable.
-# Pass the branch (only branches on https://github.com/git/git are
-# supported) and a commit hash.
-#
-
-. ${0%/*}/lib.sh
-
-test $# -ne 2 && echo "Unexpected number of parameters" && exit 1
-test -z "$GFW_CI_TOKEN" && echo "GFW_CI_TOKEN not defined" && exit
-
-BRANCH=$1
-COMMIT=$2
-
-gfwci () {
- local CURL_ERROR_CODE HTTP_CODE
- CONTENT_FILE=$(mktemp -t "git-windows-ci-XXXXXX")
- while test -z $HTTP_CODE
- do
- HTTP_CODE=$(curl \
- -H "Authentication: Bearer $GFW_CI_TOKEN" \
- --silent --retry 5 --write-out '%{HTTP_CODE}' \
- --output >(sed "$(printf '1s/^\xef\xbb\xbf//')" >$CONTENT_FILE) \
- "https://git-for-windows-ci.azurewebsites.net/api/TestNow?$1" \
- )
- CURL_ERROR_CODE=$?
- # The GfW CI web app sometimes returns HTTP errors of
- # "502 bad gateway" or "503 service unavailable".
- # We also need to check the HTTP content because the GfW web
- # app seems to pass through (error) results from other Azure
- # calls with HTTP code 200.
- # Wait a little and retry if we detect this error. More info:
- # https://docs.microsoft.com/en-in/azure/app-service-web/app-service-web-troubleshoot-http-502-http-503
- if test $HTTP_CODE -eq 502 ||
- test $HTTP_CODE -eq 503 ||
- grep "502 - Web server received an invalid response" $CONTENT_FILE >/dev/null
- then
- sleep 10
- HTTP_CODE=
- fi
- done
- cat $CONTENT_FILE
- rm $CONTENT_FILE
- if test $CURL_ERROR_CODE -ne 0
- then
- return $CURL_ERROR_CODE
- fi
- if test "$HTTP_CODE" -ge 400 && test "$HTTP_CODE" -lt 600
- then
- return 127
- fi
-}
-
-# Trigger build job
-BUILD_ID=$(gfwci "action=trigger&branch=$BRANCH&commit=$COMMIT&skipTests=false")
-if test $? -ne 0
-then
- echo "Unable to trigger Visual Studio Team Services Build"
- echo "$BUILD_ID"
- exit 1
-fi
-
-# Check if the $BUILD_ID contains a number
-case $BUILD_ID in
-''|*[!0-9]*) echo "Unexpected build number: $BUILD_ID" && exit 1
-esac
-
-echo "Visual Studio Team Services Build #${BUILD_ID}"
-
-# Tracing execued commands would produce too much noise in the waiting
-# loop below.
-set +x
-
-# Wait until build job finished
-STATUS=
-RESULT=
-while true
-do
- LAST_STATUS=$STATUS
- STATUS=$(gfwci "action=status&buildId=$BUILD_ID")
- test "$STATUS" = "$LAST_STATUS" || printf "\nStatus: %s " "$STATUS"
- printf "."
-
- case "$STATUS" in
- inProgress|postponed|notStarted) sleep 10 ;; # continue
- "completed: succeeded") RESULT="success"; break;; # success
- "completed: failed") break;; # failure
- *) echo "Unhandled status: $STATUS"; break;; # unknown
- esac
-done
-
-# Print log
-echo ""
-echo ""
-set -x
-gfwci "action=log&buildId=$BUILD_ID" | cut -c 30-
-
-# Set exit code for TravisCI
-test "$RESULT" = "success"
-
-save_good_tree
. ${0%/*}/lib.sh
-test -n "$ALREADY_HAVE_ASCIIDOCTOR" ||
-gem install asciidoctor
+filter_log () {
+ sed -e '/^GIT_VERSION = /d' \
+ -e '/^ \* new asciidoc flags$/d' \
+ "$1"
+}
make check-builtins
make check-docs
# Build docs with AsciiDoc
-make doc > >(tee stdout.log) 2> >(tee stderr.log >&2)
-! test -s stderr.log
+make doc > >(tee stdout.log) 2> >(tee stderr.raw >&2)
+cat stderr.raw
+filter_log stderr.raw >stderr.log
+test ! -s stderr.log
test -s Documentation/git.html
test -s Documentation/git.xml
test -s Documentation/git.1
grep '<meta name="generator" content="AsciiDoc ' Documentation/git.html
-rm -f stdout.log stderr.log
+rm -f stdout.log stderr.log stderr.raw
check_unignored_build_artifacts
# Build docs with AsciiDoctor
make clean
-make USE_ASCIIDOCTOR=1 doc > >(tee stdout.log) 2> >(tee stderr.log >&2)
-sed '/^GIT_VERSION = / d' stderr.log
-! test -s stderr.log
+make USE_ASCIIDOCTOR=1 doc > >(tee stdout.log) 2> >(tee stderr.raw >&2)
+cat stderr.raw
+filter_log stderr.raw >stderr.log
+test ! -s stderr.log
test -s Documentation/git.html
grep '<meta name="generator" content="Asciidoctor ' Documentation/git.html
-rm -f stdout.log stderr.log
+rm -f stdout.log stderr.log stderr.raw
check_unignored_build_artifacts
save_good_tree
};
struct option;
-extern int parseopt_column_callback(const struct option *, const char *, int);
-extern int git_column_config(const char *var, const char *value,
- const char *command, unsigned int *colopts);
-extern int finalize_colopts(unsigned int *colopts, int stdout_is_tty);
+int parseopt_column_callback(const struct option *, const char *, int);
+int git_column_config(const char *var, const char *value,
+ const char *command, unsigned int *colopts);
+int finalize_colopts(unsigned int *colopts, int stdout_is_tty);
static inline int column_active(unsigned int colopts)
{
return (colopts & COL_ENABLE_MASK) == COL_ENABLED;
}
struct string_list;
-extern void print_columns(const struct string_list *list, unsigned int colopts,
- const struct column_options *opts);
+void print_columns(const struct string_list *list, unsigned int colopts,
+ const struct column_options *opts);
-extern int run_column_filter(int colopts, const struct column_options *);
-extern int stop_column_filter(void);
+int run_column_filter(int colopts, const struct column_options *);
+int stop_column_filter(void);
#endif
two->path, strlen(two->path), two->mode);
}
-static struct combine_diff_path *intersect_paths(struct combine_diff_path *curr, int n, int num_parent)
+static int filename_changed(char status)
+{
+ return status == 'R' || status == 'C';
+}
+
+static struct combine_diff_path *intersect_paths(
+ struct combine_diff_path *curr,
+ int n,
+ int num_parent,
+ int combined_all_paths)
{
struct diff_queue_struct *q = &diff_queued_diff;
struct combine_diff_path *p, **tail = &curr;
- int i, cmp;
+ int i, j, cmp;
if (!n) {
for (i = 0; i < q->nr; i++) {
oidcpy(&p->parent[n].oid, &q->queue[i]->one->oid);
p->parent[n].mode = q->queue[i]->one->mode;
p->parent[n].status = q->queue[i]->status;
+
+ if (combined_all_paths &&
+ filename_changed(p->parent[n].status)) {
+ strbuf_init(&p->parent[n].path, 0);
+ strbuf_addstr(&p->parent[n].path,
+ q->queue[i]->one->path);
+ }
*tail = p;
tail = &p->next;
}
if (cmp < 0) {
/* p->path not in q->queue[]; drop it */
*tail = p->next;
+ for (j = 0; j < num_parent; j++)
+ if (combined_all_paths &&
+ filename_changed(p->parent[j].status))
+ strbuf_release(&p->parent[j].path);
free(p);
continue;
}
oidcpy(&p->parent[n].oid, &q->queue[i]->one->oid);
p->parent[n].mode = q->queue[i]->one->mode;
p->parent[n].status = q->queue[i]->status;
+ if (combined_all_paths &&
+ filename_changed(p->parent[n].status))
+ strbuf_addstr(&p->parent[n].path,
+ q->queue[i]->one->path);
tail = &p->next;
i++;
if (!show_file_header)
return;
- if (added)
- dump_quoted_path("--- ", "", "/dev/null",
- line_prefix, c_meta, c_reset);
- else
- dump_quoted_path("--- ", a_prefix, elem->path,
- line_prefix, c_meta, c_reset);
+ if (rev->combined_all_paths) {
+ for (i = 0; i < num_parent; i++) {
+ char *path = filename_changed(elem->parent[i].status)
+ ? elem->parent[i].path.buf : elem->path;
+ if (elem->parent[i].status == DIFF_STATUS_ADDED)
+ dump_quoted_path("--- ", "", "/dev/null",
+ line_prefix, c_meta, c_reset);
+ else
+ dump_quoted_path("--- ", a_prefix, path,
+ line_prefix, c_meta, c_reset);
+ }
+ } else {
+ if (added)
+ dump_quoted_path("--- ", "", "/dev/null",
+ line_prefix, c_meta, c_reset);
+ else
+ dump_quoted_path("--- ", a_prefix, elem->path,
+ line_prefix, c_meta, c_reset);
+ }
if (deleted)
dump_quoted_path("+++ ", "", "/dev/null",
line_prefix, c_meta, c_reset);
putchar(inter_name_termination);
}
+ for (i = 0; i < num_parent; i++)
+ if (rev->combined_all_paths) {
+ if (filename_changed(p->parent[i].status))
+ write_name_quoted(p->parent[i].path.buf, stdout,
+ inter_name_termination);
+ else
+ write_name_quoted(p->path, stdout,
+ inter_name_termination);
+ }
write_name_quoted(p->path, stdout, line_termination);
}
/* find set of paths that every parent touches */
static struct combine_diff_path *find_paths_generic(const struct object_id *oid,
- const struct oid_array *parents, struct diff_options *opt)
+ const struct oid_array *parents,
+ struct diff_options *opt,
+ int combined_all_paths)
{
struct combine_diff_path *paths = NULL;
int i, num_parent = parents->nr;
opt->output_format = DIFF_FORMAT_NO_OUTPUT;
diff_tree_oid(&parents->oid[i], oid, "", opt);
diffcore_std(opt);
- paths = intersect_paths(paths, i, num_parent);
+ paths = intersect_paths(paths, i, num_parent,
+ combined_all_paths);
/* if showing diff, show it in requested order */
if (opt->output_format != DIFF_FORMAT_NO_OUTPUT &&
* diff(sha1,parent_i) for all i to do the job, specifically
* for parent0.
*/
- paths = find_paths_generic(oid, parents, &diffopts);
+ paths = find_paths_generic(oid, parents, &diffopts,
+ rev->combined_all_paths);
}
else {
int stat_opt;
while (paths) {
struct combine_diff_path *tmp = paths;
paths = paths->next;
+ for (i = 0; i < num_parent; i++)
+ if (rev->combined_all_paths &&
+ filename_changed(tmp->parent[i].status))
+ strbuf_release(&tmp->parent[i].path);
free(tmp);
}
return 1;
}
-struct commit_graph *load_commit_graph_one(const char *graph_file)
+int open_commit_graph(const char *graph_file, int *fd, struct stat *st)
+{
+ *fd = git_open(graph_file);
+ if (*fd < 0)
+ return 0;
+ if (fstat(*fd, st)) {
+ close(*fd);
+ return 0;
+ }
+ return 1;
+}
+
+struct commit_graph *load_commit_graph_one_fd_st(int fd, struct stat *st)
{
void *graph_map;
size_t graph_size;
- struct stat st;
struct commit_graph *ret;
- int fd = git_open(graph_file);
- if (fd < 0)
- return NULL;
- if (fstat(fd, &st)) {
- close(fd);
- return NULL;
- }
- graph_size = xsize_t(st.st_size);
+ graph_size = xsize_t(st->st_size);
if (graph_size < GRAPH_MIN_SIZE) {
close(fd);
- die(_("graph file %s is too small"), graph_file);
+ error(_("commit-graph file is too small"));
+ return NULL;
}
graph_map = xmmap(NULL, graph_size, PROT_READ, MAP_PRIVATE, fd, 0);
ret = parse_commit_graph(graph_map, fd, graph_size);
if (!ret) {
munmap(graph_map, graph_size);
close(fd);
- exit(1);
}
return ret;
}
+static int verify_commit_graph_lite(struct commit_graph *g)
+{
+ /*
+ * Basic validation shared between parse_commit_graph()
+ * which'll be called every time the graph is used, and the
+ * much more expensive verify_commit_graph() used by
+ * "commit-graph verify".
+ *
+	 * There should only be very basic checks here to ensure that
+	 * we don't e.g. segfault in fill_commit_in_graph(); because
+	 * this is a very hot codepath, nothing here should e.g. loop
+	 * over g->num_commits or run a checksum on the commit-graph
+	 * itself.
+ */
+ if (!g->chunk_oid_fanout) {
+ error("commit-graph is missing the OID Fanout chunk");
+ return 1;
+ }
+ if (!g->chunk_oid_lookup) {
+ error("commit-graph is missing the OID Lookup chunk");
+ return 1;
+ }
+ if (!g->chunk_commit_data) {
+ error("commit-graph is missing the Commit Data chunk");
+ return 1;
+ }
+
+ return 0;
+}
+
struct commit_graph *parse_commit_graph(void *graph_map, int fd,
size_t graph_size)
{
graph_signature = get_be32(data);
if (graph_signature != GRAPH_SIGNATURE) {
- error(_("graph signature %X does not match signature %X"),
+ error(_("commit-graph signature %X does not match signature %X"),
graph_signature, GRAPH_SIGNATURE);
return NULL;
}
graph_version = *(unsigned char*)(data + 4);
if (graph_version != GRAPH_VERSION) {
- error(_("graph version %X does not match version %X"),
+ error(_("commit-graph version %X does not match version %X"),
graph_version, GRAPH_VERSION);
return NULL;
}
hash_version = *(unsigned char*)(data + 5);
if (hash_version != oid_version()) {
- error(_("hash version %X does not match version %X"),
+ error(_("commit-graph hash version %X does not match version %X"),
hash_version, oid_version());
return NULL;
}
if (data + graph_size - chunk_lookup <
GRAPH_CHUNKLOOKUP_WIDTH) {
- error(_("chunk lookup table entry missing; graph file may be incomplete"));
+ error(_("commit-graph chunk lookup table entry missing; file may be incomplete"));
free(graph);
return NULL;
}
chunk_lookup += GRAPH_CHUNKLOOKUP_WIDTH;
if (chunk_offset > graph_size - the_hash_algo->rawsz) {
- error(_("improper chunk offset %08x%08x"), (uint32_t)(chunk_offset >> 32),
+ error(_("commit-graph improper chunk offset %08x%08x"), (uint32_t)(chunk_offset >> 32),
(uint32_t)chunk_offset);
free(graph);
return NULL;
}
if (chunk_repeated) {
- error(_("chunk id %08x appears multiple times"), chunk_id);
+ error(_("commit-graph chunk id %08x appears multiple times"), chunk_id);
free(graph);
return NULL;
}
last_chunk_offset = chunk_offset;
}
+ if (verify_commit_graph_lite(graph)) {
+ free(graph);
+ return NULL;
+ }
+
return graph;
}
+static struct commit_graph *load_commit_graph_one(const char *graph_file)
+{
+ struct stat st;
+ int fd;
+ int open_ok = open_commit_graph(graph_file, &fd, &st);
+
+ if (!open_ok)
+ return NULL;
+
+ return load_commit_graph_one_fd_st(fd, &st);
+}
+
static void prepare_commit_graph_one(struct repository *r, const char *obj_dir)
{
char *graph_name;
struct object_directory *odb;
int config_value;
+ if (git_env_bool(GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD, 0))
+ die("dying as requested by the '%s' variable on commit-graph load!",
+ GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD);
+
if (r->objects->commit_graph_attempted)
return !!r->objects->commit_graph;
r->objects->commit_graph_attempted = 1;
item->generation = get_be32(commit_data + g->hash_len + 8) >> 2;
}
+static inline void set_commit_tree(struct commit *c, struct tree *t)
+{
+ c->maybe_tree = t;
+}
+
static int fill_commit_in_graph(struct repository *r,
struct commit *item,
struct commit_graph *g, uint32_t pos)
item->object.parsed = 1;
item->graph_pos = pos;
- item->maybe_tree = NULL;
+ set_commit_tree(item, NULL);
date_high = get_be32(commit_data + g->hash_len + 8) & 0x3;
date_low = get_be32(commit_data + g->hash_len + 12);
GRAPH_DATA_WIDTH * (c->graph_pos);
hashcpy(oid.hash, commit_data);
- c->maybe_tree = lookup_tree(r, &oid);
+ set_commit_tree(c, lookup_tree(r, &oid));
return c->maybe_tree;
}
uint32_t packedDate[2];
display_progress(progress, ++*progress_cnt);
- parse_commit(*list);
+ parse_commit_no_graph(*list);
hashwrite(f, get_commit_tree_oid(*list)->hash, hash_len);
parent = (*list)->parents;
display_progress(progress, i + 1);
commit = lookup_commit(the_repository, &oids->list[i]);
- if (commit && !parse_commit(commit))
+ if (commit && !parse_commit_no_graph(commit))
add_missing_parents(oids, commit);
}
stop_progress(&progress);
continue;
commits.list[commits.nr] = lookup_commit(the_repository, &oids.list[i]);
- parse_commit(commits.list[commits.nr]);
+ parse_commit_no_graph(commits.list[commits.nr]);
for (parent = commits.list[commits.nr]->parents;
parent; parent = parent->next)
return 1;
}
- verify_commit_graph_error = 0;
-
- if (!g->chunk_oid_fanout)
- graph_report("commit-graph is missing the OID Fanout chunk");
- if (!g->chunk_oid_lookup)
- graph_report("commit-graph is missing the OID Lookup chunk");
- if (!g->chunk_commit_data)
- graph_report("commit-graph is missing the Commit Data chunk");
-
+ verify_commit_graph_error = verify_commit_graph_lite(g);
if (verify_commit_graph_error)
return verify_commit_graph_error;
hashcpy(cur_oid.hash, g->chunk_oid_lookup + g->hash_len * i);
if (i && oidcmp(&prev_oid, &cur_oid) >= 0)
- graph_report("commit-graph has incorrect OID order: %s then %s",
+ graph_report(_("commit-graph has incorrect OID order: %s then %s"),
oid_to_hex(&prev_oid),
oid_to_hex(&cur_oid));
uint32_t fanout_value = get_be32(g->chunk_oid_fanout + cur_fanout_pos);
if (i != fanout_value)
- graph_report("commit-graph has incorrect fanout value: fanout[%d] = %u != %u",
+ graph_report(_("commit-graph has incorrect fanout value: fanout[%d] = %u != %u"),
cur_fanout_pos, fanout_value, i);
cur_fanout_pos++;
}
graph_commit = lookup_commit(r, &cur_oid);
if (!parse_commit_in_graph_one(r, g, graph_commit))
- graph_report("failed to parse %s from commit-graph",
+ graph_report(_("failed to parse commit %s from commit-graph"),
oid_to_hex(&cur_oid));
}
uint32_t fanout_value = get_be32(g->chunk_oid_fanout + cur_fanout_pos);
if (g->num_commits != fanout_value)
- graph_report("commit-graph has incorrect fanout value: fanout[%d] = %u != %u",
+ graph_report(_("commit-graph has incorrect fanout value: fanout[%d] = %u != %u"),
cur_fanout_pos, fanout_value, i);
cur_fanout_pos++;
graph_commit = lookup_commit(r, &cur_oid);
odb_commit = (struct commit *)create_object(r, cur_oid.hash, alloc_commit_node(r));
if (parse_commit_internal(odb_commit, 0, 0)) {
- graph_report("failed to parse %s from object database",
+ graph_report(_("failed to parse commit %s from object database for commit-graph"),
oid_to_hex(&cur_oid));
continue;
}
if (!oideq(&get_commit_tree_in_graph_one(r, g, graph_commit)->object.oid,
get_commit_tree_oid(odb_commit)))
- graph_report("root tree OID for commit %s in commit-graph is %s != %s",
+ graph_report(_("root tree OID for commit %s in commit-graph is %s != %s"),
oid_to_hex(&cur_oid),
oid_to_hex(get_commit_tree_oid(graph_commit)),
oid_to_hex(get_commit_tree_oid(odb_commit)));
while (graph_parents) {
if (odb_parents == NULL) {
- graph_report("commit-graph parent list for commit %s is too long",
+ graph_report(_("commit-graph parent list for commit %s is too long"),
oid_to_hex(&cur_oid));
break;
}
if (!oideq(&graph_parents->item->object.oid, &odb_parents->item->object.oid))
- graph_report("commit-graph parent for %s is %s != %s",
+ graph_report(_("commit-graph parent for %s is %s != %s"),
oid_to_hex(&cur_oid),
oid_to_hex(&graph_parents->item->object.oid),
oid_to_hex(&odb_parents->item->object.oid));
}
if (odb_parents != NULL)
- graph_report("commit-graph parent list for commit %s terminates early",
+ graph_report(_("commit-graph parent list for commit %s terminates early"),
oid_to_hex(&cur_oid));
if (!graph_commit->generation) {
if (generation_zero == GENERATION_NUMBER_EXISTS)
- graph_report("commit-graph has generation number zero for commit %s, but non-zero elsewhere",
+ graph_report(_("commit-graph has generation number zero for commit %s, but non-zero elsewhere"),
oid_to_hex(&cur_oid));
generation_zero = GENERATION_ZERO_EXISTS;
} else if (generation_zero == GENERATION_ZERO_EXISTS)
- graph_report("commit-graph has non-zero generation number for commit %s, but zero elsewhere",
+ graph_report(_("commit-graph has non-zero generation number for commit %s, but zero elsewhere"),
oid_to_hex(&cur_oid));
if (generation_zero == GENERATION_ZERO_EXISTS)
max_generation--;
if (graph_commit->generation != max_generation + 1)
- graph_report("commit-graph generation for commit %s is %u != %u",
+ graph_report(_("commit-graph generation for commit %s is %u != %u"),
oid_to_hex(&cur_oid),
graph_commit->generation,
max_generation + 1);
if (graph_commit->date != odb_commit->date)
- graph_report("commit date for commit %s in commit-graph is %"PRItime" != %"PRItime,
+ graph_report(_("commit date for commit %s in commit-graph is %"PRItime" != %"PRItime),
oid_to_hex(&cur_oid),
graph_commit->date,
odb_commit->date);
#include "cache.h"
#define GIT_TEST_COMMIT_GRAPH "GIT_TEST_COMMIT_GRAPH"
+#define GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD "GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD"
struct commit;
char *get_commit_graph_filename(const char *obj_dir);
+int open_commit_graph(const char *graph_file, int *fd, struct stat *st);
/*
* Given a commit struct, try to fill the commit struct info, including:
const unsigned char *chunk_extra_edges;
};
-struct commit_graph *load_commit_graph_one(const char *graph_file);
+struct commit_graph *load_commit_graph_one_fd_st(int fd, struct stat *st);
struct commit_graph *parse_commit_graph(void *graph_map, int fd,
size_t graph_size);
}
}
-struct tree *get_commit_tree(const struct commit *commit)
+static inline void set_commit_tree(struct commit *c, struct tree *t)
+{
+ c->maybe_tree = t;
+}
+
+struct tree *repo_get_commit_tree(struct repository *r,
+ const struct commit *commit)
{
if (commit->maybe_tree || !commit->object.parsed)
return commit->maybe_tree;
- if (commit->graph_pos == COMMIT_NOT_FROM_GRAPH)
- BUG("commit has NULL tree, but was not loaded from commit-graph");
+ if (commit->graph_pos != COMMIT_NOT_FROM_GRAPH)
+ return get_commit_tree_in_graph(r, commit);
- return get_commit_tree_in_graph(the_repository, commit);
+ return NULL;
}
struct object_id *get_commit_tree_oid(const struct commit *commit)
void release_commit_memory(struct parsed_object_pool *pool, struct commit *c)
{
- c->maybe_tree = NULL;
+ set_commit_tree(c, NULL);
c->index = 0;
free_commit_buffer(pool, c);
free_commit_list(c->parents);
if (get_oid_hex(bufptr + 5, &parent) < 0)
return error("bad tree pointer in commit %s",
oid_to_hex(&item->object.oid));
- item->maybe_tree = lookup_tree(r, &parent);
+ set_commit_tree(item, lookup_tree(r, &parent));
bufptr += tree_entry_len + 1; /* "tree " + "hex sha1" + "\n" */
pptr = &item->parents;
/*
* If the commit is loaded from the commit-graph file, then this
- * member may be NULL. Only access it through get_commit_tree()
+ * member may be NULL. Only access it through repo_get_commit_tree()
* or get_commit_tree_oid().
*/
struct tree *maybe_tree;
{
return repo_parse_commit_gently(r, item, 0);
}
+
+static inline int parse_commit_no_graph(struct commit *commit)
+{
+ return repo_parse_commit_internal(the_repository, commit, 0, 0);
+}
+
#ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
#define parse_commit_internal(item, quiet, use) repo_parse_commit_internal(the_repository, item, quiet, use)
#define parse_commit_gently(item, quiet) repo_parse_commit_gently(the_repository, item, quiet)
*/
void free_commit_buffer(struct parsed_object_pool *pool, struct commit *);
-struct tree *get_commit_tree(const struct commit *);
+struct tree *repo_get_commit_tree(struct repository *, const struct commit *);
+#define get_commit_tree(c) repo_get_commit_tree(the_repository, c)
struct object_id *get_commit_tree_oid(const struct commit *);
/*
struct rev_info; /* in revision.h, it circularly uses enum cmit_fmt */
-extern int has_non_ascii(const char *text);
-extern const char *logmsg_reencode(const struct commit *commit,
- char **commit_encoding,
- const char *output_encoding);
+int has_non_ascii(const char *text);
+const char *logmsg_reencode(const struct commit *commit,
+ char **commit_encoding,
+ const char *output_encoding);
const char *repo_logmsg_reencode(struct repository *r,
const struct commit *commit,
char **commit_encoding,
#define logmsg_reencode(c, enc, out) repo_logmsg_reencode(the_repository, c, enc, out)
#endif
-extern const char *skip_blank_lines(const char *msg);
+const char *skip_blank_lines(const char *msg);
/** Removes the first commit from a list sorted by date, and adds all
* of its parents.
struct oid_array;
struct ref;
-extern int register_shallow(struct repository *r, const struct object_id *oid);
-extern int unregister_shallow(const struct object_id *oid);
-extern int for_each_commit_graft(each_commit_graft_fn, void *);
-extern int is_repository_shallow(struct repository *r);
-extern struct commit_list *get_shallow_commits(struct object_array *heads,
- int depth, int shallow_flag, int not_shallow_flag);
-extern struct commit_list *get_shallow_commits_by_rev_list(
+int register_shallow(struct repository *r, const struct object_id *oid);
+int unregister_shallow(const struct object_id *oid);
+int for_each_commit_graft(each_commit_graft_fn, void *);
+int is_repository_shallow(struct repository *r);
+struct commit_list *get_shallow_commits(struct object_array *heads,
+ int depth, int shallow_flag, int not_shallow_flag);
+struct commit_list *get_shallow_commits_by_rev_list(
int ac, const char **av, int shallow_flag, int not_shallow_flag);
-extern void set_alternate_shallow_file(struct repository *r, const char *path, int override);
-extern int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
- const struct oid_array *extra);
-extern void setup_alternate_shallow(struct lock_file *shallow_lock,
- const char **alternate_shallow_file,
- const struct oid_array *extra);
-extern const char *setup_temporary_shallow(const struct oid_array *extra);
-extern void advertise_shallow_grafts(int);
+void set_alternate_shallow_file(struct repository *r, const char *path, int override);
+int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
+ const struct oid_array *extra);
+void setup_alternate_shallow(struct lock_file *shallow_lock,
+ const char **alternate_shallow_file,
+ const struct oid_array *extra);
+const char *setup_temporary_shallow(const struct oid_array *extra);
+void advertise_shallow_grafts(int);
+/*
+ * Initialize with prepare_shallow_info() or zero-initialize (equivalent to
+ * prepare_shallow_info with a NULL oid_array).
+ */
struct shallow_info {
struct oid_array *shallow;
int *ours, nr_ours;
int nr_commits;
};
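A tiny hedged sketch of the two equivalent initializations described above (the function name example_shallow_use is illustrative):

    static void example_shallow_use(struct oid_array *shallow_oids)
    {
    	struct shallow_info si = { 0 };	/* same as prepare_shallow_info(&si, NULL) */

    	if (shallow_oids)
    		prepare_shallow_info(&si, shallow_oids);

    	/* ... inspect si.shallow, si.ours, si.nr_ours ... */

    	clear_shallow_info(&si);
    }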
-extern void prepare_shallow_info(struct shallow_info *, struct oid_array *);
-extern void clear_shallow_info(struct shallow_info *);
-extern void remove_nonexistent_theirs_shallow(struct shallow_info *);
-extern void assign_shallow_commits_to_refs(struct shallow_info *info,
- uint32_t **used,
- int *ref_status);
-extern int delayed_reachability_test(struct shallow_info *si, int c);
+void prepare_shallow_info(struct shallow_info *, struct oid_array *);
+void clear_shallow_info(struct shallow_info *);
+void remove_nonexistent_theirs_shallow(struct shallow_info *);
+void assign_shallow_commits_to_refs(struct shallow_info *info,
+ uint32_t **used,
+ int *ref_status);
+int delayed_reachability_test(struct shallow_info *si, int c);
#define PRUNE_SHOW_ONLY 1
#define PRUNE_QUICK 2
-extern void prune_shallow(unsigned options);
+void prune_shallow(unsigned options);
extern struct trace_key trace_shallow;
-extern int interactive_add(int argc, const char **argv, const char *prefix, int patch);
-extern int run_add_interactive(const char *revision, const char *patch_mode,
- const struct pathspec *pathspec);
+int interactive_add(int argc, const char **argv, const char *prefix, int patch);
+int run_add_interactive(const char *revision, const char *patch_mode,
+ const struct pathspec *pathspec);
struct commit_extra_header {
struct commit_extra_header *next;
size_t len;
};
-extern void append_merge_tag_headers(struct commit_list *parents,
- struct commit_extra_header ***tail);
+void append_merge_tag_headers(struct commit_list *parents,
+ struct commit_extra_header ***tail);
-extern int commit_tree(const char *msg, size_t msg_len,
- const struct object_id *tree,
- struct commit_list *parents, struct object_id *ret,
- const char *author, const char *sign_commit);
+int commit_tree(const char *msg, size_t msg_len,
+ const struct object_id *tree,
+ struct commit_list *parents, struct object_id *ret,
+ const char *author, const char *sign_commit);
-extern int commit_tree_extended(const char *msg, size_t msg_len,
- const struct object_id *tree,
- struct commit_list *parents,
- struct object_id *ret, const char *author,
- const char *sign_commit,
- struct commit_extra_header *);
+int commit_tree_extended(const char *msg, size_t msg_len,
+ const struct object_id *tree,
+ struct commit_list *parents,
+ struct object_id *ret, const char *author,
+ const char *sign_commit,
+ struct commit_extra_header *);
-extern struct commit_extra_header *read_commit_extra_headers(struct commit *, const char **);
+struct commit_extra_header *read_commit_extra_headers(struct commit *, const char **);
-extern void free_commit_extra_headers(struct commit_extra_header *extra);
+void free_commit_extra_headers(struct commit_extra_header *extra);
/*
* Search the commit object contents given by "msg" for the header "key".
* Note that some headers (like mergetag) may be multi-line. It is the caller's
* responsibility to parse further in this case!
*/
-extern const char *find_commit_header(const char *msg, const char *key,
- size_t *out_len);
+const char *find_commit_header(const char *msg, const char *key,
+ size_t *out_len);
/* Find the end of the log message, the right place for a new trailer. */
-extern size_t ignore_non_trailer(const char *buf, size_t len);
+size_t ignore_non_trailer(const char *buf, size_t len);
typedef int (*each_mergetag_fn)(struct commit *commit, struct commit_extra_header *extra,
- void *cb_data);
+ void *cb_data);
-extern int for_each_mergetag(each_mergetag_fn fn, struct commit *commit, void *data);
+int for_each_mergetag(each_mergetag_fn fn, struct commit *commit, void *data);
struct merge_remote_desc {
struct object *obj; /* the named object, could be a tag */
char name[FLEX_ARRAY];
};
-extern struct merge_remote_desc *merge_remote_util(struct commit *);
-extern void set_merge_remote_desc(struct commit *commit,
- const char *name, struct object *obj);
+struct merge_remote_desc *merge_remote_util(struct commit *);
+void set_merge_remote_desc(struct commit *commit,
+ const char *name, struct object *obj);
/*
* Given "name" from the command line to merge, find the commit object
*/
struct commit *get_merge_parent(const char *name);
-extern int parse_signed_commit(const struct commit *commit,
- struct strbuf *message, struct strbuf *signature);
-extern int remove_signature(struct strbuf *buf);
+int parse_signed_commit(const struct commit *commit,
+ struct strbuf *message, struct strbuf *signature);
+int remove_signature(struct strbuf *buf);
/*
* Check the signature of the given commit. The result of the check is stored
* at all. This may allocate memory for sig->gpg_output, sig->gpg_status,
* sig->signer and sig->key.
*/
-extern int check_commit_signature(const struct commit *commit, struct signature_check *sigc);
+int check_commit_signature(const struct commit *commit, struct signature_check *sigc);
/* record author-date for each commit object */
struct author_date_slab;
int compare_commits_by_gen_then_commit_date(const void *a_, const void *b_, void *unused);
LAST_ARG_MUST_BE_NULL
-extern int run_commit_hook(int editor_is_used, const char *index_file, const char *name, ...);
+int run_commit_hook(int editor_is_used, const char *index_file, const char *name, ...);
#endif /* COMMIT_H */
int main(int argc, const char **argv)
{
+ int result;
+
+ trace2_initialize_clock();
+
/*
* Always open file descriptors 0/1/2 to avoid clobbering files
* in die(). It also avoids messing up when the pipes are dup'ed
* onto stdin/stdout/stderr in the child processes we spawn.
*/
sanitize_stdfds();
+ restore_sigpipe_to_default();
git_resolve_executable_dir(argv[0]);
+ trace2_initialize();
+ trace2_cmd_start(argv);
+ trace2_collect_process_info(TRACE2_PROCESS_INFO_STARTUP);
+
git_setup_gettext();
initialize_the_repository();
attr_start();
- restore_sigpipe_to_default();
+ result = cmd_main(argc, argv);
+
+ trace2_cmd_exit(result);
- return cmd_main(argc, argv);
+ return result;
}
--- /dev/null
+#define COMPAT_CODE_ACCESS
+#include "../git-compat-util.h"
+
+/* Do the same thing access(2) does, but use the effective uid,
+ * and don't make the mistake of telling root that any file is
+ * executable. This version uses stat(2).
+ */
+int git_access(const char *path, int mode)
+{
+ struct stat st;
+
+ /* do not interfere with a normal user */
+ if (geteuid())
+ return access(path, mode);
+
+ if (stat(path, &st) < 0)
+ return -1;
+
+ /* Root can read or write any file. */
+ if (!(mode & X_OK))
+ return 0;
+
+ /* Root can execute any file that has any one of the execute
+ * bits set.
+ */
+ if (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))
+ return 0;
+
+ errno = EACCES;
+ return -1;
+}
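The COMPAT_CODE_ACCESS guard lets this file call the real access(2) even when the rest of the tree has access remapped to the wrapper. A hedged sketch of the glue git-compat-util.h is expected to provide when NEED_ACCESS_ROOT_HANDLER is set (see the AIX section of config.mak.uname further down); the exact form may differ:

    #ifdef NEED_ACCESS_ROOT_HANDLER
    int git_access(const char *path, int mode);
    #ifndef COMPAT_CODE_ACCESS
    #ifdef access
    #undef access
    #endif
    #define access git_access
    #endif
    #endif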
+#ifndef COMPAT_BSWAP_H
+#define COMPAT_BSWAP_H
+
/*
* Let's make sure we always have a sane definition for ntohl()/htonl().
* Some libraries define those as a function call, just to perform byte
}
#endif
+
+#endif /* COMPAT_BSWAP_H */
-#define COMPAT_CODE
+#define COMPAT_CODE_FILENO
#include "../git-compat-util.h"
int git_fileno(FILE *stream)
return 0;
prog = path_lookup(interpr, 1);
if (prog) {
+ int exec_id;
int argc = 0;
const char **argv2;
while (argv[argc]) argc++;
ALLOC_ARRAY(argv2, argc + 1);
argv2[0] = (char *)cmd; /* full path to the script file */
memcpy(&argv2[1], &argv[1], sizeof(*argv) * argc);
+ exec_id = trace2_exec(prog, argv2);
pid = mingw_spawnv(prog, argv2, 1);
if (pid >= 0) {
int status;
if (waitpid(pid, &status, 0) < 0)
status = 255;
+ trace2_exec_result(exec_id, status);
exit(status);
}
+ trace2_exec_result(exec_id, -1);
pid = 1; /* indicate that we tried but failed */
free(prog);
free(argv2);
/* check if git_command is a shell script */
if (!try_shell_exec(cmd, argv)) {
int pid, status;
+ int exec_id;
+ exec_id = trace2_exec(cmd, (const char **)argv);
pid = mingw_spawnv(cmd, (const char **)argv, 0);
- if (pid < 0)
+ if (pid < 0) {
+ trace2_exec_result(exec_id, -1);
return -1;
+ }
if (waitpid(pid, &status, 0) < 0)
status = 255;
+ trace2_exec_result(exec_id, status);
exit(status);
}
return -1;
wchar_t **wenv, **wargv;
_startupinfo si;
+ trace2_initialize_clock();
+
maybe_redirect_std_handles();
/* get wide char arguments and environment */
errno = EINVAL;
return -1;
}
-/* bash cannot reliably detect negative return codes as failure */
-#define exit(code) exit((code) & 0xff)
+
#define sigemptyset(x) (void)0
static inline int sigaddset(sigset_t *set, int signum)
{ return 0; }
--- /dev/null
+#include "../../cache.h"
+#include "../../json-writer.h"
+#include "lazyload.h"
+#include <Psapi.h>
+#include <tlHelp32.h>
+
+/*
+ * An arbitrarily chosen value to limit the size of the ancestor
+ * array built in get_processes().
+ */
+#define NR_PIDS_LIMIT 10
+
+/*
+ * Find the process data for the given PID in the given snapshot
+ * and update the PROCESSENTRY32 data.
+ */
+static int find_pid(DWORD pid, HANDLE hSnapshot, PROCESSENTRY32 *pe32)
+{
+ pe32->dwSize = sizeof(PROCESSENTRY32);
+
+ if (Process32First(hSnapshot, pe32)) {
+ do {
+ if (pe32->th32ProcessID == pid)
+ return 1;
+ } while (Process32Next(hSnapshot, pe32));
+ }
+ return 0;
+}
+
+/*
+ * Accumulate JSON array of our parent processes:
+ * [
+ * exe-name-parent,
+ * exe-name-grand-parent,
+ * ...
+ * ]
+ *
+ * Note: we only report the filename of the process executable; the
+ * only way to get its full pathname is to use OpenProcess()
+ * and GetModuleFileNameEx() or QueryFullProcessImageName()
+ * and that seems rather expensive (on top of the cost of
+ * getting the snapshot).
+ *
+ * Note: we compute the set of parent processes by walking the PPID
+ * link in each visited PROCESSENTRY32 record. This search
+ * stops when an ancestor process is not found in the snapshot
+ * (because it exited before the current or intermediate parent
+ * process exited).
+ *
+ * This search may compute an incorrect result if the PPID link
+ * refers to the PID of an exited parent and that PID has been
+ * recycled and given to a new unrelated process.
+ *
+ * Worse, it is possible for a child or descendant of the
+ * current process to be given the recycled PID and cause a
+ * PPID-cycle. This would cause an infinite loop building our
+ * parent process array.
+ *
+ * Note: for completeness, the "System Idle" process has PID=0 and
+ * PPID=0 and could cause another PPID-cycle. We don't expect
+ * Git to be a descendant of the idle process, but because of
+ * PID recycling, it might be possible to get a PPID link value
+ * of 0. This too would cause an infinite loop.
+ *
+ * Therefore, we keep an array of the visited PPIDs to guard against
+ * cycles.
+ *
+ * We use a fixed-size array rather than ALLOC_GROW to keep things
+ * simple and avoid the alloc/realloc overhead. It is OK if we
+ * truncate the search and return a partial answer.
+ */
+static void get_processes(struct json_writer *jw, HANDLE hSnapshot)
+{
+ PROCESSENTRY32 pe32;
+ DWORD pid;
+ DWORD pid_list[NR_PIDS_LIMIT];
+ int k, nr_pids = 0;
+
+ pid = GetCurrentProcessId();
+ while (find_pid(pid, hSnapshot, &pe32)) {
+ /* Only report parents. Omit self from the JSON output. */
+ if (nr_pids)
+ jw_array_string(jw, pe32.szExeFile);
+
+ /* Check for cycle in snapshot. (Yes, it happened.) */
+ for (k = 0; k < nr_pids; k++)
+ if (pid == pid_list[k]) {
+ jw_array_string(jw, "(cycle)");
+ return;
+ }
+
+ if (nr_pids == NR_PIDS_LIMIT) {
+ jw_array_string(jw, "(truncated)");
+ return;
+ }
+
+ pid_list[nr_pids++] = pid;
+
+ pid = pe32.th32ParentProcessID;
+ }
+}
+
+/*
+ * Emit JSON data for the current and parent processes. Individual
+ * trace2 targets can decide how to actually print it.
+ */
+static void get_ancestry(void)
+{
+ HANDLE hSnapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
+
+ if (hSnapshot != INVALID_HANDLE_VALUE) {
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_array_begin(&jw, 0);
+ get_processes(&jw, hSnapshot);
+ jw_end(&jw);
+
+ trace2_data_json("process", the_repository, "windows/ancestry",
+ &jw);
+
+ jw_release(&jw);
+ CloseHandle(hSnapshot);
+ }
+}
+
+/*
+ * Is a debugger attached to the current process?
+ *
+ * This will catch debug runs (where the debugger started the process).
+ * This is the normal case. Since this code is called during our startup,
+ * it will not report instances where a debugger is attached dynamically
+ * to a running git process, but that is relatively rare.
+ */
+static void get_is_being_debugged(void)
+{
+ if (IsDebuggerPresent())
+ trace2_data_intmax("process", the_repository,
+ "windows/debugger_present", 1);
+}
+
+/*
+ * Emit JSON data with the peak memory usage of the current process.
+ */
+static void get_peak_memory_info(void)
+{
+ DECLARE_PROC_ADDR(psapi.dll, BOOL, GetProcessMemoryInfo, HANDLE,
+ PPROCESS_MEMORY_COUNTERS, DWORD);
+
+ if (INIT_PROC_ADDR(GetProcessMemoryInfo)) {
+ PROCESS_MEMORY_COUNTERS pmc;
+
+ if (GetProcessMemoryInfo(GetCurrentProcess(), &pmc,
+ sizeof(pmc))) {
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+
+#define KV(kv) #kv, (intmax_t)pmc.kv
+
+ jw_object_intmax(&jw, KV(PageFaultCount));
+ jw_object_intmax(&jw, KV(PeakWorkingSetSize));
+ jw_object_intmax(&jw, KV(PeakPagefileUsage));
+
+ jw_end(&jw);
+
+ trace2_data_json("process", the_repository,
+ "windows/memory", &jw);
+ jw_release(&jw);
+ }
+ }
+}
+
+void trace2_collect_process_info(enum trace2_process_info_reason reason)
+{
+ if (!trace2_is_enabled())
+ return;
+
+ switch (reason) {
+ case TRACE2_PROCESS_INFO_STARTUP:
+ get_is_being_debugged();
+ get_ancestry();
+ return;
+
+ case TRACE2_PROCESS_INFO_EXIT:
+ get_peak_memory_info();
+ return;
+
+ default:
+ BUG("trace2_collect_process_info: unknown reason '%d'", reason);
+ }
+}
}
ret = !wildmatch(pattern.buf + prefix, text.buf + prefix,
- icase ? WM_CASEFOLD : 0);
+ WM_PATHNAME | (icase ? WM_CASEFOLD : 0));
if (!ret && !already_tried_absolute) {
/*
if (starts_with(var, "core."))
return git_default_core_config(var, value, cb);
- if (starts_with(var, "user."))
+ if (starts_with(var, "user.") ||
+ starts_with(var, "author.") ||
+ starts_with(var, "committer."))
return git_ident_config(var, value, cb);
if (starts_with(var, "i18n."))
repo_config = NULL;
current_parsing_scope = CONFIG_SCOPE_SYSTEM;
- if (git_config_system() && !access_or_die(git_etc_gitconfig(), R_OK, 0))
+ if (git_config_system() && !access_or_die(git_etc_gitconfig(), R_OK,
+ opts->system_gently ?
+ ACCESS_EACCES_OK : 0))
ret += git_config_from_file(fn, git_etc_gitconfig(),
data);
ret += git_config_from_file(fn, user_config, data);
current_parsing_scope = CONFIG_SCOPE_REPO;
- if (repo_config && !access_or_die(repo_config, R_OK, 0))
+ if (!opts->ignore_repo && repo_config &&
+ !access_or_die(repo_config, R_OK, 0))
ret += git_config_from_file(fn, repo_config, data);
/*
* Note: this should have a new scope, CONFIG_SCOPE_WORKTREE.
* But let's not complicate things before it's actually needed.
*/
- if (repository_format_worktree_config) {
+ if (!opts->ignore_worktree && repository_format_worktree_config) {
char *path = git_pathdup("config.worktree");
if (!access_or_die(path, R_OK, 0))
ret += git_config_from_file(fn, path, data);
}
current_parsing_scope = CONFIG_SCOPE_CMDLINE;
- if (git_config_from_parameters(fn, data) < 0)
+ if (!opts->ignore_cmdline && git_config_from_parameters(fn, data) < 0)
die(_("unable to parse command-line config"));
current_parsing_scope = CONFIG_SCOPE_UNKNOWN;
strbuf_release(&gitdir);
}
+/*
+ * Read config but only enumerate system and global settings.
+ * Omit any repo-local, worktree-local, or command-line settings.
+ */
+void read_very_early_config(config_fn_t cb, void *data)
+{
+ struct config_options opts = { 0 };
+
+ opts.respect_includes = 1;
+ opts.ignore_repo = 1;
+ opts.ignore_worktree = 1;
+ opts.ignore_cmdline = 1;
+ opts.system_gently = 1;
+
+ config_with_options(cb, data, NULL, &opts);
+}
+
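A sketch of a caller, using the config_fn_t shape and git_config_string() declared below in config.h; the names early_opts and early_pager_cb are illustrative:

    struct early_opts {
    	const char *pager;
    };

    /* Only system and global config can reach this callback. */
    static int early_pager_cb(const char *var, const char *value, void *data)
    {
    	struct early_opts *opts = data;

    	if (!strcmp(var, "core.pager"))
    		return git_config_string(&opts->pager, var, value);
    	return 0;
    }

    static void load_early_opts(struct early_opts *opts)
    {
    	read_very_early_config(early_pager_cb, opts);
    }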
static struct config_set_element *configset_find_element(struct config_set *cs, const char *key)
{
struct config_set_element k;
/* Functions use to read configuration from a repository */
static void repo_read_config(struct repository *repo)
{
- struct config_options opts;
+ struct config_options opts = { 0 };
opts.respect_includes = 1;
opts.commondir = repo->commondir;
void git_config_set(const char *key, const char *value)
{
git_config_set_multivar(key, value, NULL, 0);
+
+ trace2_cmd_set_config(key, value);
}
/*
struct config_options {
unsigned int respect_includes : 1;
+ unsigned int ignore_repo : 1;
+ unsigned int ignore_worktree : 1;
+ unsigned int ignore_cmdline : 1;
+ unsigned int system_gently : 1;
const char *commondir;
const char *git_dir;
config_parser_event_fn_t event_fn;
};
typedef int (*config_fn_t)(const char *, const char *, void *);
-extern int git_default_config(const char *, const char *, void *);
-extern int git_config_from_file(config_fn_t fn, const char *, void *);
-extern int git_config_from_file_with_options(config_fn_t fn, const char *,
- void *,
- const struct config_options *);
-extern int git_config_from_mem(config_fn_t fn,
- const enum config_origin_type,
- const char *name,
- const char *buf, size_t len,
- void *data, const struct config_options *opts);
-extern int git_config_from_blob_oid(config_fn_t fn, const char *name,
- const struct object_id *oid, void *data);
-extern void git_config_push_parameter(const char *text);
-extern int git_config_from_parameters(config_fn_t fn, void *data);
-extern void read_early_config(config_fn_t cb, void *data);
-extern void git_config(config_fn_t fn, void *);
-extern int config_with_options(config_fn_t fn, void *,
- struct git_config_source *config_source,
- const struct config_options *opts);
-extern int git_parse_ssize_t(const char *, ssize_t *);
-extern int git_parse_ulong(const char *, unsigned long *);
-extern int git_parse_maybe_bool(const char *);
-extern int git_config_int(const char *, const char *);
-extern int64_t git_config_int64(const char *, const char *);
-extern unsigned long git_config_ulong(const char *, const char *);
-extern ssize_t git_config_ssize_t(const char *, const char *);
-extern int git_config_bool_or_int(const char *, const char *, int *);
-extern int git_config_bool(const char *, const char *);
-extern int git_config_string(const char **, const char *, const char *);
-extern int git_config_pathname(const char **, const char *, const char *);
-extern int git_config_expiry_date(timestamp_t *, const char *, const char *);
-extern int git_config_color(char *, const char *, const char *);
-extern int git_config_set_in_file_gently(const char *, const char *, const char *);
-extern void git_config_set_in_file(const char *, const char *, const char *);
-extern int git_config_set_gently(const char *, const char *);
-extern void git_config_set(const char *, const char *);
-extern int git_config_parse_key(const char *, char **, int *);
-extern int git_config_key_is_valid(const char *key);
-extern int git_config_set_multivar_gently(const char *, const char *, const char *, int);
-extern void git_config_set_multivar(const char *, const char *, const char *, int);
-extern int git_config_set_multivar_in_file_gently(const char *, const char *, const char *, const char *, int);
-extern void git_config_set_multivar_in_file(const char *, const char *, const char *, const char *, int);
-extern int git_config_rename_section(const char *, const char *);
-extern int git_config_rename_section_in_file(const char *, const char *, const char *);
-extern int git_config_copy_section(const char *, const char *);
-extern int git_config_copy_section_in_file(const char *, const char *, const char *);
-extern const char *git_etc_gitconfig(void);
-extern int git_env_bool(const char *, int);
-extern unsigned long git_env_ulong(const char *, unsigned long);
-extern int git_config_system(void);
-extern int config_error_nonbool(const char *);
+int git_default_config(const char *, const char *, void *);
+int git_config_from_file(config_fn_t fn, const char *, void *);
+int git_config_from_file_with_options(config_fn_t fn, const char *,
+ void *,
+ const struct config_options *);
+int git_config_from_mem(config_fn_t fn,
+ const enum config_origin_type,
+ const char *name,
+ const char *buf, size_t len,
+ void *data, const struct config_options *opts);
+int git_config_from_blob_oid(config_fn_t fn, const char *name,
+ const struct object_id *oid, void *data);
+void git_config_push_parameter(const char *text);
+int git_config_from_parameters(config_fn_t fn, void *data);
+void read_early_config(config_fn_t cb, void *data);
+void read_very_early_config(config_fn_t cb, void *data);
+void git_config(config_fn_t fn, void *);
+int config_with_options(config_fn_t fn, void *,
+ struct git_config_source *config_source,
+ const struct config_options *opts);
+int git_parse_ssize_t(const char *, ssize_t *);
+int git_parse_ulong(const char *, unsigned long *);
+int git_parse_maybe_bool(const char *);
+int git_config_int(const char *, const char *);
+int64_t git_config_int64(const char *, const char *);
+unsigned long git_config_ulong(const char *, const char *);
+ssize_t git_config_ssize_t(const char *, const char *);
+int git_config_bool_or_int(const char *, const char *, int *);
+int git_config_bool(const char *, const char *);
+int git_config_string(const char **, const char *, const char *);
+int git_config_pathname(const char **, const char *, const char *);
+int git_config_expiry_date(timestamp_t *, const char *, const char *);
+int git_config_color(char *, const char *, const char *);
+int git_config_set_in_file_gently(const char *, const char *, const char *);
+void git_config_set_in_file(const char *, const char *, const char *);
+int git_config_set_gently(const char *, const char *);
+void git_config_set(const char *, const char *);
+int git_config_parse_key(const char *, char **, int *);
+int git_config_key_is_valid(const char *key);
+int git_config_set_multivar_gently(const char *, const char *, const char *, int);
+void git_config_set_multivar(const char *, const char *, const char *, int);
+int git_config_set_multivar_in_file_gently(const char *, const char *, const char *, const char *, int);
+void git_config_set_multivar_in_file(const char *, const char *, const char *, const char *, int);
+int git_config_rename_section(const char *, const char *);
+int git_config_rename_section_in_file(const char *, const char *, const char *);
+int git_config_copy_section(const char *, const char *);
+int git_config_copy_section_in_file(const char *, const char *, const char *);
+const char *git_etc_gitconfig(void);
+int git_env_bool(const char *, int);
+unsigned long git_env_ulong(const char *, unsigned long);
+int git_config_system(void);
+int config_error_nonbool(const char *);
#if defined(__GNUC__)
#define config_error_nonbool(s) (config_error_nonbool(s), const_error())
#endif
-extern int git_config_parse_parameter(const char *, config_fn_t fn, void *data);
+int git_config_parse_parameter(const char *, config_fn_t fn, void *data);
enum config_scope {
CONFIG_SCOPE_UNKNOWN = 0,
CONFIG_SCOPE_CMDLINE,
};
-extern enum config_scope current_config_scope(void);
-extern const char *current_config_origin_type(void);
-extern const char *current_config_name(void);
+enum config_scope current_config_scope(void);
+const char *current_config_origin_type(void);
+const char *current_config_name(void);
struct config_include_data {
int depth;
const struct config_options *opts;
};
#define CONFIG_INCLUDE_INIT { 0 }
-extern int git_config_include(const char *name, const char *value, void *data);
+int git_config_include(const char *name, const char *value, void *data);
/*
* Match and parse a config key of the form:
* If the subsection pointer-to-pointer passed in is NULL, returns 0 only if
* there is no subsection at all.
*/
-extern int parse_config_key(const char *var,
- const char *section,
- const char **subsection, int *subsection_len,
- const char **key);
+int parse_config_key(const char *var,
+ const char *section,
+ const char **subsection, int *subsection_len,
+ const char **key);
struct config_set_element {
struct hashmap_entry ent;
struct configset_list list;
};
-extern void git_configset_init(struct config_set *cs);
-extern int git_configset_add_file(struct config_set *cs, const char *filename);
-extern const struct string_list *git_configset_get_value_multi(struct config_set *cs, const char *key);
-extern void git_configset_clear(struct config_set *cs);
+void git_configset_init(struct config_set *cs);
+int git_configset_add_file(struct config_set *cs, const char *filename);
+const struct string_list *git_configset_get_value_multi(struct config_set *cs, const char *key);
+void git_configset_clear(struct config_set *cs);
/*
* These functions return 1 if not found, and 0 if found, leaving the found
* value in the 'dest' pointer.
*/
-extern int git_configset_get_value(struct config_set *cs, const char *key, const char **dest);
-extern int git_configset_get_string_const(struct config_set *cs, const char *key, const char **dest);
-extern int git_configset_get_string(struct config_set *cs, const char *key, char **dest);
-extern int git_configset_get_int(struct config_set *cs, const char *key, int *dest);
-extern int git_configset_get_ulong(struct config_set *cs, const char *key, unsigned long *dest);
-extern int git_configset_get_bool(struct config_set *cs, const char *key, int *dest);
-extern int git_configset_get_bool_or_int(struct config_set *cs, const char *key, int *is_bool, int *dest);
-extern int git_configset_get_maybe_bool(struct config_set *cs, const char *key, int *dest);
-extern int git_configset_get_pathname(struct config_set *cs, const char *key, const char **dest);
+int git_configset_get_value(struct config_set *cs, const char *key, const char **dest);
+int git_configset_get_string_const(struct config_set *cs, const char *key, const char **dest);
+int git_configset_get_string(struct config_set *cs, const char *key, char **dest);
+int git_configset_get_int(struct config_set *cs, const char *key, int *dest);
+int git_configset_get_ulong(struct config_set *cs, const char *key, unsigned long *dest);
+int git_configset_get_bool(struct config_set *cs, const char *key, int *dest);
+int git_configset_get_bool_or_int(struct config_set *cs, const char *key, int *is_bool, int *dest);
+int git_configset_get_maybe_bool(struct config_set *cs, const char *key, int *dest);
+int git_configset_get_pathname(struct config_set *cs, const char *key, const char **dest);
/* Functions for reading a repository's config */
struct repository;
-extern void repo_config(struct repository *repo, config_fn_t fn, void *data);
-extern int repo_config_get_value(struct repository *repo,
- const char *key, const char **value);
-extern const struct string_list *repo_config_get_value_multi(struct repository *repo,
- const char *key);
-extern int repo_config_get_string_const(struct repository *repo,
- const char *key, const char **dest);
-extern int repo_config_get_string(struct repository *repo,
- const char *key, char **dest);
-extern int repo_config_get_int(struct repository *repo,
+void repo_config(struct repository *repo, config_fn_t fn, void *data);
+int repo_config_get_value(struct repository *repo,
+ const char *key, const char **value);
+const struct string_list *repo_config_get_value_multi(struct repository *repo,
+ const char *key);
+int repo_config_get_string_const(struct repository *repo,
+ const char *key, const char **dest);
+int repo_config_get_string(struct repository *repo,
+ const char *key, char **dest);
+int repo_config_get_int(struct repository *repo,
+ const char *key, int *dest);
+int repo_config_get_ulong(struct repository *repo,
+ const char *key, unsigned long *dest);
+int repo_config_get_bool(struct repository *repo,
+ const char *key, int *dest);
+int repo_config_get_bool_or_int(struct repository *repo,
+ const char *key, int *is_bool, int *dest);
+int repo_config_get_maybe_bool(struct repository *repo,
const char *key, int *dest);
-extern int repo_config_get_ulong(struct repository *repo,
- const char *key, unsigned long *dest);
-extern int repo_config_get_bool(struct repository *repo,
- const char *key, int *dest);
-extern int repo_config_get_bool_or_int(struct repository *repo,
- const char *key, int *is_bool, int *dest);
-extern int repo_config_get_maybe_bool(struct repository *repo,
- const char *key, int *dest);
-extern int repo_config_get_pathname(struct repository *repo,
- const char *key, const char **dest);
+int repo_config_get_pathname(struct repository *repo,
+ const char *key, const char **dest);
-extern int git_config_get_value(const char *key, const char **value);
-extern const struct string_list *git_config_get_value_multi(const char *key);
-extern void git_config_clear(void);
-extern int git_config_get_string_const(const char *key, const char **dest);
-extern int git_config_get_string(const char *key, char **dest);
-extern int git_config_get_int(const char *key, int *dest);
-extern int git_config_get_ulong(const char *key, unsigned long *dest);
-extern int git_config_get_bool(const char *key, int *dest);
-extern int git_config_get_bool_or_int(const char *key, int *is_bool, int *dest);
-extern int git_config_get_maybe_bool(const char *key, int *dest);
-extern int git_config_get_pathname(const char *key, const char **dest);
-extern int git_config_get_index_threads(int *dest);
-extern int git_config_get_untracked_cache(void);
-extern int git_config_get_split_index(void);
-extern int git_config_get_max_percent_split_change(void);
-extern int git_config_get_fsmonitor(void);
+int git_config_get_value(const char *key, const char **value);
+const struct string_list *git_config_get_value_multi(const char *key);
+void git_config_clear(void);
+int git_config_get_string_const(const char *key, const char **dest);
+int git_config_get_string(const char *key, char **dest);
+int git_config_get_int(const char *key, int *dest);
+int git_config_get_ulong(const char *key, unsigned long *dest);
+int git_config_get_bool(const char *key, int *dest);
+int git_config_get_bool_or_int(const char *key, int *is_bool, int *dest);
+int git_config_get_maybe_bool(const char *key, int *dest);
+int git_config_get_pathname(const char *key, const char **dest);
+int git_config_get_index_threads(int *dest);
+int git_config_get_untracked_cache(void);
+int git_config_get_split_index(void);
+int git_config_get_max_percent_split_change(void);
+int git_config_get_fsmonitor(void);
/* This dies if the configured or default date is in the future */
-extern int git_config_get_expiry(const char *key, const char **output);
+int git_config_get_expiry(const char *key, const char **output);
/* parse either "this many days" integer, or "5.days.ago" approxidate */
-extern int git_config_get_expiry_in_days(const char *key, timestamp_t *, timestamp_t now);
+int git_config_get_expiry_in_days(const char *key, timestamp_t *, timestamp_t now);
struct key_value_info {
const char *filename;
enum config_scope scope;
};
-extern NORETURN void git_die_config(const char *key, const char *err, ...) __attribute__((format(printf, 2, 3)));
-extern NORETURN void git_die_config_linenr(const char *key, const char *filename, int linenr);
+NORETURN void git_die_config(const char *key, const char *err, ...) __attribute__((format(printf, 2, 3)));
+NORETURN void git_die_config_linenr(const char *key, const char *filename, int linenr);
#define LOOKUP_CONFIG(mapping, var) \
lookup_config(mapping, ARRAY_SIZE(mapping), var)
ifeq ($(filter no-error,$(DEVOPTS)),)
-CFLAGS += -Werror
+DEVELOPER_CFLAGS += -Werror
endif
ifneq ($(filter pedantic,$(DEVOPTS)),)
-CFLAGS += -pedantic
+DEVELOPER_CFLAGS += -pedantic
# don't warn for each N_ use
-CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=0
-endif
-CFLAGS += -Wall
-CFLAGS += -Wdeclaration-after-statement
-CFLAGS += -Wformat-security
-CFLAGS += -Wno-format-zero-length
-CFLAGS += -Wold-style-definition
-CFLAGS += -Woverflow
-CFLAGS += -Wpointer-arith
-CFLAGS += -Wstrict-prototypes
-CFLAGS += -Wunused
-CFLAGS += -Wvla
+DEVELOPER_CFLAGS += -DUSE_PARENS_AROUND_GETTEXT_N=0
+endif
+DEVELOPER_CFLAGS += -Wall
+DEVELOPER_CFLAGS += -Wdeclaration-after-statement
+DEVELOPER_CFLAGS += -Wformat-security
+DEVELOPER_CFLAGS += -Wno-format-zero-length
+DEVELOPER_CFLAGS += -Wold-style-definition
+DEVELOPER_CFLAGS += -Woverflow
+DEVELOPER_CFLAGS += -Wpointer-arith
+DEVELOPER_CFLAGS += -Wstrict-prototypes
+DEVELOPER_CFLAGS += -Wunused
+DEVELOPER_CFLAGS += -Wvla
ifndef COMPILER_FEATURES
COMPILER_FEATURES := $(shell ./detect-compiler $(CC))
endif
ifneq ($(filter clang4,$(COMPILER_FEATURES)),)
-CFLAGS += -Wtautological-constant-out-of-range-compare
+DEVELOPER_CFLAGS += -Wtautological-constant-out-of-range-compare
endif
ifneq ($(or $(filter gcc6,$(COMPILER_FEATURES)),$(filter clang4,$(COMPILER_FEATURES))),)
-CFLAGS += -Wextra
+DEVELOPER_CFLAGS += -Wextra
# if a function is public, there should be a prototype and the right
# header file should be included. If not, it should be static.
-CFLAGS += -Wmissing-prototypes
+DEVELOPER_CFLAGS += -Wmissing-prototypes
ifeq ($(filter extra-all,$(DEVOPTS)),)
# These are disabled because we have these all over the place.
-CFLAGS += -Wno-empty-body
-CFLAGS += -Wno-missing-field-initializers
-CFLAGS += -Wno-sign-compare
-CFLAGS += -Wno-unused-parameter
+DEVELOPER_CFLAGS += -Wno-empty-body
+DEVELOPER_CFLAGS += -Wno-missing-field-initializers
+DEVELOPER_CFLAGS += -Wno-sign-compare
+DEVELOPER_CFLAGS += -Wno-unused-parameter
endif
endif
# not worth fixing since newer compilers correctly stop complaining
ifneq ($(filter gcc4,$(COMPILER_FEATURES)),)
ifeq ($(filter gcc5,$(COMPILER_FEATURES)),)
-CFLAGS += -Wno-uninitialized
+DEVELOPER_CFLAGS += -Wno-uninitialized
endif
endif
HAVE_BSD_SYSCTL = YesPlease
FREAD_READS_DIRECTORIES = UnfortunatelyYes
HAVE_NS_GET_EXECUTABLE_PATH = YesPlease
+ BASIC_CFLAGS += -I/usr/local/include
+ BASIC_LDFLAGS += -L/usr/local/lib
endif
ifeq ($(uname_S),SunOS)
NEEDS_SOCKET = YesPlease
INTERNAL_QSORT = UnfortunatelyYes
NEEDS_LIBICONV = YesPlease
BASIC_CFLAGS += -D_LARGE_FILES
+ FILENO_IS_A_MACRO = UnfortunatelyYes
+ NEED_ACCESS_ROOT_HANDLER = UnfortunatelyYes
ifeq ($(shell expr "$(uname_V)" : '[1234]'),1)
NO_PTHREADS = YesPlease
else
CFLAGS =
BASIC_CFLAGS = -nologo -I. -I../zlib -Icompat/vcbuild -Icompat/vcbuild/include -DWIN32 -D_CONSOLE -DHAVE_STRING_H -D_CRT_SECURE_NO_WARNINGS -D_CRT_NONSTDC_NO_DEPRECATE
COMPAT_OBJS = compat/msvc.o compat/winansi.o \
+ compat/win32/path-utils.o \
compat/win32/pthread.o compat/win32/syslog.o \
+ compat/win32/trace2_win32_process_info.o \
compat/win32/dirent.o
COMPAT_CFLAGS = -D__USE_MINGW_ACCESS -DNOGDI -DHAVE_STRING_H -Icompat -Icompat/regex -Icompat/win32 -DSTRIP_EXTENSION=\".exe\"
BASIC_LDFLAGS = -IGNORE:4217 -IGNORE:4049 -NOLOGO -SUBSYSTEM:CONSOLE
NO_STRTOUMAX = YesPlease
NO_MKDTEMP = YesPlease
NO_SVN_TESTS = YesPlease
- NO_PERL_MAKEMAKER = YesPlease
RUNTIME_PREFIX = YesPlease
HAVE_WPGMPTR = YesWeDo
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
COMPAT_CFLAGS += -DNOGDI -Icompat -Icompat/win32
COMPAT_CFLAGS += -DSTRIP_EXTENSION=\".exe\"
COMPAT_OBJS += compat/mingw.o compat/winansi.o \
+ compat/win32/trace2_win32_process_info.o \
compat/win32/path-utils.o \
compat/win32/pthread.o compat/win32/syslog.o \
compat/win32/dirent.o
ifneq ($(shell expr "$(uname_R)" : '1\.'),2)
# MSys2
prefix = /usr/
+ # Enable DEP
+ BASIC_LDFLAGS += -Wl,--nxcompat
+ # Enable ASLR (unless debugging)
+ ifneq (,$(findstring -O,$(filter-out -O0 -Og,$(CFLAGS))))
+ BASIC_LDFLAGS += -Wl,--dynamicbase
+ endif
ifeq (MINGW32,$(MSYSTEM))
prefix = /mingw32
HOST_CPU = i686
+ BASIC_LDFLAGS += -Wl,--pic-executable,-e,_mainCRTStartup
endif
ifeq (MINGW64,$(MSYSTEM))
prefix = /mingw64
HOST_CPU = x86_64
+ BASIC_LDFLAGS += -Wl,--pic-executable,-e,mainCRTStartup
else
COMPAT_CFLAGS += -D_USE_32BIT_TIME_T
BASIC_LDFLAGS += -Wl,--large-address-aware
GIT_CONF_SUBST([NEEDS_LIBGEN])
test -n "$NEEDS_LIBGEN" && LIBS="$LIBS -lgen"
-AC_CHECK_LIB([c], [gettext],
-[LIBC_CONTAINS_LIBINTL=YesPlease],
-[LIBC_CONTAINS_LIBINTL=])
+AC_DEFUN([LIBINTL_SRC], [
+AC_LANG_PROGRAM([[
+#include <libintl.h>
+]],[[
+char *msg = gettext("test");
+]])])
+
+AC_MSG_CHECKING([if libc contains libintl])
+AC_LINK_IFELSE([LIBINTL_SRC],
+ [AC_MSG_RESULT([yes])
+ LIBC_CONTAINS_LIBINTL=YesPlease],
+ [AC_MSG_RESULT([no])
+ LIBC_CONTAINS_LIBINTL=])
GIT_CONF_SUBST([LIBC_CONTAINS_LIBINTL])
#
conn = NULL;
} else if (protocol == PROTO_GIT) {
conn = git_connect_git(fd, hostandport, path, prog, version, flags);
+ conn->trace2_child_class = "transport/git";
} else {
struct strbuf cmd = STRBUF_INIT;
const char *const *var;
strbuf_release(&cmd);
return NULL;
}
+ conn->trace2_child_class = "transport/ssh";
fill_ssh_args(conn, ssh_host, port, version, flags);
} else {
transport_check_allowed("file");
+ conn->trace2_child_class = "transport/file";
if (version > 0) {
argv_array_pushf(&conn->env_array, GIT_PROTOCOL_ENVIRONMENT "=version=%d",
version);
#define CONNECT_DIAG_URL (1u << 1)
#define CONNECT_IPV4 (1u << 2)
#define CONNECT_IPV6 (1u << 3)
-extern struct child_process *git_connect(int fd[2], const char *url, const char *prog, int flags);
-extern int finish_connect(struct child_process *conn);
-extern int git_connection_is_socket(struct child_process *conn);
-extern int server_supports(const char *feature);
-extern int parse_feature_request(const char *features, const char *feature);
-extern const char *server_feature_value(const char *feature, int *len_ret);
-extern int url_is_local_not_ssh(const char *url);
+struct child_process *git_connect(int fd[2], const char *url, const char *prog, int flags);
+int finish_connect(struct child_process *conn);
+int git_connection_is_socket(struct child_process *conn);
+int server_supports(const char *feature);
+int parse_feature_request(const char *features, const char *feature);
+const char *server_feature_value(const char *feature, int *len_ret);
+int url_is_local_not_ssh(const char *url);
struct packet_reader;
-extern enum protocol_version discover_version(struct packet_reader *reader);
+enum protocol_version discover_version(struct packet_reader *reader);
-extern int server_supports_v2(const char *c, int die_on_error);
-extern int server_supports_feature(const char *c, const char *feature,
- int die_on_error);
+int server_supports_v2(const char *c, int die_on_error);
+int server_supports_feature(const char *c, const char *feature,
+ int die_on_error);
#endif
#include "cache.h"
+#include "object-store.h"
#include "run-command.h"
#include "sigchain.h"
#include "connected.h"
strbuf_release(&idx_file);
}
+ if (opt->check_refs_only) {
+ /*
+ * For partial clones, we don't want to have to do a regular
+ * connectivity check because we have to enumerate and exclude
+ * all promisor objects (slow), and then the connectivity check
+ * itself becomes a no-op because in a partial clone every
+ * object is a promisor object. Instead, just make sure we
+ * received the objects pointed to by each wanted ref.
+ */
+ do {
+ if (!repo_has_object_file(the_repository, &oid))
+ return 1;
+ } while (!fn(cb_data, &oid));
+ return 0;
+ }
+
if (opt->shallow_file) {
argv_array_push(&rev_list.args, "--shallow-file");
argv_array_push(&rev_list.args, opt->shallow_file);
* during a fetch.
*/
unsigned is_deepening_fetch : 1;
+
+ /*
+ * If non-zero, only check the top-level objects referenced by the
+ * wanted refs (passed in as cb_data). This is useful for partial
+ * clones, where enumerating and excluding all promisor objects is very
+ * slow and the commit-walk itself becomes a no-op.
+ */
+ unsigned check_refs_only : 1;
};
#define CHECK_CONNECTED_INIT { 0 }
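A hedged sketch of a fetch-style caller that flips the new bit, assuming connected.h's check_connected(oid_iterate_fn, void *, struct check_connected_options *) entry point; struct wanted_refs and next_wanted_oid are illustrative:

    struct wanted_refs {
    	struct object_id *oids;
    	int nr, pos;
    };

    /* Hand out each wanted ref's oid; return non-zero when exhausted. */
    static int next_wanted_oid(void *cb_data, struct object_id *oid)
    {
    	struct wanted_refs *w = cb_data;

    	if (w->pos >= w->nr)
    		return 1;
    	oidcpy(oid, &w->oids[w->pos++]);
    	return 0;
    }

    static int wanted_refs_are_complete(struct wanted_refs *w, int is_partial_clone)
    {
    	struct check_connected_options opt = CHECK_CONNECTED_INIT;

    	opt.check_refs_only = !!is_partial_clone;
    	return !check_connected(next_wanted_oid, w, &opt);
    }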
- c->maybe_tree->object.oid.hash
+ get_commit_tree_oid(c)->hash
-// These excluded functions must access c->maybe_tree direcly.
@@
-identifier f !~ "^(get_commit_tree|get_commit_tree_in_graph_one|load_tree_for_commit)$";
+identifier f !~ "^set_commit_tree$";
expression c;
+expression s;
@@
f(...) {<...
-- c->maybe_tree
-+ get_commit_tree(c)
+- c->maybe_tree = s
++ set_commit_tree(c, s)
...>}
+// These excluded functions must access c->maybe_tree directly.
+// Note that if c->maybe_tree is written somewhere outside of these
+// functions, then the recommended transformation will be bogus with
+// repo_get_commit_tree() on the LHS.
@@
+identifier f !~ "^(repo_get_commit_tree|get_commit_tree_in_graph_one|load_tree_for_commit|set_commit_tree)$";
expression c;
-expression s;
@@
-- get_commit_tree(c) = s
-+ c->maybe_tree = s
+ f(...) {<...
+- c->maybe_tree
++ repo_get_commit_tree(specify_the_right_repo_here, c)
+ ...>}
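In C terms, the reworked rules rewrite writes into set_commit_tree() and flag remaining reads for a repository-aware accessor; a hedged before/after sketch (set_tree_before/set_tree_after are illustrative):

    /* before: direct field write */
    static void set_tree_before(struct repository *r, struct commit *c,
    			    const struct object_id *oid)
    {
    	c->maybe_tree = lookup_tree(r, oid);
    }

    /* after: what the first rule suggests */
    static void set_tree_after(struct repository *r, struct commit *c,
    			   const struct object_id *oid)
    {
    	set_commit_tree(c, lookup_tree(r, oid));
    }

Reads outside the excluded functions are rewritten to repo_get_commit_tree(specify_the_right_repo_here, c); the placeholder argument is deliberate, forcing the author to pick the correct repository.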
--- /dev/null
+@@
+expression str;
+identifier x, flexname;
+@@
+- FLEX_ALLOC_MEM(x, flexname, str, strlen(str));
++ FLEX_ALLOC_STR(x, flexname, str);
+
+@@
+expression str;
+identifier x, ptrname;
+@@
+- FLEXPTR_ALLOC_MEM(x, ptrname, str, strlen(str));
++ FLEXPTR_ALLOC_STR(x, ptrname, str);
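The effect on C callers, sketched with a hypothetical flex-array struct (name_entry_x and make_entry are made up for illustration):

    struct name_entry_x {
    	int flag;
    	char name[FLEX_ARRAY];
    };

    static struct name_entry_x *make_entry(const char *str)
    {
    	struct name_entry_x *e;

    	/* before: FLEX_ALLOC_MEM(e, name, str, strlen(str)); */
    	FLEX_ALLOC_STR(e, name, str);
    	return e;
    }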
__git_merge_strategies=$(__git_list_merge_strategies)
}
+__git_merge_strategy_options="ours theirs subtree subtree= patience
+ histogram diff-algorithm= ignore-space-change ignore-all-space
+ ignore-space-at-eol renormalize no-renormalize no-renames
+ find-renames find-renames= rename-threshold="
+
__git_complete_revlist_file ()
{
local dequoted_word pfx ls ref cur_="$cur"
-s|--strategy)
__gitcomp "$__git_merge_strategies"
return 0
+ ;;
+ -X)
+ __gitcomp "$__git_merge_strategy_options"
+ return 0
+ ;;
esac
case "$cur" in
--strategy=*)
__gitcomp "$__git_merge_strategies" "" "${cur##--strategy=}"
return 0
;;
+ --strategy-option=*)
+ __gitcomp "$__git_merge_strategy_options" "" "${cur##--strategy-option=}"
+ return 0
+ ;;
esac
return 1
}
__git_compute_all_commands ()
{
test -n "$__git_all_commands" ||
- __git_all_commands=$(git --list-cmds=main,others,alias,nohelpers)
+ __git_all_commands=$(__git --list-cmds=main,others,alias,nohelpers)
}
# Lists all set config variables starting with the given section prefix,
}
__git_whitespacelist="nowarn warn error error-all fix"
+__git_patchformat="mbox stgit stgit-series hg mboxrd"
__git_am_inprogress_options="--skip --continue --resolved --abort --quit --show-current-patch"
_git_am ()
__gitcomp "$__git_whitespacelist" "" "${cur##--whitespace=}"
return
;;
+ --patch-format=*)
+ __gitcomp "$__git_patchformat" "" "${cur##--patch-format=}"
+ return
+ ;;
--*)
__gitcomp_builtin am "" \
"$__git_am_inprogress_options"
_git_add ()
{
case "$cur" in
+ --chmod=*)
+ __gitcomp "+x -x" "" "${cur##--chmod=}"
+ return
+ ;;
--*)
__gitcomp_builtin add
return
esac
}
+__git_ref_fieldlist="refname objecttype objectsize objectname upstream push HEAD symref"
+
_git_branch ()
{
local i c=1 only_local_ref="n" has_r="n"
__gitcomp "$__git_cherry_pick_inprogress_options"
return
fi
+
+ __git_complete_strategy && return
+
case "$cur" in
--*)
__gitcomp_builtin cherry-pick "" \
}
__git_mergetools_common="diffuse diffmerge ecmerge emerge kdiff3 meld opendiff
- tkdiff vimdiff gvimdiff xxdiff araxis p4merge bc codecompare
+ tkdiff vimdiff gvimdiff xxdiff araxis p4merge bc
+ codecompare smerge
"
_git_difftool ()
__gitcomp "$__git_fetch_recurse_submodules" "" "${cur##--recurse-submodules=}"
return
;;
+ --filter=*)
+ __gitcomp "blob:none blob:limit= sparse:oid= sparse:path=" "" "${cur##--filter=}"
+ return
+ ;;
--*)
__gitcomp_builtin fetch
return
esac
if test -n "$GIT_TESTING_ALL_COMMAND_LIST"
then
- __gitcomp "$GIT_TESTING_ALL_COMMAND_LIST $(git --list-cmds=alias,list-guide) gitk"
+ __gitcomp "$GIT_TESTING_ALL_COMMAND_LIST $(__git --list-cmds=alias,list-guide) gitk"
else
- __gitcomp "$(git --list-cmds=main,nohelpers,alias,list-guide) gitk"
+ __gitcomp "$(__git --list-cmds=main,nohelpers,alias,list-guide) gitk"
fi
}
--all-match --invert-grep
"
-__git_log_pretty_formats="oneline short medium full fuller email raw format:"
-__git_log_date_formats="relative iso8601 rfc2822 short local default raw"
+__git_log_pretty_formats="oneline short medium full fuller email raw format: mboxrd"
+__git_log_date_formats="relative iso8601 iso8601-strict rfc2822 short local default raw unix format:"
_git_log ()
{
return
;;
diff.submodule)
- __gitcomp "log short"
+ __gitcomp "$__git_diff_submodule_formats"
return
;;
help.format)
_git_replace ()
{
case "$cur" in
+ --format=*)
+ __gitcomp "short medium long" "" "${cur##--format=}"
+ return
+ ;;
--*)
__gitcomp_builtin replace
return
__gitcomp "$__git_revert_inprogress_options"
return
fi
+ __git_complete_strategy && return
case "$cur" in
--*)
__gitcomp_builtin revert "" \
{
__git_has_doubledash && return
- local subcommands="add status init deinit update summary foreach sync"
+ local subcommands="add status init deinit update set-branch summary foreach sync absorbgitdirs"
local subcommand="$(__git_find_on_cmdline "$subcommands")"
if [ -z "$subcommand" ]; then
case "$cur" in
--force --rebase --merge --reference --depth --recursive --jobs
"
;;
+ set-branch,--*)
+ __gitcomp "--default --branch"
+ ;;
summary,--*)
__gitcomp "--cached --files --summary-limit"
;;
then
__gitcomp "$GIT_TESTING_PORCELAIN_COMMAND_LIST"
else
- __gitcomp "$(git --list-cmds=list-mainporcelain,others,nohelpers,alias,list-complete,config)"
+ __gitcomp "$(__git --list-cmds=list-mainporcelain,others,nohelpers,alias,list-complete,config)"
fi
;;
esac
use warnings FATAL => 'all';
use strict;
+# Use the correct value for both UNIX and Windows (/dev/null vs nul)
+use File::Spec;
+
+my $NULL = File::Spec->devnull();
+
# Highlight by reversing foreground and background. You could do
# other things like bold or underline if you prefer.
my @OLD_HIGHLIGHT = (
# fallback, which means we will work even if git can't be run.
sub color_config {
my ($key, $default) = @_;
- my $s = `git config --get-color $key 2>/dev/null`;
+ my $s = `git config --get-color $key 2>$NULL`;
return length($s) ? $s : $default;
}
git subtree merge --prefix=<prefix> <commit>
git subtree pull --prefix=<prefix> <repository> <ref>
git subtree push --prefix=<prefix> <repository> <ref>
-git subtree split --prefix=<prefix> <commit...>
+git subtree split --prefix=<prefix> <commit>
--
h,help show the help
q quiet
fi
}
+ensure_single_rev () {
+ if test $# -ne 1
+ then
+ die "You must provide exactly one revision. Got: '$@'"
+ fi
+}
while test $# -gt 0
do
then
revs=$(git rev-parse $default --revs-only "$@") || exit $?
dirs=$(git rev-parse --no-revs --no-flags "$@") || exit $?
+ ensure_single_rev $revs
if test -n "$dirs"
then
die "Error: Use --prefix instead of bare filenames."
}
cmd_add_commit () {
- revs=$(git rev-parse $default --revs-only "$@") || exit $?
- set -- $revs
- rev="$1"
+ rev=$(git rev-parse $default --revs-only "$@") || exit $?
+ ensure_single_rev $rev
debug "Adding $dir as '$rev'..."
git read-tree --prefix="$dir" $rev || exit $?
}
cmd_merge () {
- revs=$(git rev-parse $default --revs-only "$@") || exit $?
+ rev=$(git rev-parse $default --revs-only "$@") || exit $?
+ ensure_single_rev $rev
ensure_clean
- set -- $revs
- if test $# -ne 1
- then
- die "You must provide exactly one revision. Got: '$revs'"
- fi
- rev="$1"
-
if test -n "$squash"
then
first_split="$(find_latest_squash "$dir")"
if (start_async(&async))
return 0; /* error was already reported */
- if (strbuf_read(&nbuf, async.out, len) < 0) {
+ if (strbuf_read(&nbuf, async.out, 0) < 0) {
err = error(_("read from external filter '%s' failed"), cmd);
}
if (close(async.out)) {
git_hash_ctx ctx;
};
-extern void hashfile_checkpoint(struct hashfile *, struct hashfile_checkpoint *);
-extern int hashfile_truncate(struct hashfile *, struct hashfile_checkpoint *);
+void hashfile_checkpoint(struct hashfile *, struct hashfile_checkpoint *);
+int hashfile_truncate(struct hashfile *, struct hashfile_checkpoint *);
/* finalize_hashfile flags */
#define CSUM_CLOSE 1
#define CSUM_FSYNC 2
#define CSUM_HASH_IN_STREAM 4
-extern struct hashfile *hashfd(int fd, const char *name);
-extern struct hashfile *hashfd_check(const char *name);
-extern struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp);
-extern int finalize_hashfile(struct hashfile *, unsigned char *, unsigned int);
-extern void hashwrite(struct hashfile *, const void *, unsigned int);
-extern void hashflush(struct hashfile *f);
-extern void crc32_begin(struct hashfile *);
-extern uint32_t crc32_end(struct hashfile *);
+struct hashfile *hashfd(int fd, const char *name);
+struct hashfile *hashfd_check(const char *name);
+struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp);
+int finalize_hashfile(struct hashfile *, unsigned char *, unsigned int);
+void hashwrite(struct hashfile *, const void *, unsigned int);
+void hashflush(struct hashfile *f);
+void crc32_begin(struct hashfile *);
+uint32_t crc32_end(struct hashfile *);
static inline void hashwrite_u8(struct hashfile *f, uint8_t data)
{
return DATE_UNIX;
if (skip_prefix(format, "format", end))
return DATE_STRFTIME;
+ /*
+ * Please update $__git_log_date_formats in
+ * git-completion.bash when you add new formats.
+ */
die("unknown date format %s", format);
}
* NULL), returning the previously associated pointer. If there is no previous
* association, this function returns NULL.
*/
-extern void *add_decoration(struct decoration *n, const struct object *obj, void *decoration);
+void *add_decoration(struct decoration *n, const struct object *obj, void *decoration);
/*
* Return the pointer associated to the given object. If there is no
* association, this function returns NULL.
*/
-extern void *lookup_decoration(struct decoration *n, const struct object *obj);
+void *lookup_decoration(struct decoration *n, const struct object *obj);
#endif
* before free_delta_index() is called. The returned pointer must be freed
* using free_delta_index().
*/
-extern struct delta_index *
+struct delta_index *
create_delta_index(const void *buf, unsigned long bufsize);
/*
*
* Given pointer must be what create_delta_index() returned, or NULL.
*/
-extern void free_delta_index(struct delta_index *index);
+void free_delta_index(struct delta_index *index);
/*
* sizeof_delta_index: returns memory usage of delta index
*
* Given pointer must be what create_delta_index() returned, or NULL.
*/
-extern unsigned long sizeof_delta_index(struct delta_index *index);
+unsigned long sizeof_delta_index(struct delta_index *index);
/*
* create_delta: create a delta from given index for the given buffer
* returned and *delta_size is updated with its size. The returned buffer
* must be freed by the caller.
*/
-extern void *
+void *
create_delta(const struct delta_index *index,
const void *buf, unsigned long bufsize,
unsigned long *delta_size, unsigned long max_delta_size);
* *trg_bufsize is updated with its size. On failure a NULL pointer is
* returned. The returned buffer must be freed by the caller.
*/
-extern void *patch_delta(const void *src_buf, unsigned long src_size,
- const void *delta_buf, unsigned long delta_size,
- unsigned long *dst_size);
+void *patch_delta(const void *src_buf, unsigned long src_size,
+ const void *delta_buf, unsigned long delta_size,
+ unsigned long *dst_size);
/* the smallest possible delta size is 4 bytes */
#define DELTA_SIZE_MIN 4
exit(128);
diff_set_mnemonic_prefix(&revs->diffopt, "c/", cached ? "i/" : "w/");
- diffcore_fix_diff_index(&revs->diffopt);
+ diffcore_fix_diff_index();
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
trace_performance_leave("diff-index");
#include "revision.h"
#include "log-tree.h"
#include "builtin.h"
+#include "parse-options.h"
#include "string-list.h"
#include "dir.h"
}
}
-void diff_no_index(struct repository *r,
- struct rev_info *revs,
- int argc, const char **argv)
+static const char * const diff_no_index_usage[] = {
+ N_("git diff --no-index [<options>] <path> <path>"),
+ NULL
+};
+
+int diff_no_index(struct rev_info *revs,
+ int implicit_no_index,
+ int argc, const char **argv)
{
- int i;
+ int i, no_index;
const char *paths[2];
struct strbuf replacement = STRBUF_INIT;
const char *prefix = revs->prefix;
-
- /*
- * FIXME: --no-index should not look at index and we should be
- * able to pass NULL repo. Maybe later.
- */
- repo_diff_setup(r, &revs->diffopt);
- for (i = 1; i < argc - 2; ) {
- int j;
- if (!strcmp(argv[i], "--no-index"))
- i++;
- else if (!strcmp(argv[i], "--"))
- i++;
- else {
- j = diff_opt_parse(&revs->diffopt, argv + i, argc - i,
- revs->prefix);
- if (j <= 0)
- die("invalid diff option/value: %s", argv[i]);
- i += j;
- }
+ struct option no_index_options[] = {
+ OPT_BOOL_F(0, "no-index", &no_index, "",
+ PARSE_OPT_NONEG | PARSE_OPT_HIDDEN),
+ OPT_END(),
+ };
+ struct option *options;
+
+ options = parse_options_concat(no_index_options,
+ revs->diffopt.parseopts);
+ argc = parse_options(argc, argv, revs->prefix, options,
+ diff_no_index_usage, 0);
+ if (argc != 2) {
+ if (implicit_no_index)
+ warning(_("Not a git repository. Use --no-index to "
+ "compare two paths outside a working tree"));
+ usage_with_options(diff_no_index_usage, options);
}
-
+ FREE_AND_NULL(options);
for (i = 0; i < 2; i++) {
const char *p = argv[argc - 2 + i];
if (!strcmp(p, "-"))
revs->diffopt.flags.exit_with_status = 1;
if (queue_diff(&revs->diffopt, paths[0], paths[1]))
- exit(1);
+ return 1;
diff_set_mnemonic_prefix(&revs->diffopt, "1/", "2/");
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
* The return code for --no-index imitates diff(1):
* 0 = no changes, 1 = changes, else error
*/
- exit(diff_result_code(&revs->diffopt, 0));
+ return diff_result_code(&revs->diffopt, 0);
}
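Because diff_no_index() now reports its status instead of exit()ing, the builtin is expected to forward the diff(1)-style code itself; a hedged sketch of such a caller (cmd_diff_paths is illustrative and not part of this patch):

    int cmd_diff_paths(int argc, const char **argv, const char *prefix)
    {
    	struct rev_info revs;

    	repo_init_revisions(the_repository, &revs, prefix);
    	/* non-zero second argument: --no-index was implied, not typed */
    	return diff_no_index(&revs, 1, argc, argv);
    }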
#include "argv-array.h"
#include "graph.h"
#include "packfile.h"
+#include "parse-options.h"
#include "help.h"
+#include "fetch-object.h"
#ifdef NO_FAST_WORKING_DIRECTORY
#define FAST_WORKING_DIRECTORY 0
[DIFF_FILE_NEW_BOLD] = "newBold",
};
-static NORETURN void die_want_option(const char *option_name)
-{
- die(_("option '%s' requires a value"), option_name);
-}
-
define_list_config_array_extra(color_diff_slots, {"plain"});
static int parse_diff_color_slot(const char *var)
options->submodule_format = DIFF_SUBMODULE_SHORT;
else if (!strcmp(value, "diff"))
options->submodule_format = DIFF_SUBMODULE_INLINE_DIFF;
+ /*
+ * Please update $__git_diff_submodule_formats in
+ * git-completion.bash when you add new formats.
+ */
else
return -1;
return 0;
return XDF_PATIENCE_DIFF;
else if (!strcasecmp(value, "histogram"))
return XDF_HISTOGRAM_DIFF;
+ /*
+ * Please update $__git_diff_algorithms in git-completion.bash
+ * when you add new algorithms.
+ */
return -1;
}
return ws_blank_line(line, len, ecbdata->ws_rule);
}
-static void emit_add_line(const char *reset,
- struct emit_callback *ecbdata,
+static void emit_add_line(struct emit_callback *ecbdata,
const char *line, int len)
{
unsigned flags = WSEH_NEW | ecbdata->ws_rule;
emit_diff_symbol(ecbdata->opt, DIFF_SYMBOL_PLUS, line, len, flags);
}
-static void emit_del_line(const char *reset,
- struct emit_callback *ecbdata,
+static void emit_del_line(struct emit_callback *ecbdata,
const char *line, int len)
{
unsigned flags = WSEH_OLD | ecbdata->ws_rule;
emit_diff_symbol(ecbdata->opt, DIFF_SYMBOL_MINUS, line, len, flags);
}
-static void emit_context_line(const char *reset,
- struct emit_callback *ecbdata,
+static void emit_context_line(struct emit_callback *ecbdata,
const char *line, int len)
{
unsigned flags = WSEH_CONTEXT | ecbdata->ws_rule;
int prefix, const char *data, int size)
{
const char *endp = NULL;
- const char *reset = diff_get_color(ecb->color_diff, DIFF_RESET);
while (0 < size) {
int len;
len = endp ? (endp - data + 1) : size;
if (prefix != '+') {
ecb->lno_in_preimage++;
- emit_del_line(reset, ecb, data, len);
+ emit_del_line(ecb, data, len);
} else {
ecb->lno_in_postimage++;
- emit_add_line(reset, ecb, data, len);
+ emit_add_line(ecb, data, len);
}
size -= len;
data += len;
return msgbuf->buf;
}
-static unsigned long sane_truncate_line(struct emit_callback *ecb, char *line, unsigned long len)
+static unsigned long sane_truncate_line(char *line, unsigned long len)
{
const char *cp;
unsigned long allot;
static void fn_out_consume(void *priv, char *line, unsigned long len)
{
struct emit_callback *ecbdata = priv;
- const char *reset = diff_get_color(ecbdata->color_diff, DIFF_RESET);
struct diff_options *o = ecbdata->opt;
o->found_changes = 1;
if (line[0] == '@') {
if (ecbdata->diff_words)
diff_words_flush(ecbdata);
- len = sane_truncate_line(ecbdata, line, len);
+ len = sane_truncate_line(line, len);
find_lno(line, ecbdata);
emit_hunk_header(ecbdata, line, len);
return;
switch (line[0]) {
case '+':
ecbdata->lno_in_postimage++;
- emit_add_line(reset, ecbdata, line + 1, len - 1);
+ emit_add_line(ecbdata, line + 1, len - 1);
break;
case '-':
ecbdata->lno_in_preimage++;
- emit_del_line(reset, ecbdata, line + 1, len - 1);
+ emit_del_line(ecbdata, line + 1, len - 1);
break;
case ' ':
ecbdata->lno_in_postimage++;
ecbdata->lno_in_preimage++;
- emit_context_line(reset, ecbdata, line + 1, len - 1);
+ emit_context_line(ecbdata, line + 1, len - 1);
break;
default:
/* incomplete line at the end */
struct diff_filespec *one,
struct diff_filespec *two,
const char *xfrm_msg,
- int complete_rewrite,
struct diff_options *o)
{
struct argv_array argv = ARGV_ARRAY_INIT;
}
if (pgm) {
- run_external_diff(pgm, name, other, one, two, xfrm_msg,
- complete_rewrite, o);
+ run_external_diff(pgm, name, other, one, two, xfrm_msg, o);
return;
}
if (one && two)
builtin_checkdiff(name, other, attr_path, p->one, p->two, o);
}
+static void prep_parse_options(struct diff_options *options);
+
void repo_diff_setup(struct repository *r, struct diff_options *options)
{
memcpy(options, &default_diff_options, sizeof(*options));
options->color_moved = diff_color_moved_default;
options->color_moved_ws_handling = diff_color_moved_ws_default;
+
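+	/* build the parse-options table consulted by diff_opt_parse() */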
+ prep_parse_options(options);
}
void diff_setup_done(struct diff_options *options)
if (!options->use_color || external_diff())
options->color_moved = 0;
-}
-static int opt_arg(const char *arg, int arg_short, const char *arg_long, int *val)
-{
- char c, *eq;
- int len;
-
- if (*arg != '-')
- return 0;
- c = *++arg;
- if (!c)
- return 0;
- if (c == arg_short) {
- c = *++arg;
- if (!c)
- return 1;
- if (val && isdigit(c)) {
- char *end;
- int n = strtoul(arg, &end, 10);
- if (*end)
- return 0;
- *val = n;
- return 1;
- }
- return 0;
- }
- if (c != '-')
- return 0;
- arg++;
- eq = strchrnul(arg, '=');
- len = eq - arg;
- if (!len || strncmp(arg, arg_long, len))
- return 0;
- if (*eq) {
- int n;
- char *end;
- if (!isdigit(*++eq))
- return 0;
- n = strtoul(eq, &end, 10);
- if (*end)
- return 0;
- *val = n;
- }
- return 1;
-}
-
-static int diff_scoreopt_parse(const char *opt);
-
-static inline int short_opt(char opt, const char **argv,
- const char **optarg)
-{
- const char *arg = argv[0];
- if (arg[0] != '-' || arg[1] != opt)
- return 0;
- if (arg[2] != '\0') {
- *optarg = arg + 2;
- return 1;
- }
- if (!argv[1])
- die("Option '%c' requires a value", opt);
- *optarg = argv[1];
- return 2;
+ FREE_AND_NULL(options->parseopts);
}
int parse_long_opt(const char *opt, const char **argv,
return 2;
}
-static int stat_opt(struct diff_options *options, const char **av)
+static int diff_opt_stat(const struct option *opt, const char *value, int unset)
{
- const char *arg = av[0];
- char *end;
+ struct diff_options *options = opt->value;
int width = options->stat_width;
int name_width = options->stat_name_width;
int graph_width = options->stat_graph_width;
int count = options->stat_count;
- int argcount = 1;
+ char *end;
- if (!skip_prefix(arg, "--stat", &arg))
- BUG("stat option does not begin with --stat: %s", arg);
- end = (char *)arg;
+ BUG_ON_OPT_NEG(unset);
- switch (*arg) {
- case '-':
- if (skip_prefix(arg, "-width", &arg)) {
- if (*arg == '=')
- width = strtoul(arg + 1, &end, 10);
- else if (!*arg && !av[1])
- die_want_option("--stat-width");
- else if (!*arg) {
- width = strtoul(av[1], &end, 10);
- argcount = 2;
- }
- } else if (skip_prefix(arg, "-name-width", &arg)) {
- if (*arg == '=')
- name_width = strtoul(arg + 1, &end, 10);
- else if (!*arg && !av[1])
- die_want_option("--stat-name-width");
- else if (!*arg) {
- name_width = strtoul(av[1], &end, 10);
- argcount = 2;
- }
- } else if (skip_prefix(arg, "-graph-width", &arg)) {
- if (*arg == '=')
- graph_width = strtoul(arg + 1, &end, 10);
- else if (!*arg && !av[1])
- die_want_option("--stat-graph-width");
- else if (!*arg) {
- graph_width = strtoul(av[1], &end, 10);
- argcount = 2;
- }
- } else if (skip_prefix(arg, "-count", &arg)) {
- if (*arg == '=')
- count = strtoul(arg + 1, &end, 10);
- else if (!*arg && !av[1])
- die_want_option("--stat-count");
- else if (!*arg) {
- count = strtoul(av[1], &end, 10);
- argcount = 2;
- }
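+	/*
+	 * One callback serves --stat and all of its --stat-<part>=<n>
+	 * variants; dispatch on the long option name.
+	 */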
+ if (!strcmp(opt->long_name, "stat")) {
+ if (value) {
+ width = strtoul(value, &end, 10);
+ if (*end == ',')
+ name_width = strtoul(end+1, &end, 10);
+ if (*end == ',')
+ count = strtoul(end+1, &end, 10);
+ if (*end)
+ return error(_("invalid --stat value: %s"), value);
}
- break;
- case '=':
- width = strtoul(arg+1, &end, 10);
- if (*end == ',')
- name_width = strtoul(end+1, &end, 10);
- if (*end == ',')
- count = strtoul(end+1, &end, 10);
- }
+ } else if (!strcmp(opt->long_name, "stat-width")) {
+ width = strtoul(value, &end, 10);
+ if (*end)
+ return error(_("%s expects a numerical value"),
+ opt->long_name);
+ } else if (!strcmp(opt->long_name, "stat-name-width")) {
+ name_width = strtoul(value, &end, 10);
+ if (*end)
+ return error(_("%s expects a numerical value"),
+ opt->long_name);
+ } else if (!strcmp(opt->long_name, "stat-graph-width")) {
+ graph_width = strtoul(value, &end, 10);
+ if (*end)
+ return error(_("%s expects a numerical value"),
+ opt->long_name);
+ } else if (!strcmp(opt->long_name, "stat-count")) {
+ count = strtoul(value, &end, 10);
+ if (*end)
+ return error(_("%s expects a numerical value"),
+ opt->long_name);
+ } else
+ BUG("%s should not get here", opt->long_name);
- /* Important! This checks all the error cases! */
- if (*end)
- return 0;
options->output_format |= DIFF_FORMAT_DIFFSTAT;
options->stat_name_width = name_width;
options->stat_graph_width = graph_width;
options->stat_width = width;
options->stat_count = count;
- return argcount;
+ return 0;
}
static int parse_dirstat_opt(struct diff_options *options, const char *params)
return 1;
}
-static int parse_submodule_opt(struct diff_options *options, const char *value)
-{
- if (parse_submodule_params(options, value))
- die(_("Failed to parse --submodule option parameter: '%s'"),
- value);
- return 1;
-}
-
static const char diff_status_letters[] = {
DIFF_STATUS_ADDED,
DIFF_STATUS_COPIED,
return opt->filter & filter_bit[(int) status];
}
-static int parse_diff_filter_opt(const char *optarg, struct diff_options *opt)
+unsigned diff_filter_bit(char status)
{
+ prepare_filter_bits();
+ return filter_bit[(int) status];
+}
+
+static int diff_opt_diff_filter(const struct option *option,
+ const char *optarg, int unset)
+{
+ struct diff_options *opt = option->value;
int i, optch;
+ BUG_ON_OPT_NEG(unset);
prepare_filter_bits();
/*
bit = (0 <= optch && optch <= 'Z') ? filter_bit[optch] : 0;
if (!bit)
- return optarg[i];
+ return error(_("unknown change class '%c' in --diff-filter=%s"),
+ optarg[i], optarg);
if (negate)
opt->filter &= ~bit;
else
*fmt |= DIFF_FORMAT_PATCH;
}
-static int parse_ws_error_highlight_opt(struct diff_options *opt, const char *arg)
+static int diff_opt_ws_error_highlight(const struct option *option,
+ const char *arg, int unset)
{
+ struct diff_options *opt = option->value;
int val = parse_ws_error_highlight(arg);
- if (val < 0) {
- error("unknown value after ws-error-highlight=%.*s",
- -1 - val, arg);
- return 0;
- }
+ BUG_ON_OPT_NEG(unset);
+ if (val < 0)
+ return error(_("unknown value after ws-error-highlight=%.*s"),
+ -1 - val, arg);
opt->ws_error_highlight = val;
- return 1;
+ return 0;
}
-static int parse_objfind_opt(struct diff_options *opt, const char *arg)
+static int diff_opt_find_object(const struct option *option,
+ const char *arg, int unset)
{
+ struct diff_options *opt = option->value;
struct object_id oid;
+ BUG_ON_OPT_NEG(unset);
if (get_oid(arg, &oid))
- return error("unable to resolve '%s'", arg);
+ return error(_("unable to resolve '%s'"), arg);
if (!opt->objfind)
opt->objfind = xcalloc(1, sizeof(*opt->objfind));
opt->flags.recursive = 1;
opt->flags.tree_in_recursive = 1;
oidset_insert(opt->objfind, &oid);
- return 1;
+ return 0;
}
-int diff_opt_parse(struct diff_options *options,
- const char **av, int ac, const char *prefix)
+static int diff_opt_anchored(const struct option *opt,
+ const char *arg, int unset)
{
- const char *arg = av[0];
- const char *optarg;
- int argcount;
+ struct diff_options *options = opt->value;
- if (!prefix)
- prefix = "";
+ BUG_ON_OPT_NEG(unset);
+ options->xdl_opts = DIFF_WITH_ALG(options, PATIENCE_DIFF);
+ ALLOC_GROW(options->anchors, options->anchors_nr + 1,
+ options->anchors_alloc);
+ options->anchors[options->anchors_nr++] = xstrdup(arg);
+ return 0;
+}
- /* Output format options */
- if (!strcmp(arg, "-p") || !strcmp(arg, "-u") || !strcmp(arg, "--patch")
- || opt_arg(arg, 'U', "unified", &options->context))
- enable_patch_output(&options->output_format);
- else if (!strcmp(arg, "--raw"))
- options->output_format |= DIFF_FORMAT_RAW;
- else if (!strcmp(arg, "--patch-with-raw")) {
- enable_patch_output(&options->output_format);
- options->output_format |= DIFF_FORMAT_RAW;
- } else if (!strcmp(arg, "--numstat"))
- options->output_format |= DIFF_FORMAT_NUMSTAT;
- else if (!strcmp(arg, "--shortstat"))
- options->output_format |= DIFF_FORMAT_SHORTSTAT;
- else if (skip_prefix(arg, "-X", &arg) ||
- skip_to_optional_arg(arg, "--dirstat", &arg))
- return parse_dirstat_opt(options, arg);
- else if (!strcmp(arg, "--cumulative"))
- return parse_dirstat_opt(options, "cumulative");
- else if (skip_to_optional_arg(arg, "--dirstat-by-file", &arg)) {
- parse_dirstat_opt(options, "files");
- return parse_dirstat_opt(options, arg);
- }
- else if (!strcmp(arg, "--check"))
- options->output_format |= DIFF_FORMAT_CHECKDIFF;
- else if (!strcmp(arg, "--summary"))
- options->output_format |= DIFF_FORMAT_SUMMARY;
- else if (!strcmp(arg, "--patch-with-stat")) {
- enable_patch_output(&options->output_format);
- options->output_format |= DIFF_FORMAT_DIFFSTAT;
- } else if (!strcmp(arg, "--name-only"))
- options->output_format |= DIFF_FORMAT_NAME;
- else if (!strcmp(arg, "--name-status"))
- options->output_format |= DIFF_FORMAT_NAME_STATUS;
- else if (!strcmp(arg, "-s") || !strcmp(arg, "--no-patch"))
- options->output_format |= DIFF_FORMAT_NO_OUTPUT;
- else if (starts_with(arg, "--stat"))
- /* --stat, --stat-width, --stat-name-width, or --stat-count */
- return stat_opt(options, av);
- else if (!strcmp(arg, "--compact-summary")) {
- options->flags.stat_with_summary = 1;
- options->output_format |= DIFF_FORMAT_DIFFSTAT;
- } else if (!strcmp(arg, "--no-compact-summary"))
- options->flags.stat_with_summary = 0;
- else if (skip_prefix(arg, "--output-indicator-new=", &arg))
- options->output_indicators[OUTPUT_INDICATOR_NEW] = arg[0];
- else if (skip_prefix(arg, "--output-indicator-old=", &arg))
- options->output_indicators[OUTPUT_INDICATOR_OLD] = arg[0];
- else if (skip_prefix(arg, "--output-indicator-context=", &arg))
- options->output_indicators[OUTPUT_INDICATOR_CONTEXT] = arg[0];
-
- /* renames options */
- else if (starts_with(arg, "-B") ||
- skip_to_optional_arg(arg, "--break-rewrites", NULL)) {
- if ((options->break_opt = diff_scoreopt_parse(arg)) == -1)
- return error("invalid argument to -B: %s", arg+2);
- }
- else if (starts_with(arg, "-M") ||
- skip_to_optional_arg(arg, "--find-renames", NULL)) {
- if ((options->rename_score = diff_scoreopt_parse(arg)) == -1)
- return error("invalid argument to -M: %s", arg+2);
- options->detect_rename = DIFF_DETECT_RENAME;
- }
- else if (!strcmp(arg, "-D") || !strcmp(arg, "--irreversible-delete")) {
- options->irreversible_delete = 1;
- }
- else if (starts_with(arg, "-C") ||
- skip_to_optional_arg(arg, "--find-copies", NULL)) {
- if (options->detect_rename == DIFF_DETECT_COPY)
- options->flags.find_copies_harder = 1;
- if ((options->rename_score = diff_scoreopt_parse(arg)) == -1)
- return error("invalid argument to -C: %s", arg+2);
- options->detect_rename = DIFF_DETECT_COPY;
+static int diff_opt_binary(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+ enable_patch_output(&options->output_format);
+ options->flags.binary = 1;
+ return 0;
+}
+
+static int diff_opt_break_rewrites(const struct option *opt,
+ const char *arg, int unset)
+{
+ int *break_opt = opt->value;
+ int opt1, opt2;
+
+ BUG_ON_OPT_NEG(unset);
+ if (!arg)
+ arg = "";
+ opt1 = parse_rename_score(&arg);
+ if (*arg == 0)
+ opt2 = 0;
+ else if (*arg != '/')
+ return error(_("%s expects <n>/<m> form"), opt->long_name);
+ else {
+ arg++;
+ opt2 = parse_rename_score(&arg);
}
- else if (!strcmp(arg, "--no-renames"))
- options->detect_rename = 0;
- else if (!strcmp(arg, "--rename-empty"))
- options->flags.rename_empty = 1;
- else if (!strcmp(arg, "--no-rename-empty"))
- options->flags.rename_empty = 0;
- else if (skip_to_optional_arg_default(arg, "--relative", &arg, NULL)) {
- options->flags.relative_name = 1;
- if (arg)
- options->prefix = arg;
- }
-
- /* xdiff options */
- else if (!strcmp(arg, "--minimal"))
- DIFF_XDL_SET(options, NEED_MINIMAL);
- else if (!strcmp(arg, "--no-minimal"))
- DIFF_XDL_CLR(options, NEED_MINIMAL);
- else if (!strcmp(arg, "-w") || !strcmp(arg, "--ignore-all-space"))
- DIFF_XDL_SET(options, IGNORE_WHITESPACE);
- else if (!strcmp(arg, "-b") || !strcmp(arg, "--ignore-space-change"))
- DIFF_XDL_SET(options, IGNORE_WHITESPACE_CHANGE);
- else if (!strcmp(arg, "--ignore-space-at-eol"))
- DIFF_XDL_SET(options, IGNORE_WHITESPACE_AT_EOL);
- else if (!strcmp(arg, "--ignore-cr-at-eol"))
- DIFF_XDL_SET(options, IGNORE_CR_AT_EOL);
- else if (!strcmp(arg, "--ignore-blank-lines"))
- DIFF_XDL_SET(options, IGNORE_BLANK_LINES);
- else if (!strcmp(arg, "--indent-heuristic"))
- DIFF_XDL_SET(options, INDENT_HEURISTIC);
- else if (!strcmp(arg, "--no-indent-heuristic"))
- DIFF_XDL_CLR(options, INDENT_HEURISTIC);
- else if (!strcmp(arg, "--patience")) {
- int i;
- options->xdl_opts = DIFF_WITH_ALG(options, PATIENCE_DIFF);
- /*
- * Both --patience and --anchored use PATIENCE_DIFF
- * internally, so remove any anchors previously
- * specified.
- */
- for (i = 0; i < options->anchors_nr; i++)
- free(options->anchors[i]);
- options->anchors_nr = 0;
- } else if (!strcmp(arg, "--histogram"))
- options->xdl_opts = DIFF_WITH_ALG(options, HISTOGRAM_DIFF);
- else if ((argcount = parse_long_opt("diff-algorithm", av, &optarg))) {
- long value = parse_algorithm_value(optarg);
- if (value < 0)
- return error("option diff-algorithm accepts \"myers\", "
- "\"minimal\", \"patience\" and \"histogram\"");
- /* clear out previous settings */
- DIFF_XDL_CLR(options, NEED_MINIMAL);
- options->xdl_opts &= ~XDF_DIFF_ALGORITHM_MASK;
- options->xdl_opts |= value;
- return argcount;
- } else if (skip_prefix(arg, "--anchored=", &arg)) {
- options->xdl_opts = DIFF_WITH_ALG(options, PATIENCE_DIFF);
- ALLOC_GROW(options->anchors, options->anchors_nr + 1,
- options->anchors_alloc);
- options->anchors[options->anchors_nr++] = xstrdup(arg);
- }
-
- /* flags options */
- else if (!strcmp(arg, "--binary")) {
- enable_patch_output(&options->output_format);
- options->flags.binary = 1;
- }
- else if (!strcmp(arg, "--full-index"))
- options->flags.full_index = 1;
- else if (!strcmp(arg, "-a") || !strcmp(arg, "--text"))
- options->flags.text = 1;
- else if (!strcmp(arg, "-R"))
- options->flags.reverse_diff = 1;
- else if (!strcmp(arg, "--find-copies-harder"))
- options->flags.find_copies_harder = 1;
- else if (!strcmp(arg, "--follow"))
- options->flags.follow_renames = 1;
- else if (!strcmp(arg, "--no-follow")) {
- options->flags.follow_renames = 0;
- options->flags.default_follow_renames = 0;
- } else if (skip_to_optional_arg_default(arg, "--color", &arg, "always")) {
- int value = git_config_colorbool(NULL, arg);
- if (value < 0)
- return error("option `color' expects \"always\", \"auto\", or \"never\"");
- options->use_color = value;
- }
- else if (!strcmp(arg, "--no-color"))
- options->use_color = 0;
- else if (!strcmp(arg, "--color-moved")) {
+ if (*arg != 0)
+ return error(_("%s expects <n>/<m> form"), opt->long_name);
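+	/* pack the break score (low 16 bits) and the merge score (high 16 bits) */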
+ *break_opt = opt1 | (opt2 << 16);
+ return 0;
+}
+
+static int diff_opt_char(const struct option *opt,
+ const char *arg, int unset)
+{
+ char *value = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (arg[1])
+ return error(_("%s expects a character, got '%s'"),
+ opt->long_name, arg);
+ *value = arg[0];
+ return 0;
+}
+
+static int diff_opt_color_moved(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ if (unset) {
+ options->color_moved = COLOR_MOVED_NO;
+ } else if (!arg) {
if (diff_color_moved_default)
options->color_moved = diff_color_moved_default;
if (options->color_moved == COLOR_MOVED_NO)
options->color_moved = COLOR_MOVED_DEFAULT;
- } else if (!strcmp(arg, "--no-color-moved"))
- options->color_moved = COLOR_MOVED_NO;
- else if (skip_prefix(arg, "--color-moved=", &arg)) {
+ } else {
int cm = parse_color_moved(arg);
if (cm < 0)
- return error("bad --color-moved argument: %s", arg);
+ return error(_("bad --color-moved argument: %s"), arg);
options->color_moved = cm;
- } else if (!strcmp(arg, "--no-color-moved-ws")) {
+ }
+ return 0;
+}
+
+static int diff_opt_color_moved_ws(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+ unsigned cm;
+
+ if (unset) {
options->color_moved_ws_handling = 0;
- } else if (skip_prefix(arg, "--color-moved-ws=", &arg)) {
- unsigned cm = parse_color_moved_ws(arg);
- if (cm & COLOR_MOVED_WS_ERROR)
- return -1;
- options->color_moved_ws_handling = cm;
- } else if (skip_to_optional_arg_default(arg, "--color-words", &options->word_regex, NULL)) {
- options->use_color = 1;
- options->word_diff = DIFF_WORDS_COLOR;
+ return 0;
}
- else if (!strcmp(arg, "--word-diff")) {
- if (options->word_diff == DIFF_WORDS_NONE)
- options->word_diff = DIFF_WORDS_PLAIN;
+
+ cm = parse_color_moved_ws(arg);
+ if (cm & COLOR_MOVED_WS_ERROR)
+ return error(_("invalid mode '%s' in --color-moved-ws"), arg);
+ options->color_moved_ws_handling = cm;
+ return 0;
+}
+
+static int diff_opt_color_words(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ options->use_color = 1;
+ options->word_diff = DIFF_WORDS_COLOR;
+ options->word_regex = arg;
+ return 0;
+}
+
+static int diff_opt_compact_summary(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_ARG(arg);
+ if (unset) {
+ options->flags.stat_with_summary = 0;
+ } else {
+ options->flags.stat_with_summary = 1;
+ options->output_format |= DIFF_FORMAT_DIFFSTAT;
+ }
+ return 0;
+}
+
+static int diff_opt_diff_algorithm(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+ long value = parse_algorithm_value(arg);
+
+ BUG_ON_OPT_NEG(unset);
+ if (value < 0)
+ return error(_("option diff-algorithm accepts \"myers\", "
+ "\"minimal\", \"patience\" and \"histogram\""));
+
+ /* clear out previous settings */
+ DIFF_XDL_CLR(options, NEED_MINIMAL);
+ options->xdl_opts &= ~XDF_DIFF_ALGORITHM_MASK;
+ options->xdl_opts |= value;
+ return 0;
+}
+
+static int diff_opt_dirstat(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (!strcmp(opt->long_name, "cumulative")) {
+ if (arg)
+			BUG("how come --cumulative takes a value?");
+ arg = "cumulative";
+ } else if (!strcmp(opt->long_name, "dirstat-by-file"))
+ parse_dirstat_opt(options, "files");
+ parse_dirstat_opt(options, arg ? arg : "");
+ return 0;
+}
+
+static int diff_opt_find_copies(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (!arg)
+ arg = "";
+ options->rename_score = parse_rename_score(&arg);
+ if (*arg != 0)
+ return error(_("invalid argument to %s"), opt->long_name);
+
+ if (options->detect_rename == DIFF_DETECT_COPY)
+ options->flags.find_copies_harder = 1;
+ else
+ options->detect_rename = DIFF_DETECT_COPY;
+
+ return 0;
+}
+
+static int diff_opt_find_renames(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (!arg)
+ arg = "";
+ options->rename_score = parse_rename_score(&arg);
+ if (*arg != 0)
+ return error(_("invalid argument to %s"), opt->long_name);
+
+ options->detect_rename = DIFF_DETECT_RENAME;
+ return 0;
+}
+
+static int diff_opt_follow(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_ARG(arg);
+ if (unset) {
+ options->flags.follow_renames = 0;
+ options->flags.default_follow_renames = 0;
+ } else {
+ options->flags.follow_renames = 1;
+ }
+ return 0;
+}
+
+static int diff_opt_ignore_submodules(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (!arg)
+ arg = "all";
+ options->flags.override_submodule_config = 1;
+ handle_ignore_submodules_arg(options, arg);
+ return 0;
+}
+
+static int diff_opt_line_prefix(const struct option *opt,
+ const char *optarg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ options->line_prefix = optarg;
+ options->line_prefix_length = strlen(options->line_prefix);
+ graph_setup_line_prefix(options);
+ return 0;
+}
+
+static int diff_opt_no_prefix(const struct option *opt,
+ const char *optarg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(optarg);
+ options->a_prefix = "";
+ options->b_prefix = "";
+ return 0;
+}
+
+static enum parse_opt_result diff_opt_output(struct parse_opt_ctx_t *ctx,
+ const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+ char *path;
+
+ BUG_ON_OPT_NEG(unset);
+ path = prefix_filename(ctx->prefix, arg);
+ options->file = xfopen(path, "w");
+ options->close_file = 1;
+ if (options->use_color != GIT_COLOR_ALWAYS)
+ options->use_color = GIT_COLOR_NEVER;
+ free(path);
+ return 0;
+}
+
+static int diff_opt_patience(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+ int i;
+
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
+ options->xdl_opts = DIFF_WITH_ALG(options, PATIENCE_DIFF);
+ /*
+ * Both --patience and --anchored use PATIENCE_DIFF
+ * internally, so remove any anchors previously
+ * specified.
+ */
+ for (i = 0; i < options->anchors_nr; i++)
+ free(options->anchors[i]);
+ options->anchors_nr = 0;
+ return 0;
+}
+
+static int diff_opt_pickaxe_regex(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ options->pickaxe = arg;
+ options->pickaxe_opts |= DIFF_PICKAXE_KIND_G;
+ return 0;
+}
+
+static int diff_opt_pickaxe_string(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ options->pickaxe = arg;
+ options->pickaxe_opts |= DIFF_PICKAXE_KIND_S;
+ return 0;
+}
+
+static int diff_opt_relative(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ options->flags.relative_name = 1;
+ if (arg)
+ options->prefix = arg;
+ return 0;
+}
+
+static int diff_opt_submodule(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (!arg)
+ arg = "log";
+ if (parse_submodule_params(options, arg))
+ return error(_("failed to parse --submodule option parameter: '%s'"),
+ arg);
+ return 0;
+}
+
+static int diff_opt_textconv(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_ARG(arg);
+ if (unset) {
+ options->flags.allow_textconv = 0;
+ } else {
+ options->flags.allow_textconv = 1;
+ options->flags.textconv_set_via_cmdline = 1;
}
- else if (skip_prefix(arg, "--word-diff=", &arg)) {
+ return 0;
+}
+
+static int diff_opt_unified(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+ char *s;
+
+ BUG_ON_OPT_NEG(unset);
+
+ options->context = strtol(arg, &s, 10);
+ if (*s)
+ return error(_("%s expects a numerical value"), "--unified");
+ enable_patch_output(&options->output_format);
+
+ return 0;
+}
+
+static int diff_opt_word_diff(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (arg) {
if (!strcmp(arg, "plain"))
options->word_diff = DIFF_WORDS_PLAIN;
else if (!strcmp(arg, "color")) {
else if (!strcmp(arg, "none"))
options->word_diff = DIFF_WORDS_NONE;
else
- die("bad --word-diff argument: %s", arg);
- }
- else if ((argcount = parse_long_opt("word-diff-regex", av, &optarg))) {
+ return error(_("bad --word-diff argument: %s"), arg);
+ } else {
if (options->word_diff == DIFF_WORDS_NONE)
options->word_diff = DIFF_WORDS_PLAIN;
- options->word_regex = optarg;
- return argcount;
}
- else if (!strcmp(arg, "--exit-code"))
- options->flags.exit_with_status = 1;
- else if (!strcmp(arg, "--quiet"))
- options->flags.quick = 1;
- else if (!strcmp(arg, "--ext-diff"))
- options->flags.allow_external = 1;
- else if (!strcmp(arg, "--no-ext-diff"))
- options->flags.allow_external = 0;
- else if (!strcmp(arg, "--textconv")) {
- options->flags.allow_textconv = 1;
- options->flags.textconv_set_via_cmdline = 1;
- } else if (!strcmp(arg, "--no-textconv"))
- options->flags.allow_textconv = 0;
- else if (skip_to_optional_arg_default(arg, "--ignore-submodules", &arg, "all")) {
- options->flags.override_submodule_config = 1;
- handle_ignore_submodules_arg(options, arg);
- } else if (skip_to_optional_arg_default(arg, "--submodule", &arg, "log"))
- return parse_submodule_opt(options, arg);
- else if (skip_prefix(arg, "--ws-error-highlight=", &arg))
- return parse_ws_error_highlight_opt(options, arg);
- else if (!strcmp(arg, "--ita-invisible-in-index"))
- options->ita_invisible_in_index = 1;
- else if (!strcmp(arg, "--ita-visible-in-index"))
- options->ita_invisible_in_index = 0;
-
- /* misc options */
- else if (!strcmp(arg, "-z"))
- options->line_termination = 0;
- else if ((argcount = short_opt('l', av, &optarg))) {
- options->rename_limit = strtoul(optarg, NULL, 10);
- return argcount;
- }
- else if ((argcount = short_opt('S', av, &optarg))) {
- options->pickaxe = optarg;
- options->pickaxe_opts |= DIFF_PICKAXE_KIND_S;
- return argcount;
- } else if ((argcount = short_opt('G', av, &optarg))) {
- options->pickaxe = optarg;
- options->pickaxe_opts |= DIFF_PICKAXE_KIND_G;
- return argcount;
- }
- else if (!strcmp(arg, "--pickaxe-all"))
- options->pickaxe_opts |= DIFF_PICKAXE_ALL;
- else if (!strcmp(arg, "--pickaxe-regex"))
- options->pickaxe_opts |= DIFF_PICKAXE_REGEX;
- else if ((argcount = short_opt('O', av, &optarg))) {
- options->orderfile = prefix_filename(prefix, optarg);
- return argcount;
- } else if (skip_prefix(arg, "--find-object=", &arg))
- return parse_objfind_opt(options, arg);
- else if ((argcount = parse_long_opt("diff-filter", av, &optarg))) {
- int offending = parse_diff_filter_opt(optarg, options);
- if (offending)
- die("unknown change class '%c' in --diff-filter=%s",
- offending, optarg);
- return argcount;
- }
- else if (!strcmp(arg, "--no-abbrev"))
- options->abbrev = 0;
- else if (!strcmp(arg, "--abbrev"))
- options->abbrev = DEFAULT_ABBREV;
- else if (skip_prefix(arg, "--abbrev=", &arg)) {
- options->abbrev = strtoul(arg, NULL, 10);
- if (options->abbrev < MINIMUM_ABBREV)
- options->abbrev = MINIMUM_ABBREV;
- else if (the_hash_algo->hexsz < options->abbrev)
- options->abbrev = the_hash_algo->hexsz;
- }
- else if ((argcount = parse_long_opt("src-prefix", av, &optarg))) {
- options->a_prefix = optarg;
- return argcount;
- }
- else if ((argcount = parse_long_opt("line-prefix", av, &optarg))) {
- options->line_prefix = optarg;
- options->line_prefix_length = strlen(options->line_prefix);
- graph_setup_line_prefix(options);
- return argcount;
- }
- else if ((argcount = parse_long_opt("dst-prefix", av, &optarg))) {
- options->b_prefix = optarg;
- return argcount;
- }
- else if (!strcmp(arg, "--no-prefix"))
- options->a_prefix = options->b_prefix = "";
- else if (opt_arg(arg, '\0', "inter-hunk-context",
- &options->interhunkcontext))
- ;
- else if (!strcmp(arg, "-W"))
- options->flags.funccontext = 1;
- else if (!strcmp(arg, "--function-context"))
- options->flags.funccontext = 1;
- else if (!strcmp(arg, "--no-function-context"))
- options->flags.funccontext = 0;
- else if ((argcount = parse_long_opt("output", av, &optarg))) {
- char *path = prefix_filename(prefix, optarg);
- options->file = xfopen(path, "w");
- options->close_file = 1;
- if (options->use_color != GIT_COLOR_ALWAYS)
- options->use_color = GIT_COLOR_NEVER;
- free(path);
- return argcount;
- } else
- return 0;
- return 1;
+ return 0;
+}
+
+static int diff_opt_word_diff_regex(const struct option *opt,
+ const char *arg, int unset)
+{
+ struct diff_options *options = opt->value;
+
+ BUG_ON_OPT_NEG(unset);
+ if (options->word_diff == DIFF_WORDS_NONE)
+ options->word_diff = DIFF_WORDS_PLAIN;
+ options->word_regex = arg;
+ return 0;
+}
+
+static void prep_parse_options(struct diff_options *options)
+{
+ struct option parseopts[] = {
+ OPT_GROUP(N_("Diff output format options")),
+ OPT_BITOP('p', "patch", &options->output_format,
+ N_("generate patch"),
+ DIFF_FORMAT_PATCH, DIFF_FORMAT_NO_OUTPUT),
+ OPT_BIT_F('s', "no-patch", &options->output_format,
+ N_("suppress diff output"),
+ DIFF_FORMAT_NO_OUTPUT, PARSE_OPT_NONEG),
+ OPT_BITOP('u', NULL, &options->output_format,
+ N_("generate patch"),
+ DIFF_FORMAT_PATCH, DIFF_FORMAT_NO_OUTPUT),
+ OPT_CALLBACK_F('U', "unified", options, N_("<n>"),
+ N_("generate diffs with <n> lines context"),
+ PARSE_OPT_NONEG, diff_opt_unified),
+ OPT_BOOL('W', "function-context", &options->flags.funccontext,
+			 N_("show whole function as context")),
+ OPT_BIT_F(0, "raw", &options->output_format,
+ N_("generate the diff in raw format"),
+ DIFF_FORMAT_RAW, PARSE_OPT_NONEG),
+ OPT_BITOP(0, "patch-with-raw", &options->output_format,
+ N_("synonym for '-p --raw'"),
+ DIFF_FORMAT_PATCH | DIFF_FORMAT_RAW,
+ DIFF_FORMAT_NO_OUTPUT),
+ OPT_BITOP(0, "patch-with-stat", &options->output_format,
+ N_("synonym for '-p --stat'"),
+ DIFF_FORMAT_PATCH | DIFF_FORMAT_DIFFSTAT,
+ DIFF_FORMAT_NO_OUTPUT),
+ OPT_BIT_F(0, "numstat", &options->output_format,
+ N_("machine friendly --stat"),
+ DIFF_FORMAT_NUMSTAT, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "shortstat", &options->output_format,
+ N_("output only the last line of --stat"),
+ DIFF_FORMAT_SHORTSTAT, PARSE_OPT_NONEG),
+ OPT_CALLBACK_F('X', "dirstat", options, N_("<param1,param2>..."),
+ N_("output the distribution of relative amount of changes for each sub-directory"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_dirstat),
+ OPT_CALLBACK_F(0, "cumulative", options, NULL,
+ N_("synonym for --dirstat=cumulative"),
+ PARSE_OPT_NONEG | PARSE_OPT_NOARG,
+ diff_opt_dirstat),
+ OPT_CALLBACK_F(0, "dirstat-by-file", options, N_("<param1,param2>..."),
+ N_("synonym for --dirstat=files,param1,param2..."),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_dirstat),
+ OPT_BIT_F(0, "check", &options->output_format,
+ N_("warn if changes introduce conflict markers or whitespace errors"),
+ DIFF_FORMAT_CHECKDIFF, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "summary", &options->output_format,
+ N_("condensed summary such as creations, renames and mode changes"),
+ DIFF_FORMAT_SUMMARY, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "name-only", &options->output_format,
+ N_("show only names of changed files"),
+ DIFF_FORMAT_NAME, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "name-status", &options->output_format,
+ N_("show only names and status of changed files"),
+ DIFF_FORMAT_NAME_STATUS, PARSE_OPT_NONEG),
+ OPT_CALLBACK_F(0, "stat", options, N_("<width>[,<name-width>[,<count>]]"),
+ N_("generate diffstat"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG, diff_opt_stat),
+ OPT_CALLBACK_F(0, "stat-width", options, N_("<width>"),
+ N_("generate diffstat with a given width"),
+ PARSE_OPT_NONEG, diff_opt_stat),
+ OPT_CALLBACK_F(0, "stat-name-width", options, N_("<width>"),
+ N_("generate diffstat with a given name width"),
+ PARSE_OPT_NONEG, diff_opt_stat),
+ OPT_CALLBACK_F(0, "stat-graph-width", options, N_("<width>"),
+ N_("generate diffstat with a given graph width"),
+ PARSE_OPT_NONEG, diff_opt_stat),
+ OPT_CALLBACK_F(0, "stat-count", options, N_("<count>"),
+ N_("generate diffstat with limited lines"),
+ PARSE_OPT_NONEG, diff_opt_stat),
+ OPT_CALLBACK_F(0, "compact-summary", options, NULL,
+ N_("generate compact summary in diffstat"),
+ PARSE_OPT_NOARG, diff_opt_compact_summary),
+ OPT_CALLBACK_F(0, "binary", options, NULL,
+ N_("output a binary diff that can be applied"),
+ PARSE_OPT_NONEG | PARSE_OPT_NOARG, diff_opt_binary),
+ OPT_BOOL(0, "full-index", &options->flags.full_index,
+ N_("show full pre- and post-image object names on the \"index\" lines")),
+ OPT_COLOR_FLAG(0, "color", &options->use_color,
+ N_("show colored diff")),
+ OPT_CALLBACK_F(0, "ws-error-highlight", options, N_("<kind>"),
+ N_("highlight whitespace errors in the 'context', 'old' or 'new' lines in the diff"),
+ PARSE_OPT_NONEG, diff_opt_ws_error_highlight),
+ OPT_SET_INT('z', NULL, &options->line_termination,
+ N_("do not munge pathnames and use NULs as output field terminators in --raw or --numstat"),
+ 0),
+ OPT__ABBREV(&options->abbrev),
+ OPT_STRING_F(0, "src-prefix", &options->a_prefix, N_("<prefix>"),
+ N_("show the given source prefix instead of \"a/\""),
+ PARSE_OPT_NONEG),
+ OPT_STRING_F(0, "dst-prefix", &options->b_prefix, N_("<prefix>"),
+			    N_("show the given destination prefix instead of \"b/\""),
+ PARSE_OPT_NONEG),
+ OPT_CALLBACK_F(0, "line-prefix", options, N_("<prefix>"),
+ N_("prepend an additional prefix to every line of output"),
+ PARSE_OPT_NONEG, diff_opt_line_prefix),
+ OPT_CALLBACK_F(0, "no-prefix", options, NULL,
+ N_("do not show any source or destination prefix"),
+ PARSE_OPT_NONEG | PARSE_OPT_NOARG, diff_opt_no_prefix),
+ OPT_INTEGER_F(0, "inter-hunk-context", &options->interhunkcontext,
+ N_("show context between diff hunks up to the specified number of lines"),
+ PARSE_OPT_NONEG),
+ OPT_CALLBACK_F(0, "output-indicator-new",
+ &options->output_indicators[OUTPUT_INDICATOR_NEW],
+ N_("<char>"),
+ N_("specify the character to indicate a new line instead of '+'"),
+ PARSE_OPT_NONEG, diff_opt_char),
+ OPT_CALLBACK_F(0, "output-indicator-old",
+ &options->output_indicators[OUTPUT_INDICATOR_OLD],
+ N_("<char>"),
+ N_("specify the character to indicate an old line instead of '-'"),
+ PARSE_OPT_NONEG, diff_opt_char),
+ OPT_CALLBACK_F(0, "output-indicator-context",
+ &options->output_indicators[OUTPUT_INDICATOR_CONTEXT],
+ N_("<char>"),
+ N_("specify the character to indicate a context instead of ' '"),
+ PARSE_OPT_NONEG, diff_opt_char),
+
+ OPT_GROUP(N_("Diff rename options")),
+ OPT_CALLBACK_F('B', "break-rewrites", &options->break_opt, N_("<n>[/<m>]"),
+ N_("break complete rewrite changes into pairs of delete and create"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_break_rewrites),
+ OPT_CALLBACK_F('M', "find-renames", options, N_("<n>"),
+ N_("detect renames"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_find_renames),
+ OPT_SET_INT_F('D', "irreversible-delete", &options->irreversible_delete,
+ N_("omit the preimage for deletes"),
+ 1, PARSE_OPT_NONEG),
+ OPT_CALLBACK_F('C', "find-copies", options, N_("<n>"),
+ N_("detect copies"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_find_copies),
+ OPT_BOOL(0, "find-copies-harder", &options->flags.find_copies_harder,
+ N_("use unmodified files as source to find copies")),
+ OPT_SET_INT_F(0, "no-renames", &options->detect_rename,
+ N_("disable rename detection"),
+ 0, PARSE_OPT_NONEG),
+ OPT_BOOL(0, "rename-empty", &options->flags.rename_empty,
+ N_("use empty blobs as rename source")),
+ OPT_CALLBACK_F(0, "follow", options, NULL,
+ N_("continue listing the history of a file beyond renames"),
+ PARSE_OPT_NOARG, diff_opt_follow),
+ OPT_INTEGER('l', NULL, &options->rename_limit,
+ N_("prevent rename/copy detection if the number of rename/copy targets exceeds given limit")),
+
+ OPT_GROUP(N_("Diff algorithm options")),
+ OPT_BIT(0, "minimal", &options->xdl_opts,
+ N_("produce the smallest possible diff"),
+ XDF_NEED_MINIMAL),
+ OPT_BIT_F('w', "ignore-all-space", &options->xdl_opts,
+ N_("ignore whitespace when comparing lines"),
+ XDF_IGNORE_WHITESPACE, PARSE_OPT_NONEG),
+ OPT_BIT_F('b', "ignore-space-change", &options->xdl_opts,
+ N_("ignore changes in amount of whitespace"),
+ XDF_IGNORE_WHITESPACE_CHANGE, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "ignore-space-at-eol", &options->xdl_opts,
+ N_("ignore changes in whitespace at EOL"),
+ XDF_IGNORE_WHITESPACE_AT_EOL, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "ignore-cr-at-eol", &options->xdl_opts,
+			  N_("ignore carriage-return at the end of line"),
+ XDF_IGNORE_CR_AT_EOL, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "ignore-blank-lines", &options->xdl_opts,
+ N_("ignore changes whose lines are all blank"),
+ XDF_IGNORE_BLANK_LINES, PARSE_OPT_NONEG),
+ OPT_BIT(0, "indent-heuristic", &options->xdl_opts,
+ N_("heuristic to shift diff hunk boundaries for easy reading"),
+ XDF_INDENT_HEURISTIC),
+ OPT_CALLBACK_F(0, "patience", options, NULL,
+ N_("generate diff using the \"patience diff\" algorithm"),
+ PARSE_OPT_NONEG | PARSE_OPT_NOARG,
+ diff_opt_patience),
+ OPT_BITOP(0, "histogram", &options->xdl_opts,
+ N_("generate diff using the \"histogram diff\" algorithm"),
+ XDF_HISTOGRAM_DIFF, XDF_DIFF_ALGORITHM_MASK),
+ OPT_CALLBACK_F(0, "diff-algorithm", options, N_("<algorithm>"),
+ N_("choose a diff algorithm"),
+ PARSE_OPT_NONEG, diff_opt_diff_algorithm),
+ OPT_CALLBACK_F(0, "anchored", options, N_("<text>"),
+ N_("generate diff using the \"anchored diff\" algorithm"),
+ PARSE_OPT_NONEG, diff_opt_anchored),
+ OPT_CALLBACK_F(0, "word-diff", options, N_("<mode>"),
+ N_("show word diff, using <mode> to delimit changed words"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG, diff_opt_word_diff),
+ OPT_CALLBACK_F(0, "word-diff-regex", options, N_("<regex>"),
+ N_("use <regex> to decide what a word is"),
+ PARSE_OPT_NONEG, diff_opt_word_diff_regex),
+ OPT_CALLBACK_F(0, "color-words", options, N_("<regex>"),
+ N_("equivalent to --word-diff=color --word-diff-regex=<regex>"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG, diff_opt_color_words),
+ OPT_CALLBACK_F(0, "color-moved", options, N_("<mode>"),
+			       N_("moved lines of code are colored differently"),
+ PARSE_OPT_OPTARG, diff_opt_color_moved),
+ OPT_CALLBACK_F(0, "color-moved-ws", options, N_("<mode>"),
+ N_("how white spaces are ignored in --color-moved"),
+ 0, diff_opt_color_moved_ws),
+
+ OPT_GROUP(N_("Diff other options")),
+ OPT_CALLBACK_F(0, "relative", options, N_("<prefix>"),
+ N_("when run from subdir, exclude changes outside and show relative paths"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_relative),
+ OPT_BOOL('a', "text", &options->flags.text,
+ N_("treat all files as text")),
+ OPT_BOOL('R', NULL, &options->flags.reverse_diff,
+ N_("swap two inputs, reverse the diff")),
+ OPT_BOOL(0, "exit-code", &options->flags.exit_with_status,
+ N_("exit with 1 if there were differences, 0 otherwise")),
+ OPT_BOOL(0, "quiet", &options->flags.quick,
+ N_("disable all output of the program")),
+ OPT_BOOL(0, "ext-diff", &options->flags.allow_external,
+ N_("allow an external diff helper to be executed")),
+ OPT_CALLBACK_F(0, "textconv", options, NULL,
+ N_("run external text conversion filters when comparing binary files"),
+ PARSE_OPT_NOARG, diff_opt_textconv),
+ OPT_CALLBACK_F(0, "ignore-submodules", options, N_("<when>"),
+ N_("ignore changes to submodules in the diff generation"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_ignore_submodules),
+ OPT_CALLBACK_F(0, "submodule", options, N_("<format>"),
+ N_("specify how differences in submodules are shown"),
+ PARSE_OPT_NONEG | PARSE_OPT_OPTARG,
+ diff_opt_submodule),
+ OPT_SET_INT_F(0, "ita-invisible-in-index", &options->ita_invisible_in_index,
+ N_("hide 'git add -N' entries from the index"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "ita-visible-in-index", &options->ita_invisible_in_index,
+ N_("treat 'git add -N' entries as real in the index"),
+ 0, PARSE_OPT_NONEG),
+ OPT_CALLBACK_F('S', NULL, options, N_("<string>"),
+ N_("look for differences that change the number of occurrences of the specified string"),
+ 0, diff_opt_pickaxe_string),
+ OPT_CALLBACK_F('G', NULL, options, N_("<regex>"),
+ N_("look for differences that change the number of occurrences of the specified regex"),
+ 0, diff_opt_pickaxe_regex),
+ OPT_BIT_F(0, "pickaxe-all", &options->pickaxe_opts,
+ N_("show all changes in the changeset with -S or -G"),
+ DIFF_PICKAXE_ALL, PARSE_OPT_NONEG),
+ OPT_BIT_F(0, "pickaxe-regex", &options->pickaxe_opts,
+ N_("treat <string> in -S as extended POSIX regular expression"),
+ DIFF_PICKAXE_REGEX, PARSE_OPT_NONEG),
+ OPT_FILENAME('O', NULL, &options->orderfile,
+ N_("control the order in which files appear in the output")),
+ OPT_CALLBACK_F(0, "find-object", options, N_("<object-id>"),
+ N_("look for differences that change the number of occurrences of the specified object"),
+ PARSE_OPT_NONEG, diff_opt_find_object),
+ OPT_CALLBACK_F(0, "diff-filter", options, N_("[(A|C|D|M|R|T|U|X|B)...[*]]"),
+ N_("select files by diff type"),
+ PARSE_OPT_NONEG, diff_opt_diff_filter),
+ { OPTION_CALLBACK, 0, "output", options, N_("<file>"),
+		  N_("output to a specific file"),
+ PARSE_OPT_NONEG, NULL, 0, diff_opt_output },
+
+ OPT_END()
+ };
+
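+	/*
+	 * The table points into this particular diff_options instance, so
+	 * each instance keeps its own heap copy; diff_setup_done() frees it.
+	 */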
+ ALLOC_ARRAY(options->parseopts, ARRAY_SIZE(parseopts));
+ memcpy(options->parseopts, parseopts, sizeof(parseopts));
+}
+
+int diff_opt_parse(struct diff_options *options,
+ const char **av, int ac, const char *prefix)
+{
+ if (!prefix)
+ prefix = "";
+
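+	/*
+	 * PARSE_OPT_ONE_SHOT parses a single option and makes parse_options()
+	 * return the number of words it consumed, matching the old calling
+	 * convention of this function.
+	 */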
+ ac = parse_options(ac, av, prefix, options->parseopts, NULL,
+ PARSE_OPT_KEEP_DASHDASH |
+ PARSE_OPT_KEEP_UNKNOWN |
+ PARSE_OPT_NO_INTERNAL_HELP |
+ PARSE_OPT_ONE_SHOT |
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ return ac;
}
int parse_rename_score(const char **cp_p)
return (int)((num >= scale) ? MAX_SCORE : (MAX_SCORE * num / scale));
}
-static int diff_scoreopt_parse(const char *opt)
-{
- int opt1, opt2, cmd;
-
- if (*opt++ != '-')
- return -1;
- cmd = *opt++;
- if (cmd == '-') {
- /* convert the long-form arguments into short-form versions */
- if (skip_prefix(opt, "break-rewrites", &opt)) {
- if (*opt == 0 || *opt++ == '=')
- cmd = 'B';
- } else if (skip_prefix(opt, "find-copies", &opt)) {
- if (*opt == 0 || *opt++ == '=')
- cmd = 'C';
- } else if (skip_prefix(opt, "find-renames", &opt)) {
- if (*opt == 0 || *opt++ == '=')
- cmd = 'M';
- }
- }
- if (cmd != 'M' && cmd != 'C' && cmd != 'B')
- return -1; /* that is not a -M, -C, or -B option */
-
- opt1 = parse_rename_score(&opt);
- if (cmd != 'B')
- opt2 = 0;
- else {
- if (*opt == 0)
- opt2 = 0;
- else if (*opt != '/')
- return -1; /* we expect -B80/99 or -B80 */
- else {
- opt++;
- opt2 = parse_rename_score(&opt);
- }
- }
- if (*opt != 0)
- return -1;
- return opt1 | (opt2 << 16);
-}
-
struct diff_queue_struct diff_queued_diff;
void diff_q(struct diff_queue_struct *queue, struct diff_filepair *dp)
return strcmp(name_a, name_b);
}
-void diffcore_fix_diff_index(struct diff_options *options)
+void diffcore_fix_diff_index(void)
{
struct diff_queue_struct *q = &diff_queued_diff;
QSORT(q->queue, q->nr, diffnamecmp);
}
+static void add_if_missing(struct repository *r,
+ struct oid_array *to_fetch,
+ const struct diff_filespec *filespec)
+{
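+	/* remember blobs missing from the local object store so they can be fetched in one batch */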
+ if (filespec && filespec->oid_valid &&
+ oid_object_info_extended(r, &filespec->oid, NULL,
+ OBJECT_INFO_FOR_PREFETCH))
+ oid_array_append(to_fetch, &filespec->oid);
+}
+
void diffcore_std(struct diff_options *options)
{
+ if (options->repo == the_repository &&
+ repository_format_partial_clone) {
+ /*
+ * Prefetch the diff pairs that are about to be flushed.
+ */
+ int i;
+ struct diff_queue_struct *q = &diff_queued_diff;
+ struct oid_array to_fetch = OID_ARRAY_INIT;
+
+ for (i = 0; i < q->nr; i++) {
+ struct diff_filepair *p = q->queue[i];
+ add_if_missing(options->repo, &to_fetch, p->one);
+ add_if_missing(options->repo, &to_fetch, p->two);
+ }
+ if (to_fetch.nr)
+ /*
+ * NEEDSWORK: Consider deduplicating the OIDs sent.
+ */
+ fetch_objects(repository_format_partial_clone,
+ to_fetch.oid, to_fetch.nr);
+ oid_array_clear(&to_fetch);
+ }
+
/* NOTE please keep the following in sync with diff_tree_combined() */
if (options->skip_stat_unmatch)
diffcore_skip_stat_unmatch(options);
#include "object.h"
#include "oidset.h"
-struct rev_info;
+struct combine_diff_path;
+struct commit;
+struct diff_filespec;
struct diff_options;
struct diff_queue_struct;
-struct strbuf;
-struct diff_filespec;
-struct userdiff_driver;
struct oid_array;
-struct commit;
-struct combine_diff_path;
+struct option;
struct repository;
+struct rev_info;
+struct strbuf;
+struct userdiff_driver;
typedef int (*pathchange_fn_t)(struct diff_options *options,
struct combine_diff_path *path);
#define DIFF_FLAGS_INIT { 0 }
struct diff_flags {
- unsigned recursive:1;
- unsigned tree_in_recursive:1;
- unsigned binary:1;
- unsigned text:1;
- unsigned full_index:1;
- unsigned silent_on_remove:1;
- unsigned find_copies_harder:1;
- unsigned follow_renames:1;
- unsigned rename_empty:1;
- unsigned has_changes:1;
- unsigned quick:1;
- unsigned no_index:1;
- unsigned allow_external:1;
- unsigned exit_with_status:1;
- unsigned reverse_diff:1;
- unsigned check_failed:1;
- unsigned relative_name:1;
- unsigned ignore_submodules:1;
- unsigned dirstat_cumulative:1;
- unsigned dirstat_by_file:1;
- unsigned allow_textconv:1;
- unsigned textconv_set_via_cmdline:1;
- unsigned diff_from_contents:1;
- unsigned dirty_submodules:1;
- unsigned ignore_untracked_in_submodules:1;
- unsigned ignore_dirty_submodules:1;
- unsigned override_submodule_config:1;
- unsigned dirstat_by_line:1;
- unsigned funccontext:1;
- unsigned default_follow_renames:1;
- unsigned stat_with_summary:1;
- unsigned suppress_diff_headers:1;
- unsigned dual_color_diffed_diffs:1;
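+	/*
+	 * These used to be single-bit fields; the parse-options table takes
+	 * the address of individual flags (e.g. &options->flags.full_index),
+	 * which is not possible with bitfields, so they are full "unsigned" now.
+	 */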
+ unsigned recursive;
+ unsigned tree_in_recursive;
+ unsigned binary;
+ unsigned text;
+ unsigned full_index;
+ unsigned silent_on_remove;
+ unsigned find_copies_harder;
+ unsigned follow_renames;
+ unsigned rename_empty;
+ unsigned has_changes;
+ unsigned quick;
+ unsigned no_index;
+ unsigned allow_external;
+ unsigned exit_with_status;
+ unsigned reverse_diff;
+ unsigned check_failed;
+ unsigned relative_name;
+ unsigned ignore_submodules;
+ unsigned dirstat_cumulative;
+ unsigned dirstat_by_file;
+ unsigned allow_textconv;
+ unsigned textconv_set_via_cmdline;
+ unsigned diff_from_contents;
+ unsigned dirty_submodules;
+ unsigned ignore_untracked_in_submodules;
+ unsigned ignore_dirty_submodules;
+ unsigned override_submodule_config;
+ unsigned dirstat_by_line;
+ unsigned funccontext;
+ unsigned default_follow_renames;
+ unsigned stat_with_summary;
+ unsigned suppress_diff_headers;
+ unsigned dual_color_diffed_diffs;
};
static inline void diff_flags_or(struct diff_flags *a,
unsigned color_moved_ws_handling;
struct repository *repo;
+ struct option *parseopts;
};
+unsigned diff_filter_bit(char status);
+
void diff_emit_submodule_del(struct diff_options *o, const char *line);
void diff_emit_submodule_add(struct diff_options *o, const char *line);
void diff_emit_submodule_untracked(struct diff_options *o, const char *path);
char status;
unsigned int mode;
struct object_id oid;
+ struct strbuf path;
} parent[FLEX_ARRAY];
};
#define combine_diff_path_size(n, l) \
#define DIFF_PICKAXE_IGNORE_CASE 32
void diffcore_std(struct diff_options *);
-void diffcore_fix_diff_index(struct diff_options *);
+void diffcore_fix_diff_index(void);
#define COMMON_DIFF_OPTIONS_HELP \
"\ncommon diff options:\n" \
int diff_result_code(struct diff_options *, int);
-void diff_no_index(struct repository *, struct rev_info *, int, const char **);
+int diff_no_index(struct rev_info *,
+ int implicit_no_index, int, const char **);
int index_differs_from(struct repository *r, const char *def,
const struct diff_flags *flags,
}
int report_path_error(const char *ps_matched,
- const struct pathspec *pathspec,
- const char *prefix)
+ const struct pathspec *pathspec)
{
/*
* Make sure all pathspec matched; otherwise it is an error.
return path_none;
}
if (!(dir->flags & DIR_NO_GITLINKS)) {
- struct object_id oid;
- if (resolve_gitlink_ref(dirname, "HEAD", &oid) == 0)
+ struct strbuf sb = STRBUF_INIT;
+ strbuf_addstr(&sb, dirname);
+ if (is_nonbare_repository_dir(&sb))
return exclude ? path_excluded : path_untracked;
+ strbuf_release(&sb);
}
return path_recurse;
}
return lstat(f, &sb) == 0;
}
+int repo_file_exists(struct repository *repo, const char *path)
+{
+ if (repo != the_repository)
+ BUG("do not know how to check file existence in arbitrary repo");
+
+ return file_exists(path);
+}
+
static int cmp_icase(char a, char b)
{
if (a == b)
struct stat_data info_exclude_stat;
struct stat_data excludes_file_stat;
uint32_t dir_flags;
- unsigned char info_exclude_sha1[20];
- unsigned char excludes_file_sha1[20];
- char exclude_per_dir[FLEX_ARRAY];
};
#define ouc_offset(x) offsetof(struct ondisk_untracked_cache, x)
-#define ouc_size(len) (ouc_offset(exclude_per_dir) + len + 1)
struct write_data {
int index; /* number of written untracked_cache_dir */
struct write_data wd;
unsigned char varbuf[16];
int varint_len;
- size_t len = strlen(untracked->exclude_per_dir);
+ const unsigned hashsz = the_hash_algo->rawsz;
- FLEX_ALLOC_MEM(ouc, exclude_per_dir, untracked->exclude_per_dir, len);
+ ouc = xcalloc(1, sizeof(*ouc));
stat_data_to_disk(&ouc->info_exclude_stat, &untracked->ss_info_exclude.stat);
stat_data_to_disk(&ouc->excludes_file_stat, &untracked->ss_excludes_file.stat);
- hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.oid.hash);
- hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.oid.hash);
ouc->dir_flags = htonl(untracked->dir_flags);
varint_len = encode_varint(untracked->ident.len, varbuf);
strbuf_add(out, varbuf, varint_len);
strbuf_addbuf(out, &untracked->ident);
- strbuf_add(out, ouc, ouc_size(len));
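+	/* fixed-size struct first, then the two exclude oids, then the NUL-terminated exclude_per_dir name */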
+ strbuf_add(out, ouc, sizeof(*ouc));
+ strbuf_add(out, untracked->ss_info_exclude.oid.hash, hashsz);
+ strbuf_add(out, untracked->ss_excludes_file.oid.hash, hashsz);
+ strbuf_add(out, untracked->exclude_per_dir, strlen(untracked->exclude_per_dir) + 1);
FREE_AND_NULL(ouc);
if (!untracked->root) {
struct read_data *rd)
{
struct untracked_cache_dir ud, *untracked;
- const unsigned char *next, *data = rd->data, *end = rd->end;
+ const unsigned char *data = rd->data, *end = rd->end;
+ const unsigned char *eos;
unsigned int value;
- int i, len;
+ int i;
memset(&ud, 0, sizeof(ud));
- next = data;
- value = decode_varint(&next);
- if (next > end)
+ value = decode_varint(&data);
+ if (data > end)
return -1;
ud.recurse = 1;
ud.untracked_alloc = value;
ud.untracked_nr = value;
if (ud.untracked_nr)
ALLOC_ARRAY(ud.untracked, ud.untracked_nr);
- data = next;
- next = data;
- ud.dirs_alloc = ud.dirs_nr = decode_varint(&next);
- if (next > end)
+ ud.dirs_alloc = ud.dirs_nr = decode_varint(&data);
+ if (data > end)
return -1;
ALLOC_ARRAY(ud.dirs, ud.dirs_nr);
- data = next;
- len = strlen((const char *)data);
- next = data + len + 1;
- if (next > rd->end)
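+	/* locate the NUL ending the directory name without reading past the end of the data */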
+ eos = memchr(data, '\0', end - data);
+ if (!eos || eos == end)
return -1;
- *untracked_ = untracked = xmalloc(st_add(sizeof(*untracked), len));
+
+ *untracked_ = untracked = xmalloc(st_add3(sizeof(*untracked), eos - data, 1));
memcpy(untracked, &ud, sizeof(ud));
- memcpy(untracked->name, data, len + 1);
- data = next;
+ memcpy(untracked->name, data, eos - data + 1);
+ data = eos + 1;
for (i = 0; i < untracked->untracked_nr; i++) {
- len = strlen((const char *)data);
- next = data + len + 1;
- if (next > rd->end)
+ eos = memchr(data, '\0', end - data);
+ if (!eos || eos == end)
return -1;
- untracked->untracked[i] = xstrdup((const char*)data);
- data = next;
+ untracked->untracked[i] = xmemdupz(data, eos - data);
+ data = eos + 1;
}
rd->ucd[rd->index++] = untracked;
rd->data = data;
for (i = 0; i < untracked->dirs_nr; i++) {
- len = read_one_dir(untracked->dirs + i, rd);
- if (len < 0)
+ if (read_one_dir(untracked->dirs + i, rd) < 0)
return -1;
}
return 0;
int ident_len;
ssize_t len;
const char *exclude_per_dir;
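+	/* the exclude oids and the exclude_per_dir string follow the fixed-size on-disk struct */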
+ const unsigned hashsz = the_hash_algo->rawsz;
+ const unsigned offset = sizeof(struct ondisk_untracked_cache);
+ const unsigned exclude_per_dir_offset = offset + 2 * hashsz;
if (sz <= 1 || end[-1] != '\0')
return NULL;
ident = (const char *)next;
next += ident_len;
- if (next + ouc_size(0) > end)
+ if (next + exclude_per_dir_offset + 1 > end)
return NULL;
uc = xcalloc(1, sizeof(*uc));
strbuf_add(&uc->ident, ident, ident_len);
load_oid_stat(&uc->ss_info_exclude,
next + ouc_offset(info_exclude_stat),
- next + ouc_offset(info_exclude_sha1));
+ next + offset);
load_oid_stat(&uc->ss_excludes_file,
next + ouc_offset(excludes_file_stat),
- next + ouc_offset(excludes_file_sha1));
+ next + offset + hashsz);
uc->dir_flags = get_be32(next + ouc_offset(dir_flags));
- exclude_per_dir = (const char *)next + ouc_offset(exclude_per_dir);
+ exclude_per_dir = (const char *)next + exclude_per_dir_offset;
uc->exclude_per_dir = xstrdup(exclude_per_dir);
/* NUL after exclude_per_dir is covered by sizeof(*ouc) */
- next += ouc_size(strlen(exclude_per_dir));
+ next += exclude_per_dir_offset + strlen(exclude_per_dir) + 1;
if (next >= end)
goto done2;
};
/*Count the number of slashes for string s*/
-extern int count_slashes(const char *s);
+int count_slashes(const char *s);
/*
* The ordering of these constants is significant, with
#define MATCHED_RECURSIVELY 1
#define MATCHED_FNMATCH 2
#define MATCHED_EXACTLY 3
-extern int simple_length(const char *match);
-extern int no_wildcard(const char *string);
-extern char *common_prefix(const struct pathspec *pathspec);
-extern int match_pathspec(const struct index_state *istate,
- const struct pathspec *pathspec,
- const char *name, int namelen,
- int prefix, char *seen, int is_dir);
-extern int report_path_error(const char *ps_matched, const struct pathspec *pathspec, const char *prefix);
-extern int within_depth(const char *name, int namelen, int depth, int max_depth);
-
-extern int fill_directory(struct dir_struct *dir,
- struct index_state *istate,
- const struct pathspec *pathspec);
-extern int read_directory(struct dir_struct *, struct index_state *istate,
- const char *path, int len,
- const struct pathspec *pathspec);
-
-extern int is_excluded_from_list(const char *pathname, int pathlen,
- const char *basename, int *dtype,
- struct exclude_list *el,
- struct index_state *istate);
+int simple_length(const char *match);
+int no_wildcard(const char *string);
+char *common_prefix(const struct pathspec *pathspec);
+int match_pathspec(const struct index_state *istate,
+ const struct pathspec *pathspec,
+ const char *name, int namelen,
+ int prefix, char *seen, int is_dir);
+int report_path_error(const char *ps_matched, const struct pathspec *pathspec);
+int within_depth(const char *name, int namelen, int depth, int max_depth);
+
+int fill_directory(struct dir_struct *dir,
+ struct index_state *istate,
+ const struct pathspec *pathspec);
+int read_directory(struct dir_struct *, struct index_state *istate,
+ const char *path, int len,
+ const struct pathspec *pathspec);
+
+int is_excluded_from_list(const char *pathname, int pathlen,
+ const char *basename, int *dtype,
+ struct exclude_list *el,
+ struct index_state *istate);
struct dir_entry *dir_add_ignored(struct dir_struct *dir,
struct index_state *istate,
const char *pathname, int len);
* these implement the matching logic for dir.c:excluded_from_list and
* attr.c:path_matches()
*/
-extern int match_basename(const char *, int,
- const char *, int, int, unsigned);
-extern int match_pathname(const char *, int,
- const char *, int,
- const char *, int, int, unsigned);
-
-extern struct exclude *last_exclude_matching(struct dir_struct *dir,
- struct index_state *istate,
- const char *name, int *dtype);
-
-extern int is_excluded(struct dir_struct *dir,
- struct index_state *istate,
- const char *name, int *dtype);
-
-extern struct exclude_list *add_exclude_list(struct dir_struct *dir,
- int group_type, const char *src);
-extern int add_excludes_from_file_to_list(const char *fname, const char *base, int baselen,
- struct exclude_list *el, struct index_state *istate);
-extern void add_excludes_from_file(struct dir_struct *, const char *fname);
-extern int add_excludes_from_blob_to_list(struct object_id *oid,
- const char *base, int baselen,
- struct exclude_list *el);
-extern void parse_exclude_pattern(const char **string, int *patternlen, unsigned *flags, int *nowildcardlen);
-extern void add_exclude(const char *string, const char *base,
- int baselen, struct exclude_list *el, int srcpos);
-extern void clear_exclude_list(struct exclude_list *el);
-extern void clear_directory(struct dir_struct *dir);
-extern int file_exists(const char *);
-
-extern int is_inside_dir(const char *dir);
-extern int dir_inside_of(const char *subdir, const char *dir);
+int match_basename(const char *, int,
+ const char *, int, int, unsigned);
+int match_pathname(const char *, int,
+ const char *, int,
+ const char *, int, int, unsigned);
+
+struct exclude *last_exclude_matching(struct dir_struct *dir,
+ struct index_state *istate,
+ const char *name, int *dtype);
+
+int is_excluded(struct dir_struct *dir,
+ struct index_state *istate,
+ const char *name, int *dtype);
+
+struct exclude_list *add_exclude_list(struct dir_struct *dir,
+ int group_type, const char *src);
+int add_excludes_from_file_to_list(const char *fname, const char *base, int baselen,
+ struct exclude_list *el, struct index_state *istate);
+void add_excludes_from_file(struct dir_struct *, const char *fname);
+int add_excludes_from_blob_to_list(struct object_id *oid,
+ const char *base, int baselen,
+ struct exclude_list *el);
+void parse_exclude_pattern(const char **string, int *patternlen, unsigned *flags, int *nowildcardlen);
+void add_exclude(const char *string, const char *base,
+ int baselen, struct exclude_list *el, int srcpos);
+void clear_exclude_list(struct exclude_list *el);
+void clear_directory(struct dir_struct *dir);
+
+int repo_file_exists(struct repository *repo, const char *path);
+int file_exists(const char *);
+
+int is_inside_dir(const char *dir);
+int dir_inside_of(const char *subdir, const char *dir);
static inline int is_dot_or_dotdot(const char *name)
{
(name[1] == '.' && name[2] == '\0')));
}
-extern int is_empty_dir(const char *dir);
+int is_empty_dir(const char *dir);
-extern void setup_standard_excludes(struct dir_struct *dir);
+void setup_standard_excludes(struct dir_struct *dir);
/* Constants for remove_dir_recursively: */
* This function uses path as temporary scratch space, but restores it
* before returning.
*/
-extern int remove_dir_recursively(struct strbuf *path, int flag);
+int remove_dir_recursively(struct strbuf *path, int flag);
/* tries to remove the path with empty directories along it, ignores ENOENT */
-extern int remove_path(const char *path);
+int remove_path(const char *path);
-extern int fspathcmp(const char *a, const char *b);
-extern int fspathncmp(const char *a, const char *b, size_t count);
+int fspathcmp(const char *a, const char *b);
+int fspathncmp(const char *a, const char *b, size_t count);
/*
 * The prefix part of pattern must not contain wildcards.
*/
struct pathspec_item;
-extern int git_fnmatch(const struct pathspec_item *item,
- const char *pattern, const char *string,
- int prefix);
+int git_fnmatch(const struct pathspec_item *item,
+ const char *pattern, const char *string,
+ int prefix);
-extern int submodule_path_match(const struct index_state *istate,
- const struct pathspec *ps,
- const char *submodule_name,
- char *seen);
+int submodule_path_match(const struct index_state *istate,
+ const struct pathspec *ps,
+ const char *submodule_name,
+ char *seen);
static inline int ce_path_match(const struct index_state *istate,
const struct cache_entry *ce,
* When `recurse_into_nested` is set, recurse into any nested submodules,
* connecting them as well.
*/
-extern void connect_work_tree_and_git_dir(const char *work_tree,
- const char *git_dir,
- int recurse_into_nested);
-extern void relocate_gitdir(const char *path,
- const char *old_git_dir,
- const char *new_git_dir);
+void connect_work_tree_and_git_dir(const char *work_tree,
+ const char *git_dir,
+ int recurse_into_nested);
+void relocate_gitdir(const char *path,
+ const char *old_git_dir,
+ const char *new_git_dir);
#endif
p.argv = args;
p.env = env;
p.use_shell = 1;
+ p.trace2_child_class = "editor";
if (start_command(&p) < 0)
return error("unable to start editor '%s'", editor);
static struct strbuf path = STRBUF_INIT;
struct stat st;
+ if (ce->ce_flags & CE_WT_REMOVE) {
+ if (topath)
+ /*
+ * No content and thus no path to create, so we have
+ * no pathname to return.
+ */
+ BUG("Can't remove entry to a path");
+ unlink_entry(ce);
+ return 0;
+ }
+
if (topath)
return write_entry(ce, topath, state, 1);
(*nr_checkouts)++;
return write_entry(ce, path.buf, state, 0);
}
+
+void unlink_entry(const struct cache_entry *ce)
+{
+ const struct submodule *sub = submodule_from_ce(ce);
+ if (sub) {
+ /* state.force is set at the caller. */
+ submodule_move_head(ce->name, "HEAD", NULL,
+ SUBMODULE_MOVE_HEAD_FORCE);
+ }
+ if (!check_leading_path(ce->name, ce_namelen(ce)))
+ return;
+ if (remove_or_warn(ce->ce_mode, ce->name))
+ return;
+ schedule_dir_for_removal(ce->name, ce_namelen(ce));
+}
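/*
 * A minimal caller-side sketch (loop and function name hypothetical):
 * entries flagged CE_WT_REMOVE are now handled by checkout_entry() itself
 * via unlink_entry(), so creation and removal can share a single pass.
 */
static void apply_entries(struct cache_entry **cache, int nr,
			  const struct checkout *state, int *nr_checkouts)
{
	int i;

	for (i = 0; i < nr; i++)
		/* topath == NULL: write ce->name, or unlink it if removed */
		checkout_entry(cache[i], state, NULL, nr_checkouts);
}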
return -1;
}
+ trace2_cmd_path(buf->buf);
+
return 0;
}
struct argv_array;
-extern void git_set_exec_path(const char *exec_path);
-extern void git_resolve_executable_dir(const char *path);
-extern const char *git_exec_path(void);
-extern void setup_path(void);
-extern const char **prepare_git_cmd(struct argv_array *out, const char **argv);
-extern int execv_git_cmd(const char **argv); /* NULL terminated */
+void git_set_exec_path(const char *exec_path);
+void git_resolve_executable_dir(const char *path);
+const char *git_exec_path(void);
+void setup_path(void);
+const char **prepare_git_cmd(struct argv_array *out, const char **argv);
+int execv_git_cmd(const char **argv); /* NULL terminated */
LAST_ARG_MUST_BE_NULL
-extern int execl_git_cmd(const char *cmd, ...);
-extern char *system_path(const char *path);
+int execl_git_cmd(const char *cmd, ...);
+char *system_path(const char *path);
#endif /* GIT_EXEC_CMD_H */
*/
#define NO_DELTA S_ISUID
+/*
+ * The amount of additional space required in order to write an object into the
+ * current pack. This is the hash lengths at the end of the pack, plus the
+ * length of one object ID.
+ */
+#define PACK_SIZE_THRESHOLD (the_hash_algo->rawsz * 3)
+
struct object_entry {
struct pack_idx_entry idx;
struct object_entry *next;
if (c != last)
die("internal consistency error creating the index");
- tmpfile = write_idx_file(NULL, idx, object_count, &pack_idx_opts, pack_data->sha1);
+ tmpfile = write_idx_file(NULL, idx, object_count, &pack_idx_opts,
+ pack_data->hash);
free(idx);
return tmpfile;
}
struct strbuf name = STRBUF_INIT;
int keep_fd;
- odb_pack_name(&name, pack_data->sha1, "keep");
+ odb_pack_name(&name, pack_data->hash, "keep");
keep_fd = odb_pack_keep(name.buf);
if (keep_fd < 0)
die_errno("cannot create keep file");
if (close(keep_fd))
die_errno("failed to write keep file");
- odb_pack_name(&name, pack_data->sha1, "pack");
+ odb_pack_name(&name, pack_data->hash, "pack");
if (finalize_object_file(pack_data->pack_name, name.buf))
die("cannot store pack file");
- odb_pack_name(&name, pack_data->sha1, "idx");
+ odb_pack_name(&name, pack_data->hash, "idx");
if (finalize_object_file(curr_index_name, name.buf))
die("cannot store index file");
free((void *)curr_index_name);
for (k = 0; k < pack_id; k++) {
struct packed_git *p = all_packs[k];
- odb_pack_name(&name, p->sha1, "keep");
+ odb_pack_name(&name, p->hash, "keep");
unlink_or_warn(name.buf);
}
strbuf_release(&name);
close_pack_windows(pack_data);
finalize_hashfile(pack_file, cur_pack_oid.hash, 0);
- fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1,
- pack_data->pack_name, object_count,
- cur_pack_oid.hash, pack_size);
+ fixup_pack_header_footer(pack_data->pack_fd, pack_data->hash,
+ pack_data->pack_name, object_count,
+ cur_pack_oid.hash, pack_size);
if (object_count <= unpack_limit) {
if (!loosen_small_pack(pack_data)) {
git_deflate_end(&s);
/* Determine if we should auto-checkpoint. */
- if ((max_packsize && (pack_size + 60 + s.total_out) > max_packsize)
- || (pack_size + 60 + s.total_out) < pack_size) {
+ if ((max_packsize
+ && (pack_size + PACK_SIZE_THRESHOLD + s.total_out) > max_packsize)
+ || (pack_size + PACK_SIZE_THRESHOLD + s.total_out) < pack_size) {
/* This new object needs to *not* have the current pack_id. */
e->pack_id = pack_id + 1;
int status = Z_OK;
/* Determine if we should auto-checkpoint. */
- if ((max_packsize && (pack_size + 60 + len) > max_packsize)
- || (pack_size + 60 + len) < pack_size)
+ if ((max_packsize
+ && (pack_size + PACK_SIZE_THRESHOLD + len) > max_packsize)
+ || (pack_size + PACK_SIZE_THRESHOLD + len) < pack_size)
cycle_packfile();
hashfile_checkpoint(pack_file, &checkpoint);
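/*
 * A worked check of the threshold used above, as a sketch: with SHA-1,
 * the_hash_algo->rawsz is 20, so PACK_SIZE_THRESHOLD is 20 * 3 == 60,
 * i.e. exactly the literal the two auto-checkpoint tests used to
 * hard-code; with SHA-256 (rawsz 32) it becomes 96. The second half of
 * each test still guards against unsigned wraparound of pack_size.
 */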
c += e->name->str_len + 1;
hashcpy(e->versions[0].oid.hash, (unsigned char *)c);
hashcpy(e->versions[1].oid.hash, (unsigned char *)c);
- c += GIT_SHA1_RAWSZ;
+ c += the_hash_algo->rawsz;
}
free(buf);
}
strbuf_addf(b, "%o %s%c",
(unsigned int)(e->versions[v].mode & ~NO_DELTA),
e->name->str_dat, '\0');
- strbuf_add(b, e->versions[v].oid.hash, GIT_SHA1_RAWSZ);
+ strbuf_add(b, e->versions[v].oid.hash, the_hash_algo->rawsz);
}
}
}
for (;;) {
- const char *p;
-
if (unread_command_buf) {
unread_command_buf = 0;
} else {
rc->prev->next = rc;
cmd_tail = rc;
}
- if (skip_prefix(command_buf.buf, "get-mark ", &p)) {
- parse_get_mark(p);
- continue;
- }
- if (skip_prefix(command_buf.buf, "cat-blob ", &p)) {
- parse_cat_blob(p);
- continue;
- }
if (command_buf.buf[0] == '#')
continue;
return 0;
unsigned int i, tmp_hex_oid_len, tmp_fullpath_len;
uintmax_t num_notes = 0;
struct object_id oid;
- char realpath[60];
+ /* hex oid + '/' between each pair of hex digits + NUL */
+ char realpath[GIT_MAX_HEXSZ + ((GIT_MAX_HEXSZ / 2) - 1) + 1];
+ const unsigned hexsz = the_hash_algo->hexsz;
if (!root->tree)
load_tree(root);
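/*
 * A worked check of the realpath[] size above, as a sketch: a fully
 * fanned-out note path is the hex object name with a '/' after every
 * pair of hex digits, so it needs hexsz + (hexsz / 2 - 1) + 1 bytes --
 * 40 + 19 + 1 == 60 for SHA-1 (the old hard-coded size) and
 * 64 + 31 + 1 == 96 for SHA-256, which GIT_MAX_HEXSZ covers.
 */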
* of 2 chars.
*/
if (!e->versions[1].mode ||
- tmp_hex_oid_len > GIT_SHA1_HEXSZ ||
+ tmp_hex_oid_len > hexsz ||
e->name->str_len % 2)
continue;
tmp_fullpath_len += e->name->str_len;
fullpath[tmp_fullpath_len] = '\0';
- if (tmp_hex_oid_len == GIT_SHA1_HEXSZ && !get_oid_hex(hex_oid, &oid)) {
+ if (tmp_hex_oid_len == hexsz && !get_oid_hex(hex_oid, &oid)) {
/* This is a note entry */
if (fanout == 0xff) {
/* Counting mode, no rename */
strbuf_addstr(&uq, p);
p = uq.buf;
}
- read_next_command();
- parse_and_store_blob(&last_blob, &oid, 0);
+ while (read_next_command() != EOF) {
+ const char *v;
+ if (skip_prefix(command_buf.buf, "cat-blob ", &v))
+ parse_cat_blob(v);
+ else {
+ parse_and_store_blob(&last_blob, &oid, 0);
+ break;
+ }
+ }
} else {
enum object_type expected = S_ISDIR(mode) ?
OBJ_TREE: OBJ_BLOB;
struct object_entry *oe;
struct branch *s;
struct object_id oid, commit_oid;
- char path[60];
+ char path[GIT_MAX_RAWSZ * 3];
uint16_t inline_data = 0;
unsigned char new_fanout;
char *buf = read_object_with_reference(&commit_oid,
commit_type, &size,
&commit_oid);
- if (!buf || size < 46)
+ if (!buf || size < the_hash_algo->hexsz + 6)
die("Not a valid commit: %s", p);
free(buf);
} else
static void parse_from_commit(struct branch *b, char *buf, unsigned long size)
{
- if (!buf || size < GIT_SHA1_HEXSZ + 6)
+ if (!buf || size < the_hash_algo->hexsz + 6)
die("Not a valid commit: %s", oid_to_hex(&b->oid));
if (memcmp("tree ", buf, 5)
|| get_oid_hex(buf + 5, &b->branch_tree.versions[1].oid))
char *buf = read_object_with_reference(&n->oid,
commit_type,
&size, &n->oid);
- if (!buf || size < 46)
+ if (!buf || size < the_hash_algo->hexsz + 6)
die("Not a valid commit: %s", from);
free(buf);
} else
file_change_deleteall(b);
else if (skip_prefix(command_buf.buf, "ls ", &v))
parse_ls(v, b);
+ else if (skip_prefix(command_buf.buf, "cat-blob ", &v))
+ parse_cat_blob(v);
else {
unread_command_buf = 1;
break;
die("Unknown mark: %s", command_buf.buf);
xsnprintf(output, sizeof(output), "%s\n", oid_to_hex(&oe->idx.oid));
- cat_blob_write(output, GIT_SHA1_HEXSZ + 1);
+ cat_blob_write(output, the_hash_algo->hexsz + 1);
}
static void parse_cat_blob(const char *p)
{
unsigned long size;
char *buf = NULL;
+ const unsigned hexsz = the_hash_algo->hexsz;
+
if (!oe) {
enum object_type type = oid_object_info(the_repository, oid,
NULL);
/* Peel one layer. */
switch (oe->type) {
case OBJ_TAG:
- if (size < GIT_SHA1_HEXSZ + strlen("object ") ||
+ if (size < hexsz + strlen("object ") ||
get_oid_hex(buf + strlen("object "), oid))
die("Invalid SHA1 in tag: %s", command_buf.buf);
break;
case OBJ_COMMIT:
- if (size < GIT_SHA1_HEXSZ + strlen("tree ") ||
+ if (size < hexsz + strlen("tree ") ||
get_oid_hex(buf + strlen("tree "), oid))
die("Invalid SHA1 in commit: %s", command_buf.buf);
}
return e;
}
-static void print_ls(int mode, const unsigned char *sha1, const char *path)
+static void print_ls(int mode, const unsigned char *hash, const char *path)
{
static struct strbuf line = STRBUF_INIT;
/* mode SP type SP object_name TAB path LF */
strbuf_reset(&line);
strbuf_addf(&line, "%06o %s %s\t",
- mode & ~NO_DELTA, type, sha1_to_hex(sha1));
+ mode & ~NO_DELTA, type, hash_to_hex(hash));
quote_c_style(path, &line, NULL, 0);
strbuf_addch(&line, '\n');
}
const char *v;
if (!strcmp("blob", command_buf.buf))
parse_new_blob();
- else if (skip_prefix(command_buf.buf, "ls ", &v))
- parse_ls(v, NULL);
else if (skip_prefix(command_buf.buf, "commit ", &v))
parse_new_commit(v);
else if (skip_prefix(command_buf.buf, "tag ", &v))
parse_new_tag(v);
else if (skip_prefix(command_buf.buf, "reset ", &v))
parse_reset_branch(v);
+ else if (skip_prefix(command_buf.buf, "ls ", &v))
+ parse_ls(v, NULL);
+ else if (skip_prefix(command_buf.buf, "cat-blob ", &v))
+ parse_cat_blob(v);
+ else if (skip_prefix(command_buf.buf, "get-mark ", &v))
+ parse_get_mark(v);
else if (!strcmp("checkpoint", command_buf.buf))
parse_checkpoint();
else if (!strcmp("done", command_buf.buf))
if (args->stateless_rpc) {
send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
packet_flush(fd);
- } else
- write_or_die(fd, buf->buf, buf->len);
+ } else {
+ if (write_in_full(fd, buf->buf, buf->len) < 0)
+ die_errno(_("unable to write to remote"));
+ }
}
static void insert_one_alternate_object(struct fetch_negotiator *negotiator,
next = ref->next;
if (starts_with(ref->name, "refs/") &&
- check_refname_format(ref->name, 0))
- ; /* trash */
- else {
+ check_refname_format(ref->name, 0)) {
+ /*
+ * trash or a peeled value; do not even add it to
+ * unmatched list
+ */
+ free_one_ref(ref);
+ continue;
+ } else {
while (i < nr_sought) {
int cmp = strcmp(ref->name, sought[i]->name);
if (cmp < 0)
}
oidset_clear(&tip_oids);
- for (ref = unmatched; ref; ref = next) {
- next = ref->next;
- free(ref);
- }
+ free_refs(unmatched);
*refs = newlist;
}
/* Send request */
packet_buf_flush(&req_buf);
- write_or_die(fd_out, req_buf.buf, req_buf.len);
+ if (write_in_full(fd_out, req_buf.buf, req_buf.len) < 0)
+ die_errno(_("unable to write request to remote"));
strbuf_release(&req_buf);
return ret;
}
static void receive_shallow_info(struct fetch_pack_args *args,
- struct packet_reader *reader)
+ struct packet_reader *reader,
+ struct oid_array *shallows,
+ struct shallow_info *si)
{
- int line_received = 0;
+ int unshallow_received = 0;
process_section_header(reader, "shallow-info", 0);
while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
if (skip_prefix(reader->line, "shallow ", &arg)) {
if (get_oid_hex(arg, &oid))
die(_("invalid shallow line: %s"), reader->line);
- register_shallow(the_repository, &oid);
- line_received = 1;
+ oid_array_append(shallows, &oid);
continue;
}
if (skip_prefix(reader->line, "unshallow ", &arg)) {
die(_("error in object: %s"), reader->line);
if (unregister_shallow(&oid))
die(_("no shallow found: %s"), reader->line);
- line_received = 1;
+ unshallow_received = 1;
continue;
}
die(_("expected shallow/unshallow, got %s"), reader->line);
reader->status != PACKET_READ_DELIM)
die(_("error processing shallow info: %d"), reader->status);
- if (line_received) {
+ if (args->deepen || unshallow_received) {
+ /*
+ * Treat these as shallow lines caused by our depth settings.
+ * In v0, these lines cannot cause refs to be rejected; do the
+ * same.
+ */
+ int i;
+
+ for (i = 0; i < shallows->nr; i++)
+ register_shallow(the_repository, &shallows->oid[i]);
setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
NULL);
args->deepen = 1;
+ } else if (shallows->nr) {
+ /*
+ * Treat these as shallow lines caused by the remote being
+ * shallow. In v0, remote refs that reach these objects are
+ * rejected (unless --update-shallow is set); do the same.
+ */
+ prepare_shallow_info(si, shallows);
+ if (si->nr_ours || si->nr_theirs)
+ alternate_shallow_file =
+ setup_temporary_shallow(si->shallow);
+ else
+ alternate_shallow_file = NULL;
} else {
alternate_shallow_file = NULL;
}
}
+static int cmp_name_ref(const void *name, const void *ref)
+{
+ return strcmp(name, (*(struct ref **)ref)->name);
+}
+
static void receive_wanted_refs(struct packet_reader *reader,
struct ref **sought, int nr_sought)
{
while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
struct object_id oid;
const char *end;
- int i;
+ struct ref **found;
if (parse_oid_hex(reader->line, &oid, &end) || *end++ != ' ')
die(_("expected wanted-ref, got '%s'"), reader->line);
- for (i = 0; i < nr_sought; i++) {
- if (!strcmp(end, sought[i]->name)) {
- oidcpy(&sought[i]->old_oid, &oid);
- break;
- }
- }
-
- if (i == nr_sought)
+ found = bsearch(end, sought, nr_sought, sizeof(*sought),
+ cmp_name_ref);
+ if (!found)
die(_("unexpected wanted-ref: '%s'"), reader->line);
+ oidcpy(&(*found)->old_oid, &oid);
}
if (reader->status != PACKET_READ_DELIM)
int fd[2],
const struct ref *orig_ref,
struct ref **sought, int nr_sought,
+ struct oid_array *shallows,
+ struct shallow_info *si,
char **pack_lockfile)
{
struct ref *ref = copy_ref_list(orig_ref);
case FETCH_GET_PACK:
/* Check for shallow-info section */
if (process_section_header(&reader, "shallow-info", 1))
- receive_shallow_info(args, &reader);
+ receive_shallow_info(args, &reader, shallows, si);
if (process_section_header(&reader, "wanted-refs", 1))
receive_wanted_refs(&reader, sought, nr_sought);
}
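/*
 * A minimal sketch of the precondition for the bsearch() in
 * receive_wanted_refs() above: sought[] must already be sorted by
 * refname with the same ordering cmp_name_ref() uses, e.g. (comparator
 * and call site hypothetical):
 */
static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *(const struct ref **)a_;
	const struct ref *b = *(const struct ref **)b_;

	return strcmp(a->name, b->name);
}
/* ... QSORT(sought, nr_sought, cmp_ref_by_name); before the fetch begins */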
struct ref *fetch_pack(struct fetch_pack_args *args,
- int fd[], struct child_process *conn,
+ int fd[],
const struct ref *ref,
- const char *dest,
struct ref **sought, int nr_sought,
struct oid_array *shallow,
char **pack_lockfile,
{
struct ref *ref_cpy;
struct shallow_info si;
+ struct oid_array shallows_scratch = OID_ARRAY_INIT;
fetch_pack_setup();
if (nr_sought)
packet_flush(fd[1]);
die(_("no matching remote head"));
}
- prepare_shallow_info(&si, shallow);
- if (version == protocol_v2)
+ if (version == protocol_v2) {
+ if (shallow->nr)
+ BUG("Protocol V2 does not provide shallows at this point in the fetch");
+ memset(&si, 0, sizeof(si));
ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
+ &shallows_scratch, &si,
pack_lockfile);
- else
+ } else {
+ prepare_shallow_info(&si, shallow);
ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
&si, pack_lockfile);
+ }
reprepare_packed_git(the_repository);
if (!args->cloning && args->deepen) {
update_shallow(args, sought, nr_sought, &si);
cleanup:
clear_shallow_info(&si);
+ oid_array_clear(&shallows_scratch);
return ref_cpy;
}
* marked as such.
*/
struct ref *fetch_pack(struct fetch_pack_args *args,
- int fd[], struct child_process *conn,
+ int fd[],
const struct ref *ref,
- const char *dest,
struct ref **sought,
int nr_sought,
struct oid_array *shallow,
#define FMT_MERGE_MSG_H
extern int merge_log_config;
-extern int fmt_merge_msg_config(const char *key, const char *value, void *cb);
+int fmt_merge_msg_config(const char *key, const char *value, void *cb);
#endif /* FMT_MERGE_MSG_H */
o_name = NULL;
while (desc.size) {
- unsigned mode;
+ unsigned short mode;
const char *name;
const struct object_id *oid;
* Read the fsmonitor index extension and (if configured) restore the
* CE_FSMONITOR_VALID state.
*/
-extern int read_fsmonitor_extension(struct index_state *istate, const void *data, unsigned long sz);
+int read_fsmonitor_extension(struct index_state *istate, const void *data, unsigned long sz);
/*
* Fill the fsmonitor_dirty ewah bits with their state from the index,
* before it is split during writing.
*/
-extern void fill_fsmonitor_bitmap(struct index_state *istate);
+void fill_fsmonitor_bitmap(struct index_state *istate);
/*
* Write the CE_FSMONITOR_VALID state into the fsmonitor index
* extension. Reads from the fsmonitor_dirty ewah in the index.
*/
-extern void write_fsmonitor_extension(struct strbuf *sb, struct index_state *istate);
+void write_fsmonitor_extension(struct strbuf *sb, struct index_state *istate);
/*
* Add/remove the fsmonitor index extension
*/
-extern void add_fsmonitor(struct index_state *istate);
-extern void remove_fsmonitor(struct index_state *istate);
+void add_fsmonitor(struct index_state *istate);
+void remove_fsmonitor(struct index_state *istate);
/*
* Add/remove the fsmonitor index extension as necessary based on the current
* core.fsmonitor setting.
*/
-extern void tweak_fsmonitor(struct index_state *istate);
+void tweak_fsmonitor(struct index_state *istate);
/*
* Run the configured fsmonitor integration script and clear the
* any corresponding untracked cache directory structures. Optimized to only
* run the first time it is called.
*/
-extern void refresh_fsmonitor(struct index_state *istate);
+void refresh_fsmonitor(struct index_state *istate);
/*
* Set the given cache entries CE_FSMONITOR_VALID bit. This should be
}
command_list () {
- grep -v '^#' "$1"
+ eval "grep -ve '^#' $exclude_programs" <"$1"
}
get_categories () {
EOF
}
+exclude_programs=
+while test "--exclude-program" = "$1"
+do
+ shift
+ exclude_programs="$exclude_programs -e \"^$1 \""
+ shift
+done
+
echo "/* Automatically generated by generate-cmdlist.sh */
struct cmdname_help {
const char *name;
#define FORMAT_PRESERVING(n) __attribute__((format_arg(n)))
-extern int use_gettext_poison(void);
+int use_gettext_poison(void);
#ifndef NO_GETTEXT
-extern void git_setup_gettext(void);
-extern int gettext_width(const char *s);
+void git_setup_gettext(void);
+int gettext_width(const char *s);
#else
static inline void git_setup_gettext(void)
{
#endif
const char *get_preferred_languages(void);
-extern int is_utf8_locale(void);
+int is_utf8_locale(void);
#endif
#include "compat/win32/path-utils.h"
#include "compat/mingw.h"
#elif defined(_MSC_VER)
+#include "compat/win32/path-utils.h"
#include "compat/msvc.h"
#else
#include <sys/utsname.h>
#ifdef MKDIR_WO_TRAILING_SLASH
#define mkdir(a,b) compat_mkdir_wo_trailing_slash((a),(b))
-extern int compat_mkdir_wo_trailing_slash(const char*, mode_t);
+int compat_mkdir_wo_trailing_slash(const char*, mode_t);
#endif
#ifdef NO_STRUCT_ITIMERVAL
#include <libgen.h>
#else
#define basename gitbasename
-extern char *gitbasename(char *);
+char *gitbasename(char *);
#define dirname gitdirname
-extern char *gitdirname(char *);
+char *gitdirname(char *);
#endif
#ifndef NO_ICONV
struct strbuf;
/* General helper functions */
-extern void vreportf(const char *prefix, const char *err, va_list params);
-extern NORETURN void usage(const char *err);
-extern NORETURN void usagef(const char *err, ...) __attribute__((format (printf, 1, 2)));
-extern NORETURN void die(const char *err, ...) __attribute__((format (printf, 1, 2)));
-extern NORETURN void die_errno(const char *err, ...) __attribute__((format (printf, 1, 2)));
-extern int error(const char *err, ...) __attribute__((format (printf, 1, 2)));
-extern int error_errno(const char *err, ...) __attribute__((format (printf, 1, 2)));
-extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2)));
-extern void warning_errno(const char *err, ...) __attribute__((format (printf, 1, 2)));
+void vreportf(const char *prefix, const char *err, va_list params);
+NORETURN void usage(const char *err);
+NORETURN void usagef(const char *err, ...) __attribute__((format (printf, 1, 2)));
+NORETURN void die(const char *err, ...) __attribute__((format (printf, 1, 2)));
+NORETURN void die_errno(const char *err, ...) __attribute__((format (printf, 1, 2)));
+int error(const char *err, ...) __attribute__((format (printf, 1, 2)));
+int error_errno(const char *err, ...) __attribute__((format (printf, 1, 2)));
+void warning(const char *err, ...) __attribute__((format (printf, 1, 2)));
+void warning_errno(const char *err, ...) __attribute__((format (printf, 1, 2)));
#ifndef NO_OPENSSL
#ifdef APPLE_COMMON_CRYPTO
#define error_errno(...) (error_errno(__VA_ARGS__), const_error())
#endif
-extern void set_die_routine(NORETURN_PTR void (*routine)(const char *err, va_list params));
-extern void set_error_routine(void (*routine)(const char *err, va_list params));
+void set_die_routine(NORETURN_PTR void (*routine)(const char *err, va_list params));
+void set_error_routine(void (*routine)(const char *err, va_list params));
extern void (*get_error_routine(void))(const char *err, va_list params);
-extern void set_warn_routine(void (*routine)(const char *warn, va_list params));
+void set_warn_routine(void (*routine)(const char *warn, va_list params));
extern void (*get_warn_routine(void))(const char *warn, va_list params);
-extern void set_die_is_recursing_routine(int (*routine)(void));
+void set_die_is_recursing_routine(int (*routine)(void));
-extern int starts_with(const char *str, const char *prefix);
-extern int istarts_with(const char *str, const char *prefix);
+int starts_with(const char *str, const char *prefix);
+int istarts_with(const char *str, const char *prefix);
/*
* If the string "str" begins with the string found in "prefix", return 1.
#define mmap git_mmap
#define munmap git_munmap
-extern void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset);
-extern int git_munmap(void *start, size_t length);
+void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset);
+int git_munmap(void *start, size_t length);
#else /* NO_MMAP || USE_WIN32_MMAP */
#undef stat
#endif
#define stat(path, buf) git_stat(path, buf)
-extern int git_stat(const char *, struct stat *);
+int git_stat(const char *, struct stat *);
#ifdef fstat
#undef fstat
#endif
#define fstat(fd, buf) git_fstat(fd, buf)
-extern int git_fstat(int, struct stat *);
+int git_fstat(int, struct stat *);
#ifdef lstat
#undef lstat
#endif
#define lstat(path, buf) git_lstat(path, buf)
-extern int git_lstat(const char *, struct stat *);
+int git_lstat(const char *, struct stat *);
#endif
#define DEFAULT_PACKED_GIT_LIMIT \
#ifdef NO_PREAD
#define pread git_pread
-extern ssize_t git_pread(int fd, void *buf, size_t count, off_t offset);
+ssize_t git_pread(int fd, void *buf, size_t count, off_t offset);
#endif
/*
* Forward decl that will remind us if its twin in cache.h changes.
* This function is used in compat/pread.c. But we can't include
* cache.h there.
*/
-extern ssize_t read_in_full(int fd, void *buf, size_t count);
+ssize_t read_in_full(int fd, void *buf, size_t count);
#ifdef NO_SETENV
#define setenv gitsetenv
-extern int gitsetenv(const char *, const char *, int);
+int gitsetenv(const char *, const char *, int);
#endif
#ifdef NO_MKDTEMP
#define mkdtemp gitmkdtemp
-extern char *gitmkdtemp(char *);
+char *gitmkdtemp(char *);
#endif
#ifdef NO_UNSETENV
#define unsetenv gitunsetenv
-extern void gitunsetenv(const char *);
+void gitunsetenv(const char *);
#endif
#ifdef NO_STRCASESTR
#define strcasestr gitstrcasestr
-extern char *gitstrcasestr(const char *haystack, const char *needle);
+char *gitstrcasestr(const char *haystack, const char *needle);
#endif
#ifdef NO_STRLCPY
#define strlcpy gitstrlcpy
-extern size_t gitstrlcpy(char *, const char *, size_t);
+size_t gitstrlcpy(char *, const char *, size_t);
#endif
#ifdef NO_STRTOUMAX
#define strtoumax gitstrtoumax
-extern uintmax_t gitstrtoumax(const char *, char **, int);
+uintmax_t gitstrtoumax(const char *, char **, int);
#define strtoimax gitstrtoimax
-extern intmax_t gitstrtoimax(const char *, char **, int);
+intmax_t gitstrtoimax(const char *, char **, int);
#endif
#ifdef NO_HSTRERROR
#define hstrerror githstrerror
-extern const char *githstrerror(int herror);
+const char *githstrerror(int herror);
#endif
#ifdef NO_MEMMEM
# endif
# define fopen(a,b) git_fopen(a,b)
# endif
-extern FILE *git_fopen(const char*, const char*);
+FILE *git_fopen(const char*, const char*);
#endif
#ifdef SNPRINTF_RETURNS_BOGUS
#undef snprintf
#endif
#define snprintf git_snprintf
-extern int git_snprintf(char *str, size_t maxsize,
- const char *format, ...);
+int git_snprintf(char *str, size_t maxsize,
+ const char *format, ...);
#ifdef vsnprintf
#undef vsnprintf
#endif
#define vsnprintf git_vsnprintf
-extern int git_vsnprintf(char *str, size_t maxsize,
- const char *format, va_list ap);
+int git_vsnprintf(char *str, size_t maxsize,
+ const char *format, va_list ap);
#endif
#ifdef __GLIBC_PREREQ
#ifdef NO_PTHREADS
#define atexit git_atexit
-extern int git_atexit(void (*handler)(void));
+int git_atexit(void (*handler)(void));
#endif
typedef void (*try_to_free_t)(size_t);
-extern try_to_free_t set_try_to_free_routine(try_to_free_t);
+try_to_free_t set_try_to_free_routine(try_to_free_t);
static inline size_t st_add(size_t a, size_t b)
{
# define xalloca(size) (xmalloc(size))
# define xalloca_free(p) (free(p))
#endif
-extern char *xstrdup(const char *str);
-extern void *xmalloc(size_t size);
-extern void *xmallocz(size_t size);
-extern void *xmallocz_gently(size_t size);
-extern void *xmemdupz(const void *data, size_t len);
-extern char *xstrndup(const char *str, size_t len);
-extern void *xrealloc(void *ptr, size_t size);
-extern void *xcalloc(size_t nmemb, size_t size);
-extern void *xmmap(void *start, size_t length, int prot, int flags, int fd, off_t offset);
-extern void *xmmap_gently(void *start, size_t length, int prot, int flags, int fd, off_t offset);
-extern int xopen(const char *path, int flags, ...);
-extern ssize_t xread(int fd, void *buf, size_t len);
-extern ssize_t xwrite(int fd, const void *buf, size_t len);
-extern ssize_t xpread(int fd, void *buf, size_t len, off_t offset);
-extern int xdup(int fd);
-extern FILE *xfopen(const char *path, const char *mode);
-extern FILE *xfdopen(int fd, const char *mode);
-extern int xmkstemp(char *temp_filename);
-extern int xmkstemp_mode(char *temp_filename, int mode);
-extern char *xgetcwd(void);
-extern FILE *fopen_for_writing(const char *path);
-extern FILE *fopen_or_warn(const char *path, const char *mode);
+char *xstrdup(const char *str);
+void *xmalloc(size_t size);
+void *xmallocz(size_t size);
+void *xmallocz_gently(size_t size);
+void *xmemdupz(const void *data, size_t len);
+char *xstrndup(const char *str, size_t len);
+void *xrealloc(void *ptr, size_t size);
+void *xcalloc(size_t nmemb, size_t size);
+void *xmmap(void *start, size_t length, int prot, int flags, int fd, off_t offset);
+void *xmmap_gently(void *start, size_t length, int prot, int flags, int fd, off_t offset);
+int xopen(const char *path, int flags, ...);
+ssize_t xread(int fd, void *buf, size_t len);
+ssize_t xwrite(int fd, const void *buf, size_t len);
+ssize_t xpread(int fd, void *buf, size_t len, off_t offset);
+int xdup(int fd);
+FILE *xfopen(const char *path, const char *mode);
+FILE *xfdopen(int fd, const char *mode);
+int xmkstemp(char *temp_filename);
+int xmkstemp_mode(char *temp_filename, int mode);
+char *xgetcwd(void);
+FILE *fopen_for_writing(const char *path);
+FILE *fopen_or_warn(const char *path, const char *mode);
/*
* FREE_AND_NULL(ptr) is like free(ptr) followed by ptr = NULL. Note
}
__attribute__((format (printf, 3, 4)))
-extern int xsnprintf(char *dst, size_t max, const char *fmt, ...);
+int xsnprintf(char *dst, size_t max, const char *fmt, ...);
#ifndef HOST_NAME_MAX
#define HOST_NAME_MAX 256
#endif
-extern int xgethostname(char *buf, size_t len);
+int xgethostname(char *buf, size_t len);
/* in ctype.c, for kwset users */
extern const unsigned char tolower_trans_tbl[256];
#ifdef FILENO_IS_A_MACRO
int git_fileno(FILE *stream);
-# ifndef COMPAT_CODE
+# ifndef COMPAT_CODE_FILENO
# undef fileno
# define fileno(p) git_fileno(p)
# endif
#endif
+#ifdef NEED_ACCESS_ROOT_HANDLER
+int git_access(const char *path, int mode);
+# ifndef COMPAT_CODE_ACCESS
+# ifdef access
+# undef access
+# endif
+# define access(path, mode) git_access(path, mode)
+# endif
+#endif
+
/*
* Our code often opens a path to an optional file, to work on its
* contents when we can successfully open it. We can ignore a failure
return (errno_ == ENOENT || errno_ == ENOTDIR);
}
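/*
 * A minimal usage sketch of the pattern described above (essentially what
 * fopen_or_warn(), declared earlier, already provides; path handling
 * hypothetical): a missing optional file is not an error, anything else
 * is worth reporting.
 */
static FILE *fopen_optional(const char *path)
{
	FILE *fp = fopen(path, "r");

	if (!fp && !is_missing_file_error(errno))
		warning_errno("could not open '%s'", path);
	return fp;	/* NULL: absent or unreadable */
}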
-extern int cmd_main(int, const char **);
+int cmd_main(int, const char **);
+
+/*
+ * Intercept all calls to exit() and route them to trace2 to
+ * optionally emit a message before calling the real exit().
+ */
+int trace2_cmd_exit_fl(const char *file, int line, int code);
+#define exit(code) exit(trace2_cmd_exit_fl(__FILE__, __LINE__, (code)))
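/*
 * A minimal sketch of what the macro above does (file and line values
 * hypothetical): a call such as
 *
 *	exit(128);
 *
 * expands to
 *
 *	exit(trace2_cmd_exit_fl("builtin/foo.c", 42, (128)));
 *
 * so trace2 can log the exit event (the code is returned unchanged)
 * before the real exit() runs.
 */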
/*
* You can mark a stack variable with UNLEAK(var) to avoid it being
* an annotation, and does nothing in non-leak-checking builds.
*/
#ifdef SUPPRESS_ANNOTATED_LEAKS
-extern void unleak_memory(const void *ptr, size_t len);
+void unleak_memory(const void *ptr, size_t len);
#define UNLEAK(var) unleak_memory(&(var), sizeof(var))
#else
#define UNLEAK(var) do {} while (0)
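/*
 * A minimal usage sketch (command name hypothetical): annotate a buffer
 * that stays reachable until process exit right before a builtin returns,
 * so leak checkers stay quiet while normal builds pay nothing.
 */
int cmd_foo(int argc, const char **argv, const char *prefix)
{
	struct strbuf buf = STRBUF_INIT;

	strbuf_addstr(&buf, "some accumulated output");
	/* ... print or otherwise use buf ... */
	UNLEAK(buf);
	return 0;
}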
then
merge_tool="$GIT_DIFF_TOOL"
else
- merge_tool="$(get_merge_tool)" || exit
+ merge_tool="$(get_merge_tool)"
fi
fi
+++ /dev/null
-#!/bin/sh
-#
-# Copyright (c) 2005 Junio C Hamano.
-#
-
-SUBDIRECTORY_OK=Yes
-OPTIONS_KEEPDASHDASH=
-OPTIONS_STUCKLONG=t
-OPTIONS_SPEC="\
-git rebase [-i] [options] [--exec <cmd>] [--onto <newbase>] [<upstream>] [<branch>]
-git rebase [-i] [options] [--exec <cmd>] [--onto <newbase>] --root [<branch>]
-git rebase --continue | --abort | --skip | --edit-todo
---
- Available options are
-v,verbose! display a diffstat of what changed upstream
-q,quiet! be quiet. implies --no-stat
-autostash automatically stash/stash pop before and after
-fork-point use 'merge-base --fork-point' to refine upstream
-onto=! rebase onto given branch instead of upstream
-r,rebase-merges? try to rebase merges instead of skipping them
-p,preserve-merges! try to recreate merges instead of ignoring them
-s,strategy=! use the given merge strategy
-X,strategy-option=! pass the argument through to the merge strategy
-no-ff! cherry-pick all commits, even if unchanged
-f,force-rebase! cherry-pick all commits, even if unchanged
-m,merge! use merging strategies to rebase
-i,interactive! let the user edit the list of commits to rebase
-x,exec=! add exec lines after each commit of the editable list
-k,keep-empty preserve empty commits during rebase
-allow-empty-message allow rebasing commits with empty messages
-stat! display a diffstat of what changed upstream
-n,no-stat! do not show diffstat of what changed upstream
-verify allow pre-rebase hook to run
-rerere-autoupdate allow rerere to update index with resolved conflicts
-root! rebase all reachable commits up to the root(s)
-autosquash move commits that begin with squash!/fixup! under -i
-signoff add a Signed-off-by: line to each commit
-committer-date-is-author-date! passed to 'git am'
-ignore-date! passed to 'git am'
-whitespace=! passed to 'git apply'
-ignore-whitespace! passed to 'git apply'
-C=! passed to 'git apply'
-S,gpg-sign? GPG-sign commits
- Actions:
-continue! continue
-abort! abort and check out the original branch
-skip! skip current patch and continue
-edit-todo! edit the todo list during an interactive rebase
-quit! abort but keep HEAD where it is
-show-current-patch! show the patch file being applied or merged
-reschedule-failed-exec automatically reschedule failed exec commands
-"
-. git-sh-setup
-set_reflog_action rebase
-require_work_tree_exists
-cd_to_toplevel
-
-LF='
-'
-ok_to_skip_pre_rebase=
-
-squash_onto=
-unset onto
-unset restrict_revision
-cmd=
-strategy=
-strategy_opts=
-do_merge=
-merge_dir="$GIT_DIR"/rebase-merge
-apply_dir="$GIT_DIR"/rebase-apply
-verbose=
-diffstat=
-test "$(git config --bool rebase.stat)" = true && diffstat=t
-autostash="$(git config --bool rebase.autostash || echo false)"
-fork_point=auto
-git_am_opt=
-git_format_patch_opt=
-rebase_root=
-force_rebase=
-allow_rerere_autoupdate=
-# Non-empty if a rebase was in progress when 'git rebase' was invoked
-in_progress=
-# One of {am, merge, interactive}
-type=
-# One of {"$GIT_DIR"/rebase-apply, "$GIT_DIR"/rebase-merge}
-state_dir=
-# One of {'', continue, skip, abort}, as parsed from command line
-action=
-rebase_merges=
-rebase_cousins=
-preserve_merges=
-autosquash=
-keep_empty=
-allow_empty_message=--allow-empty-message
-signoff=
-reschedule_failed_exec=
-test "$(git config --bool rebase.autosquash)" = "true" && autosquash=t
-case "$(git config --bool commit.gpgsign)" in
-true) gpg_sign_opt=-S ;;
-*) gpg_sign_opt= ;;
-esac
-test "$(git config --bool rebase.reschedulefailedexec)" = "true" &&
-reschedule_failed_exec=--reschedule-failed-exec
-. git-rebase--common
-
-read_basic_state () {
- test -f "$state_dir/head-name" &&
- test -f "$state_dir/onto" &&
- head_name=$(cat "$state_dir"/head-name) &&
- onto=$(cat "$state_dir"/onto) &&
- # We always write to orig-head, but interactive rebase used to write to
- # head. Fall back to reading from head to cover for the case that the
- # user upgraded git with an ongoing interactive rebase.
- if test -f "$state_dir"/orig-head
- then
- orig_head=$(cat "$state_dir"/orig-head)
- else
- orig_head=$(cat "$state_dir"/head)
- fi &&
- test -f "$state_dir"/quiet && GIT_QUIET=t
- test -f "$state_dir"/verbose && verbose=t
- test -f "$state_dir"/strategy && strategy="$(cat "$state_dir"/strategy)"
- test -f "$state_dir"/strategy_opts &&
- strategy_opts="$(cat "$state_dir"/strategy_opts)"
- test -f "$state_dir"/allow_rerere_autoupdate &&
- allow_rerere_autoupdate="$(cat "$state_dir"/allow_rerere_autoupdate)"
- test -f "$state_dir"/gpg_sign_opt &&
- gpg_sign_opt="$(cat "$state_dir"/gpg_sign_opt)"
- test -f "$state_dir"/signoff && {
- signoff="$(cat "$state_dir"/signoff)"
- force_rebase=t
- }
- test -f "$state_dir"/reschedule-failed-exec &&
- reschedule_failed_exec=t
-}
-
-finish_rebase () {
- rm -f "$(git rev-parse --git-path REBASE_HEAD)"
- apply_autostash &&
- { git gc --auto || true; } &&
- rm -rf "$state_dir"
-}
-
-run_interactive () {
- GIT_CHERRY_PICK_HELP="$resolvemsg"
- export GIT_CHERRY_PICK_HELP
-
- test -n "$keep_empty" && keep_empty="--keep-empty"
- test -n "$rebase_merges" && rebase_merges="--rebase-merges"
- test -n "$rebase_cousins" && rebase_cousins="--rebase-cousins"
- test -n "$autosquash" && autosquash="--autosquash"
- test -n "$verbose" && verbose="--verbose"
- test -n "$force_rebase" && force_rebase="--no-ff"
- test -n "$restrict_revision" && \
- restrict_revision="--restrict-revision=^$restrict_revision"
- test -n "$upstream" && upstream="--upstream=$upstream"
- test -n "$onto" && onto="--onto=$onto"
- test -n "$squash_onto" && squash_onto="--squash-onto=$squash_onto"
- test -n "$onto_name" && onto_name="--onto-name=$onto_name"
- test -n "$head_name" && head_name="--head-name=$head_name"
- test -n "$strategy" && strategy="--strategy=$strategy"
- test -n "$strategy_opts" && strategy_opts="--strategy-opts=$strategy_opts"
- test -n "$switch_to" && switch_to="--switch-to=$switch_to"
- test -n "$cmd" && cmd="--cmd=$cmd"
- test -n "$action" && action="--$action"
-
- exec git rebase--interactive "$action" "$keep_empty" "$rebase_merges" "$rebase_cousins" \
- "$upstream" "$onto" "$squash_onto" "$restrict_revision" \
- "$allow_empty_message" "$autosquash" "$verbose" \
- "$force_rebase" "$onto_name" "$head_name" "$strategy" \
- "$strategy_opts" "$cmd" "$switch_to" \
- "$allow_rerere_autoupdate" "$gpg_sign_opt" "$signoff" \
- "$reschedule_failed_exec"
-}
-
-run_specific_rebase () {
- if [ "$interactive_rebase" = implied ]; then
- GIT_SEQUENCE_EDITOR=:
- export GIT_SEQUENCE_EDITOR
- autosquash=
- fi
-
- if test -n "$interactive_rebase" -a -z "$preserve_merges"
- then
- run_interactive
- else
- . git-rebase--$type
-
- if test -z "$preserve_merges"
- then
- git_rebase__$type
- else
- git_rebase__preserve_merges
- fi
- fi
-
- ret=$?
- if test $ret -eq 0
- then
- finish_rebase
- elif test $ret -eq 2 # special exit status for rebase -p
- then
- apply_autostash &&
- rm -rf "$state_dir" &&
- die "Nothing to do"
- fi
- exit $ret
-}
-
-run_pre_rebase_hook () {
- if test -z "$ok_to_skip_pre_rebase" &&
- test -x "$(git rev-parse --git-path hooks/pre-rebase)"
- then
- "$(git rev-parse --git-path hooks/pre-rebase)" ${1+"$@"} ||
- die "$(gettext "The pre-rebase hook refused to rebase.")"
- fi
-}
-
-test -f "$apply_dir"/applying &&
- die "$(gettext "It looks like 'git am' is in progress. Cannot rebase.")"
-
-if test -d "$apply_dir"
-then
- type=am
- state_dir="$apply_dir"
-elif test -d "$merge_dir"
-then
- type=interactive
- if test -d "$merge_dir"/rewritten
- then
- type=preserve-merges
- interactive_rebase=explicit
- preserve_merges=t
- elif test -f "$merge_dir"/interactive
- then
- interactive_rebase=explicit
- fi
- state_dir="$merge_dir"
-fi
-test -n "$type" && in_progress=t
-
-total_argc=$#
-while test $# != 0
-do
- case "$1" in
- --no-verify)
- ok_to_skip_pre_rebase=yes
- ;;
- --verify)
- ok_to_skip_pre_rebase=
- ;;
- --continue|--skip|--abort|--quit|--edit-todo|--show-current-patch)
- test $total_argc -eq 2 || usage
- action=${1##--}
- ;;
- --onto=*)
- onto="${1#--onto=}"
- ;;
- --exec=*)
- cmd="${cmd}exec ${1#--exec=}${LF}"
- test -z "$interactive_rebase" && interactive_rebase=implied
- ;;
- --interactive)
- interactive_rebase=explicit
- ;;
- --keep-empty)
- keep_empty=yes
- ;;
- --allow-empty-message)
- allow_empty_message=--allow-empty-message
- ;;
- --no-keep-empty)
- keep_empty=
- ;;
- --rebase-merges)
- rebase_merges=t
- test -z "$interactive_rebase" && interactive_rebase=implied
- ;;
- --rebase-merges=*)
- rebase_merges=t
- case "${1#*=}" in
- rebase-cousins) rebase_cousins=t;;
- no-rebase-cousins) rebase_cousins=;;
- *) die "Unknown mode: $1";;
- esac
- test -z "$interactive_rebase" && interactive_rebase=implied
- ;;
- --preserve-merges)
- preserve_merges=t
- test -z "$interactive_rebase" && interactive_rebase=implied
- ;;
- --autosquash)
- autosquash=t
- ;;
- --no-autosquash)
- autosquash=
- ;;
- --fork-point)
- fork_point=t
- ;;
- --no-fork-point)
- fork_point=
- ;;
- --merge)
- do_merge=t
- ;;
- --strategy-option=*)
- strategy_opts="$strategy_opts $(git rev-parse --sq-quote "--${1#--strategy-option=}" | sed -e s/^.//)"
- do_merge=t
- test -z "$strategy" && strategy=recursive
- ;;
- --strategy=*)
- strategy="${1#--strategy=}"
- do_merge=t
- ;;
- --no-stat)
- diffstat=
- ;;
- --stat)
- diffstat=t
- ;;
- --autostash)
- autostash=true
- ;;
- --no-autostash)
- autostash=false
- ;;
- --verbose)
- verbose=t
- diffstat=t
- GIT_QUIET=
- ;;
- --quiet)
- GIT_QUIET=t
- git_am_opt="$git_am_opt -q"
- verbose=
- diffstat=
- ;;
- --whitespace=*)
- git_am_opt="$git_am_opt --whitespace=${1#--whitespace=}"
- case "${1#--whitespace=}" in
- fix|strip)
- force_rebase=t
- ;;
- warn|nowarn|error|error-all)
- ;; # okay, known whitespace option
- *)
- die "fatal: Invalid whitespace option: '${1#*=}'"
- ;;
- esac
- ;;
- --ignore-whitespace)
- git_am_opt="$git_am_opt $1"
- ;;
- --signoff)
- signoff=--signoff
- ;;
- --no-signoff)
- signoff=
- ;;
- --committer-date-is-author-date|--ignore-date)
- git_am_opt="$git_am_opt $1"
- force_rebase=t
- ;;
- -C*[!0-9]*)
- die "fatal: switch \`C' expects a numerical value"
- ;;
- -C*)
- git_am_opt="$git_am_opt $1"
- ;;
- --root)
- rebase_root=t
- ;;
- --force-rebase|--no-ff)
- force_rebase=t
- ;;
- --rerere-autoupdate|--no-rerere-autoupdate)
- allow_rerere_autoupdate="$1"
- ;;
- --gpg-sign)
- gpg_sign_opt=-S
- ;;
- --gpg-sign=*)
- gpg_sign_opt="-S${1#--gpg-sign=}"
- ;;
- --reschedule-failed-exec)
- reschedule_failed_exec=--reschedule-failed-exec
- ;;
- --no-reschedule-failed-exec)
- reschedule_failed_exec=
- ;;
- --)
- shift
- break
- ;;
- *)
- usage
- ;;
- esac
- shift
-done
-test $# -gt 2 && usage
-
-if test -n "$action"
-then
- test -z "$in_progress" && die "$(gettext "No rebase in progress?")"
- # Only interactive rebase uses detailed reflog messages
- if test -n "$interactive_rebase" && test "$GIT_REFLOG_ACTION" = rebase
- then
- GIT_REFLOG_ACTION="rebase -i ($action)"
- export GIT_REFLOG_ACTION
- fi
-fi
-
-if test "$action" = "edit-todo" && test -z "$interactive_rebase"
-then
- die "$(gettext "The --edit-todo action can only be used during interactive rebase.")"
-fi
-
-case "$action" in
-continue)
- # Sanity check
- git rev-parse --verify HEAD >/dev/null ||
- die "$(gettext "Cannot read HEAD")"
- git update-index --ignore-submodules --refresh &&
- git diff-files --quiet --ignore-submodules || {
- echo "$(gettext "You must edit all merge conflicts and then
-mark them as resolved using git add")"
- exit 1
- }
- read_basic_state
- run_specific_rebase
- ;;
-skip)
- output git reset --hard HEAD || exit $?
- read_basic_state
- run_specific_rebase
- ;;
-abort)
- git rerere clear
- read_basic_state
- case "$head_name" in
- refs/*)
- git symbolic-ref -m "rebase: aborting" HEAD $head_name ||
- die "$(eval_gettext "Could not move back to \$head_name")"
- ;;
- esac
- output git reset --hard $orig_head
- finish_rebase
- exit
- ;;
-quit)
- exec rm -rf "$state_dir"
- ;;
-edit-todo)
- run_specific_rebase
- ;;
-show-current-patch)
- run_specific_rebase
- die "BUG: run_specific_rebase is not supposed to return here"
- ;;
-esac
-
-# Make sure no rebase is in progress
-if test -n "$in_progress"
-then
- state_dir_base=${state_dir##*/}
- cmd_live_rebase="git rebase (--continue | --abort | --skip)"
- cmd_clear_stale_rebase="rm -fr \"$state_dir\""
- die "
-$(eval_gettext 'It seems that there is already a $state_dir_base directory, and
-I wonder if you are in the middle of another rebase. If that is the
-case, please try
- $cmd_live_rebase
-If that is not the case, please
- $cmd_clear_stale_rebase
-and run me again. I am stopping in case you still have something
-valuable there.')"
-fi
-
-if test -n "$rebase_root" && test -z "$onto"
-then
- test -z "$interactive_rebase" && interactive_rebase=implied
-fi
-
-if test -n "$keep_empty"
-then
- test -z "$interactive_rebase" && interactive_rebase=implied
-fi
-
-actually_interactive=
-if test -n "$interactive_rebase"
-then
- if test -z "$preserve_merges"
- then
- type=interactive
- else
- type=preserve-merges
- fi
- actually_interactive=t
- state_dir="$merge_dir"
-elif test -n "$do_merge"
-then
- interactive_rebase=implied
- type=interactive
- state_dir="$merge_dir"
-else
- type=am
- state_dir="$apply_dir"
-fi
-
-if test -t 2 && test -z "$GIT_QUIET"
-then
- git_format_patch_opt="$git_format_patch_opt --progress"
-fi
-
-incompatible_opts=$(echo " $git_am_opt " | \
- sed -e 's/ -q / /g' -e 's/^ \(.*\) $/\1/')
-if test -n "$incompatible_opts"
-then
- if test -n "$actually_interactive" || test "$do_merge"
- then
- die "$(gettext "fatal: cannot combine am options with either interactive or merge options")"
- fi
-fi
-
-if test -n "$signoff"
-then
- test -n "$preserve_merges" &&
- die "$(gettext "fatal: cannot combine '--signoff' with '--preserve-merges'")"
- git_am_opt="$git_am_opt $signoff"
- force_rebase=t
-fi
-
-if test -n "$preserve_merges"
-then
- # Note: incompatibility with --signoff handled in signoff block above
- # Note: incompatibility with --interactive is just a strong warning;
- # git-rebase.txt caveats with "unless you know what you are doing"
- test -n "$rebase_merges" &&
- die "$(gettext "fatal: cannot combine '--preserve-merges' with '--rebase-merges'")"
-
- test -n "$reschedule_failed_exec" &&
- die "$(gettext "error: cannot combine '--preserve-merges' with '--reschedule-failed-exec'")"
-fi
-
-if test -n "$rebase_merges"
-then
- test -n "$strategy_opts" &&
- die "$(gettext "fatal: cannot combine '--rebase-merges' with '--strategy-option'")"
- test -n "$strategy" &&
- die "$(gettext "fatal: cannot combine '--rebase-merges' with '--strategy'")"
-fi
-
-if test -z "$rebase_root"
-then
- case "$#" in
- 0)
- if ! upstream_name=$(git rev-parse --symbolic-full-name \
- --verify -q @{upstream} 2>/dev/null)
- then
- . git-parse-remote
- error_on_missing_default_upstream "rebase" "rebase" \
- "against" "git rebase $(gettext '<branch>')"
- fi
-
- test "$fork_point" = auto && fork_point=t
- ;;
- *) upstream_name="$1"
- if test "$upstream_name" = "-"
- then
- upstream_name="@{-1}"
- fi
- shift
- ;;
- esac
- upstream=$(peel_committish "${upstream_name}") ||
- die "$(eval_gettext "invalid upstream '\$upstream_name'")"
- upstream_arg="$upstream_name"
-else
- if test -z "$onto"
- then
- empty_tree=$(git hash-object -t tree /dev/null)
- onto=$(git commit-tree $empty_tree </dev/null)
- squash_onto="$onto"
- fi
- unset upstream_name
- unset upstream
- test $# -gt 1 && usage
- upstream_arg=--root
-fi
-
-# Make sure the branch to rebase onto is valid.
-onto_name=${onto-"$upstream_name"}
-case "$onto_name" in
-*...*)
- if left=${onto_name%...*} right=${onto_name#*...} &&
- onto=$(git merge-base --all ${left:-HEAD} ${right:-HEAD})
- then
- case "$onto" in
- ?*"$LF"?*)
- die "$(eval_gettext "\$onto_name: there are more than one merge bases")"
- ;;
- '')
- die "$(eval_gettext "\$onto_name: there is no merge base")"
- ;;
- esac
- else
- die "$(eval_gettext "\$onto_name: there is no merge base")"
- fi
- ;;
-*)
- onto=$(peel_committish "$onto_name") ||
- die "$(eval_gettext "Does not point to a valid commit: \$onto_name")"
- ;;
-esac
-
-# If the branch to rebase is given, that is the branch we will rebase
-# $branch_name -- branch/commit being rebased, or HEAD (already detached)
-# $orig_head -- commit object name of tip of the branch before rebasing
-# $head_name -- refs/heads/<that-branch> or "detached HEAD"
-switch_to=
-case "$#" in
-1)
- # Is it "rebase other $branchname" or "rebase other $commit"?
- branch_name="$1"
- switch_to="$1"
-
- # Is it a local branch?
- if git show-ref --verify --quiet -- "refs/heads/$branch_name" &&
- orig_head=$(git rev-parse -q --verify "refs/heads/$branch_name")
- then
- head_name="refs/heads/$branch_name"
- # If not is it a valid ref (branch or commit)?
- elif orig_head=$(git rev-parse -q --verify "$branch_name")
- then
- head_name="detached HEAD"
-
- else
- die "$(eval_gettext "fatal: no such branch/commit '\$branch_name'")"
- fi
- ;;
-0)
- # Do not need to switch branches, we are already on it.
- if branch_name=$(git symbolic-ref -q HEAD)
- then
- head_name=$branch_name
- branch_name=$(expr "z$branch_name" : 'zrefs/heads/\(.*\)')
- else
- head_name="detached HEAD"
- branch_name=HEAD
- fi
- orig_head=$(git rev-parse --verify HEAD) || exit
- ;;
-*)
- die "BUG: unexpected number of arguments left to parse"
- ;;
-esac
-
-if test "$fork_point" = t
-then
- new_upstream=$(git merge-base --fork-point "$upstream_name" \
- "${switch_to:-HEAD}")
- if test -n "$new_upstream"
- then
- restrict_revision=$new_upstream
- fi
-fi
-
-if test "$autostash" = true && ! (require_clean_work_tree) 2>/dev/null
-then
- stash_sha1=$(git stash create "autostash") ||
- die "$(gettext 'Cannot autostash')"
-
- mkdir -p "$state_dir" &&
- echo $stash_sha1 >"$state_dir/autostash" &&
- stash_abbrev=$(git rev-parse --short $stash_sha1) &&
- echo "$(eval_gettext 'Created autostash: $stash_abbrev')" &&
- git reset --hard
-fi
-
-require_clean_work_tree "rebase" "$(gettext "Please commit or stash them.")"
-
-# Now we are rebasing commits $upstream..$orig_head (or with --root,
-# everything leading up to $orig_head) on top of $onto
-
-# Check if we are already based on $onto with linear history,
-# but this should be done only when upstream and onto are the same
-# and if this is not an interactive rebase.
-mb=$(git merge-base "$onto" "$orig_head")
-if test -z "$actually_interactive" && test "$upstream" = "$onto" &&
- test "$mb" = "$onto" && test -z "$restrict_revision" &&
- # linear history?
- ! (git rev-list --parents "$onto".."$orig_head" | sane_grep " .* ") > /dev/null
-then
- if test -z "$force_rebase"
- then
- # Lazily switch to the target branch if needed...
- test -z "$switch_to" ||
- GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $switch_to" \
- git checkout -q "$switch_to" --
- if test "$branch_name" = "HEAD" &&
- ! git symbolic-ref -q HEAD
- then
- say "$(eval_gettext "HEAD is up to date.")"
- else
- say "$(eval_gettext "Current branch \$branch_name is up to date.")"
- fi
- finish_rebase
- exit 0
- else
- if test "$branch_name" = "HEAD" &&
- ! git symbolic-ref -q HEAD
- then
- say "$(eval_gettext "HEAD is up to date, rebase forced.")"
- else
- say "$(eval_gettext "Current branch \$branch_name is up to date, rebase forced.")"
- fi
- fi
-fi
-
-# If a hook exists, give it a chance to interrupt
-run_pre_rebase_hook "$upstream_arg" "$@"
-
-if test -n "$diffstat"
-then
- if test -n "$verbose"
- then
- if test -z "$mb"
- then
- echo "$(eval_gettext "Changes to \$onto:")"
- else
- echo "$(eval_gettext "Changes from \$mb to \$onto:")"
- fi
- fi
- mb_tree="${mb:-$(git hash-object -t tree /dev/null)}"
- # We want color (if set), but no pager
- GIT_PAGER='' git diff --stat --summary "$mb_tree" "$onto"
-fi
-
-if test -z "$actually_interactive" && test "$mb" = "$orig_head"
-then
- say "$(eval_gettext "Fast-forwarded \$branch_name to \$onto_name.")"
- GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $onto_name" \
- git checkout -q "$onto^0" || die "could not detach HEAD"
- # If the $onto is a proper descendant of the tip of the branch, then
- # we just fast-forwarded.
- git update-ref ORIG_HEAD $orig_head
- move_to_original_branch
- finish_rebase
- exit 0
-fi
-
-test -n "$interactive_rebase" && run_specific_rebase
-
-# Detach HEAD and reset the tree
-say "$(gettext "First, rewinding head to replay your work on top of it...")"
-
-GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $onto_name" \
- git checkout -q "$onto^0" || die "could not detach HEAD"
-git update-ref ORIG_HEAD $orig_head
-
-if test -n "$rebase_root"
-then
- revisions="$onto..$orig_head"
-else
- revisions="${restrict_revision-$upstream}..$orig_head"
-fi
-
-run_specific_rebase
--- /dev/null
+#!/bin/sh
+# Copyright (c) 2007, Nanako Shiraishi
+
+dashless=$(basename "$0" | sed -e 's/-/ /')
+USAGE="list [<options>]
+ or: $dashless show [<stash>]
+ or: $dashless drop [-q|--quiet] [<stash>]
+ or: $dashless ( pop | apply ) [--index] [-q|--quiet] [<stash>]
+ or: $dashless branch <branchname> [<stash>]
+ or: $dashless save [--patch] [-k|--[no-]keep-index] [-q|--quiet]
+ [-u|--include-untracked] [-a|--all] [<message>]
+ or: $dashless [push [--patch] [-k|--[no-]keep-index] [-q|--quiet]
+ [-u|--include-untracked] [-a|--all] [-m <message>]
+ [-- <pathspec>...]]
+ or: $dashless clear"
+
+SUBDIRECTORY_OK=Yes
+OPTIONS_SPEC=
+START_DIR=$(pwd)
+. git-sh-setup
+require_work_tree
+prefix=$(git rev-parse --show-prefix) || exit 1
+cd_to_toplevel
+
+TMP="$GIT_DIR/.git-stash.$$"
+TMPindex=${GIT_INDEX_FILE-"$(git rev-parse --git-path index)"}.stash.$$
+trap 'rm -f "$TMP-"* "$TMPindex"' 0
+
+ref_stash=refs/stash
+
+if git config --get-colorbool color.interactive; then
+ help_color="$(git config --get-color color.interactive.help 'red bold')"
+ reset_color="$(git config --get-color '' reset)"
+else
+ help_color=
+ reset_color=
+fi
+
+no_changes () {
+ git diff-index --quiet --cached HEAD --ignore-submodules -- "$@" &&
+ git diff-files --quiet --ignore-submodules -- "$@" &&
+ (test -z "$untracked" || test -z "$(untracked_files "$@")")
+}
+
+untracked_files () {
+ if test "$1" = "-z"
+ then
+ shift
+ z=-z
+ else
+ z=
+ fi
+ excl_opt=--exclude-standard
+ test "$untracked" = "all" && excl_opt=
+ git ls-files -o $z $excl_opt -- "$@"
+}
+
+prepare_fallback_ident () {
+ if ! git -c user.useconfigonly=yes var GIT_COMMITTER_IDENT >/dev/null 2>&1
+ then
+ GIT_AUTHOR_NAME="git stash"
+ GIT_AUTHOR_EMAIL=git@stash
+ GIT_COMMITTER_NAME="git stash"
+ GIT_COMMITTER_EMAIL=git@stash
+ export GIT_AUTHOR_NAME
+ export GIT_AUTHOR_EMAIL
+ export GIT_COMMITTER_NAME
+ export GIT_COMMITTER_EMAIL
+ fi
+}
+
+clear_stash () {
+ if test $# != 0
+ then
+ die "$(gettext "git stash clear with parameters is unimplemented")"
+ fi
+ if current=$(git rev-parse --verify --quiet $ref_stash)
+ then
+ git update-ref -d $ref_stash $current
+ fi
+}
+
+maybe_quiet () {
+ case "$1" in
+ --keep-stdout)
+ shift
+ if test -n "$GIT_QUIET"
+ then
+ "$@" 2>/dev/null
+ else
+ "$@"
+ fi
+ ;;
+ *)
+ if test -n "$GIT_QUIET"
+ then
+ "$@" >/dev/null 2>&1
+ else
+ "$@"
+ fi
+ ;;
+ esac
+}
+
+create_stash () {
+
+ prepare_fallback_ident
+
+ stash_msg=
+ untracked=
+ while test $# != 0
+ do
+ case "$1" in
+ -m|--message)
+ shift
+ stash_msg=${1?"BUG: create_stash () -m requires an argument"}
+ ;;
+ -m*)
+ stash_msg=${1#-m}
+ ;;
+ --message=*)
+ stash_msg=${1#--message=}
+ ;;
+ -u|--include-untracked)
+ shift
+ untracked=${1?"BUG: create_stash () -u requires an argument"}
+ ;;
+ --)
+ shift
+ break
+ ;;
+ esac
+ shift
+ done
+
+ git update-index -q --refresh
+ if maybe_quiet no_changes "$@"
+ then
+ exit 0
+ fi
+
+ # state of the base commit
+ if b_commit=$(maybe_quiet --keep-stdout git rev-parse --verify HEAD)
+ then
+ head=$(git rev-list --oneline -n 1 HEAD --)
+ elif test -n "$GIT_QUIET"
+ then
+ exit 1
+ else
+ die "$(gettext "You do not have the initial commit yet")"
+ fi
+
+ if branch=$(git symbolic-ref -q HEAD)
+ then
+ branch=${branch#refs/heads/}
+ else
+ branch='(no branch)'
+ fi
+ msg=$(printf '%s: %s' "$branch" "$head")
+
+ # state of the index
+ i_tree=$(git write-tree) &&
+ i_commit=$(printf 'index on %s\n' "$msg" |
+ git commit-tree $i_tree -p $b_commit) ||
+ die "$(gettext "Cannot save the current index state")"
+
+ if test -n "$untracked"
+ then
+ # Untracked files are stored by themselves in a parentless commit, for
+ # ease of unpacking later.
+ u_commit=$(
+ untracked_files -z "$@" | (
+ GIT_INDEX_FILE="$TMPindex" &&
+ export GIT_INDEX_FILE &&
+ rm -f "$TMPindex" &&
+ git update-index -z --add --remove --stdin &&
+ u_tree=$(git write-tree) &&
+ printf 'untracked files on %s\n' "$msg" | git commit-tree $u_tree &&
+ rm -f "$TMPindex"
+ ) ) || die "$(gettext "Cannot save the untracked files")"
+
+ untracked_commit_option="-p $u_commit";
+ else
+ untracked_commit_option=
+ fi
+
+ if test -z "$patch_mode"
+ then
+
+ # state of the working tree
+ w_tree=$( (
+ git read-tree --index-output="$TMPindex" -m $i_tree &&
+ GIT_INDEX_FILE="$TMPindex" &&
+ export GIT_INDEX_FILE &&
+ git diff-index --name-only -z HEAD -- "$@" >"$TMP-stagenames" &&
+ git update-index -z --add --remove --stdin <"$TMP-stagenames" &&
+ git write-tree &&
+ rm -f "$TMPindex"
+ ) ) ||
+ die "$(gettext "Cannot save the current worktree state")"
+
+ else
+
+ rm -f "$TMP-index" &&
+ GIT_INDEX_FILE="$TMP-index" git read-tree HEAD &&
+
+ # find out what the user wants
+ GIT_INDEX_FILE="$TMP-index" \
+ git add--interactive --patch=stash -- "$@" &&
+
+ # state of the working tree
+ w_tree=$(GIT_INDEX_FILE="$TMP-index" git write-tree) ||
+ die "$(gettext "Cannot save the current worktree state")"
+
+ git diff-tree -p HEAD $w_tree -- >"$TMP-patch" &&
+ test -s "$TMP-patch" ||
+ die "$(gettext "No changes selected")"
+
+ rm -f "$TMP-index" ||
+ die "$(gettext "Cannot remove temporary index (can't happen)")"
+
+ fi
+
+ # create the stash
+ if test -z "$stash_msg"
+ then
+ stash_msg=$(printf 'WIP on %s' "$msg")
+ else
+ stash_msg=$(printf 'On %s: %s' "$branch" "$stash_msg")
+ fi
+ w_commit=$(printf '%s\n' "$stash_msg" |
+ git commit-tree $w_tree -p $b_commit -p $i_commit $untracked_commit_option) ||
+ die "$(gettext "Cannot record working tree state")"
+}
+
+store_stash () {
+ while test $# != 0
+ do
+ case "$1" in
+ -m|--message)
+ shift
+ stash_msg="$1"
+ ;;
+ -m*)
+ stash_msg=${1#-m}
+ ;;
+ --message=*)
+ stash_msg=${1#--message=}
+ ;;
+ -q|--quiet)
+ quiet=t
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+ done
+ test $# = 1 ||
+ die "$(eval_gettext "\"$dashless store\" requires one <commit> argument")"
+
+ w_commit="$1"
+ if test -z "$stash_msg"
+ then
+ stash_msg="Created via \"git stash store\"."
+ fi
+
+ git update-ref --create-reflog -m "$stash_msg" $ref_stash $w_commit
+ ret=$?
+ test $ret != 0 && test -z "$quiet" &&
+ die "$(eval_gettext "Cannot update \$ref_stash with \$w_commit")"
+ return $ret
+}
+
+push_stash () {
+ keep_index=
+ patch_mode=
+ untracked=
+ stash_msg=
+ while test $# != 0
+ do
+ case "$1" in
+ -k|--keep-index)
+ keep_index=t
+ ;;
+ --no-keep-index)
+ keep_index=n
+ ;;
+ -p|--patch)
+ patch_mode=t
+ # only default to keep if we don't already have an override
+ test -z "$keep_index" && keep_index=t
+ ;;
+ -q|--quiet)
+ GIT_QUIET=t
+ ;;
+ -u|--include-untracked)
+ untracked=untracked
+ ;;
+ -a|--all)
+ untracked=all
+ ;;
+ -m|--message)
+ shift
+ test -z ${1+x} && usage
+ stash_msg=$1
+ ;;
+ -m*)
+ stash_msg=${1#-m}
+ ;;
+ --message=*)
+ stash_msg=${1#--message=}
+ ;;
+ --help)
+ show_help
+ ;;
+ --)
+ shift
+ break
+ ;;
+ -*)
+ option="$1"
+ eval_gettextln "error: unknown option for 'stash push': \$option"
+ usage
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+ done
+
+ eval "set $(git rev-parse --sq --prefix "$prefix" -- "$@")"
+
+ if test -n "$patch_mode" && test -n "$untracked"
+ then
+ die "$(gettext "Can't use --patch and --include-untracked or --all at the same time")"
+ fi
+
+ test -n "$untracked" || git ls-files --error-unmatch -- "$@" >/dev/null || exit 1
+
+ git update-index -q --refresh
+ if maybe_quiet no_changes "$@"
+ then
+ say "$(gettext "No local changes to save")"
+ exit 0
+ fi
+
+ git reflog exists $ref_stash ||
+ clear_stash || die "$(gettext "Cannot initialize stash")"
+
+ create_stash -m "$stash_msg" -u "$untracked" -- "$@"
+ store_stash -m "$stash_msg" -q $w_commit ||
+ die "$(gettext "Cannot save the current status")"
+ say "$(eval_gettext "Saved working directory and index state \$stash_msg")"
+
+ if test -z "$patch_mode"
+ then
+ test "$untracked" = "all" && CLEAN_X_OPTION=-x || CLEAN_X_OPTION=
+ if test -n "$untracked" && test $# = 0
+ then
+ git clean --force --quiet -d $CLEAN_X_OPTION
+ fi
+
+ if test $# != 0
+ then
+ test -z "$untracked" && UPDATE_OPTION="-u" || UPDATE_OPTION=
+ test "$untracked" = "all" && FORCE_OPTION="--force" || FORCE_OPTION=
+ git add $UPDATE_OPTION $FORCE_OPTION -- "$@"
+ git diff-index -p --cached --binary HEAD -- "$@" |
+ git apply --index -R
+ else
+ git reset --hard -q
+ fi
+
+ if test "$keep_index" = "t" && test -n "$i_tree"
+ then
+ git read-tree --reset $i_tree
+ git ls-files -z --modified -- "$@" |
+ git checkout-index -z --force --stdin
+ fi
+ else
+ git apply -R < "$TMP-patch" ||
+ die "$(gettext "Cannot remove worktree changes")"
+
+ if test "$keep_index" != "t"
+ then
+ git reset -q -- "$@"
+ fi
+ fi
+}
+
+save_stash () {
+ push_options=
+ while test $# != 0
+ do
+ case "$1" in
+ -q|--quiet)
+ GIT_QUIET=t
+ ;;
+ --)
+ shift
+ break
+ ;;
+ -*)
+ # pass all options through to push_stash
+ push_options="$push_options $1"
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+ done
+
+ stash_msg="$*"
+
+ if test -z "$stash_msg"
+ then
+ push_stash $push_options
+ else
+ push_stash $push_options -m "$stash_msg"
+ fi
+}
+
+have_stash () {
+ git rev-parse --verify --quiet $ref_stash >/dev/null
+}
+
+list_stash () {
+ have_stash || return 0
+ git log --format="%gd: %gs" -g --first-parent -m "$@" $ref_stash --
+}
+
+show_stash () {
+ ALLOW_UNKNOWN_FLAGS=t
+ assert_stash_like "$@"
+
+ if test -z "$FLAGS"
+ then
+ if test "$(git config --bool stash.showStat || echo true)" = "true"
+ then
+ FLAGS=--stat
+ fi
+
+ if test "$(git config --bool stash.showPatch || echo false)" = "true"
+ then
+ FLAGS=${FLAGS}${FLAGS:+ }-p
+ fi
+
+ if test -z "$FLAGS"
+ then
+ return 0
+ fi
+ fi
+
+ git diff ${FLAGS} $b_commit $w_commit
+}
+
+show_help () {
+ exec git help stash
+ exit 1
+}
+
+#
+# Parses the remaining options looking for flags and
+# at most one revision defaulting to ${ref_stash}@{0}
+# if none found.
+#
+# Derives related tree and commit objects from the
+# revision, if one is found.
+#
+# stash records the work tree, and is a merge between the
+# base commit (first parent) and the index tree (second parent).
+#
+# REV is set to the symbolic version of the specified stash-like commit
+# IS_STASH_LIKE is non-blank if ${REV} looks like a stash
+# IS_STASH_REF is non-blank if the ${REV} looks like a stash ref
+# s is set to the SHA1 of the stash commit
+# w_commit is set to the commit containing the working tree
+# b_commit is set to the base commit
+# i_commit is set to the commit containing the index tree
+# u_commit is set to the commit containing the untracked files tree
+# w_tree is set to the working tree
+# b_tree is set to the base tree
+# i_tree is set to the index tree
+# u_tree is set to the untracked files tree
+#
+# GIT_QUIET is set to t if -q is specified
+# INDEX_OPTION is set to --index if --index is specified.
+# FLAGS is set to the remaining flags (if allowed)
+#
+# dies if:
+# * too many revisions specified
+# * no revision is specified and there is no stash stack
+#   * a revision is specified which cannot be resolved to a SHA1
+# * a non-existent stash reference is specified
+# * unknown flags were set and ALLOW_UNKNOWN_FLAGS is not "t"
+#
+
+parse_flags_and_rev()
+{
+ test "$PARSE_CACHE" = "$*" && return 0 # optimisation
+ PARSE_CACHE="$*"
+
+ IS_STASH_LIKE=
+ IS_STASH_REF=
+ INDEX_OPTION=
+ s=
+ w_commit=
+ b_commit=
+ i_commit=
+ u_commit=
+ w_tree=
+ b_tree=
+ i_tree=
+ u_tree=
+
+ FLAGS=
+ REV=
+ for opt
+ do
+ case "$opt" in
+ -q|--quiet)
+ GIT_QUIET=-t
+ ;;
+ --index)
+ INDEX_OPTION=--index
+ ;;
+ --help)
+ show_help
+ ;;
+ -*)
+ test "$ALLOW_UNKNOWN_FLAGS" = t ||
+ die "$(eval_gettext "unknown option: \$opt")"
+ FLAGS="${FLAGS}${FLAGS:+ }$opt"
+ ;;
+ *)
+ REV="${REV}${REV:+ }'$opt'"
+ ;;
+ esac
+ done
+
+ eval set -- $REV
+
+ case $# in
+ 0)
+ have_stash || die "$(gettext "No stash entries found.")"
+ set -- ${ref_stash}@{0}
+ ;;
+ 1)
+ :
+ ;;
+ *)
+ die "$(eval_gettext "Too many revisions specified: \$REV")"
+ ;;
+ esac
+
+ case "$1" in
+ *[!0-9]*)
+ :
+ ;;
+ *)
+ set -- "${ref_stash}@{$1}"
+ ;;
+ esac
+
+ REV=$(git rev-parse --symbolic --verify --quiet "$1") || {
+ reference="$1"
+ die "$(eval_gettext "\$reference is not a valid reference")"
+ }
+
+ i_commit=$(git rev-parse --verify --quiet "$REV^2") &&
+ set -- $(git rev-parse "$REV" "$REV^1" "$REV:" "$REV^1:" "$REV^2:" 2>/dev/null) &&
+ s=$1 &&
+ w_commit=$1 &&
+ b_commit=$2 &&
+ w_tree=$3 &&
+ b_tree=$4 &&
+ i_tree=$5 &&
+ IS_STASH_LIKE=t &&
+ test "$ref_stash" = "$(git rev-parse --symbolic-full-name "${REV%@*}")" &&
+ IS_STASH_REF=t
+
+ u_commit=$(git rev-parse --verify --quiet "$REV^3") &&
+ u_tree=$(git rev-parse "$REV^3:" 2>/dev/null)
+}
+
+is_stash_like()
+{
+ parse_flags_and_rev "$@"
+ test -n "$IS_STASH_LIKE"
+}
+
+assert_stash_like() {
+ is_stash_like "$@" || {
+ args="$*"
+ die "$(eval_gettext "'\$args' is not a stash-like commit")"
+ }
+}
+
+is_stash_ref() {
+ is_stash_like "$@" && test -n "$IS_STASH_REF"
+}
+
+assert_stash_ref() {
+ is_stash_ref "$@" || {
+ args="$*"
+ die "$(eval_gettext "'\$args' is not a stash reference")"
+ }
+}
+
+apply_stash () {
+
+ assert_stash_like "$@"
+
+ git update-index -q --refresh || die "$(gettext "unable to refresh index")"
+
+ # current index state
+ c_tree=$(git write-tree) ||
+ die "$(gettext "Cannot apply a stash in the middle of a merge")"
+
+ unstashed_index_tree=
+ if test -n "$INDEX_OPTION" && test "$b_tree" != "$i_tree" &&
+ test "$c_tree" != "$i_tree"
+ then
+ git diff-tree --binary $s^2^..$s^2 | git apply --cached
+ test $? -ne 0 &&
+ die "$(gettext "Conflicts in index. Try without --index.")"
+ unstashed_index_tree=$(git write-tree) ||
+ die "$(gettext "Could not save index tree")"
+ git reset
+ fi
+
+ if test -n "$u_tree"
+ then
+ GIT_INDEX_FILE="$TMPindex" git read-tree "$u_tree" &&
+ GIT_INDEX_FILE="$TMPindex" git checkout-index --all &&
+ rm -f "$TMPindex" ||
+ die "$(gettext "Could not restore untracked files from stash entry")"
+ fi
+
+ eval "
+ GITHEAD_$w_tree='Stashed changes' &&
+ GITHEAD_$c_tree='Updated upstream' &&
+ GITHEAD_$b_tree='Version stash was based on' &&
+ export GITHEAD_$w_tree GITHEAD_$c_tree GITHEAD_$b_tree
+ "
+
+ if test -n "$GIT_QUIET"
+ then
+ GIT_MERGE_VERBOSITY=0 && export GIT_MERGE_VERBOSITY
+ fi
+ if git merge-recursive $b_tree -- $c_tree $w_tree
+ then
+ # No conflict
+ if test -n "$unstashed_index_tree"
+ then
+ git read-tree "$unstashed_index_tree"
+ else
+ a="$TMP-added" &&
+ git diff-index --cached --name-only --diff-filter=A $c_tree >"$a" &&
+ git read-tree --reset $c_tree &&
+ git update-index --add --stdin <"$a" ||
+ die "$(gettext "Cannot unstage modified files")"
+ rm -f "$a"
+ fi
+ squelch=
+ if test -n "$GIT_QUIET"
+ then
+ squelch='>/dev/null 2>&1'
+ fi
+ (cd "$START_DIR" && eval "git status $squelch") || :
+ else
+ # Merge conflict; keep the exit status from merge-recursive
+ status=$?
+ git rerere
+ if test -n "$INDEX_OPTION"
+ then
+ gettextln "Index was not unstashed." >&2
+ fi
+ exit $status
+ fi
+}
+
+pop_stash() {
+ assert_stash_ref "$@"
+
+ if apply_stash "$@"
+ then
+ drop_stash "$@"
+ else
+ status=$?
+ say "$(gettext "The stash entry is kept in case you need it again.")"
+ exit $status
+ fi
+}
+
+drop_stash () {
+ assert_stash_ref "$@"
+
+ git reflog delete --updateref --rewrite "${REV}" &&
+ say "$(eval_gettext "Dropped \${REV} (\$s)")" ||
+ die "$(eval_gettext "\${REV}: Could not drop stash entry")"
+
+ # clear_stash if we just dropped the last stash entry
+ git rev-parse --verify --quiet "$ref_stash@{0}" >/dev/null ||
+ clear_stash
+}
+
+apply_to_branch () {
+ test -n "$1" || die "$(gettext "No branch name specified")"
+ branch=$1
+ shift 1
+
+ set -- --index "$@"
+ assert_stash_like "$@"
+
+ git checkout -b $branch $REV^ &&
+ apply_stash "$@" && {
+ test -z "$IS_STASH_REF" || drop_stash "$@"
+ }
+}
+
+test "$1" = "-p" && set "push" "$@"
+
+PARSE_CACHE='--not-parsed'
+# The default command is "push" if nothing but options are given
+seen_non_option=
+for opt
+do
+ case "$opt" in
+ --) break ;;
+ -*) ;;
+ *) seen_non_option=t; break ;;
+ esac
+done
+
+test -n "$seen_non_option" || set "push" "$@"
+
+# Main command set
+case "$1" in
+list)
+ shift
+ list_stash "$@"
+ ;;
+show)
+ shift
+ show_stash "$@"
+ ;;
+save)
+ shift
+ save_stash "$@"
+ ;;
+push)
+ shift
+ push_stash "$@"
+ ;;
+apply)
+ shift
+ apply_stash "$@"
+ ;;
+clear)
+ shift
+ clear_stash "$@"
+ ;;
+create)
+ shift
+ create_stash -m "$*" && echo "$w_commit"
+ ;;
+store)
+ shift
+ store_stash "$@"
+ ;;
+drop)
+ shift
+ drop_stash "$@"
+ ;;
+pop)
+ shift
+ pop_stash "$@"
+ ;;
+branch)
+ shift
+ apply_to_branch "$@"
+ ;;
+*)
+ case $# in
+ 0)
+ push_stash &&
+ say "$(gettext "(To restore them type \"git stash apply\")")"
+ ;;
+ *)
+ usage
+ esac
+ ;;
+esac
}
}
-diff_mode() {
+diff_mode () {
test "$TOOL_MODE" = diff
}
-merge_mode() {
+merge_mode () {
test "$TOOL_MODE" = merge
}
+gui_mode () {
+ test "$GIT_MERGETOOL_GUI" = true
+}
+
translate_merge_tool_path () {
echo "$1"
}
fi
tools="$tools gvimdiff diffuse diffmerge ecmerge"
tools="$tools p4merge araxis bc codecompare"
+ tools="$tools smerge"
fi
case "${VISUAL:-$EDITOR}" in
*vim*)
}
get_configured_merge_tool () {
- # If first argument is true, find the guitool instead
- if test "$1" = true
- then
- gui_prefix=gui
- fi
-
- # Diff mode first tries diff.(gui)tool and falls back to merge.(gui)tool.
- # Merge mode only checks merge.(gui)tool
+ keys=
if diff_mode
then
- merge_tool=$(git config diff.${gui_prefix}tool || git config merge.${gui_prefix}tool)
+ if gui_mode
+ then
+ keys="diff.guitool merge.guitool diff.tool merge.tool"
+ else
+ keys="diff.tool merge.tool"
+ fi
else
- merge_tool=$(git config merge.${gui_prefix}tool)
+ if gui_mode
+ then
+ keys="merge.guitool merge.tool"
+ else
+ keys="merge.tool"
+ fi
fi
+
+ merge_tool=$(
+ IFS=' '
+ for key in $keys
+ do
+ selected=$(git config $key)
+ if test -n "$selected"
+ then
+ echo "$selected"
+ return
+ fi
+ done)
+
if test -n "$merge_tool" && ! valid_tool "$merge_tool"
then
echo >&2 "git config option $TOOL_MODE.${gui_prefix}tool set to unknown tool: $merge_tool"
}
get_merge_tool () {
+ is_guessed=false
# Check if a merge tool has been configured
merge_tool=$(get_configured_merge_tool)
# Try to guess an appropriate merge tool if no tool has been set.
if test -z "$merge_tool"
then
merge_tool=$(guess_merge_tool) || exit
+ is_guessed=true
fi
echo "$merge_tool"
+ test "$is_guessed" = false
}
mergetool_find_win32_cmd () {
main () {
prompt=$(git config --bool mergetool.prompt)
- gui_tool=false
+ GIT_MERGETOOL_GUI=false
guessed_merge_tool=false
orderfile=
esac
;;
--no-gui)
- gui_tool=false
+ GIT_MERGETOOL_GUI=false
;;
-g|--gui)
- gui_tool=true
+ GIT_MERGETOOL_GUI=true
;;
-y|--no-prompt)
prompt=false
if test -z "$merge_tool"
then
- # Check if a merge tool has been configured
- merge_tool=$(get_configured_merge_tool $gui_tool)
- # Try to guess an appropriate merge tool if no tool has been set.
- if test -z "$merge_tool"
+ if ! merge_tool=$(get_merge_tool)
then
- merge_tool=$(guess_merge_tool) || exit
guessed_merge_tool=true
fi
fi
+++ /dev/null
-#!/bin/sh
-# Copyright (c) 2012 Felipe Contreras
-
-# The first argument can be a url when the fetch/push command was a url
-# instead of a configured remote. In this case, use a generic alias.
-if test "$1" = "testgit::$2"; then
- alias=_
-else
- alias=$1
-fi
-url=$2
-
-dir="$GIT_DIR/testgit/$alias"
-prefix="refs/testgit/$alias"
-
-default_refspec="refs/heads/*:${prefix}/heads/*"
-
-refspec="${GIT_REMOTE_TESTGIT_REFSPEC-$default_refspec}"
-
-test -z "$refspec" && prefix="refs"
-
-GIT_DIR="$url/.git"
-export GIT_DIR
-
-force=
-
-mkdir -p "$dir"
-
-if test -z "$GIT_REMOTE_TESTGIT_NO_MARKS"
-then
- gitmarks="$dir/git.marks"
- testgitmarks="$dir/testgit.marks"
- test -e "$gitmarks" || >"$gitmarks"
- test -e "$testgitmarks" || >"$testgitmarks"
-fi
-
-while read line
-do
- case $line in
- capabilities)
- echo 'import'
- echo 'export'
- test -n "$refspec" && echo "refspec $refspec"
- if test -n "$gitmarks"
- then
- echo "*import-marks $gitmarks"
- echo "*export-marks $gitmarks"
- fi
- test -n "$GIT_REMOTE_TESTGIT_SIGNED_TAGS" && echo "signed-tags"
- test -n "$GIT_REMOTE_TESTGIT_NO_PRIVATE_UPDATE" && echo "no-private-update"
- echo 'option'
- echo
- ;;
- list)
- git for-each-ref --format='? %(refname)' 'refs/heads/'
- head=$(git symbolic-ref HEAD)
- echo "@$head HEAD"
- echo
- ;;
- import*)
- # read all import lines
- while true
- do
- ref="${line#* }"
- refs="$refs $ref"
- read line
- test "${line%% *}" != "import" && break
- done
-
- if test -n "$gitmarks"
- then
- echo "feature import-marks=$gitmarks"
- echo "feature export-marks=$gitmarks"
- fi
-
- if test -n "$GIT_REMOTE_TESTGIT_FAILURE"
- then
- echo "feature done"
- exit 1
- fi
-
- echo "feature done"
- git fast-export \
- ${testgitmarks:+"--import-marks=$testgitmarks"} \
- ${testgitmarks:+"--export-marks=$testgitmarks"} \
- $refs |
- sed -e "s#refs/heads/#${prefix}/heads/#g"
- echo "done"
- ;;
- export)
- if test -n "$GIT_REMOTE_TESTGIT_FAILURE"
- then
- # consume input so fast-export doesn't get SIGPIPE;
- # git would also notice that case, but we want
- # to make sure we are exercising the later
- # error checks
- while read line; do
- test "done" = "$line" && break
- done
- exit 1
- fi
-
- before=$(git for-each-ref --format=' %(refname) %(objectname) ')
-
- git fast-import \
- ${force:+--force} \
- ${testgitmarks:+"--import-marks=$testgitmarks"} \
- ${testgitmarks:+"--export-marks=$testgitmarks"} \
- --quiet
-
- # figure out which refs were updated
- git for-each-ref --format='%(refname) %(objectname)' |
- while read ref a
- do
- case "$before" in
- *" $ref $a "*)
- continue ;; # unchanged
- esac
- if test -z "$GIT_REMOTE_TESTGIT_PUSH_ERROR"
- then
- echo "ok $ref"
- else
- echo "error $ref $GIT_REMOTE_TESTGIT_PUSH_ERROR"
- fi
- done
-
- echo
- ;;
- option\ *)
- read cmd opt val <<-EOF
- $line
- EOF
- case $opt in
- force)
- test $val = "true" && force="true" || force=
- echo "ok"
- ;;
- *)
- echo "unsupported"
- ;;
- esac
- ;;
- '')
- exit
- ;;
- esac
-done
my(%suppress_cc);
if (@suppress_cc) {
foreach my $entry (@suppress_cc) {
+ # Please update $__git_send_email_suppresscc_options
+ # in git-completion.bash when you add new options.
die sprintf(__("Unknown --suppress-cc field: '%s'\n"), $entry)
unless $entry =~ /^(?:all|cccmd|cc|author|self|sob|body|bodycc|misc-by)$/;
$suppress_cc{$entry} = 1;
if ($confirm_unconfigured) {
$confirm = scalar %suppress_cc ? 'compose' : 'auto';
};
+# Please update $__git_send_email_confirm_options in
+# git-completion.bash when you add new options.
die sprintf(__("Unknown --confirm setting: '%s'\n"), $confirm)
unless $confirm =~ /^(?:auto|cc|compose|always|never)/;
if (/\(define-mail-alias\s+"(\S+?)"\s+"(\S+?)"\)/) {
$aliases{$1} = [ $2 ];
}}}
+ # Please update _git_config() in git-completion.bash when you
+ # add new MUAs.
);
if (@alias_files and $aliasfiletype and defined $parse_alias{$aliasfiletype}) {
# Now parse the message body
while(<$fh>) {
$message .= $_;
- if (/^([a-z-]*-by|Cc): (.*)/i) {
+ if (/^([a-z][a-z-]*-by|Cc): (.*)/i) {
chomp;
my ($what, $c) = ($1, $2);
# strip garbage for the address we'll use:
$message = MIME::Base64::decode($message)
if ($from eq 'base64');
- $to = ($message =~ /.{999,}/) ? 'quoted-printable' : '8bit'
+ $to = ($message =~ /(?:.{999,}|\r)/) ? 'quoted-printable' : '8bit'
if $to eq 'auto';
die __("cannot send message as 7bit")
case "$1" in
-h)
echo "$LONG_USAGE"
+ case "$0" in *git-legacy-stash) exit 129;; esac
exit
esac
fi
+++ /dev/null
-#!/bin/sh
-# Copyright (c) 2007, Nanako Shiraishi
-
-dashless=$(basename "$0" | sed -e 's/-/ /')
-USAGE="list [<options>]
- or: $dashless show [<stash>]
- or: $dashless drop [-q|--quiet] [<stash>]
- or: $dashless ( pop | apply ) [--index] [-q|--quiet] [<stash>]
- or: $dashless branch <branchname> [<stash>]
- or: $dashless save [--patch] [-k|--[no-]keep-index] [-q|--quiet]
- [-u|--include-untracked] [-a|--all] [<message>]
- or: $dashless [push [--patch] [-k|--[no-]keep-index] [-q|--quiet]
- [-u|--include-untracked] [-a|--all] [-m <message>]
- [-- <pathspec>...]]
- or: $dashless clear"
-
-SUBDIRECTORY_OK=Yes
-OPTIONS_SPEC=
-START_DIR=$(pwd)
-. git-sh-setup
-require_work_tree
-prefix=$(git rev-parse --show-prefix) || exit 1
-cd_to_toplevel
-
-TMP="$GIT_DIR/.git-stash.$$"
-TMPindex=${GIT_INDEX_FILE-"$(git rev-parse --git-path index)"}.stash.$$
-trap 'rm -f "$TMP-"* "$TMPindex"' 0
-
-ref_stash=refs/stash
-
-if git config --get-colorbool color.interactive; then
- help_color="$(git config --get-color color.interactive.help 'red bold')"
- reset_color="$(git config --get-color '' reset)"
-else
- help_color=
- reset_color=
-fi
-
-no_changes () {
- git diff-index --quiet --cached HEAD --ignore-submodules -- "$@" &&
- git diff-files --quiet --ignore-submodules -- "$@" &&
- (test -z "$untracked" || test -z "$(untracked_files "$@")")
-}
-
-untracked_files () {
- if test "$1" = "-z"
- then
- shift
- z=-z
- else
- z=
- fi
- excl_opt=--exclude-standard
- test "$untracked" = "all" && excl_opt=
- git ls-files -o $z $excl_opt -- "$@"
-}
-
-prepare_fallback_ident () {
- if ! git -c user.useconfigonly=yes var GIT_COMMITTER_IDENT >/dev/null 2>&1
- then
- GIT_AUTHOR_NAME="git stash"
- GIT_AUTHOR_EMAIL=git@stash
- GIT_COMMITTER_NAME="git stash"
- GIT_COMMITTER_EMAIL=git@stash
- export GIT_AUTHOR_NAME
- export GIT_AUTHOR_EMAIL
- export GIT_COMMITTER_NAME
- export GIT_COMMITTER_EMAIL
- fi
-}
-
-clear_stash () {
- if test $# != 0
- then
- die "$(gettext "git stash clear with parameters is unimplemented")"
- fi
- if current=$(git rev-parse --verify --quiet $ref_stash)
- then
- git update-ref -d $ref_stash $current
- fi
-}
-
-create_stash () {
-
- prepare_fallback_ident
-
- stash_msg=
- untracked=
- while test $# != 0
- do
- case "$1" in
- -m|--message)
- shift
- stash_msg=${1?"BUG: create_stash () -m requires an argument"}
- ;;
- -m*)
- stash_msg=${1#-m}
- ;;
- --message=*)
- stash_msg=${1#--message=}
- ;;
- -u|--include-untracked)
- shift
- untracked=${1?"BUG: create_stash () -u requires an argument"}
- ;;
- --)
- shift
- break
- ;;
- esac
- shift
- done
-
- git update-index -q --refresh
- if no_changes "$@"
- then
- exit 0
- fi
-
- # state of the base commit
- if b_commit=$(git rev-parse --verify HEAD)
- then
- head=$(git rev-list --oneline -n 1 HEAD --)
- else
- die "$(gettext "You do not have the initial commit yet")"
- fi
-
- if branch=$(git symbolic-ref -q HEAD)
- then
- branch=${branch#refs/heads/}
- else
- branch='(no branch)'
- fi
- msg=$(printf '%s: %s' "$branch" "$head")
-
- # state of the index
- i_tree=$(git write-tree) &&
- i_commit=$(printf 'index on %s\n' "$msg" |
- git commit-tree $i_tree -p $b_commit) ||
- die "$(gettext "Cannot save the current index state")"
-
- if test -n "$untracked"
- then
- # Untracked files are stored by themselves in a parentless commit, for
- # ease of unpacking later.
- u_commit=$(
- untracked_files -z "$@" | (
- GIT_INDEX_FILE="$TMPindex" &&
- export GIT_INDEX_FILE &&
- rm -f "$TMPindex" &&
- git update-index -z --add --remove --stdin &&
- u_tree=$(git write-tree) &&
- printf 'untracked files on %s\n' "$msg" | git commit-tree $u_tree &&
- rm -f "$TMPindex"
- ) ) || die "$(gettext "Cannot save the untracked files")"
-
- untracked_commit_option="-p $u_commit";
- else
- untracked_commit_option=
- fi
-
- if test -z "$patch_mode"
- then
-
- # state of the working tree
- w_tree=$( (
- git read-tree --index-output="$TMPindex" -m $i_tree &&
- GIT_INDEX_FILE="$TMPindex" &&
- export GIT_INDEX_FILE &&
- git diff-index --name-only -z HEAD -- "$@" >"$TMP-stagenames" &&
- git update-index -z --add --remove --stdin <"$TMP-stagenames" &&
- git write-tree &&
- rm -f "$TMPindex"
- ) ) ||
- die "$(gettext "Cannot save the current worktree state")"
-
- else
-
- rm -f "$TMP-index" &&
- GIT_INDEX_FILE="$TMP-index" git read-tree HEAD &&
-
- # find out what the user wants
- GIT_INDEX_FILE="$TMP-index" \
- git add--interactive --patch=stash -- "$@" &&
-
- # state of the working tree
- w_tree=$(GIT_INDEX_FILE="$TMP-index" git write-tree) ||
- die "$(gettext "Cannot save the current worktree state")"
-
- git diff-tree -p HEAD $w_tree -- >"$TMP-patch" &&
- test -s "$TMP-patch" ||
- die "$(gettext "No changes selected")"
-
- rm -f "$TMP-index" ||
- die "$(gettext "Cannot remove temporary index (can't happen)")"
-
- fi
-
- # create the stash
- if test -z "$stash_msg"
- then
- stash_msg=$(printf 'WIP on %s' "$msg")
- else
- stash_msg=$(printf 'On %s: %s' "$branch" "$stash_msg")
- fi
- w_commit=$(printf '%s\n' "$stash_msg" |
- git commit-tree $w_tree -p $b_commit -p $i_commit $untracked_commit_option) ||
- die "$(gettext "Cannot record working tree state")"
-}
-
-store_stash () {
- while test $# != 0
- do
- case "$1" in
- -m|--message)
- shift
- stash_msg="$1"
- ;;
- -m*)
- stash_msg=${1#-m}
- ;;
- --message=*)
- stash_msg=${1#--message=}
- ;;
- -q|--quiet)
- quiet=t
- ;;
- *)
- break
- ;;
- esac
- shift
- done
- test $# = 1 ||
- die "$(eval_gettext "\"$dashless store\" requires one <commit> argument")"
-
- w_commit="$1"
- if test -z "$stash_msg"
- then
- stash_msg="Created via \"git stash store\"."
- fi
-
- git update-ref --create-reflog -m "$stash_msg" $ref_stash $w_commit
- ret=$?
- test $ret != 0 && test -z "$quiet" &&
- die "$(eval_gettext "Cannot update \$ref_stash with \$w_commit")"
- return $ret
-}
-
-push_stash () {
- keep_index=
- patch_mode=
- untracked=
- stash_msg=
- while test $# != 0
- do
- case "$1" in
- -k|--keep-index)
- keep_index=t
- ;;
- --no-keep-index)
- keep_index=n
- ;;
- -p|--patch)
- patch_mode=t
- # only default to keep if we don't already have an override
- test -z "$keep_index" && keep_index=t
- ;;
- -q|--quiet)
- GIT_QUIET=t
- ;;
- -u|--include-untracked)
- untracked=untracked
- ;;
- -a|--all)
- untracked=all
- ;;
- -m|--message)
- shift
- test -z ${1+x} && usage
- stash_msg=$1
- ;;
- -m*)
- stash_msg=${1#-m}
- ;;
- --message=*)
- stash_msg=${1#--message=}
- ;;
- --help)
- show_help
- ;;
- --)
- shift
- break
- ;;
- -*)
- option="$1"
- eval_gettextln "error: unknown option for 'stash push': \$option"
- usage
- ;;
- *)
- break
- ;;
- esac
- shift
- done
-
- eval "set $(git rev-parse --sq --prefix "$prefix" -- "$@")"
-
- if test -n "$patch_mode" && test -n "$untracked"
- then
- die "$(gettext "Can't use --patch and --include-untracked or --all at the same time")"
- fi
-
- test -n "$untracked" || git ls-files --error-unmatch -- "$@" >/dev/null || exit 1
-
- git update-index -q --refresh
- if no_changes "$@"
- then
- say "$(gettext "No local changes to save")"
- exit 0
- fi
-
- git reflog exists $ref_stash ||
- clear_stash || die "$(gettext "Cannot initialize stash")"
-
- create_stash -m "$stash_msg" -u "$untracked" -- "$@"
- store_stash -m "$stash_msg" -q $w_commit ||
- die "$(gettext "Cannot save the current status")"
- say "$(eval_gettext "Saved working directory and index state \$stash_msg")"
-
- if test -z "$patch_mode"
- then
- test "$untracked" = "all" && CLEAN_X_OPTION=-x || CLEAN_X_OPTION=
- if test -n "$untracked" && test $# = 0
- then
- git clean --force --quiet -d $CLEAN_X_OPTION
- fi
-
- if test $# != 0
- then
- test -z "$untracked" && UPDATE_OPTION="-u" || UPDATE_OPTION=
- test "$untracked" = "all" && FORCE_OPTION="--force" || FORCE_OPTION=
- git add $UPDATE_OPTION $FORCE_OPTION -- "$@"
- git diff-index -p --cached --binary HEAD -- "$@" |
- git apply --index -R
- else
- git reset --hard -q
- fi
-
- if test "$keep_index" = "t" && test -n "$i_tree"
- then
- git read-tree --reset $i_tree
- git ls-files -z --modified -- "$@" |
- git checkout-index -z --force --stdin
- fi
- else
- git apply -R < "$TMP-patch" ||
- die "$(gettext "Cannot remove worktree changes")"
-
- if test "$keep_index" != "t"
- then
- git reset -q -- "$@"
- fi
- fi
-}
-
-save_stash () {
- push_options=
- while test $# != 0
- do
- case "$1" in
- --)
- shift
- break
- ;;
- -*)
- # pass all options through to push_stash
- push_options="$push_options $1"
- ;;
- *)
- break
- ;;
- esac
- shift
- done
-
- stash_msg="$*"
-
- if test -z "$stash_msg"
- then
- push_stash $push_options
- else
- push_stash $push_options -m "$stash_msg"
- fi
-}
-
-have_stash () {
- git rev-parse --verify --quiet $ref_stash >/dev/null
-}
-
-list_stash () {
- have_stash || return 0
- git log --format="%gd: %gs" -g --first-parent -m "$@" $ref_stash --
-}
-
-show_stash () {
- ALLOW_UNKNOWN_FLAGS=t
- assert_stash_like "$@"
-
- if test -z "$FLAGS"
- then
- if test "$(git config --bool stash.showStat || echo true)" = "true"
- then
- FLAGS=--stat
- fi
-
- if test "$(git config --bool stash.showPatch || echo false)" = "true"
- then
- FLAGS=${FLAGS}${FLAGS:+ }-p
- fi
-
- if test -z "$FLAGS"
- then
- return 0
- fi
- fi
-
- git diff ${FLAGS} $b_commit $w_commit
-}
-
-show_help () {
- exec git help stash
- exit 1
-}
-
-#
-# Parses the remaining options looking for flags and
-# at most one revision defaulting to ${ref_stash}@{0}
-# if none found.
-#
-# Derives related tree and commit objects from the
-# revision, if one is found.
-#
-# stash records the work tree, and is a merge between the
-# base commit (first parent) and the index tree (second parent).
-#
-# REV is set to the symbolic version of the specified stash-like commit
-# IS_STASH_LIKE is non-blank if ${REV} looks like a stash
-# IS_STASH_REF is non-blank if the ${REV} looks like a stash ref
-# s is set to the SHA1 of the stash commit
-# w_commit is set to the commit containing the working tree
-# b_commit is set to the base commit
-# i_commit is set to the commit containing the index tree
-# u_commit is set to the commit containing the untracked files tree
-# w_tree is set to the working tree
-# b_tree is set to the base tree
-# i_tree is set to the index tree
-# u_tree is set to the untracked files tree
-#
-# GIT_QUIET is set to t if -q is specified
-# INDEX_OPTION is set to --index if --index is specified.
-# FLAGS is set to the remaining flags (if allowed)
-#
-# dies if:
-# * too many revisions specified
-# * no revision is specified and there is no stash stack
-# * a revision is specified which cannot be resolve to a SHA1
-# * a non-existent stash reference is specified
-# * unknown flags were set and ALLOW_UNKNOWN_FLAGS is not "t"
-#
-
-parse_flags_and_rev()
-{
- test "$PARSE_CACHE" = "$*" && return 0 # optimisation
- PARSE_CACHE="$*"
-
- IS_STASH_LIKE=
- IS_STASH_REF=
- INDEX_OPTION=
- s=
- w_commit=
- b_commit=
- i_commit=
- u_commit=
- w_tree=
- b_tree=
- i_tree=
- u_tree=
-
- FLAGS=
- REV=
- for opt
- do
- case "$opt" in
- -q|--quiet)
- GIT_QUIET=-t
- ;;
- --index)
- INDEX_OPTION=--index
- ;;
- --help)
- show_help
- ;;
- -*)
- test "$ALLOW_UNKNOWN_FLAGS" = t ||
- die "$(eval_gettext "unknown option: \$opt")"
- FLAGS="${FLAGS}${FLAGS:+ }$opt"
- ;;
- *)
- REV="${REV}${REV:+ }'$opt'"
- ;;
- esac
- done
-
- eval set -- $REV
-
- case $# in
- 0)
- have_stash || die "$(gettext "No stash entries found.")"
- set -- ${ref_stash}@{0}
- ;;
- 1)
- :
- ;;
- *)
- die "$(eval_gettext "Too many revisions specified: \$REV")"
- ;;
- esac
-
- case "$1" in
- *[!0-9]*)
- :
- ;;
- *)
- set -- "${ref_stash}@{$1}"
- ;;
- esac
-
- REV=$(git rev-parse --symbolic --verify --quiet "$1") || {
- reference="$1"
- die "$(eval_gettext "\$reference is not a valid reference")"
- }
-
- i_commit=$(git rev-parse --verify --quiet "$REV^2") &&
- set -- $(git rev-parse "$REV" "$REV^1" "$REV:" "$REV^1:" "$REV^2:" 2>/dev/null) &&
- s=$1 &&
- w_commit=$1 &&
- b_commit=$2 &&
- w_tree=$3 &&
- b_tree=$4 &&
- i_tree=$5 &&
- IS_STASH_LIKE=t &&
- test "$ref_stash" = "$(git rev-parse --symbolic-full-name "${REV%@*}")" &&
- IS_STASH_REF=t
-
- u_commit=$(git rev-parse --verify --quiet "$REV^3") &&
- u_tree=$(git rev-parse "$REV^3:" 2>/dev/null)
-}
-
-is_stash_like()
-{
- parse_flags_and_rev "$@"
- test -n "$IS_STASH_LIKE"
-}
-
-assert_stash_like() {
- is_stash_like "$@" || {
- args="$*"
- die "$(eval_gettext "'\$args' is not a stash-like commit")"
- }
-}
-
-is_stash_ref() {
- is_stash_like "$@" && test -n "$IS_STASH_REF"
-}
-
-assert_stash_ref() {
- is_stash_ref "$@" || {
- args="$*"
- die "$(eval_gettext "'\$args' is not a stash reference")"
- }
-}
-
-apply_stash () {
-
- assert_stash_like "$@"
-
- git update-index -q --refresh || die "$(gettext "unable to refresh index")"
-
- # current index state
- c_tree=$(git write-tree) ||
- die "$(gettext "Cannot apply a stash in the middle of a merge")"
-
- unstashed_index_tree=
- if test -n "$INDEX_OPTION" && test "$b_tree" != "$i_tree" &&
- test "$c_tree" != "$i_tree"
- then
- git diff-tree --binary $s^2^..$s^2 | git apply --cached
- test $? -ne 0 &&
- die "$(gettext "Conflicts in index. Try without --index.")"
- unstashed_index_tree=$(git write-tree) ||
- die "$(gettext "Could not save index tree")"
- git reset
- fi
-
- if test -n "$u_tree"
- then
- GIT_INDEX_FILE="$TMPindex" git read-tree "$u_tree" &&
- GIT_INDEX_FILE="$TMPindex" git checkout-index --all &&
- rm -f "$TMPindex" ||
- die "$(gettext "Could not restore untracked files from stash entry")"
- fi
-
- eval "
- GITHEAD_$w_tree='Stashed changes' &&
- GITHEAD_$c_tree='Updated upstream' &&
- GITHEAD_$b_tree='Version stash was based on' &&
- export GITHEAD_$w_tree GITHEAD_$c_tree GITHEAD_$b_tree
- "
-
- if test -n "$GIT_QUIET"
- then
- GIT_MERGE_VERBOSITY=0 && export GIT_MERGE_VERBOSITY
- fi
- if git merge-recursive $b_tree -- $c_tree $w_tree
- then
- # No conflict
- if test -n "$unstashed_index_tree"
- then
- git read-tree "$unstashed_index_tree"
- else
- a="$TMP-added" &&
- git diff-index --cached --name-only --diff-filter=A $c_tree >"$a" &&
- git read-tree --reset $c_tree &&
- git update-index --add --stdin <"$a" ||
- die "$(gettext "Cannot unstage modified files")"
- rm -f "$a"
- fi
- squelch=
- if test -n "$GIT_QUIET"
- then
- squelch='>/dev/null 2>&1'
- fi
- (cd "$START_DIR" && eval "git status $squelch") || :
- else
- # Merge conflict; keep the exit status from merge-recursive
- status=$?
- git rerere
- if test -n "$INDEX_OPTION"
- then
- gettextln "Index was not unstashed." >&2
- fi
- exit $status
- fi
-}
-
-pop_stash() {
- assert_stash_ref "$@"
-
- if apply_stash "$@"
- then
- drop_stash "$@"
- else
- status=$?
- say "$(gettext "The stash entry is kept in case you need it again.")"
- exit $status
- fi
-}
-
-drop_stash () {
- assert_stash_ref "$@"
-
- git reflog delete --updateref --rewrite "${REV}" &&
- say "$(eval_gettext "Dropped \${REV} (\$s)")" ||
- die "$(eval_gettext "\${REV}: Could not drop stash entry")"
-
- # clear_stash if we just dropped the last stash entry
- git rev-parse --verify --quiet "$ref_stash@{0}" >/dev/null ||
- clear_stash
-}
-
-apply_to_branch () {
- test -n "$1" || die "$(gettext "No branch name specified")"
- branch=$1
- shift 1
-
- set -- --index "$@"
- assert_stash_like "$@"
-
- git checkout -b $branch $REV^ &&
- apply_stash "$@" && {
- test -z "$IS_STASH_REF" || drop_stash "$@"
- }
-}
-
-test "$1" = "-p" && set "push" "$@"
-
-PARSE_CACHE='--not-parsed'
-# The default command is "push" if nothing but options are given
-seen_non_option=
-for opt
-do
- case "$opt" in
- --) break ;;
- -*) ;;
- *) seen_non_option=t; break ;;
- esac
-done
-
-test -n "$seen_non_option" || set "push" "$@"
-
-# Main command set
-case "$1" in
-list)
- shift
- list_stash "$@"
- ;;
-show)
- shift
- show_stash "$@"
- ;;
-save)
- shift
- save_stash "$@"
- ;;
-push)
- shift
- push_stash "$@"
- ;;
-apply)
- shift
- apply_stash "$@"
- ;;
-clear)
- shift
- clear_stash "$@"
- ;;
-create)
- shift
- create_stash -m "$*" && echo "$w_commit"
- ;;
-store)
- shift
- store_stash "$@"
- ;;
-drop)
- shift
- drop_stash "$@"
- ;;
-pop)
- shift
- pop_stash "$@"
- ;;
-branch)
- shift
- apply_to_branch "$@"
- ;;
-*)
- case $# in
- 0)
- push_stash &&
- say "$(gettext "(To restore them type \"git stash apply\")")"
- ;;
- *)
- usage
- esac
- ;;
-esac
# Copyright (c) 2007 Lars Hjemli
dashless=$(basename "$0" | sed -e 's/-/ /')
-USAGE="[--quiet] add [-b <branch>] [-f|--force] [--name <name>] [--reference <repository>] [--] <repository> [<path>]
+USAGE="[--quiet] [--cached]
+ or: $dashless [--quiet] add [-b <branch>] [-f|--force] [--name <name>] [--reference <repository>] [--] <repository> [<path>]
or: $dashless [--quiet] status [--cached] [--recursive] [--] [<path>...]
or: $dashless [--quiet] init [--] [<path>...]
or: $dashless [--quiet] deinit [-f|--force] (--all| [--] <path>...)
or: $dashless [--quiet] update [--init] [--remote] [-N|--no-fetch] [-f|--force] [--checkout|--merge|--rebase] [--[no-]recommend-shallow] [--reference <repository>] [--recursive] [--] [<path>...]
+ or: $dashless [--quiet] set-branch (--default|--branch <branch>) [--] <path>
or: $dashless [--quiet] summary [--cached|--files] [--summary-limit <n>] [commit] [--] [<path>...]
or: $dashless [--quiet] foreach [--recursive] <command>
or: $dashless [--quiet] sync [--recursive] [--] [<path>...]
die "$(eval_gettext "'\$sm_path' already exists in the index and is not a submodule")"
fi
+ if test -d "$sm_path" &&
+ test -z $(git -C "$sm_path" rev-parse --show-cdup 2>/dev/null)
+ then
+ git -C "$sm_path" rev-parse --verify -q HEAD >/dev/null ||
+ die "$(eval_gettext "'\$sm_path' does not have a commit checked out")"
+ fi
+
if test -z "$force" &&
! git add --dry-run --ignore-missing --no-warn-embedded-repo "$sm_path" > /dev/null 2>&1
then
shift
done
- git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper foreach ${GIT_QUIET:+--quiet} ${recursive:+--recursive} "$@"
+ git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper foreach ${GIT_QUIET:+--quiet} ${recursive:+--recursive} -- "$@"
}
#
shift
done
- git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper init ${GIT_QUIET:+--quiet} "$@"
+ git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper init ${GIT_QUIET:+--quiet} -- "$@"
}
#
shift
done
- git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${GIT_QUIET:+--quiet} ${prefix:+--prefix "$prefix"} ${force:+--force} ${deinit_all:+--all} "$@"
+ git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${GIT_QUIET:+--quiet} ${prefix:+--prefix "$prefix"} ${force:+--force} ${deinit_all:+--all} -- "$@"
}
is_tip_reachable () (
${depth:+--depth "$depth"} \
$recommend_shallow \
$jobs \
+ -- \
"$@" || echo "#unmatched" $?
} | {
err=
# is not reachable from a ref.
is_tip_reachable "$sm_path" "$sha1" ||
fetch_in_submodule "$sm_path" $depth ||
- say "$(eval_gettext "Unable to fetch in submodule path '\$displaypath'")"
+ say "$(eval_gettext "Unable to fetch in submodule path '\$displaypath'; trying to directly fetch \$sha1:")"
# Now we tried the usual fetch, but $sha1 may
# not be reachable from any of the refs
}
}
+#
+# Configures a submodule's default branch
+#
+# $@ = requested path
+#
+cmd_set_branch() {
+ unset_branch=false
+ branch=
+
+ while test $# -ne 0
+ do
+ case "$1" in
+ -q|--quiet)
+ # we don't do anything with this but we need to accept it
+ ;;
+ -d|--default)
+ unset_branch=true
+ ;;
+ -b|--branch)
+ case "$2" in '') usage ;; esac
+ branch=$2
+ shift
+ ;;
+ --)
+ shift
+ break
+ ;;
+ -*)
+ usage
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+ done
+
+ if test $# -ne 1
+ then
+ usage
+ fi
+
+ # we can't use `git submodule--helper name` here because internally, it
+ # hashes the path so a trailing slash could lead to an unintentional no match
+ name="$(git submodule--helper list "$1" | cut -f2)"
+ if test -z "$name"
+ then
+ exit 1
+ fi
+
+ test -n "$branch"; has_branch=$?
+ test "$unset_branch" = true; has_unset_branch=$?
+
+ if test $((!$has_branch != !$has_unset_branch)) -eq 0
+ then
+ usage
+ fi
+
+ if test $has_branch -eq 0
+ then
+ git submodule--helper config submodule."$name".branch "$branch"
+ else
+ git submodule--helper config --unset submodule."$name".branch
+ fi
+}
+
#
# Show commit summary for submodules in index or working tree
#
shift
done
- git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper status ${GIT_QUIET:+--quiet} ${cached:+--cached} ${recursive:+--recursive} "$@"
+ git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper status ${GIT_QUIET:+--quiet} ${cached:+--cached} ${recursive:+--recursive} -- "$@"
}
#
# Sync remote urls for submodules
esac
done
- git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper sync ${GIT_QUIET:+--quiet} ${recursive:+--recursive} "$@"
+ git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper sync ${GIT_QUIET:+--quiet} ${recursive:+--recursive} -- "$@"
}
cmd_absorbgitdirs()
while test $# != 0 && test -z "$command"
do
case "$1" in
- add | foreach | init | deinit | update | status | summary | sync | absorbgitdirs)
+ add | foreach | init | deinit | update | set-branch | status | summary | sync | absorbgitdirs)
command=$1
;;
-q|--quiet)
fi
fi
-# "-b branch" is accepted only by "add"
-if test -n "$branch" && test "$command" != add
+# "-b branch" is accepted only by "add" and "set-branch"
+if test -n "$branch" && test "$command" != add && test "$command" != set-branch
then
usage
fi
usage
fi
-"cmd_$command" "$@"
+"cmd_$(echo $command | sed -e s/-/_/g)" "$@"
{
struct string_list list = STRING_LIST_INIT_DUP;
int i;
+ int nongit;
+
+ /*
+ * Set up the repository so we can pick up any repo-level config (like
+ * completion.commands).
+ */
+ setup_git_directory_gently(&nongit);
while (*spec) {
const char *sep = strchrnul(spec, ',');
git_set_exec_path(cmd + 1);
else {
puts(git_exec_path());
+ trace2_cmd_name("_query_");
exit(0);
}
} else if (!strcmp(cmd, "--html-path")) {
puts(system_path(GIT_HTML_PATH));
+ trace2_cmd_name("_query_");
exit(0);
} else if (!strcmp(cmd, "--man-path")) {
puts(system_path(GIT_MAN_PATH));
+ trace2_cmd_name("_query_");
exit(0);
} else if (!strcmp(cmd, "--info-path")) {
puts(system_path(GIT_INFO_PATH));
+ trace2_cmd_name("_query_");
exit(0);
} else if (!strcmp(cmd, "-p") || !strcmp(cmd, "--paginate")) {
use_pager = 1;
(*argv)++;
(*argc)--;
} else if (skip_prefix(cmd, "--list-cmds=", &cmd)) {
+ trace2_cmd_name("_query_");
if (!strcmp(cmd, "parseopt")) {
struct string_list list = STRING_LIST_INIT_DUP;
int i;
commit_pager_choice();
child.use_shell = 1;
+ child.trace2_child_class = "shell_alias";
argv_array_push(&child.args, alias_string + 1);
argv_array_pushv(&child.args, (*argv) + 1);
+ trace2_cmd_alias(alias_command, child.args.argv);
+ trace2_cmd_list_config();
+ trace2_cmd_name("_run_shell_alias_");
+
ret = run_command(&child);
if (ret >= 0) /* normal exit */
exit(ret);
/* insert after command name */
memcpy(new_argv + count, *argv + 1, sizeof(char *) * *argcp);
+ trace2_cmd_alias(alias_command, new_argv);
+ trace2_cmd_list_config();
+
*argv = new_argv;
*argcp += count - 1;
setup_work_tree();
trace_argv_printf(argv, "trace: built-in: git");
+ trace2_cmd_name(p->cmd);
+ trace2_cmd_list_config();
validate_cache_entries(the_repository->index);
status = p->fn(argc, argv, prefix);
{ "diff-files", cmd_diff_files, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
{ "diff-index", cmd_diff_index, RUN_SETUP | NO_PARSEOPT },
{ "diff-tree", cmd_diff_tree, RUN_SETUP | NO_PARSEOPT },
- { "difftool", cmd_difftool, RUN_SETUP | NEED_WORK_TREE },
+ { "difftool", cmd_difftool, RUN_SETUP_GENTLY },
{ "fast-export", cmd_fast_export, RUN_SETUP },
{ "fetch", cmd_fetch, RUN_SETUP },
{ "fetch-pack", cmd_fetch_pack, RUN_SETUP | NO_PARSEOPT },
{ "revert", cmd_revert, RUN_SETUP | NEED_WORK_TREE },
{ "rm", cmd_rm, RUN_SETUP },
{ "send-pack", cmd_send_pack, RUN_SETUP },
- { "serve", cmd_serve, RUN_SETUP },
{ "shortlog", cmd_shortlog, RUN_SETUP_GENTLY | USE_PAGER },
{ "show", cmd_show, RUN_SETUP },
{ "show-branch", cmd_show_branch, RUN_SETUP },
{ "show-index", cmd_show_index },
{ "show-ref", cmd_show_ref, RUN_SETUP },
{ "stage", cmd_add, RUN_SETUP | NEED_WORK_TREE },
+ /*
+ * NEEDSWORK: Until the builtin stash is thoroughly robust and no
+ * longer needs redirection to the stash shell script this is kept as
+ * is, then should be changed to RUN_SETUP | NEED_WORK_TREE
+ */
+ { "stash", cmd_stash },
{ "status", cmd_status, RUN_SETUP | NEED_WORK_TREE },
{ "stripspace", cmd_stripspace },
{ "submodule--helper", cmd_submodule__helper, RUN_SETUP | SUPPORT_SUPER_PREFIX | NO_PARSEOPT },
cmd.clean_on_exit = 1;
cmd.wait_after_clean = 1;
cmd.silent_exec_failure = 1;
+ cmd.trace2_child_class = "dashed";
+
+ trace2_cmd_name("_run_dashed_");
+ /*
+ * The code in run_command() logs trace2 child_start/child_exit
+ * events, so we do not need to report exec/exec_result events here.
+ */
trace_argv_printf(cmd.args.argv, "trace: exec:");
/*
* the program.
*/
status = run_command(&cmd);
+
+ /*
+ * If the child process ran and we are now going to exit, emit a
+ * generic string as our trace2 command verb to indicate that we
+ * launched a dashed command.
+ */
if (status >= 0)
exit(status);
else if (errno != ENOENT)
if (!done_alias)
handle_builtin(*argcp, *argv);
+#if 0 // TODO In GFW, need to amend a7924b655e940b06cb547c235d6bed9767929673 to include trace2_ and _tr2 lines.
+ else if (get_builtin(**argv)) {
+ struct argv_array args = ARGV_ARRAY_INIT;
+ int i;
+
+ /*
+ * The current process is committed to launching a
+ * child process to run the command named in (**argv)
+ * and exiting. Log a generic string as the trace2
+ * command verb to indicate this. Note that the child
+ * process will log the actual verb when it runs.
+ */
+ trace2_cmd_name("_run_git_alias_");
+
+ if (get_super_prefix())
+ die("%s doesn't support --super-prefix", **argv);
+
+ commit_pager_choice();
+
+ argv_array_push(&args, "git");
+ for (i = 0; i < *argcp; i++)
+ argv_array_push(&args, (*argv)[i]);
+
+ trace_argv_printf(args.argv, "trace: exec:");
+
+ /*
+ * if we fail because the command is not found, it is
+ * OK to return. Otherwise, we just pass along the status code.
+ */
+ i = run_command_v_opt_tr2(args.argv, RUN_SILENT_EXEC_FAILURE |
+ RUN_CLEAN_ON_EXIT, "git_alias");
+ if (i >= 0 || errno != ENOENT)
+ exit(i);
+ die("could not execute builtin %s", **argv);
+ }
+#endif // a7924b655e940b06cb547c235d6bed9767929673
+
/* .. then try the external ones */
execv_dashed_external(*argv);
# Bulgarian translation of gitk po-file.
-# Copyright (C) 2014, 2015 Alexander Shopov <ash@kambanaria.org>.
+# Copyright (C) 2014, 2015, 2019 Alexander Shopov <ash@kambanaria.org>.
# This file is distributed under the same license as the git package.
-# Alexander Shopov <ash@kambanaria.org>, 2014, 2015.
+# Alexander Shopov <ash@kambanaria.org>, 2014, 2015, 2019.
#
#
msgid ""
msgstr ""
"Project-Id-Version: gitk master\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2015-12-19 11:48+0200\n"
-"PO-Revision-Date: 2015-12-19 11:49+0200\n"
+"POT-Creation-Date: 2019-03-04 11:27+0100\n"
+"PO-Revision-Date: 2019-03-04 11:39+0100\n"
"Last-Translator: Alexander Shopov <ash@kambanaria.org>\n"
"Language-Team: Bulgarian <dict@fsa-bg.org>\n"
"Language: bg\n"
msgid "Couldn't get list of unmerged files:"
msgstr "Списъкът с неслети файлове не може да бъде получен:"
-#: gitk:212 gitk:2399
+#: gitk:212 gitk:2403
msgid "Color words"
msgstr "Оцветяване на думите"
-#: gitk:217 gitk:2399 gitk:8239 gitk:8272
+#: gitk:217 gitk:2403 gitk:8249 gitk:8282
msgid "Markup words"
msgstr "Отбелязване на думите"
#: gitk:324
msgid "Error parsing revisions:"
-msgstr "Грешка при разбор на версиите:"
+msgstr "Грешка при анализ на версиите:"
#: gitk:380
msgid "Error executing --argscmd command:"
msgid "Reading"
msgstr "Прочитане"
-#: gitk:496 gitk:4544
+#: gitk:496 gitk:4549
msgid "Reading commits..."
msgstr "Прочитане на подаванията…"
-#: gitk:499 gitk:1637 gitk:4547
+#: gitk:499 gitk:1641 gitk:4552
msgid "No commits selected"
msgstr "Не са избрани подавания"
-#: gitk:1445 gitk:4064 gitk:12469
+#: gitk:1449 gitk:4069 gitk:12583
msgid "Command line"
msgstr "Команден ред"
-#: gitk:1511
+#: gitk:1515
msgid "Can't parse git log output:"
msgstr "Изходът от „git log“ не може да се анализира:"
-#: gitk:1740
+#: gitk:1744
msgid "No commit information available"
msgstr "Липсва информация за подавания"
-#: gitk:1903 gitk:1932 gitk:4334 gitk:9702 gitk:11274 gitk:11554
+#: gitk:1907 gitk:1936 gitk:4339 gitk:9789 gitk:11388 gitk:11668
msgid "OK"
msgstr "Добре"
-#: gitk:1934 gitk:4336 gitk:9215 gitk:9294 gitk:9424 gitk:9473 gitk:9704
-#: gitk:11275 gitk:11555
+#: gitk:1938 gitk:4341 gitk:9225 gitk:9304 gitk:9434 gitk:9520 gitk:9791
+#: gitk:11389 gitk:11669
msgid "Cancel"
msgstr "Отказ"
-#: gitk:2083
+#: gitk:2087
msgid "&Update"
msgstr "&Обновяване"
-#: gitk:2084
+#: gitk:2088
msgid "&Reload"
msgstr "&Презареждане"
-#: gitk:2085
+#: gitk:2089
msgid "Reread re&ferences"
-msgstr "&Наново прочитане на настройките"
+msgstr "&Наново прочитане"
-#: gitk:2086
+#: gitk:2090
msgid "&List references"
msgstr "&Изброяване на указателите"
-#: gitk:2088
+#: gitk:2092
msgid "Start git &gui"
msgstr "&Стартиране на „git gui“"
-#: gitk:2090
+#: gitk:2094
msgid "&Quit"
msgstr "&Спиране на програмата"
-#: gitk:2082
+#: gitk:2086
msgid "&File"
msgstr "&Файл"
-#: gitk:2094
+#: gitk:2098
msgid "&Preferences"
msgstr "&Настройки"
-#: gitk:2093
+#: gitk:2097
msgid "&Edit"
msgstr "&Редактиране"
-#: gitk:2098
+#: gitk:2102
msgid "&New view..."
msgstr "&Нов изглед…"
-#: gitk:2099
+#: gitk:2103
msgid "&Edit view..."
msgstr "&Редактиране на изгледа…"
-#: gitk:2100
+#: gitk:2104
msgid "&Delete view"
msgstr "&Изтриване на изгледа"
-#: gitk:2102
+#: gitk:2106
msgid "&All files"
msgstr "&Всички файлове"
-#: gitk:2097
+#: gitk:2101
msgid "&View"
msgstr "&Изглед"
-#: gitk:2107 gitk:2117
+#: gitk:2111 gitk:2121
msgid "&About gitk"
msgstr "&Относно gitk"
-#: gitk:2108 gitk:2122
+#: gitk:2112 gitk:2126
msgid "&Key bindings"
msgstr "&Клавишни комбинации"
-#: gitk:2106 gitk:2121
+#: gitk:2110 gitk:2125
msgid "&Help"
msgstr "Помо&щ"
-#: gitk:2199 gitk:8671
+#: gitk:2203 gitk:8681
msgid "SHA1 ID:"
msgstr "SHA1:"
-#: gitk:2243
+#: gitk:2247
msgid "Row"
msgstr "Ред"
-#: gitk:2281
+#: gitk:2285
msgid "Find"
msgstr "Търсене"
-#: gitk:2309
+#: gitk:2313
msgid "commit"
msgstr "подаване"
-#: gitk:2313 gitk:2315 gitk:4706 gitk:4729 gitk:4753 gitk:6774 gitk:6846
-#: gitk:6931
+#: gitk:2317 gitk:2319 gitk:4711 gitk:4734 gitk:4758 gitk:6779 gitk:6851
+#: gitk:6936
msgid "containing:"
msgstr "съдържащо:"
-#: gitk:2316 gitk:3545 gitk:3550 gitk:4782
+#: gitk:2320 gitk:3550 gitk:3555 gitk:4787
msgid "touching paths:"
-msgstr "заÑ\81Ñ\8fгаÑ\89о пътищата:"
+msgstr "в пътищата:"
-#: gitk:2317 gitk:4796
+#: gitk:2321 gitk:4801
msgid "adding/removing string:"
msgstr "добавящо/премахващо низ"
-#: gitk:2318 gitk:4798
+#: gitk:2322 gitk:4803
msgid "changing lines matching:"
msgstr "променящо редове напасващи:"
-#: gitk:2327 gitk:2329 gitk:4785
+#: gitk:2331 gitk:2333 gitk:4790
msgid "Exact"
msgstr "Точно"
-#: gitk:2329 gitk:4873 gitk:6742
+#: gitk:2333 gitk:4878 gitk:6747
msgid "IgnCase"
msgstr "Без регистър"
-#: gitk:2329 gitk:4755 gitk:4871 gitk:6738
+#: gitk:2333 gitk:4760 gitk:4876 gitk:6743
msgid "Regexp"
msgstr "Рег. израз"
-#: gitk:2331 gitk:2332 gitk:4893 gitk:4923 gitk:4930 gitk:6867 gitk:6935
+#: gitk:2335 gitk:2336 gitk:4898 gitk:4928 gitk:4935 gitk:6872 gitk:6940
msgid "All fields"
msgstr "Всички полета"
-#: gitk:2332 gitk:4890 gitk:4923 gitk:6805
+#: gitk:2336 gitk:4895 gitk:4928 gitk:6810
msgid "Headline"
msgstr "Първи ред"
-#: gitk:2333 gitk:4890 gitk:6805 gitk:6935 gitk:7408
+#: gitk:2337 gitk:4895 gitk:6810 gitk:6940 gitk:7413
msgid "Comments"
msgstr "Коментари"
-#: gitk:2333 gitk:4890 gitk:4895 gitk:4930 gitk:6805 gitk:7343 gitk:8849
-#: gitk:8864
+#: gitk:2337 gitk:4895 gitk:4900 gitk:4935 gitk:6810 gitk:7348 gitk:8859
+#: gitk:8874
msgid "Author"
msgstr "Автор"
-#: gitk:2333 gitk:4890 gitk:6805 gitk:7345
+#: gitk:2337 gitk:4895 gitk:6810 gitk:7350
msgid "Committer"
msgstr "Подаващ"
-#: gitk:2367
+#: gitk:2371
msgid "Search"
msgstr "Търсене"
-#: gitk:2375
+#: gitk:2379
msgid "Diff"
msgstr "Разлики"
-#: gitk:2377
+#: gitk:2381
msgid "Old version"
msgstr "Стара версия"
-#: gitk:2379
+#: gitk:2383
msgid "New version"
msgstr "Нова версия"
-#: gitk:2382
+#: gitk:2386
msgid "Lines of context"
msgstr "Контекст в редове"
-#: gitk:2392
+#: gitk:2396
msgid "Ignore space change"
msgstr "Празните знаци без значение"
-#: gitk:2396 gitk:2398 gitk:7978 gitk:8225
+#: gitk:2400 gitk:2402 gitk:7983 gitk:8235
msgid "Line diff"
msgstr "Поредови разлики"
-#: gitk:2463
+#: gitk:2467
msgid "Patch"
msgstr "Кръпка"
-#: gitk:2465
+#: gitk:2469
msgid "Tree"
msgstr "Дърво"
-#: gitk:2635 gitk:2656
+#: gitk:2639 gitk:2660
msgid "Diff this -> selected"
msgstr "Разлики между това и избраното"
-#: gitk:2636 gitk:2657
+#: gitk:2640 gitk:2661
msgid "Diff selected -> this"
msgstr "Разлики между избраното и това"
-#: gitk:2637 gitk:2658
+#: gitk:2641 gitk:2662
msgid "Make patch"
msgstr "Създаване на кръпка"
-#: gitk:2638 gitk:9273
+#: gitk:2642 gitk:9283
msgid "Create tag"
msgstr "Създаване на етикет"
-#: gitk:2639
+#: gitk:2643
msgid "Copy commit summary"
msgstr "Копиране на информацията за подаване"
-#: gitk:2640 gitk:9404
+#: gitk:2644 gitk:9414
msgid "Write commit to file"
msgstr "Запазване на подаването във файл"
-#: gitk:2641 gitk:9461
+#: gitk:2645
msgid "Create new branch"
msgstr "Създаване на нов клон"
-#: gitk:2642
+#: gitk:2646
msgid "Cherry-pick this commit"
msgstr "Отбиране на това подаване"
-#: gitk:2643
+#: gitk:2647
msgid "Reset HEAD branch to here"
msgstr "Привеждане на върха на клона към текущото подаване"
-#: gitk:2644
+#: gitk:2648
msgid "Mark this commit"
msgstr "Отбелязване на това подаване"
-#: gitk:2645
+#: gitk:2649
msgid "Return to mark"
msgstr "Връщане към отбелязаното подаване"
-#: gitk:2646
+#: gitk:2650
msgid "Find descendant of this and mark"
msgstr "Откриване и отбелязване на наследниците"
-#: gitk:2647
+#: gitk:2651
msgid "Compare with marked commit"
msgstr "Сравнение с отбелязаното подаване"
-#: gitk:2648 gitk:2659
+#: gitk:2652 gitk:2663
msgid "Diff this -> marked commit"
msgstr "Разлики между това и отбелязаното"
-#: gitk:2649 gitk:2660
+#: gitk:2653 gitk:2664
msgid "Diff marked commit -> this"
msgstr "Разлики между отбелязаното и това"
-#: gitk:2650
+#: gitk:2654
msgid "Revert this commit"
msgstr "Отмяна на това подаване"
-#: gitk:2666
+#: gitk:2670
msgid "Check out this branch"
msgstr "Изтегляне на този клон"
-#: gitk:2667
+#: gitk:2671
+msgid "Rename this branch"
+msgstr "Преименуване на този клон"
+
+#: gitk:2672
msgid "Remove this branch"
msgstr "Изтриване на този клон"
-#: gitk:2668
+#: gitk:2673
msgid "Copy branch name"
msgstr "Копиране на името на клона"
-#: gitk:2675
+#: gitk:2680
msgid "Highlight this too"
msgstr "Отбелязване и на това"
-#: gitk:2676
+#: gitk:2681
msgid "Highlight this only"
msgstr "Отбелязване само на това"
-#: gitk:2677
+#: gitk:2682
msgid "External diff"
msgstr "Външна програма за разлики"
-#: gitk:2678
+#: gitk:2683
msgid "Blame parent commit"
msgstr "Анотиране на родителското подаване"
-#: gitk:2679
+#: gitk:2684
msgid "Copy path"
msgstr "Копиране на пътя"
-#: gitk:2686
+#: gitk:2691
msgid "Show origin of this line"
msgstr "Показване на произхода на този ред"
-#: gitk:2687
+#: gitk:2692
msgid "Run git gui blame on this line"
msgstr "Изпълнение на „git gui blame“ върху този ред"
-#: gitk:3031
+#: gitk:3036
msgid "About gitk"
msgstr "Относно gitk"
-#: gitk:3033
+#: gitk:3038
msgid ""
"\n"
"Gitk - a commit viewer for git\n"
"\n"
"Използвайте и разпространявайте при условията на ОПЛ на ГНУ"
-#: gitk:3041 gitk:3108 gitk:9890
+#: gitk:3046 gitk:3113 gitk:10004
msgid "Close"
msgstr "Затваряне"
-#: gitk:3062
+#: gitk:3067
msgid "Gitk key bindings"
msgstr "Клавишни комбинации"
-#: gitk:3065
+#: gitk:3070
msgid "Gitk key bindings:"
msgstr "Клавишни комбинации:"
-#: gitk:3067
+#: gitk:3072
#, tcl-format
msgid "<%s-Q>\t\tQuit"
msgstr "<%s-Q>\t\tСпиране на програмата"
-#: gitk:3068
+#: gitk:3073
#, tcl-format
msgid "<%s-W>\t\tClose window"
msgstr "<%s-W>\t\tЗатваряне на прозореца"
-#: gitk:3069
+#: gitk:3074
msgid "<Home>\t\tMove to first commit"
msgstr "<Home>\t\tКъм първото подаване"
-#: gitk:3070
+#: gitk:3075
msgid "<End>\t\tMove to last commit"
msgstr "<End>\t\tКъм последното подаване"
-#: gitk:3071
+#: gitk:3076
msgid "<Up>, p, k\tMove up one commit"
msgstr "<Up>, p, k\tЕдно подаване нагоре"
-#: gitk:3072
+#: gitk:3077
msgid "<Down>, n, j\tMove down one commit"
msgstr "<Down>, n, j\tЕдно подаване надолу"
-#: gitk:3073
+#: gitk:3078
msgid "<Left>, z, h\tGo back in history list"
msgstr "<Left>, z, h\tНазад в историята"
-#: gitk:3074
+#: gitk:3079
msgid "<Right>, x, l\tGo forward in history list"
msgstr "<Right>, x, l\tНапред в историята"
-#: gitk:3075
+#: gitk:3080
#, tcl-format
msgid "<%s-n>\tGo to n-th parent of current commit in history list"
msgstr "<%s-n>\tКъм n-тия родител на текущото подаване в историята"
-#: gitk:3076
+#: gitk:3081
msgid "<PageUp>\tMove up one page in commit list"
msgstr "<PageUp>\tСтраница нагоре в списъка с подаванията"
-#: gitk:3077
+#: gitk:3082
msgid "<PageDown>\tMove down one page in commit list"
msgstr "<PageDown>\tСтраница надолу в списъка с подаванията"
-#: gitk:3078
+#: gitk:3083
#, tcl-format
msgid "<%s-Home>\tScroll to top of commit list"
msgstr "<%s-Home>\tКъм началото на списъка с подаванията"
-#: gitk:3079
+#: gitk:3084
#, tcl-format
msgid "<%s-End>\tScroll to bottom of commit list"
msgstr "<%s-End>\tКъм края на списъка с подаванията"
-#: gitk:3080
+#: gitk:3085
#, tcl-format
msgid "<%s-Up>\tScroll commit list up one line"
msgstr "<%s-Up>\tРед нагоре в списъка с подавания"
-#: gitk:3081
+#: gitk:3086
#, tcl-format
msgid "<%s-Down>\tScroll commit list down one line"
msgstr "<%s-Down>\tРед надолу в списъка с подавания"
-#: gitk:3082
+#: gitk:3087
#, tcl-format
msgid "<%s-PageUp>\tScroll commit list up one page"
msgstr "<%s-PageUp>\tСтраница нагоре в списъка с подавания"
-#: gitk:3083
+#: gitk:3088
#, tcl-format
msgid "<%s-PageDown>\tScroll commit list down one page"
msgstr "<%s-PageDown>\tСтраница надолу в списъка с подавания"
-#: gitk:3084
+#: gitk:3089
msgid "<Shift-Up>\tFind backwards (upwards, later commits)"
msgstr "<Shift-Up>\tТърсене назад (визуално нагоре, исторически — последващи)"
-#: gitk:3085
+#: gitk:3090
msgid "<Shift-Down>\tFind forwards (downwards, earlier commits)"
msgstr ""
"<Shift-Down>\tТърсене напред (визуално надолу, исторически — предхождащи)"
-#: gitk:3086
+#: gitk:3091
msgid "<Delete>, b\tScroll diff view up one page"
msgstr "<Delete>, b\tСтраница нагоре в изгледа за разлики"
-#: gitk:3087
+#: gitk:3092
msgid "<Backspace>\tScroll diff view up one page"
msgstr "<Backspace>\tСтраница надолу в изгледа за разлики"
-#: gitk:3088
+#: gitk:3093
msgid "<Space>\t\tScroll diff view down one page"
msgstr "<Space>\t\tСтраница надолу в изгледа за разлики"
-#: gitk:3089
+#: gitk:3094
msgid "u\t\tScroll diff view up 18 lines"
msgstr "u\t\t18 реда нагоре в изгледа за разлики"
-#: gitk:3090
+#: gitk:3095
msgid "d\t\tScroll diff view down 18 lines"
msgstr "d\t\t18 реда надолу в изгледа за разлики"
-#: gitk:3091
+#: gitk:3096
#, tcl-format
msgid "<%s-F>\t\tFind"
msgstr "<%s-F>\t\tТърсене"
-#: gitk:3092
+#: gitk:3097
#, tcl-format
msgid "<%s-G>\t\tMove to next find hit"
msgstr "<%s-G>\t\tКъм следващата поява"
-#: gitk:3093
+#: gitk:3098
msgid "<Return>\tMove to next find hit"
msgstr "<Return>\tКъм следващата поява"
-#: gitk:3094
+#: gitk:3099
msgid "g\t\tGo to commit"
msgstr "g\t\tКъм последното подаване"
-#: gitk:3095
+#: gitk:3100
msgid "/\t\tFocus the search box"
msgstr "/\t\tФокус върху полето за търсене"
-#: gitk:3096
+#: gitk:3101
msgid "?\t\tMove to previous find hit"
msgstr "?\t\tКъм предишната поява"
-#: gitk:3097
+#: gitk:3102
msgid "f\t\tScroll diff view to next file"
msgstr "f\t\tСледващ файл в изгледа за разлики"
-#: gitk:3098
+#: gitk:3103
#, tcl-format
msgid "<%s-S>\t\tSearch for next hit in diff view"
msgstr "<%s-S>\t\tТърсене на следващата поява в изгледа за разлики"
-#: gitk:3099
+#: gitk:3104
#, tcl-format
msgid "<%s-R>\t\tSearch for previous hit in diff view"
msgstr "<%s-R>\t\tТърсене на предишната поява в изгледа за разлики"
-#: gitk:3100
+#: gitk:3105
#, tcl-format
msgid "<%s-KP+>\tIncrease font size"
msgstr "<%s-KP+>\tПо-голям размер на шрифта"
-#: gitk:3101
+#: gitk:3106
#, tcl-format
msgid "<%s-plus>\tIncrease font size"
msgstr "<%s-plus>\tПо-голям размер на шрифта"
-#: gitk:3102
+#: gitk:3107
#, tcl-format
msgid "<%s-KP->\tDecrease font size"
msgstr "<%s-KP->\tПо-малък размер на шрифта"
-#: gitk:3103
+#: gitk:3108
#, tcl-format
msgid "<%s-minus>\tDecrease font size"
msgstr "<%s-minus>\tПо-малък размер на шрифта"
-#: gitk:3104
+#: gitk:3109
msgid "<F5>\t\tUpdate"
msgstr "<F5>\t\tОбновяване"
-#: gitk:3569 gitk:3578
+#: gitk:3574 gitk:3583
#, tcl-format
msgid "Error creating temporary directory %s:"
msgstr "Грешка при създаването на временната директория „%s“:"
-#: gitk:3591
+#: gitk:3596
#, tcl-format
msgid "Error getting \"%s\" from %s:"
msgstr "Грешка при получаването на „%s“ от %s:"
-#: gitk:3654
+#: gitk:3659
msgid "command failed:"
msgstr "неуспешно изпълнение на команда:"
-#: gitk:3803
+#: gitk:3808
msgid "No such commit"
msgstr "Такова подаване няма"
-#: gitk:3817
+#: gitk:3822
msgid "git gui blame: command failed:"
msgstr "„git gui blame“: неуспешно изпълнение на команда:"
-#: gitk:3848
+#: gitk:3853
#, tcl-format
msgid "Couldn't read merge head: %s"
msgstr "Върхът за сливане не може да бъде прочетен: %s"
-#: gitk:3856
+#: gitk:3861
#, tcl-format
msgid "Error reading index: %s"
msgstr "Грешка при прочитане на индекса: %s"
-#: gitk:3881
+#: gitk:3886
#, tcl-format
msgid "Couldn't start git blame: %s"
msgstr "Командата „git blame“ не може да бъде стартирана: %s"
-#: gitk:3884 gitk:6773
+#: gitk:3889 gitk:6778
msgid "Searching"
msgstr "Търсене"
-#: gitk:3916
+#: gitk:3921
#, tcl-format
msgid "Error running git blame: %s"
msgstr "Грешка при изпълнението на „git blame“: %s"
-#: gitk:3944
+#: gitk:3949
#, tcl-format
msgid "That line comes from commit %s, which is not in this view"
msgstr "Този ред идва от подаването %s, което не е в изгледа"
-#: gitk:3958
+#: gitk:3963
msgid "External diff viewer failed:"
msgstr "Неуспешно изпълнение на външната програма за разлики:"
-#: gitk:4062
+#: gitk:4067
msgid "All files"
msgstr "Всички файлове"
-#: gitk:4086
+#: gitk:4091
msgid "View"
msgstr "Изглед"
-#: gitk:4089
+#: gitk:4094
msgid "Gitk view definition"
msgstr "Дефиниция на изглед в Gitk"
-#: gitk:4093
+#: gitk:4098
msgid "Remember this view"
msgstr "Запазване на този изглед"
-#: gitk:4094
+#: gitk:4099
msgid "References (space separated list):"
msgstr "Указатели (списък с разделител интервал):"
-#: gitk:4095
+#: gitk:4100
msgid "Branches & tags:"
msgstr "Клони и етикети:"
-#: gitk:4096
+#: gitk:4101
msgid "All refs"
msgstr "Всички указатели"
-#: gitk:4097
+#: gitk:4102
msgid "All (local) branches"
msgstr "Всички (локални) клони"
-#: gitk:4098
+#: gitk:4103
msgid "All tags"
msgstr "Всички етикети"
-#: gitk:4099
+#: gitk:4104
msgid "All remote-tracking branches"
msgstr "Всички следящи клони"
-#: gitk:4100
+#: gitk:4105
msgid "Commit Info (regular expressions):"
msgstr "Информация за подаване (рег. изр.):"
-#: gitk:4101
+#: gitk:4106
msgid "Author:"
msgstr "Автор:"
-#: gitk:4102
+#: gitk:4107
msgid "Committer:"
msgstr "Подал:"
-#: gitk:4103
+#: gitk:4108
msgid "Commit Message:"
msgstr "Съобщение при подаване:"
-#: gitk:4104
+#: gitk:4109
msgid "Matches all Commit Info criteria"
msgstr "Съвпадение по всички характеристики на подаването"
-#: gitk:4105
+#: gitk:4110
msgid "Matches no Commit Info criteria"
msgstr "Не съвпада по никоя от характеристиките на подаването"
-#: gitk:4106
+#: gitk:4111
msgid "Changes to Files:"
msgstr "Промени по файловете:"
-#: gitk:4107
+#: gitk:4112
msgid "Fixed String"
msgstr "Дословен низ"
-#: gitk:4108
+#: gitk:4113
msgid "Regular Expression"
msgstr "Регулярен израз"
-#: gitk:4109
+#: gitk:4114
msgid "Search string:"
msgstr "Низ за търсене:"
-#: gitk:4110
+#: gitk:4115
msgid ""
"Commit Dates (\"2 weeks ago\", \"2009-03-17 15:27:38\", \"March 17, 2009 "
"15:27:38\"):"
"Дата на подаване („2 weeks ago“ (преди 2 седмици), „2009-03-17 15:27:38“, "
"„March 17, 2009 15:27:38“):"
-#: gitk:4111
+#: gitk:4116
msgid "Since:"
msgstr "От:"
-#: gitk:4112
+#: gitk:4117
msgid "Until:"
msgstr "До:"
-#: gitk:4113
+#: gitk:4118
msgid "Limit and/or skip a number of revisions (positive integer):"
msgstr ""
"Ограничаване и/или прескачане на определен брой версии (неотрицателно цяло "
"число):"
-#: gitk:4114
+#: gitk:4119
msgid "Number to show:"
msgstr "Брой показани:"
-#: gitk:4115
+#: gitk:4120
msgid "Number to skip:"
msgstr "Брой прескочени:"
-#: gitk:4116
+#: gitk:4121
msgid "Miscellaneous options:"
msgstr "Разни:"
-#: gitk:4117
+#: gitk:4122
msgid "Strictly sort by date"
msgstr "Подреждане по дата"
-#: gitk:4118
+#: gitk:4123
msgid "Mark branch sides"
msgstr "Отбелязване на страните по клона"
-#: gitk:4119
+#: gitk:4124
msgid "Limit to first parent"
msgstr "Само първия родител"
-#: gitk:4120
+#: gitk:4125
msgid "Simple history"
msgstr "Опростена история"
-#: gitk:4121
+#: gitk:4126
msgid "Additional arguments to git log:"
msgstr "Допълнителни аргументи към „git log“:"
-#: gitk:4122
+#: gitk:4127
msgid "Enter files and directories to include, one per line:"
msgstr "Въведете файловете и директориите за включване, по елемент на ред"
-#: gitk:4123
+#: gitk:4128
msgid "Command to generate more commits to include:"
msgstr ""
"Команда за генерирането на допълнителни подавания, които да бъдат включени:"
-#: gitk:4247
+#: gitk:4252
msgid "Gitk: edit view"
msgstr "Gitk: редактиране на изглед"
-#: gitk:4255
+#: gitk:4260
msgid "-- criteria for selecting revisions"
msgstr "— критерии за избор на версии"
-#: gitk:4260
+#: gitk:4265
msgid "View Name"
msgstr "Име на изглед"
-#: gitk:4335
+#: gitk:4340
msgid "Apply (F5)"
msgstr "Прилагане (F5)"
-#: gitk:4373
+#: gitk:4378
msgid "Error in commit selection arguments:"
msgstr "Грешка в аргументите за избор на подавания:"
-#: gitk:4428 gitk:4481 gitk:4943 gitk:4957 gitk:6227 gitk:12410 gitk:12411
+#: gitk:4433 gitk:4486 gitk:4948 gitk:4962 gitk:6232 gitk:12524 gitk:12525
msgid "None"
msgstr "Няма"
-#: gitk:5040 gitk:5045
+#: gitk:5045 gitk:5050
msgid "Descendant"
msgstr "Наследник"
-#: gitk:5041
+#: gitk:5046
msgid "Not descendant"
msgstr "Не е наследник"
-#: gitk:5048 gitk:5053
+#: gitk:5053 gitk:5058
msgid "Ancestor"
msgstr "Предшественик"
-#: gitk:5049
+#: gitk:5054
msgid "Not ancestor"
msgstr "Не е предшественик"
-#: gitk:5343
+#: gitk:5348
msgid "Local changes checked in to index but not committed"
msgstr "Локални промени добавени към индекса, но неподадени"
-#: gitk:5379
+#: gitk:5384
msgid "Local uncommitted changes, not checked in to index"
msgstr "Локални промени извън индекса"
-#: gitk:7153
+#: gitk:7158
msgid "and many more"
msgstr "и още много"
-#: gitk:7156
+#: gitk:7161
msgid "many"
msgstr "много"
-#: gitk:7347
+#: gitk:7352
msgid "Tags:"
msgstr "Етикети:"
-#: gitk:7364 gitk:7370 gitk:8844
+#: gitk:7369 gitk:7375 gitk:8854
msgid "Parent"
msgstr "Родител"
-#: gitk:7375
+#: gitk:7380
msgid "Child"
msgstr "Дете"
-#: gitk:7384
+#: gitk:7389
msgid "Branch"
msgstr "Клон"
-#: gitk:7387
+#: gitk:7392
msgid "Follows"
msgstr "Следва"
-#: gitk:7390
+#: gitk:7395
msgid "Precedes"
msgstr "Предшества"
-#: gitk:7985
+#: gitk:7990
#, tcl-format
msgid "Error getting diffs: %s"
msgstr "Грешка при получаването на разликите: %s"
-#: gitk:8669
+#: gitk:8679
msgid "Goto:"
msgstr "Към ред:"
-#: gitk:8690
+#: gitk:8700
#, tcl-format
msgid "Short SHA1 id %s is ambiguous"
msgstr "Съкратената сума по SHA1 %s не е еднозначна"
-#: gitk:8697
+#: gitk:8707
#, tcl-format
msgid "Revision %s is not known"
msgstr "Непозната версия %s"
-#: gitk:8707
+#: gitk:8717
#, tcl-format
msgid "SHA1 id %s is not known"
msgstr "Непозната сума по SHA1 %s"
-#: gitk:8709
+#: gitk:8719
#, tcl-format
msgid "Revision %s is not in the current view"
msgstr "Версия %s не е в текущия изглед"
-#: gitk:8851 gitk:8866
+#: gitk:8861 gitk:8876
msgid "Date"
msgstr "Дата"
-#: gitk:8854
+#: gitk:8864
msgid "Children"
msgstr "Деца"
-#: gitk:8917
+#: gitk:8927
#, tcl-format
msgid "Reset %s branch to here"
msgstr "Зануляване на клона „%s“ към текущото подаване"
-#: gitk:8919
+#: gitk:8929
msgid "Detached head: can't reset"
msgstr "Несвързан връх: невъзможно зануляване"
-#: gitk:9024 gitk:9030
+#: gitk:9034 gitk:9040
msgid "Skipping merge commit "
msgstr "Пропускане на подаването на сливането"
-#: gitk:9039 gitk:9044
+#: gitk:9049 gitk:9054
msgid "Error getting patch ID for "
msgstr "Грешка при получаването на идентификатора на "
-#: gitk:9040 gitk:9045
+#: gitk:9050 gitk:9055
msgid " - stopping\n"
msgstr " — спиране\n"
-#: gitk:9050 gitk:9053 gitk:9061 gitk:9075 gitk:9084
+#: gitk:9060 gitk:9063 gitk:9071 gitk:9085 gitk:9094
msgid "Commit "
msgstr "Подаване"
-#: gitk:9054
+#: gitk:9064
msgid ""
" is the same patch as\n"
" "
" е същата кръпка като\n"
" "
-#: gitk:9062
+#: gitk:9072
msgid ""
" differs from\n"
" "
" се различава от\n"
" "
-#: gitk:9064
+#: gitk:9074
msgid ""
"Diff of commits:\n"
"\n"
"Разлика между подаванията:\n"
"\n"
-#: gitk:9076 gitk:9085
+#: gitk:9086 gitk:9095
#, tcl-format
msgid " has %s children - stopping\n"
msgstr " има %s деца — спиране\n"
-#: gitk:9104
+#: gitk:9114
#, tcl-format
msgid "Error writing commit to file: %s"
msgstr "Грешка при запазването на подаването във файл: %s"
-#: gitk:9110
+#: gitk:9120
#, tcl-format
msgid "Error diffing commits: %s"
msgstr "Грешка при изчисляването на разликите между подаванията: %s"
-#: gitk:9156
+#: gitk:9166
msgid "Top"
msgstr "Най-горе"
-#: gitk:9157
+#: gitk:9167
msgid "From"
msgstr "От"
-#: gitk:9162
+#: gitk:9172
msgid "To"
msgstr "До"
-#: gitk:9186
+#: gitk:9196
msgid "Generate patch"
msgstr "Генериране на кръпка"
-#: gitk:9188
+#: gitk:9198
msgid "From:"
msgstr "От:"
-#: gitk:9197
+#: gitk:9207
msgid "To:"
msgstr "До:"
-#: gitk:9206
+#: gitk:9216
msgid "Reverse"
msgstr "Обръщане"
-#: gitk:9208 gitk:9418
+#: gitk:9218 gitk:9428
msgid "Output file:"
msgstr "Запазване във файла:"
-#: gitk:9214
+#: gitk:9224
msgid "Generate"
msgstr "Генериране"
-#: gitk:9252
+#: gitk:9262
msgid "Error creating patch:"
msgstr "Грешка при създаването на кръпка:"
-#: gitk:9275 gitk:9406 gitk:9463
+#: gitk:9285 gitk:9416 gitk:9504
msgid "ID:"
msgstr "Идентификатор:"
-#: gitk:9284
+#: gitk:9294
msgid "Tag name:"
msgstr "Име на етикет:"
-#: gitk:9287
+#: gitk:9297
msgid "Tag message is optional"
msgstr "Съобщението за етикет е незадължително"
-#: gitk:9289
+#: gitk:9299
msgid "Tag message:"
msgstr "Съобщение за етикет:"
-#: gitk:9293 gitk:9472
+#: gitk:9303 gitk:9474
msgid "Create"
msgstr "Създаване"
-#: gitk:9311
+#: gitk:9321
msgid "No tag name specified"
msgstr "Липсва име на етикет"
-#: gitk:9315
+#: gitk:9325
#, tcl-format
msgid "Tag \"%s\" already exists"
msgstr "Етикетът „%s“ вече съществува"
-#: gitk:9325
+#: gitk:9335
msgid "Error creating tag:"
msgstr "Грешка при създаването на етикет:"
-#: gitk:9415
+#: gitk:9425
msgid "Command:"
msgstr "Команда:"
-#: gitk:9423
+#: gitk:9433
msgid "Write"
msgstr "Запазване"
-#: gitk:9441
+#: gitk:9451
msgid "Error writing commit:"
msgstr "Грешка при запазването на подаването:"
-#: gitk:9468
+#: gitk:9473
+msgid "Create branch"
+msgstr "Създаване на клон"
+
+#: gitk:9489
+#, tcl-format
+msgid "Rename branch %s"
+msgstr "Преименуване на клона „%s“"
+
+#: gitk:9490
+msgid "Rename"
+msgstr "Преименуване"
+
+#: gitk:9514
msgid "Name:"
msgstr "Име:"
-#: gitk:9491
+#: gitk:9538
msgid "Please specify a name for the new branch"
msgstr "Укажете име за новия клон"
-#: gitk:9496
+#: gitk:9543
#, tcl-format
msgid "Branch '%s' already exists. Overwrite?"
msgstr "Клонът „%s“ вече съществува. Да бъде ли презаписан?"
-#: gitk:9563
+#: gitk:9587
+msgid "Please specify a new name for the branch"
+msgstr "Укажете ново име за клона"
+
+#: gitk:9650
#, tcl-format
msgid "Commit %s is already included in branch %s -- really re-apply it?"
msgstr ""
"Подаването „%s“ вече е включено в клона „%s“ — да бъде ли приложено отново?"
-#: gitk:9568
+#: gitk:9655
msgid "Cherry-picking"
msgstr "Отбиране"
-#: gitk:9577
+#: gitk:9664
#, tcl-format
msgid ""
"Cherry-pick failed because of local changes to file '%s'.\n"
"Неуспешно отбиране, защото във файла „%s“ има локални промени.\n"
"Подайте, занулете или ги скатайте и пробвайте отново."
-#: gitk:9583
+#: gitk:9670
msgid ""
"Cherry-pick failed because of merge conflict.\n"
"Do you wish to run git citool to resolve it?"
"Неуспешно отбиране поради конфликти при сливане.\n"
"Искате ли да ги коригирате чрез „git citool“?"
-#: gitk:9599 gitk:9657
+#: gitk:9686 gitk:9744
msgid "No changes committed"
msgstr "Не са подадени промени"
-#: gitk:9626
+#: gitk:9713
#, tcl-format
msgid "Commit %s is not included in branch %s -- really revert it?"
msgstr "Подаването „%s“ не е включено в клона „%s“. Да бъде ли отменено?"
-#: gitk:9631
+#: gitk:9718
msgid "Reverting"
msgstr "Отмяна"
-#: gitk:9639
+#: gitk:9726
#, tcl-format
msgid ""
"Revert failed because of local changes to the following files:%s Please "
"commit, reset or stash your changes and try again."
msgstr ""
"Неуспешна отмяна, защото във файла „%s“ има локални промени.\n"
-"Подайте, занулете или ги скатайте и пробвайте отново.<"
+"Подайте, занулете или ги скатайте и пробвайте отново."
-#: gitk:9643
+#: gitk:9730
msgid ""
"Revert failed because of merge conflict.\n"
" Do you wish to run git citool to resolve it?"
"Неуспешно отмяна поради конфликти при сливане.\n"
"Искате ли да ги коригирате чрез „git citool“?"
-#: gitk:9686
+#: gitk:9773
msgid "Confirm reset"
msgstr "Потвърждаване на зануляването"
-#: gitk:9688
+#: gitk:9775
#, tcl-format
msgid "Reset branch %s to %s?"
msgstr "Да се занули ли клонът „%s“ към „%s“?"
-#: gitk:9690
+#: gitk:9777
msgid "Reset type:"
msgstr "Вид зануляване:"
-#: gitk:9693
+#: gitk:9780
msgid "Soft: Leave working tree and index untouched"
msgstr "Слабо: работното дърво и индекса остават същите"
-#: gitk:9696
+#: gitk:9783
msgid "Mixed: Leave working tree untouched, reset index"
msgstr "Смесено: работното дърво остава същото, индексът се занулява"
-#: gitk:9699
+#: gitk:9786
msgid ""
"Hard: Reset working tree and index\n"
"(discard ALL local changes)"
msgstr ""
"Силно: зануляване и на работното дърво, и на индекса\n"
-"(*ВСИЧКИ* локални промени ще бъдат безвъзвратно загубени)"
+"(ВСИЧКИ локални промени ще бъдат безвъзвратно загубени)"
-#: gitk:9716
+#: gitk:9803
msgid "Resetting"
msgstr "Зануляване"
-#: gitk:9776
+#: gitk:9876
+#, tcl-format
+msgid "A local branch named %s exists already"
+msgstr "Вече съществува локален клон „%s“."
+
+#: gitk:9884
msgid "Checking out"
msgstr "Изтегляне"
-#: gitk:9829
+#: gitk:9943
msgid "Cannot delete the currently checked-out branch"
msgstr "Текущо изтегленият клон не може да бъде изтрит"
-#: gitk:9835
+#: gitk:9949
#, tcl-format
msgid ""
"The commits on branch %s aren't on any other branch.\n"
"Really delete branch %s?"
msgstr ""
"Подаванията на клона „%s“ не са на никой друг клон.\n"
-"Ð\9dаиÑ\81Ñ\82ина ли да Ñ\81е изÑ\82Ñ\80ие клонÑ\8aÑ\82 „%s“?"
+"Ð\9dаиÑ\81Ñ\82ина ли иÑ\81каÑ\82е да изÑ\82Ñ\80иеÑ\82е клона „%s“?"
-#: gitk:9866
+#: gitk:9980
#, tcl-format
msgid "Tags and heads: %s"
msgstr "Етикети и върхове: %s"
-#: gitk:9883
+#: gitk:9997
msgid "Filter"
msgstr "Филтриране"
-#: gitk:10179
+#: gitk:10293
msgid ""
"Error reading commit topology information; branch and preceding/following "
"tag information will be incomplete."
"Грешка при прочитането на топологията на подаванията. Информацията за клона "
"и предшестващите/следващите етикети ще е непълна."
-#: gitk:11156
+#: gitk:11270
msgid "Tag"
msgstr "Етикет"
-#: gitk:11160
+#: gitk:11274
msgid "Id"
msgstr "Идентификатор"
-#: gitk:11243
+#: gitk:11357
msgid "Gitk font chooser"
msgstr "Избор на шрифт за Gitk"
-#: gitk:11260
+#: gitk:11374
msgid "B"
msgstr "Ч"
-#: gitk:11263
+#: gitk:11377
msgid "I"
msgstr "К"
-#: gitk:11381
+#: gitk:11495
msgid "Commit list display options"
msgstr "Настройки на списъка с подавания"
-#: gitk:11384
+#: gitk:11498
msgid "Maximum graph width (lines)"
msgstr "Максимална широчина на графа (в редове)"
-#: gitk:11388
+#: gitk:11502
#, no-tcl-format
msgid "Maximum graph width (% of pane)"
msgstr "Максимална широчина на графа (% от панела)"
-#: gitk:11391
+#: gitk:11505
msgid "Show local changes"
msgstr "Показване на локалните промени"
-#: gitk:11394
+#: gitk:11508
msgid "Auto-select SHA1 (length)"
msgstr "Автоматично избиране на SHA1 (дължина)"
-#: gitk:11398
+#: gitk:11512
msgid "Hide remote refs"
msgstr "Скриване на отдалечените указатели"
-#: gitk:11402
+#: gitk:11516
msgid "Diff display options"
msgstr "Настройки на показването на разликите"
-#: gitk:11404
+#: gitk:11518
msgid "Tab spacing"
msgstr "Широчина на табулатора"
-#: gitk:11407
+#: gitk:11521
msgid "Display nearby tags/heads"
msgstr "Извеждане на близките етикети и върхове"
-#: gitk:11410
+#: gitk:11524
msgid "Maximum # tags/heads to show"
msgstr "Максимален брой етикети/върхове за показване"
-#: gitk:11413
+#: gitk:11527
msgid "Limit diffs to listed paths"
msgstr "Разлика само в избраните пътища"
-#: gitk:11416
+#: gitk:11530
msgid "Support per-file encodings"
msgstr "Поддръжка на различни кодирания за всеки файл"
-#: gitk:11422 gitk:11569
+#: gitk:11536 gitk:11683
msgid "External diff tool"
msgstr "Външен инструмент за разлики"
-#: gitk:11423
+#: gitk:11537
msgid "Choose..."
msgstr "Избор…"
-#: gitk:11428
+#: gitk:11542
msgid "General options"
msgstr "Общи настройки"
-#: gitk:11431
+#: gitk:11545
msgid "Use themed widgets"
msgstr "Използване на тема за графичните обекти"
-#: gitk:11433
+#: gitk:11547
msgid "(change requires restart)"
msgstr "(промяната изисква рестартиране на Gitk)"
-#: gitk:11435
+#: gitk:11549
msgid "(currently unavailable)"
msgstr "(в момента недостъпно)"
-#: gitk:11446
+#: gitk:11560
msgid "Colors: press to choose"
msgstr "Цветове: избира се с натискане"
-#: gitk:11449
+#: gitk:11563
msgid "Interface"
msgstr "Интерфейс"
-#: gitk:11450
+#: gitk:11564
msgid "interface"
msgstr "интерфейс"
-#: gitk:11453
+#: gitk:11567
msgid "Background"
msgstr "Фон"
-#: gitk:11454 gitk:11484
+#: gitk:11568 gitk:11598
msgid "background"
msgstr "фон"
-#: gitk:11457
+#: gitk:11571
msgid "Foreground"
msgstr "Знаци"
-#: gitk:11458
+#: gitk:11572
msgid "foreground"
msgstr "знаци"
-#: gitk:11461
+#: gitk:11575
msgid "Diff: old lines"
msgstr "Разлика: стари редове"
-#: gitk:11462
+#: gitk:11576
msgid "diff old lines"
msgstr "разлика, стари редове"
-#: gitk:11466
+#: gitk:11580
msgid "Diff: new lines"
msgstr "Разлика: нови редове"
-#: gitk:11467
+#: gitk:11581
msgid "diff new lines"
msgstr "разлика, нови редове"
-#: gitk:11471
+#: gitk:11585
msgid "Diff: hunk header"
msgstr "Разлика: начало на парче"
-#: gitk:11473
+#: gitk:11587
msgid "diff hunk header"
msgstr "разлика, начало на парче"
-#: gitk:11477
+#: gitk:11591
msgid "Marked line bg"
msgstr "Фон на отбелязан ред"
-#: gitk:11479
+#: gitk:11593
msgid "marked line background"
msgstr "фон на отбелязан ред"
-#: gitk:11483
+#: gitk:11597
msgid "Select bg"
msgstr "Избор на фон"
-#: gitk:11492
+#: gitk:11606
msgid "Fonts: press to choose"
msgstr "Шрифтове: избира се с натискане"
-#: gitk:11494
+#: gitk:11608
msgid "Main font"
msgstr "Основен шрифт"
-#: gitk:11495
+#: gitk:11609
msgid "Diff display font"
msgstr "Шрифт за разликите"
-#: gitk:11496
+#: gitk:11610
msgid "User interface font"
msgstr "Шрифт на интерфейса"
-#: gitk:11518
+#: gitk:11632
msgid "Gitk preferences"
msgstr "Настройки на Gitk"
-#: gitk:11527
+#: gitk:11641
msgid "General"
msgstr "Общи"
-#: gitk:11528
+#: gitk:11642
msgid "Colors"
msgstr "Цветове"
-#: gitk:11529
+#: gitk:11643
msgid "Fonts"
msgstr "Шрифтове"
-#: gitk:11579
+#: gitk:11693
#, tcl-format
msgid "Gitk: choose color for %s"
msgstr "Gitk: избор на цвят на „%s“"
-#: gitk:12092
+#: gitk:12206
msgid ""
"Sorry, gitk cannot run with this version of Tcl/Tk.\n"
" Gitk requires at least Tcl/Tk 8.4."
"Тази версия на Tcl/Tk не се поддържа от Gitk.\n"
" Необходима ви е поне Tcl/Tk 8.4."
-#: gitk:12302
+#: gitk:12416
msgid "Cannot find a git repository here."
msgstr "Тук липсва хранилище на Git."
-#: gitk:12349
+#: gitk:12463
#, tcl-format
msgid "Ambiguous argument '%s': both revision and filename"
msgstr "Нееднозначен аргумент „%s“: има и такава версия, и такъв файл"
-#: gitk:12361
+#: gitk:12475
msgid "Bad arguments to gitk:"
msgstr "Неправилни аргументи на gitk:"
# ======================================================================
# input validation and dispatch
+# Various hash size-related values.
+my $sha1_len = 40;
+my $sha256_extra_len = 24;
+my $sha256_len = $sha1_len + $sha256_extra_len;
+
+# A regex matching $len hex characters. $len may be a range (e.g. 7,64).
+sub oid_nlen_regex {
+ my $len = shift;
+ my $hchr = qr/[0-9a-fA-F]/;
+ return qr/(?:(?:$hchr){$len})/;
+}
+
+# A regex matching two sets of $nlen hex characters, prefixed by the literal
+# string $prefix and with the literal string $infix between them.
+sub oid_nlen_prefix_infix_regex {
+ my $nlen = shift;
+ my $prefix = shift;
+ my $infix = shift;
+
+ my $rx = oid_nlen_regex($nlen);
+
+ return qr/^\Q$prefix\E$rx\Q$infix\E$rx$/;
+}
+
+# A regex matching a valid object ID.
+our $oid_regex;
+{
+ my $x = oid_nlen_regex($sha1_len);
+ my $y = oid_nlen_regex($sha256_extra_len);
+ $oid_regex = qr/(?:$x(?:$y)?)/;
+}
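+# For example, when anchored this matches a full 40-hex SHA-1 id such as
+# "c512b523472485aef4fff9e57b229d9d243c967f" or a full 64-hex SHA-256 id;
+# abbreviated ids are matched separately via oid_nlen_regex("7,64").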
+
# input parameters can be collected from a variety of sources (presently, CGI
# and PATH_INFO), so we define an %input_params hash that collects them all
# together during validation: this allows subsequent uses (e.g. href()) to be
return undef unless defined $input;
# textual hashes are O.K.
- if ($input =~ m/^[0-9a-fA-F]{40}$/) {
+ if ($input =~ m/^$oid_regex$/) {
return 1;
}
# it must be correct pathname
sub format_log_line_html {
my $line = shift;
+ # Potentially abbreviated OID.
+ my $regex = oid_nlen_regex("7,64");
+
$line = esc_html($line, -nbsp=>1);
$line =~ s{
\b
(?<!-) # see strbuf_check_tag_ref(). Tags can't start with -
[A-Za-z0-9.-]+
(?!\.) # refs can't end with ".", see check_refname_format()
- -g[0-9a-fA-F]{7,40}
+ -g$regex
|
# Just a normal looking Git SHA1
- [0-9a-fA-F]{7,40}
+ $regex
)
\b
}{
')</span>';
}
# match <hash>
- if ($line =~ m/^index [0-9a-fA-F]{40},[0-9a-fA-F]{40}/) {
+ if ($line =~ oid_nlen_prefix_infix_regex($sha1_len, "index ", ",") |
+ $line =~ oid_nlen_prefix_infix_regex($sha256_len, "index ", ",")) {
# can match only for combined diff
$line = 'index ';
for (my $i = 0; $i < $diffinfo->{'nparents'}; $i++) {
$line .= '0' x 7;
}
- } elsif ($line =~ m/^index [0-9a-fA-F]{40}..[0-9a-fA-F]{40}/) {
+ } elsif ($line =~ oid_nlen_prefix_infix_regex($sha1_len, "index ", "..") |
+ $line =~ oid_nlen_prefix_infix_regex($sha256_len, "index ", "..")) {
# can match only for ordinary diff
my ($from_link, $to_link);
if ($from->{'href'}) {
}
#'100644 blob 0fa3f3a66fb6a137f6ec2c19351ed4d807070ffa panic.c'
- $line =~ m/^([0-9]+) (.+) ([0-9a-fA-F]{40})\t/;
+ $line =~ m/^([0-9]+) (.+) ($oid_regex)\t/;
if (defined $type && $type ne $2) {
# type doesn't match
return undef;
while (my $line = <$fd>) {
chomp $line;
- if ($line =~ m!^([0-9a-fA-F]{40})\srefs/($type.*)$!) {
+ if ($line =~ m!^($oid_regex)\srefs/($type.*)$!) {
if (defined $refs{$1}) {
push @{$refs{$1}}, $2;
} else {
$tag{'id'} = $tag_id;
while (my $line = <$fd>) {
chomp $line;
- if ($line =~ m/^object ([0-9a-fA-F]{40})$/) {
+ if ($line =~ m/^object ($oid_regex)$/) {
$tag{'object'} = $1;
} elsif ($line =~ m/^type (.+)$/) {
$tag{'type'} = $1;
}
my $header = shift @commit_lines;
- if ($header !~ m/^[0-9a-fA-F]{40}/) {
+ if ($header !~ m/^$oid_regex/) {
return;
}
($co{'id'}, my @parents) = split ' ', $header;
while (my $line = shift @commit_lines) {
last if $line eq "\n";
- if ($line =~ m/^tree ([0-9a-fA-F]{40})$/) {
+ if ($line =~ m/^tree ($oid_regex)$/) {
$co{'tree'} = $1;
- } elsif ((!defined $withparents) && ($line =~ m/^parent ([0-9a-fA-F]{40})$/)) {
+ } elsif ((!defined $withparents) && ($line =~ m/^parent ($oid_regex)$/)) {
push @parents, $1;
} elsif ($line =~ m/^author (.*) ([0-9]+) (.*)$/) {
$co{'author'} = to_utf8($1);
# ':100644 100644 03b218260e99b78c6df0ed378e59ed9205ccc96d 3b93d5e7cc7f7dd4ebed13a5cc1a4ad976fc94d8 M ls-files.c'
# ':100644 100644 7f9281985086971d3877aca27704f2aaf9c448ce bc190ebc71bbd923f2b728e505408f5e54bd073a M rev-tree.c'
- if ($line =~ m/^:([0-7]{6}) ([0-7]{6}) ([0-9a-fA-F]{40}) ([0-9a-fA-F]{40}) (.)([0-9]{0,3})\t(.*)$/) {
+ if ($line =~ m/^:([0-7]{6}) ([0-7]{6}) ($oid_regex) ($oid_regex) (.)([0-9]{0,3})\t(.*)$/) {
$res{'from_mode'} = $1;
$res{'to_mode'} = $2;
$res{'from_id'} = $3;
}
# '::100755 100755 100755 60e79ca1b01bc8b057abe17ddab484699a7f5fdb 94067cc5f73388f33722d52ae02f44692bc07490 94067cc5f73388f33722d52ae02f44692bc07490 MR git-gui/git-gui.sh'
# combined diff (for merge commit)
- elsif ($line =~ s/^(::+)((?:[0-7]{6} )+)((?:[0-9a-fA-F]{40} )+)([a-zA-Z]+)\t(.*)$//) {
+ elsif ($line =~ s/^(::+)((?:[0-7]{6} )+)((?:$oid_regex )+)([a-zA-Z]+)\t(.*)$//) {
$res{'nparents'} = length($1);
$res{'from_mode'} = [ split(' ', $2) ];
$res{'to_mode'} = pop @{$res{'from_mode'}};
$res{'to_file'} = unquote($5);
}
# 'c512b523472485aef4fff9e57b229d9d243c967f'
- elsif ($line =~ m/^([0-9a-fA-F]{40})$/) {
+ elsif ($line =~ m/^($oid_regex)$/) {
$res{'commit'} = $1;
}
if ($opts{'-l'}) {
#'100644 blob 0fa3f3a66fb6a137f6ec2c19351ed4d807070ffa 16717 panic.c'
- $line =~ m/^([0-9]+) (.+) ([0-9a-fA-F]{40}) +(-|[0-9]+)\t(.+)$/s;
+ $line =~ m/^([0-9]+) (.+) ($oid_regex) +(-|[0-9]+)\t(.+)$/s;
$res{'mode'} = $1;
$res{'type'} = $2;
}
} else {
#'100644 blob 0fa3f3a66fb6a137f6ec2c19351ed4d807070ffa panic.c'
- $line =~ m/^([0-9]+) (.+) ([0-9a-fA-F]{40})\t(.+)$/s;
+ $line =~ m/^([0-9]+) (.+) ($oid_regex)\t(.+)$/s;
$res{'mode'} = $1;
$res{'type'} = $2;
sub is_deleted {
my $diffinfo = shift;
- return $diffinfo->{'to_id'} eq ('0' x 40);
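+ # a "to" id of all zeros (40 hex digits for SHA-1, 64 for SHA-256) marks a deleted file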
+ return $diffinfo->{'to_id'} eq ('0' x 40) || $diffinfo->{'to_id'} eq ('0' x 64);
}
# does patch correspond to [previous] difftree raw line
-class => "list subject"},
chop_and_escape_str($co{'title'}, 50) . "<br/>");
} elsif (defined $set{'to_id'}) {
- next if ($set{'to_id'} =~ m/^0{40}$/);
+ next if is_deleted(\%set);
print $cgi->a({-href => href(action=>"blob", hash_base=>$co{'id'},
hash=>$set{'to_id'}, file_name=>$set{'to_file'}),
# the header: <SHA-1> <src lineno> <dst lineno> [<lines in group>]
# no <lines in group> for subsequent lines in group of lines
my ($full_rev, $orig_lineno, $lineno, $group_size) =
- ($line =~ /^([0-9a-f]{40}) (\d+) (\d+)(?: (\d+))?$/);
+ ($line =~ /^($oid_regex) (\d+) (\d+)(?: (\d+))?$/);
if (!exists $metainfo{$full_rev}) {
$metainfo{$full_rev} = { 'nprevious' => 0 };
}
}
# 'previous' <sha1 of parent commit> <filename at commit>
if (exists $meta->{'previous'} &&
- $meta->{'previous'} =~ /^([a-fA-F0-9]{40}) (.*)$/) {
+ $meta->{'previous'} =~ /^($oid_regex) (.*)$/) {
$meta->{'parent'} = $1;
$meta->{'file_parent'} = unquote($2);
}
} else {
die_error(400, "No file name defined");
}
- } elsif ($hash =~ m/^[0-9a-fA-F]{40}$/) {
+ } elsif ($hash =~ m/^$oid_regex$/) {
# blobs defined by non-textual hash id's can be cached
$expires = "+1d";
}
} else {
die_error(400, "No file name defined");
}
- } elsif ($hash =~ m/^[0-9a-fA-F]{40}$/) {
+ } elsif ($hash =~ m/^$oid_regex$/) {
# blobs defined by non-textual hash id's can be cached
$expires = "+1d";
}
# non-textual hash id's can be cached
my $expires;
- if ($hash =~ m/^[0-9a-fA-F]{40}$/) {
+ if ($hash =~ m/^$oid_regex$/) {
$expires = "+1d";
}
my $refs = git_get_references();
close $fd;
#'100644 blob 0fa3f3a66fb6a137f6ec2c19351ed4d807070ffa panic.c'
- unless ($line && $line =~ m/^([0-9]+) (.+) ([0-9a-fA-F]{40})\t/) {
+ unless ($line && $line =~ m/^([0-9]+) (.+) ($oid_regex)\t/) {
die_error(404, "File or directory for given base does not exist");
}
$type = $2;
or die_error(404, "Blob diff not found");
} elsif (defined $hash &&
- $hash =~ /[0-9a-fA-F]{40}/) {
+ $hash =~ $oid_regex) {
# try to find filename from $hash
# read filtered raw output
@difftree =
# ':100644 100644 03b21826... 3b93d5e7... M ls-files.c'
# $hash == to_id
- grep { /^:[0-7]{6} [0-7]{6} [0-9a-fA-F]{40} $hash/ }
+ grep { /^:[0-7]{6} [0-7]{6} $oid_regex $hash/ }
map { chomp; $_ } <$fd>;
close $fd
or die_error(404, "Reading git-diff-tree failed");
$hash ||= $diffinfo{'to_id'};
# non-textual hash id's can be cached
- if ($hash_base =~ m/^[0-9a-fA-F]{40}$/ &&
- $hash_parent_base =~ m/^[0-9a-fA-F]{40}$/) {
+ if ($hash_base =~ m/^$oid_regex$/ &&
+ $hash_parent_base =~ m/^$oid_regex$/) {
$expires = '+1d';
}
$hash_parent ne '-c' && $hash_parent ne '--cc') {
# commitdiff with two commits given
my $hash_parent_short = $hash_parent;
- if ($hash_parent =~ m/^[0-9a-fA-F]{40}$/) {
+ if ($hash_parent =~ m/^$oid_regex$/) {
$hash_parent_short = substr($hash_parent, 0, 7);
}
$formats_nav .=
# non-textual hash id's can be cached
my $expires;
- if ($hash =~ m/^[0-9a-fA-F]{40}$/) {
+ if ($hash =~ m/^$oid_regex$/) {
$expires = "+1d";
}
void *output_priv;
};
-extern void init_grep_defaults(struct repository *);
-extern int grep_config(const char *var, const char *value, void *);
-extern void grep_init(struct grep_opt *, struct repository *repo, const char *prefix);
+void init_grep_defaults(struct repository *);
+int grep_config(const char *var, const char *value, void *);
+void grep_init(struct grep_opt *, struct repository *repo, const char *prefix);
void grep_commit_pattern_type(enum grep_pattern_type, struct grep_opt *opt);
-extern void append_grep_pat(struct grep_opt *opt, const char *pat, size_t patlen, const char *origin, int no, enum grep_pat_token t);
-extern void append_grep_pattern(struct grep_opt *opt, const char *pat, const char *origin, int no, enum grep_pat_token t);
-extern void append_header_grep_pattern(struct grep_opt *, enum grep_header_field, const char *);
-extern void compile_grep_patterns(struct grep_opt *opt);
-extern void free_grep_patterns(struct grep_opt *opt);
-extern int grep_buffer(struct grep_opt *opt, char *buf, unsigned long size);
+void append_grep_pat(struct grep_opt *opt, const char *pat, size_t patlen, const char *origin, int no, enum grep_pat_token t);
+void append_grep_pattern(struct grep_opt *opt, const char *pat, const char *origin, int no, enum grep_pat_token t);
+void append_header_grep_pattern(struct grep_opt *, enum grep_header_field, const char *);
+void compile_grep_patterns(struct grep_opt *opt);
+void free_grep_patterns(struct grep_opt *opt);
+int grep_buffer(struct grep_opt *opt, char *buf, unsigned long size);
struct grep_source {
char *name;
int grep_source(struct grep_opt *opt, struct grep_source *gs);
-extern struct grep_opt *grep_opt_dup(const struct grep_opt *opt);
-extern int grep_threads_ok(const struct grep_opt *opt);
+struct grep_opt *grep_opt_dup(const struct grep_opt *opt);
+int grep_threads_ok(const struct grep_opt *opt);
/*
* Mutex used around access to the attributes machinery if
int hash_algo_by_name(const char *name);
/* Identical, except based on the format ID. */
int hash_algo_by_id(uint32_t format_id);
+/* Identical, except based on the length. */
+int hash_algo_by_length(int len);
/* Identical, except for a pointer to struct git_hash_algo. */
static inline int hash_algo_by_ptr(const struct git_hash_algo *p)
{
* `memihash_cont` is a variant of `memihash` that allows a computation to be
* continued with another chunk of data.
*/
-extern unsigned int strhash(const char *buf);
-extern unsigned int strihash(const char *buf);
-extern unsigned int memhash(const void *buf, size_t len);
-extern unsigned int memihash(const void *buf, size_t len);
-extern unsigned int memihash_cont(unsigned int hash_seed, const void *buf, size_t len);
+unsigned int strhash(const char *buf);
+unsigned int strihash(const char *buf);
+unsigned int memhash(const void *buf, size_t len);
+unsigned int memihash(const void *buf, size_t len);
+unsigned int memihash_cont(unsigned int hash_seed, const void *buf, size_t len);
/*
* Converts a cryptographic hash (e.g. SHA-1) into an int-sized hash code
* parameter may be used to preallocate a sufficiently large table and thus
* prevent expensive resizing. If 0, the table is dynamically resized.
*/
-extern void hashmap_init(struct hashmap *map,
+void hashmap_init(struct hashmap *map,
hashmap_cmp_fn equals_function,
const void *equals_function_data,
size_t initial_size);
* If `free_entries` is true, each hashmap_entry in the map is freed as well
* using stdlibs free().
*/
-extern void hashmap_free(struct hashmap *map, int free_entries);
+void hashmap_free(struct hashmap *map, int free_entries);
/* hashmap_entry functions */
* If an entry with matching hash code is found, `key` and `keydata` are passed
* to `hashmap_cmp_fn` to decide whether the entry matches the key.
*/
-extern void *hashmap_get(const struct hashmap *map, const void *key,
+void *hashmap_get(const struct hashmap *map, const void *key,
const void *keydata);
/*
* `entry` is the hashmap_entry to start the search from, obtained via a previous
* call to `hashmap_get` or `hashmap_get_next`.
*/
-extern void *hashmap_get_next(const struct hashmap *map, const void *entry);
+void *hashmap_get_next(const struct hashmap *map, const void *entry);
/*
* Adds a hashmap entry. This allows to add duplicate entries (i.e.
* `map` is the hashmap structure.
* `entry` is the entry to add.
*/
-extern void hashmap_add(struct hashmap *map, void *entry);
+void hashmap_add(struct hashmap *map, void *entry);
/*
* Adds or replaces a hashmap entry. If the hashmap contains duplicate
* `entry` is the entry to add or replace.
* Returns the replaced entry, or NULL if not found (i.e. the entry was added).
*/
-extern void *hashmap_put(struct hashmap *map, void *entry);
+void *hashmap_put(struct hashmap *map, void *entry);
/*
* Removes a hashmap entry matching the specified key. If the hashmap contains
*
* Argument explanation is the same as in `hashmap_get`.
*/
-extern void *hashmap_remove(struct hashmap *map, const void *key,
+void *hashmap_remove(struct hashmap *map, const void *key,
const void *keydata);
/*
};
/* Initializes a `hashmap_iter` structure. */
-extern void hashmap_iter_init(struct hashmap *map, struct hashmap_iter *iter);
+void hashmap_iter_init(struct hashmap *map, struct hashmap_iter *iter);
/* Returns the next hashmap_entry, or NULL if there are no more entries. */
-extern void *hashmap_iter_next(struct hashmap_iter *iter);
+void *hashmap_iter_next(struct hashmap_iter *iter);
/* Initializes the iterator and returns the first entry, if any. */
static inline void *hashmap_iter_first(struct hashmap *map,
*
* Uses a hashmap to store the pool of interned strings.
*/
-extern const void *memintern(const void *data, size_t len);
+const void *memintern(const void *data, size_t len);
static inline const char *strintern(const char *string)
{
return memintern(string, strlen(string));
{
const char *cmd_list;
- /*
- * There's no actual repository setup at this point (and even
- * if there is, we don't really care; only global config
- * matters). If we accidentally set up a repository, it's ok
- * too since the caller (git --list-cmds=) should exit shortly
- * anyway.
- */
if (git_config_get_string_const("completion.commands", &cmd_list))
return;
const char *p = strchrnul(cmd_list, ' ');
strbuf_add(&sb, cmd_list, p - cmd_list);
- if (*cmd_list == '-')
- string_list_remove(list, cmd_list + 1, 0);
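+ /* a "-cmd" token removes cmd from the list; a bare token adds it */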
+ if (sb.buf[0] == '-')
+ string_list_remove(list, sb.buf + 1, 0);
else
string_list_insert(list, sb.buf);
strbuf_release(&sb);
putchar(c);
}
-extern void list_common_cmds_help(void);
-extern void list_all_cmds_help(void);
-extern void list_common_guides_help(void);
-extern void list_config_help(int for_human);
+void list_common_cmds_help(void);
+void list_all_cmds_help(void);
+void list_common_guides_help(void);
+void list_config_help(int for_human);
-extern void list_all_main_cmds(struct string_list *list);
-extern void list_all_other_cmds(struct string_list *list);
-extern void list_cmds_by_category(struct string_list *list,
- const char *category);
-extern void list_cmds_by_config(struct string_list *list);
-extern const char *help_unknown_cmd(const char *cmd);
-extern void load_command_list(const char *prefix,
- struct cmdnames *main_cmds,
- struct cmdnames *other_cmds);
-extern void add_cmdname(struct cmdnames *cmds, const char *name, int len);
+void list_all_main_cmds(struct string_list *list);
+void list_all_other_cmds(struct string_list *list);
+void list_cmds_by_category(struct string_list *list,
+ const char *category);
+void list_cmds_by_config(struct string_list *list);
+const char *help_unknown_cmd(const char *cmd);
+void load_command_list(const char *prefix,
+ struct cmdnames *main_cmds,
+ struct cmdnames *other_cmds);
+void add_cmdname(struct cmdnames *cmds, const char *name, int len);
/* Here we require that excludes is a sorted list. */
-extern void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes);
-extern int is_in_cmdlist(struct cmdnames *cmds, const char *name);
-extern void list_commands(unsigned int colopts, struct cmdnames *main_cmds, struct cmdnames *other_cmds);
+void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes);
+int is_in_cmdlist(struct cmdnames *cmds, const char *name);
+void list_commands(unsigned int colopts, struct cmdnames *main_cmds, struct cmdnames *other_cmds);
/*
* call this to die(), when it is suspected that the user mistyped a
* ref to the command, to give suggested "correct" refs.
*/
-extern void help_unknown_ref(const char *ref, const char *cmd, const char *error);
+void help_unknown_ref(const char *ref, const char *cmd, const char *error);
static inline void list_config_item(struct string_list *list,
const char *prefix,
{"GET", "/objects/info/http-alternates$", get_text_file},
{"GET", "/objects/info/packs$", get_info_packs},
{"GET", "/objects/[0-9a-f]{2}/[0-9a-f]{38}$", get_loose_object},
+ {"GET", "/objects/[0-9a-f]{2}/[0-9a-f]{62}$", get_loose_object},
{"GET", "/objects/pack/pack-[0-9a-f]{40}\\.pack$", get_pack_file},
+ {"GET", "/objects/pack/pack-[0-9a-f]{64}\\.pack$", get_pack_file},
{"GET", "/objects/pack/pack-[0-9a-f]{40}\\.idx$", get_idx_file},
+ {"GET", "/objects/pack/pack-[0-9a-f]{64}\\.idx$", get_idx_file},
{"POST", "/git-upload-pack$", service_rpc},
{"POST", "/git-receive-pack$", service_rpc}
char *url;
char *owner;
char *token;
- char tmpfile_suffix[41];
+ char tmpfile_suffix[GIT_MAX_HEXSZ + 1];
time_t start_time;
long timeout;
int refreshing;
return;
}
- fprintf(stderr, "Fetching pack %s\n", sha1_to_hex(target->sha1));
+ fprintf(stderr, "Fetching pack %s\n",
+ hash_to_hex(target->hash));
fprintf(stderr, " which contains %s\n", oid_to_hex(&request->obj->oid));
preq = new_http_pack_request(target, repo->url);
request->dest = strbuf_detach(&buf, NULL);
append_remote_object_url(&buf, repo->url, hex, 0);
- strbuf_add(&buf, request->lock->tmpfile_suffix, 41);
+ strbuf_add(&buf, request->lock->tmpfile_suffix, the_hash_algo->hexsz + 1);
request->url = strbuf_detach(&buf, NULL);
slot = get_active_slot();
static void handle_new_lock_ctx(struct xml_ctx *ctx, int tag_closed)
{
struct remote_lock *lock = (struct remote_lock *)ctx->userData;
- git_SHA_CTX sha_ctx;
- unsigned char lock_token_sha1[20];
+ git_hash_ctx hash_ctx;
+ unsigned char lock_token_hash[GIT_MAX_RAWSZ];
if (tag_closed && ctx->cdata) {
if (!strcmp(ctx->name, DAV_ACTIVELOCK_OWNER)) {
} else if (!strcmp(ctx->name, DAV_ACTIVELOCK_TOKEN)) {
lock->token = xstrdup(ctx->cdata);
- git_SHA1_Init(&sha_ctx);
- git_SHA1_Update(&sha_ctx, lock->token, strlen(lock->token));
- git_SHA1_Final(lock_token_sha1, &sha_ctx);
+ the_hash_algo->init_fn(&hash_ctx);
+ the_hash_algo->update_fn(&hash_ctx, lock->token, strlen(lock->token));
+ the_hash_algo->final_fn(lock_token_hash, &hash_ctx);
lock->tmpfile_suffix[0] = '_';
- memcpy(lock->tmpfile_suffix + 1, sha1_to_hex(lock_token_sha1), 40);
+ memcpy(lock->tmpfile_suffix + 1, hash_to_hex(lock_token_hash), the_hash_algo->hexsz);
}
}
}
-/* extract hex from sharded "xx/x{38}" filename */
+/* extract hex from sharded "xx/x{38}" or "xx/x{62}" filename */
static int get_oid_hex_from_objpath(const char *path, struct object_id *oid)
{
- if (strlen(path) != GIT_SHA1_HEXSZ + 1)
+ if (strlen(path) != the_hash_algo->hexsz + 1)
return -1;
if (hex_to_bytes(oid->hash, path, 1))
path += 2;
path++; /* skip '/' */
- return hex_to_bytes(oid->hash + 1, path, GIT_SHA1_RAWSZ - 1);
+ return hex_to_bytes(oid->hash + 1, path, the_hash_algo->rawsz - 1);
}
static void process_ls_object(struct remote_ls_ctx *ls)
return count;
}
-static int update_remote(unsigned char *sha1, struct remote_lock *lock)
+static int update_remote(const struct object_id *oid, struct remote_lock *lock)
{
struct active_request_slot *slot;
struct slot_results results;
dav_headers = get_dav_token_headers(lock, DAV_HEADER_IF);
- strbuf_addf(&out_buffer.buf, "%s\n", sha1_to_hex(sha1));
+ strbuf_addf(&out_buffer.buf, "%s\n", oid_to_hex(oid));
slot = get_active_slot();
slot->results = &results;
run_request_queue();
/* Update the remote branch if all went well */
- if (aborted || !update_remote(ref->new_oid.hash, ref_lock))
+ if (aborted || !update_remote(&ref->new_oid, ref_lock))
rc = 1;
if (!rc)
process_http_object_request(obj_req->req);
obj_req->state = COMPLETE;
+ normalize_curl_result(&obj_req->req->curl_result,
+ obj_req->req->http_code,
+ obj_req->req->errorstr,
+ sizeof(obj_req->req->errorstr));
+
/* Use alternates if necessary */
if (missing_target(obj_req->req)) {
fetch_alternates(walker, alt->base);
char *data;
int i = 0;
+ normalize_curl_result(&slot->curl_result, slot->http_code,
+ curl_errorstr, sizeof(curl_errorstr));
+
if (alt_req->http_specific) {
if (slot->curl_result != CURLE_OK ||
!alt_req->buffer->len) {
if (walker->get_verbosely) {
fprintf(stderr, "Getting pack %s\n",
- sha1_to_hex(target->sha1));
+ hash_to_hex(target->hash));
fprintf(stderr, " which contains %s\n",
- sha1_to_hex(sha1));
+ hash_to_hex(sha1));
}
preq = new_http_pack_request(target, repo->base);
release_object_request(obj_req);
}
-static int fetch_object(struct walker *walker, unsigned char *sha1)
+static int fetch_object(struct walker *walker, unsigned char *hash)
{
- char *hex = sha1_to_hex(sha1);
+ char *hex = hash_to_hex(hash);
int ret = 0;
struct object_request *obj_req = NULL;
struct http_object_request *req;
list_for_each(pos, head) {
obj_req = list_entry(pos, struct object_request, node);
- if (hasheq(obj_req->oid.hash, sha1))
+ if (hasheq(obj_req->oid.hash, hash))
break;
}
if (obj_req == NULL)
req->localfile = -1;
}
- /*
- * we turned off CURLOPT_FAILONERROR to avoid losing a
- * persistent connection and got CURLE_OK.
- */
- if (req->http_code >= 300 && req->curl_result == CURLE_OK &&
- (starts_with(req->url, "http://") ||
- starts_with(req->url, "https://"))) {
- req->curl_result = CURLE_HTTP_RETURNED_ERROR;
- xsnprintf(req->errorstr, sizeof(req->errorstr),
- "HTTP request failed");
- }
+ normalize_curl_result(&req->curl_result, req->http_code,
+ req->errorstr, sizeof(req->errorstr));
if (obj_req->state == ABORTED) {
ret = error("Request for %s aborted", hex);
return ret;
}
-static int fetch(struct walker *walker, unsigned char *sha1)
+static int fetch(struct walker *walker, unsigned char *hash)
{
struct walker_data *data = walker->data;
struct alt_base *altbase = data->alt;
- if (!fetch_object(walker, sha1))
+ if (!fetch_object(walker, hash))
return 0;
while (altbase) {
- if (!http_fetch_pack(walker, altbase, sha1))
+ if (!http_fetch_pack(walker, altbase, hash))
return 0;
fetch_alternates(walker, data->alt->base);
altbase = altbase->next;
}
- return error("Unable to find %s under %s", sha1_to_hex(sha1),
+ return error("Unable to find %s under %s", hash_to_hex(hash),
data->alt->base);
}
memcpy(ptr, buffer->buf.buf + buffer->posn, size);
buffer->posn += size;
- return size;
+ return size / eltsize;
}
#ifndef NO_CURL_IOCTL
struct strbuf *buffer = buffer_;
strbuf_add(buffer, ptr, size);
- return size;
+ return nmemb;
}
size_t fwrite_null(char *ptr, size_t eltsize, size_t nmemb, void *strbuf)
{
- return eltsize * nmemb;
+ return nmemb;
}
static void closedown_active_slot(struct active_request_slot *slot)
return strbuf_detach(&buf, NULL);
}
-static int handle_curl_result(struct slot_results *results)
+void normalize_curl_result(CURLcode *result, long http_code,
+ char *errorstr, size_t errorlen)
{
/*
* If we see a failing http code with CURLE_OK, we have turned off
* Likewise, if we see a redirect (30x code), that means we turned off
* redirect-following, and we should treat the result as an error.
*/
- if (results->curl_result == CURLE_OK &&
- results->http_code >= 300) {
- results->curl_result = CURLE_HTTP_RETURNED_ERROR;
+ if (*result == CURLE_OK && http_code >= 300) {
+ *result = CURLE_HTTP_RETURNED_ERROR;
/*
* Normally curl will already have put the "reason phrase"
* from the server into curl_errorstr; unfortunately without
* FAILONERROR it is lost, so we can give only the numeric
* status code.
*/
- xsnprintf(curl_errorstr, sizeof(curl_errorstr),
+ xsnprintf(errorstr, errorlen,
"The requested URL returned error: %ld",
- results->http_code);
+ http_code);
}
+}
+
+static int handle_curl_result(struct slot_results *results)
+{
+ normalize_curl_result(&results->curl_result, results->http_code,
+ curl_errorstr, sizeof(curl_errorstr));
if (results->curl_result == CURLE_OK) {
credential_approve(&http_auth);
url = quote_ref_url(base, ref->name);
if (http_get_strbuf(url, &buffer, &options) == HTTP_OK) {
strbuf_rtrim(&buffer);
- if (buffer.len == 40)
+ if (buffer.len == the_hash_algo->hexsz)
ret = get_oid_hex(buffer.buf, &ref->old_oid);
else if (starts_with(buffer.buf, "ref: ")) {
ref->symref = xstrdup(buffer.buf + 5);
}
/* Helpers for fetching packs */
-static char *fetch_pack_index(unsigned char *sha1, const char *base_url)
+static char *fetch_pack_index(unsigned char *hash, const char *base_url)
{
char *url, *tmp;
struct strbuf buf = STRBUF_INIT;
if (http_is_verbose)
- fprintf(stderr, "Getting index for pack %s\n", sha1_to_hex(sha1));
+ fprintf(stderr, "Getting index for pack %s\n", hash_to_hex(hash));
end_url_with_slash(&buf, base_url);
- strbuf_addf(&buf, "objects/pack/pack-%s.idx", sha1_to_hex(sha1));
+ strbuf_addf(&buf, "objects/pack/pack-%s.idx", hash_to_hex(hash));
url = strbuf_detach(&buf, NULL);
- strbuf_addf(&buf, "%s.temp", sha1_pack_index_name(sha1));
+ strbuf_addf(&buf, "%s.temp", sha1_pack_index_name(hash));
tmp = strbuf_detach(&buf, NULL);
if (http_get_file(url, tmp, NULL) != HTTP_OK) {
int http_get_info_packs(const char *base_url, struct packed_git **packs_head)
{
struct http_get_options options = {0};
- int ret = 0, i = 0;
- char *url, *data;
+ int ret = 0;
+ char *url;
+ const char *data;
struct strbuf buf = STRBUF_INIT;
- unsigned char hash[GIT_MAX_RAWSZ];
- const unsigned hexsz = the_hash_algo->hexsz;
+ struct object_id oid;
end_url_with_slash(&buf, base_url);
strbuf_addstr(&buf, "objects/info/packs");
goto cleanup;
data = buf.buf;
- while (i < buf.len) {
- switch (data[i]) {
- case 'P':
- i++;
- if (i + hexsz + 12 <= buf.len &&
- starts_with(data + i, " pack-") &&
- starts_with(data + i + hexsz + 6, ".pack\n")) {
- get_sha1_hex(data + i + 6, hash);
- fetch_and_setup_pack_index(packs_head, hash,
- base_url);
- i += hexsz + 11;
- break;
- }
- default:
- while (i < buf.len && data[i] != '\n')
- i++;
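+ /* each entry in objects/info/packs looks like "P pack-<hash>.pack" */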
+ while (*data) {
+ if (skip_prefix(data, "P pack-", &data) &&
+ !parse_oid_hex(data, &oid, &data) &&
+ skip_prefix(data, ".pack", &data) &&
+ (*data == '\n' || *data == '\0')) {
+ fetch_and_setup_pack_index(packs_head, oid.hash, base_url);
+ } else {
+ data = strchrnul(data, '\n');
}
- i++;
+ if (*data)
+ data++; /* skip past newline */
}
cleanup:
return -1;
}
- unlink(sha1_pack_index_name(p->sha1));
+ unlink(sha1_pack_index_name(p->hash));
- if (finalize_object_file(preq->tmpfile.buf, sha1_pack_name(p->sha1))
- || finalize_object_file(tmp_idx, sha1_pack_index_name(p->sha1))) {
+ if (finalize_object_file(preq->tmpfile.buf, sha1_pack_name(p->hash))
+ || finalize_object_file(tmp_idx, sha1_pack_index_name(p->hash))) {
free(tmp_idx);
return -1;
}
end_url_with_slash(&buf, base_url);
strbuf_addf(&buf, "objects/pack/pack-%s.pack",
- sha1_to_hex(target->sha1));
+ hash_to_hex(target->hash));
preq->url = strbuf_detach(&buf, NULL);
- strbuf_addf(&preq->tmpfile, "%s.temp", sha1_pack_name(target->sha1));
+ strbuf_addf(&preq->tmpfile, "%s.temp", sha1_pack_name(target->hash));
preq->packfile = fopen(preq->tmpfile.buf, "a");
if (!preq->packfile) {
error("Unable to open local file %s for pack",
if (http_is_verbose)
fprintf(stderr,
"Resuming fetch of pack %s at byte %"PRIuMAX"\n",
- sha1_to_hex(target->sha1), (uintmax_t)prev_posn);
+ hash_to_hex(target->hash),
+ (uintmax_t)prev_posn);
http_opt_request_remainder(preq->slot->curl, prev_posn);
}
BUG("curl_easy_getinfo for HTTP code failed: %s",
curl_easy_strerror(c));
if (slot->http_code >= 300)
- return size;
+ return nmemb;
}
do {
ssize_t retval = xwrite(freq->localfile,
(char *) ptr + posn, size - posn);
if (retval < 0)
- return posn;
+ return posn / eltsize;
posn += retval;
} while (posn < size);
freq->stream.next_out = expn;
freq->stream.avail_out = sizeof(expn);
freq->zret = git_inflate(&freq->stream, Z_SYNC_FLUSH);
- git_SHA1_Update(&freq->c, expn,
- sizeof(expn) - freq->stream.avail_out);
+ the_hash_algo->update_fn(&freq->c, expn,
+ sizeof(expn) - freq->stream.avail_out);
} while (freq->stream.avail_in && freq->zret == Z_OK);
- return size;
+ return nmemb;
}
struct http_object_request *new_http_object_request(const char *base_url,
git_inflate_init(&freq->stream);
- git_SHA1_Init(&freq->c);
+ the_hash_algo->init_fn(&freq->c);
freq->url = get_remote_object_url(base_url, hex, 0);
if (prev_read == -1) {
memset(&freq->stream, 0, sizeof(freq->stream));
git_inflate_init(&freq->stream);
- git_SHA1_Init(&freq->c);
+ the_hash_algo->init_fn(&freq->c);
if (prev_posn>0) {
prev_posn = 0;
lseek(freq->localfile, 0, SEEK_SET);
}
git_inflate_end(&freq->stream);
- git_SHA1_Final(freq->real_oid.hash, &freq->c);
+ the_hash_algo->final_fn(freq->real_oid.hash, &freq->c);
if (freq->zret != Z_STREAM_END) {
unlink_or_warn(freq->tmpfile.buf);
return -1;
};
/* Curl request read/write callbacks */
-extern size_t fread_buffer(char *ptr, size_t eltsize, size_t nmemb, void *strbuf);
-extern size_t fwrite_buffer(char *ptr, size_t eltsize, size_t nmemb, void *strbuf);
-extern size_t fwrite_null(char *ptr, size_t eltsize, size_t nmemb, void *strbuf);
+size_t fread_buffer(char *ptr, size_t eltsize, size_t nmemb, void *strbuf);
+size_t fwrite_buffer(char *ptr, size_t eltsize, size_t nmemb, void *strbuf);
+size_t fwrite_null(char *ptr, size_t eltsize, size_t nmemb, void *strbuf);
#ifndef NO_CURL_IOCTL
-extern curlioerr ioctl_buffer(CURL *handle, int cmd, void *clientp);
+curlioerr ioctl_buffer(CURL *handle, int cmd, void *clientp);
#endif
/* Slot lifecycle functions */
-extern struct active_request_slot *get_active_slot(void);
-extern int start_active_slot(struct active_request_slot *slot);
-extern void run_active_slot(struct active_request_slot *slot);
-extern void finish_all_active_slots(void);
+struct active_request_slot *get_active_slot(void);
+int start_active_slot(struct active_request_slot *slot);
+void run_active_slot(struct active_request_slot *slot);
+void finish_all_active_slots(void);
/*
* This will run one slot to completion in a blocking manner, similar to how
struct slot_results *results);
#ifdef USE_CURL_MULTI
-extern void fill_active_slots(void);
-extern void add_fill_function(void *data, int (*fill)(void *));
-extern void step_active_slots(void);
+void fill_active_slots(void);
+void add_fill_function(void *data, int (*fill)(void *));
+void step_active_slots(void);
#endif
-extern void http_init(struct remote *remote, const char *url,
- int proactive_auth);
-extern void http_cleanup(void);
-extern struct curl_slist *http_copy_default_headers(void);
+void http_init(struct remote *remote, const char *url,
+ int proactive_auth);
+void http_cleanup(void);
+struct curl_slist *http_copy_default_headers(void);
extern long int git_curl_ipresolve;
extern int active_requests;
#define missing_target(a) missing__target((a)->http_code, (a)->curl_result)
+/*
+ * Normalize curl results to handle CURL_FAILONERROR (or lack thereof). Failing
+ * http codes have their "result" converted to CURLE_HTTP_RETURNED_ERROR, and
+ * an appropriate string placed in the errorstr buffer (pass curl_errorstr if
+ * you don't have a custom buffer).
+ */
+void normalize_curl_result(CURLcode *result, long http_code, char *errorstr,
+ size_t errorlen);
+
/* Helpers for modifying and creating URLs */
-extern void append_remote_object_url(struct strbuf *buf, const char *url,
- const char *hex,
- int only_two_digit_prefix);
-extern char *get_remote_object_url(const char *url, const char *hex,
- int only_two_digit_prefix);
+void append_remote_object_url(struct strbuf *buf, const char *url,
+ const char *hex,
+ int only_two_digit_prefix);
+char *get_remote_object_url(const char *url, const char *hex,
+ int only_two_digit_prefix);
/* Options for http_get_*() */
struct http_get_options {
*/
int http_get_strbuf(const char *url, struct strbuf *result, struct http_get_options *options);
-extern int http_fetch_ref(const char *base, struct ref *ref);
+int http_fetch_ref(const char *base, struct ref *ref);
/* Helpers for fetching packs */
-extern int http_get_info_packs(const char *base_url,
- struct packed_git **packs_head);
+int http_get_info_packs(const char *base_url,
+ struct packed_git **packs_head);
struct http_pack_request {
char *url;
struct active_request_slot *slot;
};
-extern struct http_pack_request *new_http_pack_request(
+struct http_pack_request *new_http_pack_request(
struct packed_git *target, const char *base_url);
-extern int finish_http_pack_request(struct http_pack_request *preq);
-extern void release_http_pack_request(struct http_pack_request *preq);
+int finish_http_pack_request(struct http_pack_request *preq);
+void release_http_pack_request(struct http_pack_request *preq);
/* Helpers for fetching object */
struct http_object_request {
long http_code;
struct object_id oid;
struct object_id real_oid;
- git_SHA_CTX c;
+ git_hash_ctx c;
git_zstream stream;
int zret;
int rename;
struct active_request_slot *slot;
};
-extern struct http_object_request *new_http_object_request(
+struct http_object_request *new_http_object_request(
const char *base_url, const struct object_id *oid);
-extern void process_http_object_request(struct http_object_request *freq);
-extern int finish_http_object_request(struct http_object_request *freq);
-extern void abort_http_object_request(struct http_object_request *freq);
-extern void release_http_object_request(struct http_object_request *freq);
+void process_http_object_request(struct http_object_request *freq);
+int finish_http_object_request(struct http_object_request *freq);
+void abort_http_object_request(struct http_object_request *freq);
+void release_http_object_request(struct http_object_request *freq);
/* setup routine for curl_easy_setopt CURLOPT_DEBUGFUNCTION */
void setup_curl_trace(CURL *handle);
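A minimal sketch of how a caller might use the normalize_curl_result() helper declared above (illustrative only, not part of the patch; it assumes the usual http.h declarations, i.e. that "results" is a struct slot_results filled in by run_active_slot() and that curl_errorstr is the shared error buffer):

	/* Illustrative only: collapse a finished request into one error code. */
	static int check_request(struct slot_results *results)
	{
		CURLcode res = results->curl_result;

		normalize_curl_result(&res, results->http_code, curl_errorstr,
				      sizeof(curl_errorstr));
		if (res != CURLE_OK)
			return error("HTTP request failed: %s", curl_errorstr);
		return 0;
	}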
static struct strbuf git_default_name = STRBUF_INIT;
static struct strbuf git_default_email = STRBUF_INIT;
static struct strbuf git_default_date = STRBUF_INIT;
+static struct strbuf git_author_name = STRBUF_INIT;
+static struct strbuf git_author_email = STRBUF_INIT;
+static struct strbuf git_committer_name = STRBUF_INIT;
+static struct strbuf git_committer_email = STRBUF_INIT;
static int default_email_is_bogus;
static int default_name_is_bogus;
"\n");
const char *fmt_ident(const char *name, const char *email,
- const char *date_str, int flag)
+ enum want_ident whose_ident, const char *date_str, int flag)
{
static struct strbuf ident = STRBUF_INIT;
int strict = (flag & IDENT_STRICT);
int want_date = !(flag & IDENT_NO_DATE);
int want_name = !(flag & IDENT_NO_NAME);
+ if (!email) {
+ if (whose_ident == WANT_AUTHOR_IDENT && git_author_email.len)
+ email = git_author_email.buf;
+ else if (whose_ident == WANT_COMMITTER_IDENT && git_committer_email.len)
+ email = git_committer_email.buf;
+ }
if (!email) {
if (strict && ident_use_config_only
&& !(ident_config_given & IDENT_MAIL_GIVEN)) {
if (want_name) {
int using_default = 0;
+ if (!name) {
+ if (whose_ident == WANT_AUTHOR_IDENT && git_author_name.len)
+ name = git_author_name.buf;
+ else if (whose_ident == WANT_COMMITTER_IDENT &&
+ git_committer_name.len)
+ name = git_committer_name.buf;
+ }
if (!name) {
if (strict && ident_use_config_only
&& !(ident_config_given & IDENT_NAME_GIVEN)) {
return ident.buf;
}
-const char *fmt_name(const char *name, const char *email)
+const char *fmt_name(enum want_ident whose_ident)
{
- return fmt_ident(name, email, NULL, IDENT_STRICT | IDENT_NO_DATE);
+ char *name = NULL;
+ char *email = NULL;
+
+ switch (whose_ident) {
+ case WANT_BLANK_IDENT:
+ break;
+ case WANT_AUTHOR_IDENT:
+ name = getenv("GIT_AUTHOR_NAME");
+ email = getenv("GIT_AUTHOR_EMAIL");
+ break;
+ case WANT_COMMITTER_IDENT:
+ name = getenv("GIT_COMMITTER_NAME");
+ email = getenv("GIT_COMMITTER_EMAIL");
+ break;
+ }
+ return fmt_ident(name, email, whose_ident, NULL,
+ IDENT_STRICT | IDENT_NO_DATE);
}
const char *git_author_info(int flag)
author_ident_explicitly_given |= IDENT_MAIL_GIVEN;
return fmt_ident(getenv("GIT_AUTHOR_NAME"),
getenv("GIT_AUTHOR_EMAIL"),
+ WANT_AUTHOR_IDENT,
getenv("GIT_AUTHOR_DATE"),
flag);
}
committer_ident_explicitly_given |= IDENT_MAIL_GIVEN;
return fmt_ident(getenv("GIT_COMMITTER_NAME"),
getenv("GIT_COMMITTER_EMAIL"),
+ WANT_COMMITTER_IDENT,
getenv("GIT_COMMITTER_DATE"),
flag);
}
return ident_is_sufficient(author_ident_explicitly_given);
}
-int git_ident_config(const char *var, const char *value, void *data)
+static int set_ident(const char *var, const char *value)
{
- if (!strcmp(var, "user.useconfigonly")) {
- ident_use_config_only = git_config_bool(var, value);
+ if (!strcmp(var, "author.name")) {
+ if (!value)
+ return config_error_nonbool(var);
+ strbuf_reset(&git_author_name);
+ strbuf_addstr(&git_author_name, value);
+ author_ident_explicitly_given |= IDENT_NAME_GIVEN;
+ ident_config_given |= IDENT_NAME_GIVEN;
+ return 0;
+ }
+
+ if (!strcmp(var, "author.email")) {
+ if (!value)
+ return config_error_nonbool(var);
+ strbuf_reset(&git_author_email);
+ strbuf_addstr(&git_author_email, value);
+ author_ident_explicitly_given |= IDENT_MAIL_GIVEN;
+ ident_config_given |= IDENT_MAIL_GIVEN;
+ return 0;
+ }
+
+ if (!strcmp(var, "committer.name")) {
+ if (!value)
+ return config_error_nonbool(var);
+ strbuf_reset(&git_committer_name);
+ strbuf_addstr(&git_committer_name, value);
+ committer_ident_explicitly_given |= IDENT_NAME_GIVEN;
+ ident_config_given |= IDENT_NAME_GIVEN;
+ return 0;
+ }
+
+ if (!strcmp(var, "committer.email")) {
+ if (!value)
+ return config_error_nonbool(var);
+ strbuf_reset(&git_committer_email);
+ strbuf_addstr(&git_committer_email, value);
+ committer_ident_explicitly_given |= IDENT_MAIL_GIVEN;
+ ident_config_given |= IDENT_MAIL_GIVEN;
return 0;
}
return 0;
}
+int git_ident_config(const char *var, const char *value, void *data)
+{
+ if (!strcmp(var, "user.useconfigonly")) {
+ ident_use_config_only = git_config_bool(var, value);
+ return 0;
+ }
+
+ return set_ident(var, value);
+}
+
+static void set_env_if(const char *key, const char *value, int *given, int bit)
+{
+ if ((*given & bit) || getenv(key))
+ return; /* nothing to do */
+ setenv(key, value, 0);
+ *given |= bit;
+}
+
+void prepare_fallback_ident(const char *name, const char *email)
+{
+ set_env_if("GIT_AUTHOR_NAME", name,
+ &author_ident_explicitly_given, IDENT_NAME_GIVEN);
+ set_env_if("GIT_AUTHOR_EMAIL", email,
+ &author_ident_explicitly_given, IDENT_MAIL_GIVEN);
+ set_env_if("GIT_COMMITTER_NAME", name,
+ &committer_ident_explicitly_given, IDENT_NAME_GIVEN);
+ set_env_if("GIT_COMMITTER_EMAIL", email,
+ &committer_ident_explicitly_given, IDENT_MAIL_GIVEN);
+}
+
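To illustrate the new identity plumbing above (a sketch, not part of the patch): the author.* and committer.* keys handled by set_ident() let fmt_name() resolve a role-specific identity without the caller passing a name/email pair, and prepare_fallback_ident() installs a last-resort identity for commands that must have one. Assuming the usual git headers are in scope:

	/* Illustrative only: install a fallback identity, then format it. */
	static void report_committer(void)
	{
		prepare_fallback_ident("A U Thor", "author@example.com");
		fprintf(stderr, "committing as %s\n",
			fmt_name(WANT_COMMITTER_IDENT));
	}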
static int buf_cmp(const char *a_begin, const char *a_end,
const char *b_begin, const char *b_end)
{
khval_t *vals; \
} kh_##name##_t;
-#define __KHASH_PROTOTYPES(name, khkey_t, khval_t) \
- extern kh_##name##_t *kh_init_##name(void); \
- extern void kh_destroy_##name(kh_##name##_t *h); \
- extern void kh_clear_##name(kh_##name##_t *h); \
- extern khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key); \
- extern int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets); \
- extern khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret); \
- extern void kh_del_##name(kh_##name##_t *h, khint_t x);
+#define __KHASH_PROTOTYPES(name, khkey_t, khval_t) \
+ kh_##name##_t *kh_init_##name(void); \
+ void kh_destroy_##name(kh_##name##_t *h); \
+ void kh_clear_##name(kh_##name##_t *h); \
+ khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key); \
+ int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets); \
+ khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret); \
+ void kh_del_##name(kh_##name##_t *h, khint_t x);
#define __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
SCOPE kh_##name##_t *kh_init_##name(void) { \
KHASH_INIT(sha1_pos, const unsigned char *, int, 1, sha1hash, __kh_oid_cmp)
typedef kh_sha1_pos_t khash_sha1_pos;
+static inline unsigned int oid_hash(struct object_id oid)
+{
+ return sha1hash(oid.hash);
+}
+
+static inline int oid_equal(struct object_id a, struct object_id b)
+{
+ return oideq(&a, &b);
+}
+
+KHASH_INIT(oid, struct object_id, int, 0, oid_hash, oid_equal)
+
+KHASH_INIT(oid_map, struct object_id, void *, 1, oid_hash, oid_equal)
+typedef kh_oid_map_t khash_oid_map;
+
+KHASH_INIT(oid_pos, struct object_id, int, 1, oid_hash, oid_equal)
+typedef kh_oid_pos_t khash_oid_pos;
+
#endif /* __AC_KHASH_H */
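The new oid-keyed tables follow the usual khash calling convention; a small illustrative sketch (not part of the patch, assuming the usual git headers are already in scope):

	/* Illustrative only: map object ids to arbitrary payloads. */
	static void store_payload(kh_oid_map_t *map, struct object_id oid,
				  void *payload)
	{
		int added;
		khint_t pos = kh_put_oid_map(map, oid, &added);
		kh_value(map, pos) = payload;
	}

	static void *lookup_payload(kh_oid_map_t *map, struct object_id oid)
	{
		khint_t pos = kh_get_oid_map(map, oid);
		return pos == kh_end(map) ? NULL : kh_value(map, pos);
	}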
if enough memory cannot be obtained. The argument if non-NULL
specifies a table of character translations to be applied to all
pattern and search text. */
-extern kwset_t kwsalloc(unsigned char const *);
+kwset_t kwsalloc(unsigned char const *);
/* Incrementally extend the keyword set to include the given string.
Return NULL for success, or an error message. Remember an index
number for each keyword included in the set. */
-extern const char *kwsincr(kwset_t, char const *, size_t);
+const char *kwsincr(kwset_t, char const *, size_t);
/* When the keyword set has been completely built, prepare it for
use. Return NULL for success, or an error message. */
-extern const char *kwsprep(kwset_t);
+const char *kwsprep(kwset_t);
/* Search through the given buffer for a member of the keyword set.
Return a pointer to the leftmost longest match found, or NULL if
the matching substring in the integer it points to. Similarly,
if foundindex is non-NULL, store the index of the particular
keyword found therein. */
-extern size_t kwsexec(kwset_t, char const *, size_t, struct kwsmatch *);
+size_t kwsexec(kwset_t, char const *, size_t, struct kwsmatch *);
/* Deallocate the given keyword set and all its associated storage. */
-extern void kwsfree(kwset_t);
+void kwsfree(kwset_t);
static void fill_blob_sha1(struct commit *commit, struct diff_filespec *spec)
{
- unsigned mode;
+ unsigned short mode;
struct object_id oid;
if (get_tree_entry(&commit->object.oid, spec->path, &oid, &mode))
int line_log_print(struct rev_info *rev, struct commit *commit)
{
- struct line_log_data *range = lookup_line_range(rev, commit);
show_log(rev);
- dump_diff_hacky(rev, range);
+ if (!(rev->diffopt.output_format & DIFF_FORMAT_NO_OUTPUT)) {
+ struct line_log_data *range = lookup_line_range(rev, commit);
+ dump_diff_hacky(rev, range);
+ }
return 1;
}
struct range_set target;
};
-extern void range_set_init(struct range_set *, size_t prealloc);
-extern void range_set_release(struct range_set *);
+void range_set_init(struct range_set *, size_t prealloc);
+void range_set_release(struct range_set *);
/* Range includes start; excludes end */
-extern void range_set_append_unsafe(struct range_set *, long start, long end);
+void range_set_append_unsafe(struct range_set *, long start, long end);
/* New range must begin at or after end of last added range */
-extern void range_set_append(struct range_set *, long start, long end);
+void range_set_append(struct range_set *, long start, long end);
/*
* In-place pass of sorting and merging the ranges in the range set,
* to sort and make the ranges disjoint.
*/
-extern void sort_and_merge_range_set(struct range_set *);
+void sort_and_merge_range_set(struct range_set *);
/* Linked list of interesting files and their associated ranges. The
* list must be kept sorted by path.
struct diff_ranges diff;
};
-extern void line_log_init(struct rev_info *rev, const char *prefix, struct string_list *args);
+void line_log_init(struct rev_info *rev, const char *prefix, struct string_list *args);
-extern int line_log_filter(struct rev_info *rev);
+int line_log_filter(struct rev_info *rev);
-extern int line_log_print(struct rev_info *rev, struct commit *commit);
+int line_log_print(struct rev_info *rev, struct commit *commit);
#endif /* LINE_LOG_H */
filter_options->sparse_path_value = strdup(v0);
return 0;
}
+ /*
+ * Please update _git_fetch() in git-completion.bash when you
+ * add new filters
+ */
if (errbuf)
strbuf_addf(errbuf, "invalid filter-spec '%s'", arg);
if (S_ISDIR(entry.mode)) {
struct tree *t = lookup_tree(ctx->revs->repo, &entry.oid);
+ if (!t) {
+ die(_("entry '%s' in tree %s has tree mode, "
+ "but is not a tree"),
+ entry.path, oid_to_hex(&tree->object.oid));
+ }
t->object.flags |= NOT_USER_GIVEN;
process_tree(ctx, t, base, entry.path);
}
base, entry.path);
else {
struct blob *b = lookup_blob(ctx->revs->repo, &entry.oid);
+ if (!b) {
+ die(_("entry '%s' in tree %s has blob mode, "
+ "but is not a blob"),
+ entry.path, oid_to_hex(&tree->object.oid));
+ }
b->object.flags |= NOT_USER_GIVEN;
process_blob(ctx, b, base, entry.path);
}
struct tree *tree = get_commit_tree(commit);
tree->object.flags |= NOT_USER_GIVEN;
add_pending_tree(ctx->revs, tree);
+ } else if (commit->object.parsed) {
+ die(_("unable to load root tree for commit %s"),
+ oid_to_hex(&commit->object.oid));
}
ctx->show_commit(commit, ctx->show_data);
* timeout_ms is -1, retry indefinitely. The flags argument and error
* handling are described above.
*/
-extern int hold_lock_file_for_update_timeout(
+int hold_lock_file_for_update_timeout(
struct lock_file *lk, const char *path,
int flags, long timeout_ms);
* of `hold_lock_file_for_update()` to lock `path`. `err` should be the
* `errno` set by the failing call.
*/
-extern void unable_to_lock_message(const char *path, int err,
- struct strbuf *buf);
+void unable_to_lock_message(const char *path, int err,
+ struct strbuf *buf);
/*
* Emit an appropriate error message and `die()` following the failure
* `errno` set by the failing
* call.
*/
-extern NORETURN void unable_to_lock_die(const char *path, int err);
+NORETURN void unable_to_lock_die(const char *path, int err);
/*
* Associate a stdio stream with the lockfile (which must still be
* Return the path of the file that is locked by the specified
* lock_file object. The caller must free the memory.
*/
-extern char *get_locked_file_path(struct lock_file *lk);
+char *get_locked_file_path(struct lock_file *lk);
/*
* If the lockfile is still open, close it (and the file pointer if it
* call `commit_lock_file()` for a `lock_file` object that is not
* currently locked.
*/
-extern int commit_lock_file(struct lock_file *lk);
+int commit_lock_file(struct lock_file *lk);
/*
* Like `commit_lock_file()`, but rename the lockfile to the provided
*/
if (ctx.need_8bit_cte >= 0 && opt->add_signoff)
ctx.need_8bit_cte =
- has_non_ascii(fmt_name(getenv("GIT_COMMITTER_NAME"),
- getenv("GIT_COMMITTER_EMAIL")));
+ has_non_ascii(fmt_name(WANT_COMMITTER_IDENT));
ctx.date_mode = opt->date_mode;
ctx.date_mode_explicit = opt->date_mode_explicit;
ctx.abbrev = opt->diffopt.abbrev;
struct repository;
struct argv_array;
struct packet_reader;
-extern int ls_refs(struct repository *r, struct argv_array *keys,
- struct packet_reader *request);
+int ls_refs(struct repository *r, struct argv_array *keys,
+ struct packet_reader *request);
#endif /* LS_REFS_H */
int input_error;
};
-extern void setup_mailinfo(struct mailinfo *);
-extern int mailinfo(struct mailinfo *, const char *msg, const char *patch);
-extern void clear_mailinfo(struct mailinfo *);
+void setup_mailinfo(struct mailinfo *);
+int mailinfo(struct mailinfo *, const char *msg, const char *patch);
+void clear_mailinfo(struct mailinfo *);
#endif /* MAILINFO_H */
while (one.size) {
const char *path;
const struct object_id *elem;
- unsigned mode;
+ unsigned short mode;
int score;
elem = tree_entry_extract(&one, &path, &mode);
rewrite_here = NULL;
while (desc.size) {
const char *name;
- unsigned mode;
+ unsigned short mode;
tree_entry_extract(&desc, &name, &mode);
if (strlen(name) == toplen &&
if (add_score < del_score) {
/* We need to pick a subtree of two */
- unsigned mode;
+ unsigned short mode;
if (!*del_prefix)
return;
const char *shift_prefix)
{
struct object_id sub1, sub2;
- unsigned mode1, mode2;
+ unsigned short mode1, mode2;
unsigned candidate = 0;
/* Can hash2 be a tree at shift_prefix in tree hash1? */
struct blob;
struct index_state;
-extern void *merge_blobs(struct index_state *, const char *,
- struct blob *, struct blob *,
- struct blob *, unsigned long *);
+void *merge_blobs(struct index_state *, const char *,
+ struct blob *, struct blob *,
+ struct blob *, unsigned long *);
#endif /* MERGE_BLOBS_H */
hashmap_init(map, (hashmap_cmp_fn) collision_cmp, NULL, 0);
}
-static void flush_output(struct merge_options *o)
+static void flush_output(struct merge_options *opt)
{
- if (o->buffer_output < 2 && o->obuf.len) {
- fputs(o->obuf.buf, stdout);
- strbuf_reset(&o->obuf);
+ if (opt->buffer_output < 2 && opt->obuf.len) {
+ fputs(opt->obuf.buf, stdout);
+ strbuf_reset(&opt->obuf);
}
}
-static int err(struct merge_options *o, const char *err, ...)
+static int err(struct merge_options *opt, const char *err, ...)
{
va_list params;
- if (o->buffer_output < 2)
- flush_output(o);
+ if (opt->buffer_output < 2)
+ flush_output(opt);
else {
- strbuf_complete(&o->obuf, '\n');
- strbuf_addstr(&o->obuf, "error: ");
+ strbuf_complete(&opt->obuf, '\n');
+ strbuf_addstr(&opt->obuf, "error: ");
}
va_start(params, err);
- strbuf_vaddf(&o->obuf, err, params);
+ strbuf_vaddf(&opt->obuf, err, params);
va_end(params);
- if (o->buffer_output > 1)
- strbuf_addch(&o->obuf, '\n');
+ if (opt->buffer_output > 1)
+ strbuf_addch(&opt->obuf, '\n');
else {
- error("%s", o->obuf.buf);
- strbuf_reset(&o->obuf);
+ error("%s", opt->obuf.buf);
+ strbuf_reset(&opt->obuf);
}
return -1;
return lookup_tree(repo, &shifted);
}
+static inline void set_commit_tree(struct commit *c, struct tree *t)
+{
+ c->maybe_tree = t;
+}
+
static struct commit *make_virtual_commit(struct repository *repo,
struct tree *tree,
const char *comment)
struct commit *commit = alloc_commit_node(repo);
set_merge_remote_desc(commit, comment, (struct object *)commit);
- commit->maybe_tree = tree;
+ set_commit_tree(commit, tree);
commit->object.parsed = 1;
return commit;
}
RENAME_TWO_FILES_TO_ONE
};
-struct rename_conflict_info {
- enum rename_type rename_type;
- struct diff_filepair *pair1;
- struct diff_filepair *pair2;
- const char *branch1;
- const char *branch2;
- struct stage_data *dst_entry1;
- struct stage_data *dst_entry2;
- struct diff_filespec ren1_other;
- struct diff_filespec ren2_other;
-};
-
/*
* Since we want to write the index eventually, we cannot reuse the index
* for these (temporary) data.
*/
struct stage_data {
- struct {
- unsigned mode;
- struct object_id oid;
- } stages[4];
+ struct diff_filespec stages[4]; /* mostly for oid & mode; maybe path */
struct rename_conflict_info *rename_conflict_info;
unsigned processed:1;
};
+struct rename {
+ unsigned processed:1;
+ struct diff_filepair *pair;
+ const char *branch; /* branch that the rename occurred on */
+ /*
+ * If directory rename detection affected this rename, what was its
+	 * original type ('A' or 'R') and its original destination before
+ * the directory rename (otherwise, '\0' and NULL for these two vars).
+ */
+ char dir_rename_original_type;
+ char *dir_rename_original_dest;
+ /*
+ * Purpose of src_entry and dst_entry:
+ *
+ * If 'before' is renamed to 'after' then src_entry will contain
+ * the versions of 'before' from the merge_base, HEAD, and MERGE in
+ * stages 1, 2, and 3; dst_entry will contain the respective
+ * versions of 'after' in corresponding locations. Thus, we have a
+ * total of six modes and oids, though some will be null. (Stage 0
+ * is ignored; we're interested in handling conflicts.)
+ *
+ * Since we don't turn on break-rewrites by default, neither
+ * src_entry nor dst_entry can have all three of their stages have
+ * non-null oids, meaning at most four of the six will be non-null.
+ * Also, since this is a rename, both src_entry and dst_entry will
+ * have at least one non-null oid, meaning at least two will be
+ * non-null. Of the six oids, a typical rename will have three be
+ * non-null. Only two implies a rename/delete, and four implies a
+ * rename/add.
+ */
+ struct stage_data *src_entry;
+ struct stage_data *dst_entry;
+};
+
+struct rename_conflict_info {
+ enum rename_type rename_type;
+ struct rename *ren1;
+ struct rename *ren2;
+};
+
static inline void setup_rename_conflict_info(enum rename_type rename_type,
- struct diff_filepair *pair1,
- struct diff_filepair *pair2,
- const char *branch1,
- const char *branch2,
- struct stage_data *dst_entry1,
- struct stage_data *dst_entry2,
- struct merge_options *o,
- struct stage_data *src_entry1,
- struct stage_data *src_entry2)
-{
- int ostage1 = 0, ostage2;
+ struct merge_options *opt,
+ struct rename *ren1,
+ struct rename *ren2)
+{
struct rename_conflict_info *ci;
/*
* When we have two renames involved, it's easiest to get the
* correct things into stage 2 and 3, and to make sure that the
* content merge puts HEAD before the other branch if we just
- * ensure that branch1 == o->branch1. So, simply flip arguments
+ * ensure that branch1 == opt->branch1. So, simply flip arguments
* around if we don't have that.
*/
- if (dst_entry2 && branch1 != o->branch1) {
- setup_rename_conflict_info(rename_type,
- pair2, pair1,
- branch2, branch1,
- dst_entry2, dst_entry1,
- o,
- src_entry2, src_entry1);
+ if (ren2 && ren1->branch != opt->branch1) {
+ setup_rename_conflict_info(rename_type, opt, ren2, ren1);
return;
}
ci = xcalloc(1, sizeof(struct rename_conflict_info));
ci->rename_type = rename_type;
- ci->pair1 = pair1;
- ci->branch1 = branch1;
- ci->branch2 = branch2;
-
- ci->dst_entry1 = dst_entry1;
- dst_entry1->rename_conflict_info = ci;
- dst_entry1->processed = 0;
-
- assert(!pair2 == !dst_entry2);
- if (dst_entry2) {
- ci->dst_entry2 = dst_entry2;
- ci->pair2 = pair2;
- dst_entry2->rename_conflict_info = ci;
- }
+ ci->ren1 = ren1;
+ ci->ren2 = ren2;
- /*
- * For each rename, there could have been
- * modifications on the side of history where that
- * file was not renamed.
- */
- if (rename_type == RENAME_ADD ||
- rename_type == RENAME_TWO_FILES_TO_ONE) {
- ostage1 = o->branch1 == branch1 ? 3 : 2;
-
- ci->ren1_other.path = pair1->one->path;
- oidcpy(&ci->ren1_other.oid, &src_entry1->stages[ostage1].oid);
- ci->ren1_other.mode = src_entry1->stages[ostage1].mode;
- }
-
- if (rename_type == RENAME_TWO_FILES_TO_ONE) {
- ostage2 = ostage1 ^ 1;
-
- ci->ren2_other.path = pair2->one->path;
- oidcpy(&ci->ren2_other.oid, &src_entry2->stages[ostage2].oid);
- ci->ren2_other.mode = src_entry2->stages[ostage2].mode;
+ ci->ren1->dst_entry->processed = 0;
+ ci->ren1->dst_entry->rename_conflict_info = ci;
+ if (ren2) {
+ ci->ren2->dst_entry->rename_conflict_info = ci;
}
}
-static int show(struct merge_options *o, int v)
+static int show(struct merge_options *opt, int v)
{
- return (!o->call_depth && o->verbosity >= v) || o->verbosity >= 5;
+ return (!opt->call_depth && opt->verbosity >= v) || opt->verbosity >= 5;
}
__attribute__((format (printf, 3, 4)))
-static void output(struct merge_options *o, int v, const char *fmt, ...)
+static void output(struct merge_options *opt, int v, const char *fmt, ...)
{
va_list ap;
- if (!show(o, v))
+ if (!show(opt, v))
return;
- strbuf_addchars(&o->obuf, ' ', o->call_depth * 2);
+ strbuf_addchars(&opt->obuf, ' ', opt->call_depth * 2);
va_start(ap, fmt);
- strbuf_vaddf(&o->obuf, fmt, ap);
+ strbuf_vaddf(&opt->obuf, fmt, ap);
va_end(ap);
- strbuf_addch(&o->obuf, '\n');
- if (!o->buffer_output)
- flush_output(o);
+ strbuf_addch(&opt->obuf, '\n');
+ if (!opt->buffer_output)
+ flush_output(opt);
}
-static void output_commit_title(struct merge_options *o, struct commit *commit)
+static void output_commit_title(struct merge_options *opt, struct commit *commit)
{
struct merge_remote_desc *desc;
- strbuf_addchars(&o->obuf, ' ', o->call_depth * 2);
+ strbuf_addchars(&opt->obuf, ' ', opt->call_depth * 2);
desc = merge_remote_util(commit);
if (desc)
- strbuf_addf(&o->obuf, "virtual %s\n", desc->name);
+ strbuf_addf(&opt->obuf, "virtual %s\n", desc->name);
else {
- strbuf_add_unique_abbrev(&o->obuf, &commit->object.oid,
+ strbuf_add_unique_abbrev(&opt->obuf, &commit->object.oid,
DEFAULT_ABBREV);
- strbuf_addch(&o->obuf, ' ');
+ strbuf_addch(&opt->obuf, ' ');
if (parse_commit(commit) != 0)
- strbuf_addstr(&o->obuf, _("(bad commit)\n"));
+ strbuf_addstr(&opt->obuf, _("(bad commit)\n"));
else {
const char *title;
const char *msg = get_commit_buffer(commit, NULL);
int len = find_commit_subject(msg, &title);
if (len)
- strbuf_addf(&o->obuf, "%.*s\n", len, title);
+ strbuf_addf(&opt->obuf, "%.*s\n", len, title);
unuse_commit_buffer(commit, msg);
}
}
- flush_output(o);
+ flush_output(opt);
}
-static int add_cacheinfo(struct merge_options *o,
- unsigned int mode, const struct object_id *oid,
+static int add_cacheinfo(struct merge_options *opt,
+ const struct diff_filespec *blob,
const char *path, int stage, int refresh, int options)
{
- struct index_state *istate = o->repo->index;
+ struct index_state *istate = opt->repo->index;
struct cache_entry *ce;
int ret;
- ce = make_cache_entry(istate, mode, oid ? oid : &null_oid, path, stage, 0);
+ ce = make_cache_entry(istate, blob->mode, &blob->oid, path, stage, 0);
if (!ce)
- return err(o, _("add_cacheinfo failed for path '%s'; merge aborting."), path);
+ return err(opt, _("add_cacheinfo failed for path '%s'; merge aborting."), path);
ret = add_index_entry(istate, ce, options);
if (refresh) {
nce = refresh_cache_entry(istate, ce,
CE_MATCH_REFRESH | CE_MATCH_IGNORE_MISSING);
if (!nce)
- return err(o, _("add_cacheinfo failed to refresh for path '%s'; merge aborting."), path);
+ return err(opt, _("add_cacheinfo failed to refresh for path '%s'; merge aborting."), path);
if (nce != ce)
ret = add_index_entry(istate, nce, options);
}
init_tree_desc(desc, tree->buffer, tree->size);
}
-static int unpack_trees_start(struct merge_options *o,
+static int unpack_trees_start(struct merge_options *opt,
struct tree *common,
struct tree *head,
struct tree *merge)
struct tree_desc t[3];
struct index_state tmp_index = { NULL };
- memset(&o->unpack_opts, 0, sizeof(o->unpack_opts));
- if (o->call_depth)
- o->unpack_opts.index_only = 1;
+ memset(&opt->unpack_opts, 0, sizeof(opt->unpack_opts));
+ if (opt->call_depth)
+ opt->unpack_opts.index_only = 1;
else
- o->unpack_opts.update = 1;
- o->unpack_opts.merge = 1;
- o->unpack_opts.head_idx = 2;
- o->unpack_opts.fn = threeway_merge;
- o->unpack_opts.src_index = o->repo->index;
- o->unpack_opts.dst_index = &tmp_index;
- o->unpack_opts.aggressive = !merge_detect_rename(o);
- setup_unpack_trees_porcelain(&o->unpack_opts, "merge");
+ opt->unpack_opts.update = 1;
+ opt->unpack_opts.merge = 1;
+ opt->unpack_opts.head_idx = 2;
+ opt->unpack_opts.fn = threeway_merge;
+ opt->unpack_opts.src_index = opt->repo->index;
+ opt->unpack_opts.dst_index = &tmp_index;
+ opt->unpack_opts.aggressive = !merge_detect_rename(opt);
+ setup_unpack_trees_porcelain(&opt->unpack_opts, "merge");
init_tree_desc_from_tree(t+0, common);
init_tree_desc_from_tree(t+1, head);
init_tree_desc_from_tree(t+2, merge);
- rc = unpack_trees(3, t, &o->unpack_opts);
- cache_tree_free(&o->repo->index->cache_tree);
+ rc = unpack_trees(3, t, &opt->unpack_opts);
+ cache_tree_free(&opt->repo->index->cache_tree);
/*
- * Update o->repo->index to match the new results, AFTER saving a copy
- * in o->orig_index. Update src_index to point to the saved copy.
+ * Update opt->repo->index to match the new results, AFTER saving a copy
+ * in opt->orig_index. Update src_index to point to the saved copy.
* (verify_uptodate() checks src_index, and the original index is
* the one that had the necessary modification timestamps.)
*/
- o->orig_index = *o->repo->index;
- *o->repo->index = tmp_index;
- o->unpack_opts.src_index = &o->orig_index;
+ opt->orig_index = *opt->repo->index;
+ *opt->repo->index = tmp_index;
+ opt->unpack_opts.src_index = &opt->orig_index;
return rc;
}
-static void unpack_trees_finish(struct merge_options *o)
+static void unpack_trees_finish(struct merge_options *opt)
{
- discard_index(&o->orig_index);
- clear_unpack_trees_porcelain(&o->unpack_opts);
+ discard_index(&opt->orig_index);
+ clear_unpack_trees_porcelain(&opt->unpack_opts);
}
-struct tree *write_tree_from_memory(struct merge_options *o)
+struct tree *write_tree_from_memory(struct merge_options *opt)
{
struct tree *result = NULL;
- struct index_state *istate = o->repo->index;
+ struct index_state *istate = opt->repo->index;
if (unmerged_index(istate)) {
int i;
if (!cache_tree_fully_valid(istate->cache_tree) &&
cache_tree_update(istate, 0) < 0) {
- err(o, _("error building trees"));
+ err(opt, _("error building trees"));
return NULL;
}
- result = lookup_tree(o->repo, &istate->cache_tree->oid);
+ result = lookup_tree(opt->repo, &istate->cache_tree->oid);
return result;
}
{
struct path_hashmap_entry *entry;
int baselen = base->len;
- struct merge_options *o = context;
+ struct merge_options *opt = context;
strbuf_addstr(base, path);
FLEX_ALLOC_MEM(entry, path, base->buf, base->len);
hashmap_entry_init(entry, path_hash(entry->path));
- hashmap_add(&o->current_file_dir_set, entry);
+ hashmap_add(&opt->current_file_dir_set, entry);
strbuf_setlen(base, baselen);
return (S_ISDIR(mode) ? READ_TREE_RECURSIVE : 0);
}
-static void get_files_dirs(struct merge_options *o, struct tree *tree)
+static void get_files_dirs(struct merge_options *opt, struct tree *tree)
{
struct pathspec match_all;
memset(&match_all, 0, sizeof(match_all));
read_tree_recursive(the_repository, tree, "", 0, 0,
- &match_all, save_files_dirs, o);
+ &match_all, save_files_dirs, opt);
}
static int get_tree_entry_if_blob(const struct object_id *tree,
const char *path,
- struct object_id *hashy,
- unsigned int *mode_o)
+ struct diff_filespec *dfs)
{
int ret;
- ret = get_tree_entry(tree, path, hashy, mode_o);
- if (S_ISDIR(*mode_o)) {
- oidcpy(hashy, &null_oid);
- *mode_o = 0;
+ ret = get_tree_entry(tree, path, &dfs->oid, &dfs->mode);
+ if (S_ISDIR(dfs->mode)) {
+ oidcpy(&dfs->oid, &null_oid);
+ dfs->mode = 0;
}
return ret;
}
{
struct string_list_item *item;
struct stage_data *e = xcalloc(1, sizeof(struct stage_data));
- get_tree_entry_if_blob(&o->object.oid, path,
- &e->stages[1].oid, &e->stages[1].mode);
- get_tree_entry_if_blob(&a->object.oid, path,
- &e->stages[2].oid, &e->stages[2].mode);
- get_tree_entry_if_blob(&b->object.oid, path,
- &e->stages[3].oid, &e->stages[3].mode);
+ get_tree_entry_if_blob(&o->object.oid, path, &e->stages[1]);
+ get_tree_entry_if_blob(&a->object.oid, path, &e->stages[2]);
+ get_tree_entry_if_blob(&b->object.oid, path, &e->stages[3]);
item = string_list_insert(entries, path);
item->util = e;
return e;
return onelen - twolen;
}
-static void record_df_conflict_files(struct merge_options *o,
+static void record_df_conflict_files(struct merge_options *opt,
struct string_list *entries)
{
/* If there is a D/F conflict and the file for such a conflict
* If we're merging merge-bases, we don't want to bother with
* any working directory changes.
*/
- if (o->call_depth)
+ if (opt->call_depth)
return;
/* Ensure D/F conflicts are adjacent in the entries list. */
df_sorted_entries.cmp = string_list_df_name_compare;
string_list_sort(&df_sorted_entries);
- string_list_clear(&o->df_conflict_file_set, 1);
+ string_list_clear(&opt->df_conflict_file_set, 1);
for (i = 0; i < df_sorted_entries.nr; i++) {
const char *path = df_sorted_entries.items[i].string;
int len = strlen(path);
len > last_len &&
memcmp(path, last_file, last_len) == 0 &&
path[last_len] == '/') {
- string_list_insert(&o->df_conflict_file_set, last_file);
+ string_list_insert(&opt->df_conflict_file_set, last_file);
}
/*
string_list_clear(&df_sorted_entries, 0);
}
-struct rename {
- struct diff_filepair *pair;
- /*
- * Purpose of src_entry and dst_entry:
- *
- * If 'before' is renamed to 'after' then src_entry will contain
- * the versions of 'before' from the merge_base, HEAD, and MERGE in
- * stages 1, 2, and 3; dst_entry will contain the respective
- * versions of 'after' in corresponding locations. Thus, we have a
- * total of six modes and oids, though some will be null. (Stage 0
- * is ignored; we're interested in handling conflicts.)
- *
- * Since we don't turn on break-rewrites by default, neither
- * src_entry nor dst_entry can have all three of their stages have
- * non-null oids, meaning at most four of the six will be non-null.
- * Also, since this is a rename, both src_entry and dst_entry will
- * have at least one non-null oid, meaning at least two will be
- * non-null. Of the six oids, a typical rename will have three be
- * non-null. Only two implies a rename/delete, and four implies a
- * rename/add.
- */
- struct stage_data *src_entry;
- struct stage_data *dst_entry;
- unsigned add_turned_into_rename:1;
- unsigned processed:1;
-};
-
static int update_stages(struct merge_options *opt, const char *path,
const struct diff_filespec *o,
const struct diff_filespec *a,
if (remove_file_from_index(opt->repo->index, path))
return -1;
if (o)
- if (add_cacheinfo(opt, o->mode, &o->oid, path, 1, 0, options))
+ if (add_cacheinfo(opt, o, path, 1, 0, options))
return -1;
if (a)
- if (add_cacheinfo(opt, a->mode, &a->oid, path, 2, 0, options))
+ if (add_cacheinfo(opt, a, path, 2, 0, options))
return -1;
if (b)
- if (add_cacheinfo(opt, b->mode, &b->oid, path, 3, 0, options))
+ if (add_cacheinfo(opt, b, path, 3, 0, options))
return -1;
return 0;
}
oidcpy(&entry->stages[3].oid, &b->oid);
}
-static int remove_file(struct merge_options *o, int clean,
+static int remove_file(struct merge_options *opt, int clean,
const char *path, int no_wd)
{
- int update_cache = o->call_depth || clean;
- int update_working_directory = !o->call_depth && !no_wd;
+ int update_cache = opt->call_depth || clean;
+ int update_working_directory = !opt->call_depth && !no_wd;
if (update_cache) {
- if (remove_file_from_index(o->repo->index, path))
+ if (remove_file_from_index(opt->repo->index, path))
return -1;
}
if (update_working_directory) {
if (ignore_case) {
struct cache_entry *ce;
- ce = index_file_exists(o->repo->index, path, strlen(path),
+ ce = index_file_exists(opt->repo->index, path, strlen(path),
ignore_case);
if (ce && ce_stage(ce) == 0 && strcmp(path, ce->name))
return 0;
out->buf[i] = '_';
}
-static char *unique_path(struct merge_options *o, const char *path, const char *branch)
+static char *unique_path(struct merge_options *opt, const char *path, const char *branch)
{
struct path_hashmap_entry *entry;
struct strbuf newpath = STRBUF_INIT;
add_flattened_path(&newpath, branch);
base_len = newpath.len;
- while (hashmap_get_from_hash(&o->current_file_dir_set,
+ while (hashmap_get_from_hash(&opt->current_file_dir_set,
path_hash(newpath.buf), newpath.buf) ||
- (!o->call_depth && file_exists(newpath.buf))) {
+ (!opt->call_depth && file_exists(newpath.buf))) {
strbuf_setlen(&newpath, base_len);
strbuf_addf(&newpath, "_%d", suffix++);
}
FLEX_ALLOC_MEM(entry, path, newpath.buf, newpath.len);
hashmap_entry_init(entry, path_hash(entry->path));
- hashmap_add(&o->current_file_dir_set, entry);
+ hashmap_add(&opt->current_file_dir_set, entry);
return strbuf_detach(&newpath, NULL);
}
* Returns whether path was tracked in the index before the merge started,
* and its oid and mode match the specified values
*/
-static int was_tracked_and_matches(struct merge_options *o, const char *path,
- const struct object_id *oid, unsigned mode)
+static int was_tracked_and_matches(struct merge_options *opt, const char *path,
+ const struct diff_filespec *blob)
{
- int pos = index_name_pos(&o->orig_index, path, strlen(path));
+ int pos = index_name_pos(&opt->orig_index, path, strlen(path));
struct cache_entry *ce;
if (0 > pos)
return 0;
/* See if the file we were tracking before matches */
- ce = o->orig_index.cache[pos];
- return (oid_eq(&ce->oid, oid) && ce->ce_mode == mode);
+ ce = opt->orig_index.cache[pos];
+ return (oid_eq(&ce->oid, &blob->oid) && ce->ce_mode == blob->mode);
}
/*
* Returns whether path was tracked in the index before the merge started
*/
-static int was_tracked(struct merge_options *o, const char *path)
+static int was_tracked(struct merge_options *opt, const char *path)
{
- int pos = index_name_pos(&o->orig_index, path, strlen(path));
+ int pos = index_name_pos(&opt->orig_index, path, strlen(path));
if (0 <= pos)
/* we were tracking this path before the merge */
return 0;
}
-static int would_lose_untracked(struct merge_options *o, const char *path)
+static int would_lose_untracked(struct merge_options *opt, const char *path)
{
- struct index_state *istate = o->repo->index;
+ struct index_state *istate = opt->repo->index;
/*
* This may look like it can be simplified to:
- * return !was_tracked(o, path) && file_exists(path)
+ * return !was_tracked(opt, path) && file_exists(path)
* but it can't. This function needs to know whether path was in
* the working tree due to EITHER having been tracked in the index
* before the merge OR having been put into the working copy and
return file_exists(path);
}
-static int was_dirty(struct merge_options *o, const char *path)
+static int was_dirty(struct merge_options *opt, const char *path)
{
struct cache_entry *ce;
int dirty = 1;
- if (o->call_depth || !was_tracked(o, path))
+ if (opt->call_depth || !was_tracked(opt, path))
return !dirty;
- ce = index_file_exists(o->unpack_opts.src_index,
+ ce = index_file_exists(opt->unpack_opts.src_index,
path, strlen(path), ignore_case);
- dirty = verify_uptodate(ce, &o->unpack_opts) != 0;
+ dirty = verify_uptodate(ce, &opt->unpack_opts) != 0;
return dirty;
}
-static int make_room_for_path(struct merge_options *o, const char *path)
+static int make_room_for_path(struct merge_options *opt, const char *path)
{
int status, i;
const char *msg = _("failed to create path '%s'%s");
/* Unlink any D/F conflict files that are in the way */
- for (i = 0; i < o->df_conflict_file_set.nr; i++) {
- const char *df_path = o->df_conflict_file_set.items[i].string;
+ for (i = 0; i < opt->df_conflict_file_set.nr; i++) {
+ const char *df_path = opt->df_conflict_file_set.items[i].string;
size_t pathlen = strlen(path);
size_t df_pathlen = strlen(df_path);
if (df_pathlen < pathlen &&
path[df_pathlen] == '/' &&
strncmp(path, df_path, df_pathlen) == 0) {
- output(o, 3,
+ output(opt, 3,
_("Removing %s to make room for subdirectory\n"),
df_path);
unlink(df_path);
- unsorted_string_list_delete_item(&o->df_conflict_file_set,
+ unsorted_string_list_delete_item(&opt->df_conflict_file_set,
i, 0);
break;
}
if (status) {
if (status == SCLD_EXISTS)
/* something else exists */
- return err(o, msg, path, _(": perhaps a D/F conflict?"));
- return err(o, msg, path, "");
+ return err(opt, msg, path, _(": perhaps a D/F conflict?"));
+ return err(opt, msg, path, "");
}
/*
* Do not unlink a file in the work tree if we are not
* tracking it.
*/
- if (would_lose_untracked(o, path))
- return err(o, _("refusing to lose untracked file at '%s'"),
+ if (would_lose_untracked(opt, path))
+ return err(opt, _("refusing to lose untracked file at '%s'"),
path);
/* Successful unlink is good.. */
if (errno == ENOENT)
return 0;
/* .. but not some other error (who really cares what?) */
- return err(o, msg, path, _(": perhaps a D/F conflict?"));
+ return err(opt, msg, path, _(": perhaps a D/F conflict?"));
}
-static int update_file_flags(struct merge_options *o,
- const struct object_id *oid,
- unsigned mode,
+static int update_file_flags(struct merge_options *opt,
+ const struct diff_filespec *contents,
const char *path,
int update_cache,
int update_wd)
{
int ret = 0;
- if (o->call_depth)
+ if (opt->call_depth)
update_wd = 0;
if (update_wd) {
void *buf;
unsigned long size;
- if (S_ISGITLINK(mode)) {
+ if (S_ISGITLINK(contents->mode)) {
/*
* We may later decide to recursively descend into
* the submodule directory and update its index
goto update_index;
}
- buf = read_object_file(oid, &type, &size);
+ buf = read_object_file(&contents->oid, &type, &size);
if (!buf)
- return err(o, _("cannot read object %s '%s'"), oid_to_hex(oid), path);
+ return err(opt, _("cannot read object %s '%s'"),
+ oid_to_hex(&contents->oid), path);
if (type != OBJ_BLOB) {
- ret = err(o, _("blob expected for %s '%s'"), oid_to_hex(oid), path);
+ ret = err(opt, _("blob expected for %s '%s'"),
+ oid_to_hex(&contents->oid), path);
goto free_buf;
}
- if (S_ISREG(mode)) {
+ if (S_ISREG(contents->mode)) {
struct strbuf strbuf = STRBUF_INIT;
- if (convert_to_working_tree(o->repo->index, path, buf, size, &strbuf)) {
+ if (convert_to_working_tree(opt->repo->index, path, buf, size, &strbuf)) {
free(buf);
size = strbuf.len;
buf = strbuf_detach(&strbuf, NULL);
}
}
- if (make_room_for_path(o, path) < 0) {
+ if (make_room_for_path(opt, path) < 0) {
update_wd = 0;
goto free_buf;
}
- if (S_ISREG(mode) || (!has_symlinks && S_ISLNK(mode))) {
+ if (S_ISREG(contents->mode) ||
+ (!has_symlinks && S_ISLNK(contents->mode))) {
int fd;
- if (mode & 0100)
- mode = 0777;
- else
- mode = 0666;
+ int mode = (contents->mode & 0100 ? 0777 : 0666);
+
fd = open(path, O_WRONLY | O_TRUNC | O_CREAT, mode);
if (fd < 0) {
- ret = err(o, _("failed to open '%s': %s"),
+ ret = err(opt, _("failed to open '%s': %s"),
path, strerror(errno));
goto free_buf;
}
write_in_full(fd, buf, size);
close(fd);
- } else if (S_ISLNK(mode)) {
+ } else if (S_ISLNK(contents->mode)) {
char *lnk = xmemdupz(buf, size);
safe_create_leading_directories_const(path);
unlink(path);
if (symlink(lnk, path))
- ret = err(o, _("failed to symlink '%s': %s"),
+ ret = err(opt, _("failed to symlink '%s': %s"),
path, strerror(errno));
free(lnk);
} else
- ret = err(o,
+ ret = err(opt,
_("do not know what to do with %06o %s '%s'"),
- mode, oid_to_hex(oid), path);
+ contents->mode, oid_to_hex(&contents->oid), path);
free_buf:
free(buf);
}
update_index:
if (!ret && update_cache)
- if (add_cacheinfo(o, mode, oid, path, 0, update_wd,
+ if (add_cacheinfo(opt, contents, path, 0, update_wd,
ADD_CACHE_OK_TO_ADD))
return -1;
return ret;
}
-static int update_file(struct merge_options *o,
+static int update_file(struct merge_options *opt,
int clean,
- const struct object_id *oid,
- unsigned mode,
+ const struct diff_filespec *contents,
const char *path)
{
- return update_file_flags(o, oid, mode, path, o->call_depth || clean, !o->call_depth);
+ return update_file_flags(opt, contents, path,
+ opt->call_depth || clean, !opt->call_depth);
}
/* Low level file merging, update and removal */
struct merge_file_info {
- struct object_id oid;
- unsigned mode;
+ struct diff_filespec blob; /* mostly use oid & mode; sometimes path */
unsigned clean:1,
merge:1;
};
-static int merge_3way(struct merge_options *o,
+static int merge_3way(struct merge_options *opt,
mmbuffer_t *result_buf,
- const struct diff_filespec *one,
+ const struct diff_filespec *o,
const struct diff_filespec *a,
const struct diff_filespec *b,
const char *branch1,
char *base_name, *name1, *name2;
int merge_status;
- ll_opts.renormalize = o->renormalize;
+ ll_opts.renormalize = opt->renormalize;
ll_opts.extra_marker_size = extra_marker_size;
- ll_opts.xdl_opts = o->xdl_opts;
+ ll_opts.xdl_opts = opt->xdl_opts;
- if (o->call_depth) {
+ if (opt->call_depth) {
ll_opts.virtual_ancestor = 1;
ll_opts.variant = 0;
} else {
- switch (o->recursive_variant) {
+ switch (opt->recursive_variant) {
case MERGE_RECURSIVE_OURS:
ll_opts.variant = XDL_MERGE_FAVOR_OURS;
break;
}
}
+ assert(a->path && b->path);
if (strcmp(a->path, b->path) ||
- (o->ancestor != NULL && strcmp(a->path, one->path) != 0)) {
- base_name = o->ancestor == NULL ? NULL :
- mkpathdup("%s:%s", o->ancestor, one->path);
+ (opt->ancestor != NULL && strcmp(a->path, o->path) != 0)) {
+ base_name = opt->ancestor == NULL ? NULL :
+ mkpathdup("%s:%s", opt->ancestor, o->path);
name1 = mkpathdup("%s:%s", branch1, a->path);
name2 = mkpathdup("%s:%s", branch2, b->path);
} else {
- base_name = o->ancestor == NULL ? NULL :
- mkpathdup("%s", o->ancestor);
+ base_name = opt->ancestor == NULL ? NULL :
+ mkpathdup("%s", opt->ancestor);
name1 = mkpathdup("%s", branch1);
name2 = mkpathdup("%s", branch2);
}
- read_mmblob(&orig, &one->oid);
+ read_mmblob(&orig, &o->oid);
read_mmblob(&src1, &a->oid);
read_mmblob(&src2, &b->oid);
merge_status = ll_merge(result_buf, a->path, &orig, base_name,
&src1, name1, &src2, name2,
- o->repo->index, &ll_opts);
+ opt->repo->index, &ll_opts);
free(base_name);
free(name1);
struct commit *commit;
int contains_another;
- char merged_revision[42];
+ char merged_revision[GIT_MAX_HEXSZ + 2];
const char *rev_args[] = { "rev-list", "--merges", "--ancestry-path",
"--all", merged_revision, NULL };
struct rev_info revs;
strbuf_release(&sb);
}
-static int merge_submodule(struct merge_options *o,
+static int is_valid(const struct diff_filespec *dfs)
+{
+ return dfs->mode != 0 && !is_null_oid(&dfs->oid);
+}
+
+static int merge_submodule(struct merge_options *opt,
struct object_id *result, const char *path,
const struct object_id *base, const struct object_id *a,
const struct object_id *b)
struct object_array merges;
int i;
- int search = !o->call_depth;
+ int search = !opt->call_depth;
/* store a in result in case we fail */
oidcpy(result, a);
return 0;
if (add_submodule_odb(path)) {
- output(o, 1, _("Failed to merge submodule %s (not checked out)"), path);
+ output(opt, 1, _("Failed to merge submodule %s (not checked out)"), path);
return 0;
}
- if (!(commit_base = lookup_commit_reference(o->repo, base)) ||
- !(commit_a = lookup_commit_reference(o->repo, a)) ||
- !(commit_b = lookup_commit_reference(o->repo, b))) {
- output(o, 1, _("Failed to merge submodule %s (commits not present)"), path);
+ if (!(commit_base = lookup_commit_reference(opt->repo, base)) ||
+ !(commit_a = lookup_commit_reference(opt->repo, a)) ||
+ !(commit_b = lookup_commit_reference(opt->repo, b))) {
+ output(opt, 1, _("Failed to merge submodule %s (commits not present)"), path);
return 0;
}
/* check whether both changes are forward */
if (!in_merge_bases(commit_base, commit_a) ||
!in_merge_bases(commit_base, commit_b)) {
- output(o, 1, _("Failed to merge submodule %s (commits don't follow merge-base)"), path);
+ output(opt, 1, _("Failed to merge submodule %s (commits don't follow merge-base)"), path);
return 0;
}
/* Case #1: a is contained in b or vice versa */
if (in_merge_bases(commit_a, commit_b)) {
oidcpy(result, b);
- if (show(o, 3)) {
- output(o, 3, _("Fast-forwarding submodule %s to the following commit:"), path);
- output_commit_title(o, commit_b);
- } else if (show(o, 2))
- output(o, 2, _("Fast-forwarding submodule %s"), path);
+ if (show(opt, 3)) {
+ output(opt, 3, _("Fast-forwarding submodule %s to the following commit:"), path);
+ output_commit_title(opt, commit_b);
+ } else if (show(opt, 2))
+ output(opt, 2, _("Fast-forwarding submodule %s"), path);
else
; /* no output */
}
if (in_merge_bases(commit_b, commit_a)) {
oidcpy(result, a);
- if (show(o, 3)) {
- output(o, 3, _("Fast-forwarding submodule %s to the following commit:"), path);
- output_commit_title(o, commit_a);
- } else if (show(o, 2))
- output(o, 2, _("Fast-forwarding submodule %s"), path);
+ if (show(opt, 3)) {
+ output(opt, 3, _("Fast-forwarding submodule %s to the following commit:"), path);
+ output_commit_title(opt, commit_a);
+ } else if (show(opt, 2))
+ output(opt, 2, _("Fast-forwarding submodule %s"), path);
else
; /* no output */
return 0;
/* find commit which merges them */
- parent_count = find_first_merges(o->repo, &merges, path,
+ parent_count = find_first_merges(opt->repo, &merges, path,
commit_a, commit_b);
switch (parent_count) {
case 0:
- output(o, 1, _("Failed to merge submodule %s (merge following commits not found)"), path);
+ output(opt, 1, _("Failed to merge submodule %s (merge following commits not found)"), path);
break;
case 1:
- output(o, 1, _("Failed to merge submodule %s (not fast-forward)"), path);
- output(o, 2, _("Found a possible merge resolution for the submodule:\n"));
+ output(opt, 1, _("Failed to merge submodule %s (not fast-forward)"), path);
+ output(opt, 2, _("Found a possible merge resolution for the submodule:\n"));
print_commit((struct commit *) merges.objects[0].item);
- output(o, 2, _(
+ output(opt, 2, _(
"If this is correct simply add it to the index "
"for example\n"
"by using:\n\n"
break;
default:
- output(o, 1, _("Failed to merge submodule %s (multiple merges found)"), path);
+ output(opt, 1, _("Failed to merge submodule %s (multiple merges found)"), path);
for (i = 0; i < merges.nr; i++)
print_commit((struct commit *) merges.objects[i].item);
}
return 0;
}
-static int merge_mode_and_contents(struct merge_options *o,
- const struct diff_filespec *one,
+static int merge_mode_and_contents(struct merge_options *opt,
+ const struct diff_filespec *o,
const struct diff_filespec *a,
const struct diff_filespec *b,
const char *filename,
const int extra_marker_size,
struct merge_file_info *result)
{
- if (o->branch1 != branch1) {
+ if (opt->branch1 != branch1) {
/*
* It's weird getting a reverse merge with HEAD on the bottom
* side of the conflict markers and the other branch on the
* top. Fix that.
*/
- return merge_mode_and_contents(o, one, b, a,
+ return merge_mode_and_contents(opt, o, b, a,
filename,
branch2, branch1,
extra_marker_size, result);
if ((S_IFMT & a->mode) != (S_IFMT & b->mode)) {
result->clean = 0;
if (S_ISREG(a->mode)) {
- result->mode = a->mode;
- oidcpy(&result->oid, &a->oid);
+ result->blob.mode = a->mode;
+ oidcpy(&result->blob.oid, &a->oid);
} else {
- result->mode = b->mode;
- oidcpy(&result->oid, &b->oid);
+ result->blob.mode = b->mode;
+ oidcpy(&result->blob.oid, &b->oid);
}
} else {
- if (!oid_eq(&a->oid, &one->oid) && !oid_eq(&b->oid, &one->oid))
+ if (!oid_eq(&a->oid, &o->oid) && !oid_eq(&b->oid, &o->oid))
result->merge = 1;
/*
* Merge modes
*/
- if (a->mode == b->mode || a->mode == one->mode)
- result->mode = b->mode;
+ if (a->mode == b->mode || a->mode == o->mode)
+ result->blob.mode = b->mode;
else {
- result->mode = a->mode;
- if (b->mode != one->mode) {
+ result->blob.mode = a->mode;
+ if (b->mode != o->mode) {
result->clean = 0;
result->merge = 1;
}
}
- if (oid_eq(&a->oid, &b->oid) || oid_eq(&a->oid, &one->oid))
- oidcpy(&result->oid, &b->oid);
- else if (oid_eq(&b->oid, &one->oid))
- oidcpy(&result->oid, &a->oid);
+ if (oid_eq(&a->oid, &b->oid) || oid_eq(&a->oid, &o->oid))
+ oidcpy(&result->blob.oid, &b->oid);
+ else if (oid_eq(&b->oid, &o->oid))
+ oidcpy(&result->blob.oid, &a->oid);
else if (S_ISREG(a->mode)) {
mmbuffer_t result_buf;
int ret = 0, merge_status;
- merge_status = merge_3way(o, &result_buf, one, a, b,
+ merge_status = merge_3way(opt, &result_buf, o, a, b,
branch1, branch2,
extra_marker_size);
if ((merge_status < 0) || !result_buf.ptr)
- ret = err(o, _("Failed to execute internal merge"));
+ ret = err(opt, _("Failed to execute internal merge"));
if (!ret &&
write_object_file(result_buf.ptr, result_buf.size,
- blob_type, &result->oid))
- ret = err(o, _("Unable to add %s to database"),
+ blob_type, &result->blob.oid))
+ ret = err(opt, _("Unable to add %s to database"),
a->path);
free(result_buf.ptr);
return ret;
result->clean = (merge_status == 0);
} else if (S_ISGITLINK(a->mode)) {
- result->clean = merge_submodule(o, &result->oid,
- one->path,
- &one->oid,
+ result->clean = merge_submodule(opt, &result->blob.oid,
+ o->path,
+ &o->oid,
&a->oid,
&b->oid);
} else if (S_ISLNK(a->mode)) {
- switch (o->recursive_variant) {
+ switch (opt->recursive_variant) {
case MERGE_RECURSIVE_NORMAL:
- oidcpy(&result->oid, &a->oid);
+ oidcpy(&result->blob.oid, &a->oid);
if (!oid_eq(&a->oid, &b->oid))
result->clean = 0;
break;
case MERGE_RECURSIVE_OURS:
- oidcpy(&result->oid, &a->oid);
+ oidcpy(&result->blob.oid, &a->oid);
break;
case MERGE_RECURSIVE_THEIRS:
- oidcpy(&result->oid, &b->oid);
+ oidcpy(&result->blob.oid, &b->oid);
break;
}
} else
}
if (result->merge)
- output(o, 2, _("Auto-merging %s"), filename);
+ output(opt, 2, _("Auto-merging %s"), filename);
return 0;
}
-static int handle_rename_via_dir(struct merge_options *o,
- struct diff_filepair *pair,
- const char *rename_branch,
- const char *other_branch)
+static int handle_rename_via_dir(struct merge_options *opt,
+ struct rename_conflict_info *ci)
{
/*
* Handle file adds that need to be renamed due to directory rename
* there is no content merge to do; just move the file into the
* desired final location.
*/
- const struct diff_filespec *dest = pair->two;
+ const struct rename *ren = ci->ren1;
+ const struct diff_filespec *dest = ren->pair->two;
+ char *file_path = dest->path;
+ int mark_conflicted = (opt->detect_directory_renames == 1);
+ assert(ren->dir_rename_original_dest);
- if (!o->call_depth && would_lose_untracked(o, dest->path)) {
- char *alt_path = unique_path(o, dest->path, rename_branch);
+ if (!opt->call_depth && would_lose_untracked(opt, dest->path)) {
+ mark_conflicted = 1;
+ file_path = unique_path(opt, dest->path, ren->branch);
+ output(opt, 1, _("Error: Refusing to lose untracked file at %s; "
+ "writing to %s instead."),
+ dest->path, file_path);
+ }
- output(o, 1, _("Error: Refusing to lose untracked file at %s; "
- "writing to %s instead."),
- dest->path, alt_path);
+ if (mark_conflicted) {
/*
- * Write the file in worktree at alt_path, but not in the
- * index. Instead, write to dest->path for the index but
- * only at the higher appropriate stage.
+ * Write the file in worktree at file_path. In the index,
+ * only record the file at dest->path in the appropriate
+ * higher stage.
*/
- if (update_file(o, 0, &dest->oid, dest->mode, alt_path))
+ if (update_file(opt, 0, dest, file_path))
return -1;
- free(alt_path);
- return update_stages(o, dest->path, NULL,
- rename_branch == o->branch1 ? dest : NULL,
- rename_branch == o->branch1 ? NULL : dest);
+ if (file_path != dest->path)
+ free(file_path);
+ if (update_stages(opt, dest->path, NULL,
+ ren->branch == opt->branch1 ? dest : NULL,
+ ren->branch == opt->branch1 ? NULL : dest))
+ return -1;
+ return 0; /* not clean, but conflicted */
+ } else {
+ /* Update dest->path both in index and in worktree */
+ if (update_file(opt, 1, dest, dest->path))
+ return -1;
+ return 1; /* clean */
}
-
- /* Update dest->path both in index and in worktree */
- if (update_file(o, 1, &dest->oid, dest->mode, dest->path))
- return -1;
- return 0;
}
-static int handle_change_delete(struct merge_options *o,
+static int handle_change_delete(struct merge_options *opt,
const char *path, const char *old_path,
- const struct object_id *o_oid, int o_mode,
- const struct object_id *changed_oid,
- int changed_mode,
+ const struct diff_filespec *o,
+ const struct diff_filespec *changed,
const char *change_branch,
const char *delete_branch,
const char *change, const char *change_past)
const char *update_path = path;
int ret = 0;
- if (dir_in_way(o->repo->index, path, !o->call_depth, 0) ||
- (!o->call_depth && would_lose_untracked(o, path))) {
- update_path = alt_path = unique_path(o, path, change_branch);
+ if (dir_in_way(opt->repo->index, path, !opt->call_depth, 0) ||
+ (!opt->call_depth && would_lose_untracked(opt, path))) {
+ update_path = alt_path = unique_path(opt, path, change_branch);
}
- if (o->call_depth) {
+ if (opt->call_depth) {
/*
* We cannot arbitrarily accept either a_sha or b_sha as
* correct; since there is no true "middle point" between
* them, simply reuse the base version for virtual merge base.
*/
- ret = remove_file_from_index(o->repo->index, path);
+ ret = remove_file_from_index(opt->repo->index, path);
if (!ret)
- ret = update_file(o, 0, o_oid, o_mode, update_path);
+ ret = update_file(opt, 0, o, update_path);
} else {
/*
* Despite the four nearly duplicate messages and argument
*/
if (!alt_path) {
if (!old_path) {
- output(o, 1, _("CONFLICT (%s/delete): %s deleted in %s "
+ output(opt, 1, _("CONFLICT (%s/delete): %s deleted in %s "
"and %s in %s. Version %s of %s left in tree."),
change, path, delete_branch, change_past,
change_branch, change_branch, path);
} else {
- output(o, 1, _("CONFLICT (%s/delete): %s deleted in %s "
+ output(opt, 1, _("CONFLICT (%s/delete): %s deleted in %s "
"and %s to %s in %s. Version %s of %s left in tree."),
change, old_path, delete_branch, change_past, path,
change_branch, change_branch, path);
}
} else {
if (!old_path) {
- output(o, 1, _("CONFLICT (%s/delete): %s deleted in %s "
+ output(opt, 1, _("CONFLICT (%s/delete): %s deleted in %s "
"and %s in %s. Version %s of %s left in tree at %s."),
change, path, delete_branch, change_past,
change_branch, change_branch, path, alt_path);
} else {
- output(o, 1, _("CONFLICT (%s/delete): %s deleted in %s "
+ output(opt, 1, _("CONFLICT (%s/delete): %s deleted in %s "
"and %s to %s in %s. Version %s of %s left in tree at %s."),
change, old_path, delete_branch, change_past, path,
change_branch, change_branch, path, alt_path);
}
/*
* No need to call update_file() on path when change_branch ==
- * o->branch1 && !alt_path, since that would needlessly touch
+ * opt->branch1 && !alt_path, since that would needlessly touch
* path. We could call update_file_flags() with update_cache=0
* and update_wd=0, but that's a no-op.
*/
- if (change_branch != o->branch1 || alt_path)
- ret = update_file(o, 0, changed_oid, changed_mode, update_path);
+ if (change_branch != opt->branch1 || alt_path)
+ ret = update_file(opt, 0, changed, update_path);
}
free(alt_path);
return ret;
}
-static int handle_rename_delete(struct merge_options *o,
- struct diff_filepair *pair,
- const char *rename_branch,
- const char *delete_branch)
+static int handle_rename_delete(struct merge_options *opt,
+ struct rename_conflict_info *ci)
{
- const struct diff_filespec *orig = pair->one;
- const struct diff_filespec *dest = pair->two;
-
- if (handle_change_delete(o,
- o->call_depth ? orig->path : dest->path,
- o->call_depth ? NULL : orig->path,
- &orig->oid, orig->mode,
- &dest->oid, dest->mode,
+ const struct rename *ren = ci->ren1;
+ const struct diff_filespec *orig = ren->pair->one;
+ const struct diff_filespec *dest = ren->pair->two;
+ const char *rename_branch = ren->branch;
+ const char *delete_branch = (opt->branch1 == ren->branch ?
+ opt->branch2 : opt->branch1);
+
+ if (handle_change_delete(opt,
+ opt->call_depth ? orig->path : dest->path,
+ opt->call_depth ? NULL : orig->path,
+ orig, dest,
rename_branch, delete_branch,
_("rename"), _("renamed")))
return -1;
- if (o->call_depth)
- return remove_file_from_index(o->repo->index, dest->path);
+ if (opt->call_depth)
+ return remove_file_from_index(opt->repo->index, dest->path);
else
- return update_stages(o, dest->path, NULL,
- rename_branch == o->branch1 ? dest : NULL,
- rename_branch == o->branch1 ? NULL : dest);
+ return update_stages(opt, dest->path, NULL,
+ rename_branch == opt->branch1 ? dest : NULL,
+ rename_branch == opt->branch1 ? NULL : dest);
}
-static struct diff_filespec *filespec_from_entry(struct diff_filespec *target,
- struct stage_data *entry,
- int stage)
-{
- struct object_id *oid = &entry->stages[stage].oid;
- unsigned mode = entry->stages[stage].mode;
- if (mode == 0 || is_null_oid(oid))
- return NULL;
- oidcpy(&target->oid, oid);
- target->mode = mode;
- return target;
-}
-
-static int handle_file_collision(struct merge_options *o,
+static int handle_file_collision(struct merge_options *opt,
const char *collide_path,
const char *prev_path1,
const char *prev_path2,
const char *branch1, const char *branch2,
- const struct object_id *a_oid,
- unsigned int a_mode,
- const struct object_id *b_oid,
- unsigned int b_mode)
+ struct diff_filespec *a,
+ struct diff_filespec *b)
{
struct merge_file_info mfi;
- struct diff_filespec null, a, b;
+ struct diff_filespec null;
char *alt_path = NULL;
const char *update_path = collide_path;
/*
* It's easiest to get the correct things into stage 2 and 3, and
* to make sure that the content merge puts HEAD before the other
- * branch if we just ensure that branch1 == o->branch1. So, simply
+ * branch if we just ensure that branch1 == opt->branch1. So, simply
* flip arguments around if we don't have that.
*/
- if (branch1 != o->branch1) {
- return handle_file_collision(o, collide_path,
+ if (branch1 != opt->branch1) {
+ return handle_file_collision(opt, collide_path,
prev_path2, prev_path1,
branch2, branch1,
- b_oid, b_mode,
- a_oid, a_mode);
+ b, a);
}
/*
* In the recursive case, we just opt to undo renames
*/
- if (o->call_depth && (prev_path1 || prev_path2)) {
- /* Put first file (a_oid, a_mode) in its original spot */
+ if (opt->call_depth && (prev_path1 || prev_path2)) {
+ /* Put first file (a->oid, a->mode) in its original spot */
if (prev_path1) {
- if (update_file(o, 1, a_oid, a_mode, prev_path1))
+ if (update_file(opt, 1, a, prev_path1))
return -1;
} else {
- if (update_file(o, 1, a_oid, a_mode, collide_path))
+ if (update_file(opt, 1, a, collide_path))
return -1;
}
- /* Put second file (b_oid, b_mode) in its original spot */
+ /* Put second file (b->oid, b->mode) in its original spot */
if (prev_path2) {
- if (update_file(o, 1, b_oid, b_mode, prev_path2))
+ if (update_file(opt, 1, b, prev_path2))
return -1;
} else {
- if (update_file(o, 1, b_oid, b_mode, collide_path))
+ if (update_file(opt, 1, b, collide_path))
return -1;
}
/* Don't leave something at collision path if unrenaming both */
if (prev_path1 && prev_path2)
- remove_file(o, 1, collide_path, 0);
+ remove_file(opt, 1, collide_path, 0);
return 0;
}
/* Remove rename sources if rename/add or rename/rename(2to1) */
if (prev_path1)
- remove_file(o, 1, prev_path1,
- o->call_depth || would_lose_untracked(o, prev_path1));
+ remove_file(opt, 1, prev_path1,
+ opt->call_depth || would_lose_untracked(opt, prev_path1));
if (prev_path2)
- remove_file(o, 1, prev_path2,
- o->call_depth || would_lose_untracked(o, prev_path2));
+ remove_file(opt, 1, prev_path2,
+ opt->call_depth || would_lose_untracked(opt, prev_path2));
/*
* Remove the collision path, if it wouldn't cause dirty contents
* or an untracked file to get lost. We'll either overwrite with
* merged contents, or just write out to differently named files.
*/
- if (was_dirty(o, collide_path)) {
- output(o, 1, _("Refusing to lose dirty file at %s"),
+ if (was_dirty(opt, collide_path)) {
+ output(opt, 1, _("Refusing to lose dirty file at %s"),
collide_path);
- update_path = alt_path = unique_path(o, collide_path, "merged");
- } else if (would_lose_untracked(o, collide_path)) {
+ update_path = alt_path = unique_path(opt, collide_path, "merged");
+ } else if (would_lose_untracked(opt, collide_path)) {
/*
* Only way we get here is if both renames were from
* a directory rename AND user had an untracked file
* at the location where both files end up after the
* two directory renames. See testcase 10d of t6043.
*/
- output(o, 1, _("Refusing to lose untracked file at "
+ output(opt, 1, _("Refusing to lose untracked file at "
"%s, even though it's in the way."),
collide_path);
- update_path = alt_path = unique_path(o, collide_path, "merged");
+ update_path = alt_path = unique_path(opt, collide_path, "merged");
} else {
/*
* FIXME: It's possible that the two files are identical
* merge-recursive interoperate anyway, so punting for
* now...
*/
- remove_file(o, 0, collide_path, 0);
+ remove_file(opt, 0, collide_path, 0);
}
/* Store things in diff_filespecs for functions that need it */
- memset(&a, 0, sizeof(struct diff_filespec));
- memset(&b, 0, sizeof(struct diff_filespec));
- null.path = a.path = b.path = (char *)collide_path;
+ null.path = (char *)collide_path;
oidcpy(&null.oid, &null_oid);
null.mode = 0;
- oidcpy(&a.oid, a_oid);
- a.mode = a_mode;
- a.oid_valid = 1;
- oidcpy(&b.oid, b_oid);
- b.mode = b_mode;
- b.oid_valid = 1;
-
- if (merge_mode_and_contents(o, &null, &a, &b, collide_path,
- branch1, branch2, o->call_depth * 2, &mfi))
+
+ if (merge_mode_and_contents(opt, &null, a, b, collide_path,
+ branch1, branch2, opt->call_depth * 2, &mfi))
return -1;
mfi.clean &= !alt_path;
- if (update_file(o, mfi.clean, &mfi.oid, mfi.mode, update_path))
+ if (update_file(opt, mfi.clean, &mfi.blob, update_path))
return -1;
- if (!mfi.clean && !o->call_depth &&
- update_stages(o, collide_path, NULL, &a, &b))
+ if (!mfi.clean && !opt->call_depth &&
+ update_stages(opt, collide_path, NULL, a, b))
return -1;
free(alt_path);
/*
return mfi.clean;
}
-static int handle_rename_add(struct merge_options *o,
+static int handle_rename_add(struct merge_options *opt,
struct rename_conflict_info *ci)
{
/* a was renamed to c, and a separate c was added. */
- struct diff_filespec *a = ci->pair1->one;
- struct diff_filespec *c = ci->pair1->two;
+ struct diff_filespec *a = ci->ren1->pair->one;
+ struct diff_filespec *c = ci->ren1->pair->two;
char *path = c->path;
char *prev_path_desc;
struct merge_file_info mfi;
- int other_stage = (ci->branch1 == o->branch1 ? 3 : 2);
+ const char *rename_branch = ci->ren1->branch;
+ const char *add_branch = (opt->branch1 == rename_branch ?
+ opt->branch2 : opt->branch1);
+ int other_stage = (ci->ren1->branch == opt->branch1 ? 3 : 2);
- output(o, 1, _("CONFLICT (rename/add): "
+ output(opt, 1, _("CONFLICT (rename/add): "
"Rename %s->%s in %s. Added %s in %s"),
- a->path, c->path, ci->branch1,
- c->path, ci->branch2);
+ a->path, c->path, rename_branch,
+ c->path, add_branch);
prev_path_desc = xstrfmt("version of %s from %s", path, a->path);
- if (merge_mode_and_contents(o, a, c, &ci->ren1_other, prev_path_desc,
- o->branch1, o->branch2,
- 1 + o->call_depth * 2, &mfi))
+ if (merge_mode_and_contents(opt, a, c,
+ &ci->ren1->src_entry->stages[other_stage],
+ prev_path_desc,
+ opt->branch1, opt->branch2,
+ 1 + opt->call_depth * 2, &mfi))
return -1;
free(prev_path_desc);
- return handle_file_collision(o,
+ ci->ren1->dst_entry->stages[other_stage].path = mfi.blob.path = c->path;
+ return handle_file_collision(opt,
c->path, a->path, NULL,
- ci->branch1, ci->branch2,
- &mfi.oid, mfi.mode,
- &ci->dst_entry1->stages[other_stage].oid,
- ci->dst_entry1->stages[other_stage].mode);
+ rename_branch, add_branch,
+ &mfi.blob,
+ &ci->ren1->dst_entry->stages[other_stage]);
}
-static char *find_path_for_conflict(struct merge_options *o,
+static char *find_path_for_conflict(struct merge_options *opt,
const char *path,
const char *branch1,
const char *branch2)
{
char *new_path = NULL;
- if (dir_in_way(o->repo->index, path, !o->call_depth, 0)) {
- new_path = unique_path(o, path, branch1);
- output(o, 1, _("%s is a directory in %s adding "
+ if (dir_in_way(opt->repo->index, path, !opt->call_depth, 0)) {
+ new_path = unique_path(opt, path, branch1);
+ output(opt, 1, _("%s is a directory in %s adding "
"as %s instead"),
path, branch2, new_path);
- } else if (would_lose_untracked(o, path)) {
- new_path = unique_path(o, path, branch1);
- output(o, 1, _("Refusing to lose untracked file"
+ } else if (would_lose_untracked(opt, path)) {
+ new_path = unique_path(opt, path, branch1);
+ output(opt, 1, _("Refusing to lose untracked file"
" at %s; adding as %s instead"),
path, new_path);
}
return new_path;
}
-static int handle_rename_rename_1to2(struct merge_options *o,
+static int handle_rename_rename_1to2(struct merge_options *opt,
struct rename_conflict_info *ci)
{
/* One file was renamed in both branches, but to different names. */
struct merge_file_info mfi;
- struct diff_filespec other;
struct diff_filespec *add;
- struct diff_filespec *one = ci->pair1->one;
- struct diff_filespec *a = ci->pair1->two;
- struct diff_filespec *b = ci->pair2->two;
+ struct diff_filespec *o = ci->ren1->pair->one;
+ struct diff_filespec *a = ci->ren1->pair->two;
+ struct diff_filespec *b = ci->ren2->pair->two;
char *path_desc;
- output(o, 1, _("CONFLICT (rename/rename): "
+ output(opt, 1, _("CONFLICT (rename/rename): "
"Rename \"%s\"->\"%s\" in branch \"%s\" "
"rename \"%s\"->\"%s\" in \"%s\"%s"),
- one->path, a->path, ci->branch1,
- one->path, b->path, ci->branch2,
- o->call_depth ? _(" (left unresolved)") : "");
+ o->path, a->path, ci->ren1->branch,
+ o->path, b->path, ci->ren2->branch,
+ opt->call_depth ? _(" (left unresolved)") : "");
path_desc = xstrfmt("%s and %s, both renamed from %s",
- a->path, b->path, one->path);
- if (merge_mode_and_contents(o, one, a, b, path_desc,
- ci->branch1, ci->branch2,
- o->call_depth * 2, &mfi))
+ a->path, b->path, o->path);
+ if (merge_mode_and_contents(opt, o, a, b, path_desc,
+ ci->ren1->branch, ci->ren2->branch,
+ opt->call_depth * 2, &mfi))
return -1;
free(path_desc);
- if (o->call_depth) {
+ if (opt->call_depth) {
/*
* FIXME: For rename/add-source conflicts (if we could detect
* such), this is wrong. We should instead find a unique
* pathname and then either rename the add-source file to that
* unique path, or use that unique path instead of src here.
*/
- if (update_file(o, 0, &mfi.oid, mfi.mode, one->path))
+ if (update_file(opt, 0, &mfi.blob, o->path))
return -1;
/*
* such cases, we should keep the added file around,
* resolving the conflict at that path in its favor.
*/
- add = filespec_from_entry(&other, ci->dst_entry1, 2 ^ 1);
- if (add) {
- if (update_file(o, 0, &add->oid, add->mode, a->path))
+ add = &ci->ren1->dst_entry->stages[2 ^ 1];
+ if (is_valid(add)) {
+ if (update_file(opt, 0, add, a->path))
return -1;
}
else
- remove_file_from_index(o->repo->index, a->path);
- add = filespec_from_entry(&other, ci->dst_entry2, 3 ^ 1);
- if (add) {
- if (update_file(o, 0, &add->oid, add->mode, b->path))
+ remove_file_from_index(opt->repo->index, a->path);
+ add = &ci->ren2->dst_entry->stages[3 ^ 1];
+ if (is_valid(add)) {
+ if (update_file(opt, 0, add, b->path))
return -1;
}
else
- remove_file_from_index(o->repo->index, b->path);
+ remove_file_from_index(opt->repo->index, b->path);
} else {
/*
* For each destination path, we need to see if there is a
* rename/add collision. If not, we can write the file out
* to the specified location.
*/
- add = filespec_from_entry(&other, ci->dst_entry1, 2 ^ 1);
- if (add) {
- if (handle_file_collision(o, a->path,
+ add = &ci->ren1->dst_entry->stages[2 ^ 1];
+ if (is_valid(add)) {
+ add->path = mfi.blob.path = a->path;
+ if (handle_file_collision(opt, a->path,
NULL, NULL,
- ci->branch1, ci->branch2,
- &mfi.oid, mfi.mode,
- &add->oid, add->mode) < 0)
+ ci->ren1->branch,
+ ci->ren2->branch,
+ &mfi.blob, add) < 0)
return -1;
} else {
- char *new_path = find_path_for_conflict(o, a->path,
- ci->branch1,
- ci->branch2);
- if (update_file(o, 0, &mfi.oid, mfi.mode, new_path ? new_path : a->path))
+ char *new_path = find_path_for_conflict(opt, a->path,
+ ci->ren1->branch,
+ ci->ren2->branch);
+ if (update_file(opt, 0, &mfi.blob,
+ new_path ? new_path : a->path))
return -1;
free(new_path);
- if (update_stages(o, a->path, NULL, a, NULL))
+ if (update_stages(opt, a->path, NULL, a, NULL))
return -1;
}
- add = filespec_from_entry(&other, ci->dst_entry2, 3 ^ 1);
- if (add) {
- if (handle_file_collision(o, b->path,
+ add = &ci->ren2->dst_entry->stages[3 ^ 1];
+ if (is_valid(add)) {
+ add->path = mfi.blob.path = b->path;
+ if (handle_file_collision(opt, b->path,
NULL, NULL,
- ci->branch1, ci->branch2,
- &add->oid, add->mode,
- &mfi.oid, mfi.mode) < 0)
+ ci->ren1->branch,
+ ci->ren2->branch,
+ add, &mfi.blob) < 0)
return -1;
} else {
- char *new_path = find_path_for_conflict(o, b->path,
- ci->branch2,
- ci->branch1);
- if (update_file(o, 0, &mfi.oid, mfi.mode, new_path ? new_path : b->path))
+ char *new_path = find_path_for_conflict(opt, b->path,
+ ci->ren2->branch,
+ ci->ren1->branch);
+ if (update_file(opt, 0, &mfi.blob,
+ new_path ? new_path : b->path))
return -1;
free(new_path);
- if (update_stages(o, b->path, NULL, NULL, b))
+ if (update_stages(opt, b->path, NULL, NULL, b))
return -1;
}
}
return 0;
}
-static int handle_rename_rename_2to1(struct merge_options *o,
+static int handle_rename_rename_2to1(struct merge_options *opt,
struct rename_conflict_info *ci)
{
/* Two files, a & b, were renamed to the same thing, c. */
- struct diff_filespec *a = ci->pair1->one;
- struct diff_filespec *b = ci->pair2->one;
- struct diff_filespec *c1 = ci->pair1->two;
- struct diff_filespec *c2 = ci->pair2->two;
+ struct diff_filespec *a = ci->ren1->pair->one;
+ struct diff_filespec *b = ci->ren2->pair->one;
+ struct diff_filespec *c1 = ci->ren1->pair->two;
+ struct diff_filespec *c2 = ci->ren2->pair->two;
char *path = c1->path; /* == c2->path */
char *path_side_1_desc;
char *path_side_2_desc;
struct merge_file_info mfi_c1;
struct merge_file_info mfi_c2;
+ int ostage1, ostage2;
- output(o, 1, _("CONFLICT (rename/rename): "
+ output(opt, 1, _("CONFLICT (rename/rename): "
"Rename %s->%s in %s. "
"Rename %s->%s in %s"),
- a->path, c1->path, ci->branch1,
- b->path, c2->path, ci->branch2);
+ a->path, c1->path, ci->ren1->branch,
+ b->path, c2->path, ci->ren2->branch);
path_side_1_desc = xstrfmt("version of %s from %s", path, a->path);
path_side_2_desc = xstrfmt("version of %s from %s", path, b->path);
- if (merge_mode_and_contents(o, a, c1, &ci->ren1_other, path_side_1_desc,
- o->branch1, o->branch2,
- 1 + o->call_depth * 2, &mfi_c1) ||
- merge_mode_and_contents(o, b, &ci->ren2_other, c2, path_side_2_desc,
- o->branch1, o->branch2,
- 1 + o->call_depth * 2, &mfi_c2))
+ ostage1 = ci->ren1->branch == opt->branch1 ? 3 : 2;
+ ostage2 = ostage1 ^ 1;
+ ci->ren1->src_entry->stages[ostage1].path = a->path;
+ ci->ren2->src_entry->stages[ostage2].path = b->path;
+ if (merge_mode_and_contents(opt, a, c1,
+ &ci->ren1->src_entry->stages[ostage1],
+ path_side_1_desc,
+ opt->branch1, opt->branch2,
+ 1 + opt->call_depth * 2, &mfi_c1) ||
+ merge_mode_and_contents(opt, b,
+ &ci->ren2->src_entry->stages[ostage2],
+ c2, path_side_2_desc,
+ opt->branch1, opt->branch2,
+ 1 + opt->call_depth * 2, &mfi_c2))
return -1;
free(path_side_1_desc);
free(path_side_2_desc);
+ mfi_c1.blob.path = path;
+ mfi_c2.blob.path = path;
- return handle_file_collision(o, path, a->path, b->path,
- ci->branch1, ci->branch2,
- &mfi_c1.oid, mfi_c1.mode,
- &mfi_c2.oid, mfi_c2.mode);
+ return handle_file_collision(opt, path, a->path, b->path,
+ ci->ren1->branch, ci->ren2->branch,
+ &mfi_c1.blob, &mfi_c2.blob);
}
/*
* Get the diff_filepairs changed between o_tree and tree.
*/
-static struct diff_queue_struct *get_diffpairs(struct merge_options *o,
+static struct diff_queue_struct *get_diffpairs(struct merge_options *opt,
struct tree *o_tree,
struct tree *tree)
{
struct diff_queue_struct *ret;
struct diff_options opts;
- repo_diff_setup(o->repo, &opts);
+ repo_diff_setup(opt->repo, &opts);
opts.flags.recursive = 1;
opts.flags.rename_empty = 0;
- opts.detect_rename = merge_detect_rename(o);
+ opts.detect_rename = merge_detect_rename(opt);
/*
* We do not have logic to handle the detection of copies. In
* fact, it may not even make sense to add such logic: would we
*/
if (opts.detect_rename > DIFF_DETECT_RENAME)
opts.detect_rename = DIFF_DETECT_RENAME;
- opts.rename_limit = o->merge_rename_limit >= 0 ? o->merge_rename_limit :
- o->diff_rename_limit >= 0 ? o->diff_rename_limit :
+ opts.rename_limit = opt->merge_rename_limit >= 0 ? opt->merge_rename_limit :
+ opt->diff_rename_limit >= 0 ? opt->diff_rename_limit :
1000;
- opts.rename_score = o->rename_score;
- opts.show_rename_progress = o->show_rename_progress;
+ opts.rename_score = opt->rename_score;
+ opts.show_rename_progress = opt->show_rename_progress;
opts.output_format = DIFF_FORMAT_NO_OUTPUT;
diff_setup_done(&opts);
diff_tree_oid(&o_tree->object.oid, &tree->object.oid, "", &opts);
diffcore_std(&opts);
- if (opts.needed_rename_limit > o->needed_rename_limit)
- o->needed_rename_limit = opts.needed_rename_limit;
+ if (opts.needed_rename_limit > opt->needed_rename_limit)
+ opt->needed_rename_limit = opts.needed_rename_limit;
ret = xmalloc(sizeof(*ret));
*ret = diff_queued_diff;
static int tree_has_path(struct tree *tree, const char *path)
{
struct object_id hashy;
- unsigned int mode_o;
+ unsigned short mode_o;
return !get_tree_entry(&tree->object.oid, path,
&hashy, &mode_o);
* level conflicts for the renamed location. If there is a rename and
* there are no conflicts, return the new name. Otherwise, return NULL.
*/
-static char *handle_path_level_conflicts(struct merge_options *o,
+static char *handle_path_level_conflicts(struct merge_options *opt,
const char *path,
struct dir_rename_entry *entry,
struct hashmap *collisions,
/* This should only happen when entry->non_unique_new_dir set */
if (!entry->non_unique_new_dir)
BUG("entry->non_unqiue_dir not set and !new_path");
- output(o, 1, _("CONFLICT (directory rename split): "
+ output(opt, 1, _("CONFLICT (directory rename split): "
"Unclear where to place %s because directory "
"%s was renamed to multiple other directories, "
"with no destination getting a majority of the "
collision_ent->reported_already = 1;
strbuf_add_separated_string_list(&collision_paths, ", ",
&collision_ent->source_files);
- output(o, 1, _("CONFLICT (implicit dir rename): Existing "
+ output(opt, 1, _("CONFLICT (implicit dir rename): Existing "
"file/dir at %s in the way of implicit "
"directory rename(s) putting the following "
"path(s) there: %s."),
collision_ent->reported_already = 1;
strbuf_add_separated_string_list(&collision_paths, ", ",
&collision_ent->source_files);
- output(o, 1, _("CONFLICT (implicit dir rename): Cannot map "
+ output(opt, 1, _("CONFLICT (implicit dir rename): Cannot map "
"more than one path to %s; implicit directory "
"renames tried to put these paths there: %s"),
new_path, collision_paths.buf);
* causes conflicts for files within those merged directories, then
* that should be detected at the individual path level.
*/
-static void handle_directory_level_conflicts(struct merge_options *o,
+static void handle_directory_level_conflicts(struct merge_options *opt,
struct hashmap *dir_re_head,
struct tree *head,
struct hashmap *dir_re_merge,
* know that head_ent->new_dir and merge_ent->new_dir
* are different strings.
*/
- output(o, 1, _("CONFLICT (rename/rename): "
+ output(opt, 1, _("CONFLICT (rename/rename): "
"Rename directory %s->%s in %s. "
"Rename directory %s->%s in %s"),
- head_ent->dir, head_ent->new_dir.buf, o->branch1,
- head_ent->dir, merge_ent->new_dir.buf, o->branch2);
+ head_ent->dir, head_ent->new_dir.buf, opt->branch1,
+ head_ent->dir, merge_ent->new_dir.buf, opt->branch2);
string_list_append(&remove_from_head,
head_ent->dir)->util = head_ent;
strbuf_release(&head_ent->new_dir);
remove_hashmap_entries(dir_re_merge, &remove_from_merge);
}
-static struct hashmap *get_directory_renames(struct diff_queue_struct *pairs,
- struct tree *tree)
+static struct hashmap *get_directory_renames(struct diff_queue_struct *pairs)
{
struct hashmap *dir_renames;
struct hashmap_iter iter;
}
}
-static char *check_for_directory_rename(struct merge_options *o,
+static char *check_for_directory_rename(struct merge_options *opt,
const char *path,
struct tree *tree,
struct hashmap *dir_renames,
*/
oentry = dir_rename_find_entry(dir_rename_exclusions, entry->new_dir.buf);
if (oentry) {
- output(o, 1, _("WARNING: Avoiding applying %s -> %s rename "
+ output(opt, 1, _("WARNING: Avoiding applying %s -> %s rename "
"to %s, because %s itself was renamed."),
entry->dir, entry->new_dir.buf, path, entry->new_dir.buf);
} else {
- new_path = handle_path_level_conflicts(o, path, entry,
+ new_path = handle_path_level_conflicts(opt, path, entry,
collisions, tree);
*clean_merge &= (new_path != NULL);
}
return new_path;
}
-static void apply_directory_rename_modifications(struct merge_options *o,
+static void apply_directory_rename_modifications(struct merge_options *opt,
struct diff_filepair *pair,
char *new_path,
struct rename *re,
struct tree *o_tree,
struct tree *a_tree,
struct tree *b_tree,
- struct string_list *entries,
- int *clean)
+ struct string_list *entries)
{
struct string_list_item *item;
int stage = (tree == a_tree ? 2 : 3);
* saying the file would have been overwritten), but it might
* be dirty.
*/
- update_wd = !was_dirty(o, pair->two->path);
+ update_wd = !was_dirty(opt, pair->two->path);
if (!update_wd)
- output(o, 1, _("Refusing to lose dirty file at %s"),
+ output(opt, 1, _("Refusing to lose dirty file at %s"),
pair->two->path);
- remove_file(o, 1, pair->two->path, !update_wd);
+ remove_file(opt, 1, pair->two->path, !update_wd);
/* Find or create a new re->dst_entry */
item = string_list_lookup(entries, new_path);
&re->dst_entry->stages[stage].oid,
&re->dst_entry->stages[stage].mode);
- /* Update pair status */
- if (pair->status == 'A') {
- /*
- * Recording rename information for this add makes it look
- * like a rename/delete conflict. Make sure we can
- * correctly handle this as an add that was moved to a new
- * directory instead of reporting a rename/delete conflict.
- */
- re->add_turned_into_rename = 1;
- }
+ /*
+ * Record the original change status (or 'type' of change). If it
+ * was originally an add ('A'), this lets us differentiate later
+ * between a RENAME_DELETE conflict and RENAME_VIA_DIR (they
+ * otherwise look the same). If it was originally a rename ('R'),
+ * this lets us remember and report accurately about the transitive
+ * renaming that occurred via the directory rename detection. Also,
+ * record the original destination name.
+ */
+ re->dir_rename_original_type = pair->status;
+ re->dir_rename_original_dest = pair->two->path;
+
/*
* We don't actually look at pair->status again, but it seems
* pedagogically correct to adjust it.
* to be able to associate the correct cache entries with the rename
* information; tree is always equal to either a_tree or b_tree.
*/
-static struct string_list *get_renames(struct merge_options *o,
+static struct string_list *get_renames(struct merge_options *opt,
+ const char *branch,
struct diff_queue_struct *pairs,
struct hashmap *dir_renames,
struct hashmap *dir_rename_exclusions,
diff_free_filepair(pair);
continue;
}
- new_path = check_for_directory_rename(o, pair->two->path, tree,
+ new_path = check_for_directory_rename(opt, pair->two->path, tree,
dir_renames,
dir_rename_exclusions,
&collisions,
re = xmalloc(sizeof(*re));
re->processed = 0;
- re->add_turned_into_rename = 0;
re->pair = pair;
+ re->branch = branch;
+ re->dir_rename_original_type = '\0';
+ re->dir_rename_original_dest = NULL;
item = string_list_lookup(entries, re->pair->one->path);
if (!item)
re->src_entry = insert_stage_data(re->pair->one->path,
item = string_list_insert(renames, pair->one->path);
item->util = re;
if (new_path)
- apply_directory_rename_modifications(o, pair, new_path,
+ apply_directory_rename_modifications(opt, pair, new_path,
re, tree, o_tree,
a_tree, b_tree,
- entries,
- clean_merge);
+ entries);
}
hashmap_iter_init(&collisions, &iter);
return renames;
}
-static int process_renames(struct merge_options *o,
+static int process_renames(struct merge_options *opt,
struct string_list *a_renames,
struct string_list *b_renames)
{
for (i = 0, j = 0; i < a_renames->nr || j < b_renames->nr;) {
struct string_list *renames1, *renames2Dst;
struct rename *ren1 = NULL, *ren2 = NULL;
- const char *branch1, *branch2;
const char *ren1_src, *ren1_dst;
struct string_list_item *lookup;
if (ren1) {
renames1 = a_renames;
renames2Dst = &b_by_dst;
- branch1 = o->branch1;
- branch2 = o->branch2;
} else {
renames1 = b_renames;
renames2Dst = &a_by_dst;
- branch1 = o->branch2;
- branch2 = o->branch1;
SWAP(ren2, ren1);
}
* the base stage (think of rename +
* add-source cases).
*/
- remove_file(o, 1, ren1_src, 1);
+ remove_file(opt, 1, ren1_src, 1);
update_entry(ren1->dst_entry,
ren1->pair->one,
ren1->pair->two,
ren2->pair->two);
}
- setup_rename_conflict_info(rename_type,
- ren1->pair,
- ren2->pair,
- branch1,
- branch2,
- ren1->dst_entry,
- ren2->dst_entry,
- o,
- NULL,
- NULL);
+ setup_rename_conflict_info(rename_type, opt, ren1, ren2);
} else if ((lookup = string_list_lookup(renames2Dst, ren1_dst))) {
/* Two different files renamed to the same thing */
char *ren2_dst;
ren2->src_entry->processed = 1;
setup_rename_conflict_info(RENAME_TWO_FILES_TO_ONE,
- ren1->pair,
- ren2->pair,
- branch1,
- branch2,
- ren1->dst_entry,
- ren2->dst_entry,
- o,
- ren1->src_entry,
- ren2->src_entry);
-
+ opt, ren1, ren2);
} else {
/* Renamed in 1, maybe changed in 2 */
/* we only use sha1 and mode of these */
* stage and in other_stage (think of rename +
* add-source case).
*/
- remove_file(o, 1, ren1_src,
- renamed_stage == 2 || !was_tracked(o, ren1_src));
+ remove_file(opt, 1, ren1_src,
+ renamed_stage == 2 || !was_tracked(opt, ren1_src));
oidcpy(&src_other.oid,
&ren1->src_entry->stages[other_stage].oid);
try_merge = 0;
if (oid_eq(&src_other.oid, &null_oid) &&
- ren1->add_turned_into_rename) {
+ ren1->dir_rename_original_type == 'A') {
setup_rename_conflict_info(RENAME_VIA_DIR,
- ren1->pair,
- NULL,
- branch1,
- branch2,
- ren1->dst_entry,
- NULL,
- o,
- NULL,
- NULL);
+ opt, ren1, NULL);
} else if (oid_eq(&src_other.oid, &null_oid)) {
setup_rename_conflict_info(RENAME_DELETE,
- ren1->pair,
- NULL,
- branch1,
- branch2,
- ren1->dst_entry,
- NULL,
- o,
- NULL,
- NULL);
+ opt, ren1, NULL);
} else if ((dst_other.mode == ren1->pair->two->mode) &&
oid_eq(&dst_other.oid, &ren1->pair->two->oid)) {
/*
* update_file_flags() instead of
* update_file().
*/
- if (update_file_flags(o,
- &ren1->pair->two->oid,
- ren1->pair->two->mode,
+ if (update_file_flags(opt,
+ ren1->pair->two,
ren1_dst,
1, /* update_cache */
0 /* update_wd */))
* file, then the merge will be clean.
*/
setup_rename_conflict_info(RENAME_ADD,
- ren1->pair,
- NULL,
- branch1,
- branch2,
- ren1->dst_entry,
- NULL,
- o,
- ren1->src_entry,
- NULL);
+ opt, ren1, NULL);
} else
try_merge = 1;
if (clean_merge < 0)
goto cleanup_and_return;
if (try_merge) {
- struct diff_filespec *one, *a, *b;
+ struct diff_filespec *o, *a, *b;
src_other.path = (char *)ren1_src;
- one = ren1->pair->one;
+ o = ren1->pair->one;
if (a_renames == renames1) {
a = ren1->pair->two;
b = &src_other;
b = ren1->pair->two;
a = &src_other;
}
- update_entry(ren1->dst_entry, one, a, b);
+ update_entry(ren1->dst_entry, o, a, b);
setup_rename_conflict_info(RENAME_NORMAL,
- ren1->pair,
- NULL,
- branch1,
- NULL,
- ren1->dst_entry,
- NULL,
- o,
- NULL,
- NULL);
+ opt, ren1, NULL);
}
}
}
free(pairs);
}
-static int detect_and_process_renames(struct merge_options *o,
+static int detect_and_process_renames(struct merge_options *opt,
struct tree *common,
struct tree *head,
struct tree *merge,
ri->head_renames = NULL;
ri->merge_renames = NULL;
- if (!merge_detect_rename(o))
+ if (!merge_detect_rename(opt))
return 1;
- head_pairs = get_diffpairs(o, common, head);
- merge_pairs = get_diffpairs(o, common, merge);
+ head_pairs = get_diffpairs(opt, common, head);
+ merge_pairs = get_diffpairs(opt, common, merge);
- if (o->detect_directory_renames) {
- dir_re_head = get_directory_renames(head_pairs, head);
- dir_re_merge = get_directory_renames(merge_pairs, merge);
+ if (opt->detect_directory_renames) {
+ dir_re_head = get_directory_renames(head_pairs);
+ dir_re_merge = get_directory_renames(merge_pairs);
- handle_directory_level_conflicts(o,
+ handle_directory_level_conflicts(opt,
dir_re_head, head,
dir_re_merge, merge);
} else {
dir_rename_init(dir_re_merge);
}
- ri->head_renames = get_renames(o, head_pairs,
+ ri->head_renames = get_renames(opt, opt->branch1, head_pairs,
dir_re_merge, dir_re_head, head,
common, head, merge, entries,
&clean);
if (clean < 0)
goto cleanup;
- ri->merge_renames = get_renames(o, merge_pairs,
+ ri->merge_renames = get_renames(opt, opt->branch2, merge_pairs,
dir_re_head, dir_re_merge, merge,
common, head, merge, entries,
&clean);
if (clean < 0)
goto cleanup;
- clean &= process_renames(o, ri->head_renames, ri->merge_renames);
+ clean &= process_renames(opt, ri->head_renames, ri->merge_renames);
cleanup:
/*
final_cleanup_rename(re_info->merge_renames);
}
-static struct object_id *stage_oid(const struct object_id *oid, unsigned mode)
-{
- return (is_null_oid(oid) || mode == 0) ? NULL: (struct object_id *)oid;
-}
-
-static int read_oid_strbuf(struct merge_options *o,
+static int read_oid_strbuf(struct merge_options *opt,
const struct object_id *oid,
struct strbuf *dst)
{
unsigned long size;
buf = read_object_file(oid, &type, &size);
if (!buf)
- return err(o, _("cannot read object %s"), oid_to_hex(oid));
+ return err(opt, _("cannot read object %s"), oid_to_hex(oid));
if (type != OBJ_BLOB) {
free(buf);
- return err(o, _("object %s is not a blob"), oid_to_hex(oid));
+ return err(opt, _("object %s is not a blob"), oid_to_hex(oid));
}
strbuf_attach(dst, buf, size, size + 1);
return 0;
}
static int blob_unchanged(struct merge_options *opt,
- const struct object_id *o_oid,
- unsigned o_mode,
- const struct object_id *a_oid,
- unsigned a_mode,
+ const struct diff_filespec *o,
+ const struct diff_filespec *a,
int renormalize, const char *path)
{
- struct strbuf o = STRBUF_INIT;
- struct strbuf a = STRBUF_INIT;
+ struct strbuf obuf = STRBUF_INIT;
+ struct strbuf abuf = STRBUF_INIT;
int ret = 0; /* assume changed for safety */
+ const struct index_state *idx = opt->repo->index;
- if (a_mode != o_mode)
+ if (a->mode != o->mode)
return 0;
- if (oid_eq(o_oid, a_oid))
+ if (oid_eq(&o->oid, &a->oid))
return 1;
if (!renormalize)
return 0;
- assert(o_oid && a_oid);
- if (read_oid_strbuf(opt, o_oid, &o) || read_oid_strbuf(opt, a_oid, &a))
+ if (read_oid_strbuf(opt, &o->oid, &obuf) ||
+ read_oid_strbuf(opt, &a->oid, &abuf))
goto error_return;
/*
* Note: binary | is used so that both renormalizations are
* performed. Comparison can be skipped if both files are
* unchanged since their sha1s have already been compared.
*/
- if (renormalize_buffer(opt->repo->index, path, o.buf, o.len, &o) |
- renormalize_buffer(opt->repo->index, path, a.buf, a.len, &a))
- ret = (o.len == a.len && !memcmp(o.buf, a.buf, o.len));
+ if (renormalize_buffer(idx, path, obuf.buf, obuf.len, &obuf) |
+ renormalize_buffer(idx, path, abuf.buf, abuf.len, &abuf))
+ ret = (obuf.len == abuf.len && !memcmp(obuf.buf, abuf.buf, obuf.len));
error_return:
- strbuf_release(&o);
- strbuf_release(&a);
+ strbuf_release(&obuf);
+ strbuf_release(&abuf);
return ret;
}
-static int handle_modify_delete(struct merge_options *o,
+static int handle_modify_delete(struct merge_options *opt,
const char *path,
- struct object_id *o_oid, int o_mode,
- struct object_id *a_oid, int a_mode,
- struct object_id *b_oid, int b_mode)
+ const struct diff_filespec *o,
+ const struct diff_filespec *a,
+ const struct diff_filespec *b)
{
const char *modify_branch, *delete_branch;
- struct object_id *changed_oid;
- int changed_mode;
-
- if (a_oid) {
- modify_branch = o->branch1;
- delete_branch = o->branch2;
- changed_oid = a_oid;
- changed_mode = a_mode;
+ const struct diff_filespec *changed;
+
+ if (is_valid(a)) {
+ modify_branch = opt->branch1;
+ delete_branch = opt->branch2;
+ changed = a;
} else {
- modify_branch = o->branch2;
- delete_branch = o->branch1;
- changed_oid = b_oid;
- changed_mode = b_mode;
+ modify_branch = opt->branch2;
+ delete_branch = opt->branch1;
+ changed = b;
}
- return handle_change_delete(o,
+ return handle_change_delete(opt,
path, NULL,
- o_oid, o_mode,
- changed_oid, changed_mode,
+ o, changed,
modify_branch, delete_branch,
_("modify"), _("modified"));
}
-static int handle_content_merge(struct merge_options *o,
+static int handle_content_merge(struct merge_file_info *mfi,
+ struct merge_options *opt,
const char *path,
int is_dirty,
- struct object_id *o_oid, int o_mode,
- struct object_id *a_oid, int a_mode,
- struct object_id *b_oid, int b_mode,
- struct rename_conflict_info *rename_conflict_info)
+ const struct diff_filespec *o,
+ const struct diff_filespec *a,
+ const struct diff_filespec *b,
+ struct rename_conflict_info *ci)
{
const char *reason = _("content");
- const char *path1 = NULL, *path2 = NULL;
- struct merge_file_info mfi;
- struct diff_filespec one, a, b;
unsigned df_conflict_remains = 0;
- if (!o_oid) {
+ if (!is_valid(o))
reason = _("add/add");
- o_oid = (struct object_id *)&null_oid;
- }
- one.path = a.path = b.path = (char *)path;
- oidcpy(&one.oid, o_oid);
- one.mode = o_mode;
- oidcpy(&a.oid, a_oid);
- a.mode = a_mode;
- oidcpy(&b.oid, b_oid);
- b.mode = b_mode;
-
- if (rename_conflict_info) {
- struct diff_filepair *pair1 = rename_conflict_info->pair1;
-
- path1 = (o->branch1 == rename_conflict_info->branch1) ?
- pair1->two->path : pair1->one->path;
- /* If rename_conflict_info->pair2 != NULL, we are in
- * RENAME_ONE_FILE_TO_ONE case. Otherwise, we have a
- * normal rename.
- */
- path2 = (rename_conflict_info->pair2 ||
- o->branch2 == rename_conflict_info->branch1) ?
- pair1->two->path : pair1->one->path;
- one.path = pair1->one->path;
- a.path = (char *)path1;
- b.path = (char *)path2;
-
- if (dir_in_way(o->repo->index, path, !o->call_depth,
- S_ISGITLINK(pair1->two->mode)))
- df_conflict_remains = 1;
- }
- if (merge_mode_and_contents(o, &one, &a, &b, path,
- o->branch1, o->branch2,
- o->call_depth * 2, &mfi))
+
+ assert(o->path && a->path && b->path);
+ if (ci && dir_in_way(opt->repo->index, path, !opt->call_depth,
+ S_ISGITLINK(ci->ren1->pair->two->mode)))
+ df_conflict_remains = 1;
+
+ if (merge_mode_and_contents(opt, o, a, b, path,
+ opt->branch1, opt->branch2,
+ opt->call_depth * 2, mfi))
return -1;
/*
* b) The merge matches what was in HEAD (content, mode, pathname)
* c) The target path is usable (i.e. not involved in D/F conflict)
*/
- if (mfi.clean &&
- was_tracked_and_matches(o, path, &mfi.oid, mfi.mode) &&
+ if (mfi->clean && was_tracked_and_matches(opt, path, &mfi->blob) &&
!df_conflict_remains) {
int pos;
struct cache_entry *ce;
- output(o, 3, _("Skipped %s (merged same as existing)"), path);
- if (add_cacheinfo(o, mfi.mode, &mfi.oid, path,
- 0, (!o->call_depth && !is_dirty), 0))
+ output(opt, 3, _("Skipped %s (merged same as existing)"), path);
+ if (add_cacheinfo(opt, &mfi->blob, path,
+ 0, (!opt->call_depth && !is_dirty), 0))
return -1;
/*
* However, add_cacheinfo() will delete the old cache entry
* flag to avoid making the file appear as if it were
* deleted by the user.
*/
- pos = index_name_pos(&o->orig_index, path, strlen(path));
- ce = o->orig_index.cache[pos];
+ pos = index_name_pos(&opt->orig_index, path, strlen(path));
+ ce = opt->orig_index.cache[pos];
if (ce_skip_worktree(ce)) {
- pos = index_name_pos(o->repo->index, path, strlen(path));
- ce = o->repo->index->cache[pos];
+ pos = index_name_pos(opt->repo->index, path, strlen(path));
+ ce = opt->repo->index->cache[pos];
ce->ce_flags |= CE_SKIP_WORKTREE;
}
- return mfi.clean;
+ return mfi->clean;
}
- if (!mfi.clean) {
- if (S_ISGITLINK(mfi.mode))
+ if (!mfi->clean) {
+ if (S_ISGITLINK(mfi->blob.mode))
reason = _("submodule");
- output(o, 1, _("CONFLICT (%s): Merge conflict in %s"),
+ output(opt, 1, _("CONFLICT (%s): Merge conflict in %s"),
reason, path);
- if (rename_conflict_info && !df_conflict_remains)
- if (update_stages(o, path, &one, &a, &b))
+ if (ci && !df_conflict_remains)
+ if (update_stages(opt, path, o, a, b))
return -1;
}
if (df_conflict_remains || is_dirty) {
char *new_path;
- if (o->call_depth) {
- remove_file_from_index(o->repo->index, path);
+ if (opt->call_depth) {
+ remove_file_from_index(opt->repo->index, path);
} else {
- if (!mfi.clean) {
- if (update_stages(o, path, &one, &a, &b))
+ if (!mfi->clean) {
+ if (update_stages(opt, path, o, a, b))
return -1;
} else {
- int file_from_stage2 = was_tracked(o, path);
- struct diff_filespec merged;
- oidcpy(&merged.oid, &mfi.oid);
- merged.mode = mfi.mode;
-
- if (update_stages(o, path, NULL,
- file_from_stage2 ? &merged : NULL,
- file_from_stage2 ? NULL : &merged))
+ int file_from_stage2 = was_tracked(opt, path);
+
+ if (update_stages(opt, path, NULL,
+ file_from_stage2 ? &mfi->blob : NULL,
+ file_from_stage2 ? NULL : &mfi->blob))
return -1;
}
}
- new_path = unique_path(o, path, rename_conflict_info->branch1);
+ new_path = unique_path(opt, path, ci->ren1->branch);
if (is_dirty) {
- output(o, 1, _("Refusing to lose dirty file at %s"),
+ output(opt, 1, _("Refusing to lose dirty file at %s"),
path);
}
- output(o, 1, _("Adding as %s instead"), new_path);
- if (update_file(o, 0, &mfi.oid, mfi.mode, new_path)) {
+ output(opt, 1, _("Adding as %s instead"), new_path);
+ if (update_file(opt, 0, &mfi->blob, new_path)) {
free(new_path);
return -1;
}
free(new_path);
- mfi.clean = 0;
- } else if (update_file(o, mfi.clean, &mfi.oid, mfi.mode, path))
+ mfi->clean = 0;
+ } else if (update_file(opt, mfi->clean, &mfi->blob, path))
return -1;
- return !is_dirty && mfi.clean;
+ return !is_dirty && mfi->clean;
}
-static int handle_rename_normal(struct merge_options *o,
+static int handle_rename_normal(struct merge_options *opt,
const char *path,
- struct object_id *o_oid, unsigned int o_mode,
- struct object_id *a_oid, unsigned int a_mode,
- struct object_id *b_oid, unsigned int b_mode,
+ const struct diff_filespec *o,
+ const struct diff_filespec *a,
+ const struct diff_filespec *b,
struct rename_conflict_info *ci)
{
+ struct rename *ren = ci->ren1;
+ struct merge_file_info mfi;
+ int clean;
+ int side = (ren->branch == opt->branch1 ? 2 : 3);
+
/* Merge the content and write it out */
- return handle_content_merge(o, path, was_dirty(o, path),
- o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
- ci);
+ clean = handle_content_merge(&mfi, opt, path, was_dirty(opt, path),
+ o, a, b, ci);
+
+ if (clean && opt->detect_directory_renames == 1 &&
+ ren->dir_rename_original_dest) {
+ if (update_stages(opt, path,
+ NULL,
+ side == 2 ? &mfi.blob : NULL,
+ side == 2 ? NULL : &mfi.blob))
+ return -1;
+ clean = 0; /* not clean, but conflicted */
+ }
+ return clean;
+}
+
+static void dir_rename_warning(const char *msg,
+ int is_add,
+ int clean,
+ struct merge_options *opt,
+ struct rename *ren)
+{
+ const char *other_branch;
+ other_branch = (ren->branch == opt->branch1 ?
+ opt->branch2 : opt->branch1);
+ if (is_add) {
+ output(opt, clean ? 2 : 1, msg,
+ ren->pair->one->path, ren->branch,
+ other_branch, ren->pair->two->path);
+ return;
+ }
+ output(opt, clean ? 2 : 1, msg,
+ ren->pair->one->path, ren->dir_rename_original_dest, ren->branch,
+ other_branch, ren->pair->two->path);
+}
+static int warn_about_dir_renamed_entries(struct merge_options *opt,
+ struct rename *ren)
+{
+ const char *msg;
+ int clean = 1, is_add;
+
+ if (!ren)
+ return clean;
+
+ /* Return early if ren was not affected/created by a directory rename */
+ if (!ren->dir_rename_original_dest)
+ return clean;
+
+ /* Sanity checks */
+ assert(opt->detect_directory_renames > 0);
+ assert(ren->dir_rename_original_type == 'A' ||
+ ren->dir_rename_original_type == 'R');
+
+ /* Check whether to treat directory renames as a conflict */
+ clean = (opt->detect_directory_renames == 2);
+
+ is_add = (ren->dir_rename_original_type == 'A');
+ if (ren->dir_rename_original_type == 'A' && clean) {
+ msg = _("Path updated: %s added in %s inside a "
+ "directory that was renamed in %s; moving it to %s.");
+ } else if (ren->dir_rename_original_type == 'A' && !clean) {
+ msg = _("CONFLICT (file location): %s added in %s "
+ "inside a directory that was renamed in %s, "
+ "suggesting it should perhaps be moved to %s.");
+ } else if (ren->dir_rename_original_type == 'R' && clean) {
+ msg = _("Path updated: %s renamed to %s in %s, inside a "
+ "directory that was renamed in %s; moving it to %s.");
+ } else if (ren->dir_rename_original_type == 'R' && !clean) {
+ msg = _("CONFLICT (file location): %s renamed to %s in %s, "
+ "inside a directory that was renamed in %s, "
+ "suggesting it should perhaps be moved to %s.");
+ } else {
+ BUG("Impossible dir_rename_original_type/clean combination");
+ }
+ dir_rename_warning(msg, is_add, clean, opt, ren);
+
+ return clean;
}
/* Per entry merge function */
-static int process_entry(struct merge_options *o,
+static int process_entry(struct merge_options *opt,
const char *path, struct stage_data *entry)
{
int clean_merge = 1;
- int normalize = o->renormalize;
- unsigned o_mode = entry->stages[1].mode;
- unsigned a_mode = entry->stages[2].mode;
- unsigned b_mode = entry->stages[3].mode;
- struct object_id *o_oid = stage_oid(&entry->stages[1].oid, o_mode);
- struct object_id *a_oid = stage_oid(&entry->stages[2].oid, a_mode);
- struct object_id *b_oid = stage_oid(&entry->stages[3].oid, b_mode);
+ int normalize = opt->renormalize;
+
+ struct diff_filespec *o = &entry->stages[1];
+ struct diff_filespec *a = &entry->stages[2];
+ struct diff_filespec *b = &entry->stages[3];
+ int o_valid = is_valid(o);
+ int a_valid = is_valid(a);
+ int b_valid = is_valid(b);
+ o->path = a->path = b->path = (char*)path;
entry->processed = 1;
if (entry->rename_conflict_info) {
- struct rename_conflict_info *conflict_info = entry->rename_conflict_info;
- switch (conflict_info->rename_type) {
+ struct rename_conflict_info *ci = entry->rename_conflict_info;
+ struct diff_filespec *temp;
+ int path_clean;
+
+ path_clean = warn_about_dir_renamed_entries(opt, ci->ren1);
+ path_clean &= warn_about_dir_renamed_entries(opt, ci->ren2);
+
+ /*
+ * For cases with a single rename, {o,a,b}->path have all been
+ * set to the rename target path; we need to set two of these
+ * back to the rename source.
+ * For rename/rename conflicts, we'll manually fix paths below.
+ */
+ temp = (opt->branch1 == ci->ren1->branch) ? b : a;
+ o->path = temp->path = ci->ren1->pair->one->path;
+ if (ci->ren2) {
+ assert(opt->branch1 == ci->ren1->branch);
+ }
+
+ switch (ci->rename_type) {
case RENAME_NORMAL:
case RENAME_ONE_FILE_TO_ONE:
- clean_merge = handle_rename_normal(o,
- path,
- o_oid, o_mode,
- a_oid, a_mode,
- b_oid, b_mode,
- conflict_info);
+ clean_merge = handle_rename_normal(opt, path, o, a, b,
+ ci);
break;
case RENAME_VIA_DIR:
- clean_merge = 1;
- if (handle_rename_via_dir(o,
- conflict_info->pair1,
- conflict_info->branch1,
- conflict_info->branch2))
- clean_merge = -1;
+ clean_merge = handle_rename_via_dir(opt, ci);
break;
case RENAME_ADD:
/*
* two-way merged cleanly with the added file, I
* guess it's a clean merge?
*/
- clean_merge = handle_rename_add(o, conflict_info);
+ clean_merge = handle_rename_add(opt, ci);
break;
case RENAME_DELETE:
clean_merge = 0;
- if (handle_rename_delete(o,
- conflict_info->pair1,
- conflict_info->branch1,
- conflict_info->branch2))
+ if (handle_rename_delete(opt, ci))
clean_merge = -1;
break;
case RENAME_ONE_FILE_TO_TWO:
+ /*
+ * Manually fix up paths; note:
+ * ren[12]->pair->one->path are equal.
+ */
+ o->path = ci->ren1->pair->one->path;
+ a->path = ci->ren1->pair->two->path;
+ b->path = ci->ren2->pair->two->path;
+
clean_merge = 0;
- if (handle_rename_rename_1to2(o, conflict_info))
+ if (handle_rename_rename_1to2(opt, ci))
clean_merge = -1;
break;
case RENAME_TWO_FILES_TO_ONE:
+ /*
+ * Manually fix up paths; note,
+ * ren[12]->pair->two->path are actually equal.
+ */
+ o->path = NULL;
+ a->path = ci->ren1->pair->two->path;
+ b->path = ci->ren2->pair->two->path;
+
/*
* Probably unclean merge, but if the two renamed
* files merge cleanly and the two resulting files
* can then be two-way merged cleanly, I guess it's
* a clean merge?
*/
- clean_merge = handle_rename_rename_2to1(o,
- conflict_info);
+ clean_merge = handle_rename_rename_2to1(opt, ci);
break;
default:
entry->processed = 0;
break;
}
- } else if (o_oid && (!a_oid || !b_oid)) {
+ if (path_clean < clean_merge)
+ clean_merge = path_clean;
+ } else if (o_valid && (!a_valid || !b_valid)) {
/* Case A: Deleted in one */
- if ((!a_oid && !b_oid) ||
- (!b_oid && blob_unchanged(o, o_oid, o_mode, a_oid, a_mode, normalize, path)) ||
- (!a_oid && blob_unchanged(o, o_oid, o_mode, b_oid, b_mode, normalize, path))) {
+ if ((!a_valid && !b_valid) ||
+ (!b_valid && blob_unchanged(opt, o, a, normalize, path)) ||
+ (!a_valid && blob_unchanged(opt, o, b, normalize, path))) {
/* Deleted in both or deleted in one and
* unchanged in the other */
- if (a_oid)
- output(o, 2, _("Removing %s"), path);
+ if (a_valid)
+ output(opt, 2, _("Removing %s"), path);
/* do not touch working file if it did not exist */
- remove_file(o, 1, path, !a_oid);
+ remove_file(opt, 1, path, !a_valid);
} else {
/* Modify/delete; deleted side may have put a directory in the way */
clean_merge = 0;
- if (handle_modify_delete(o, path, o_oid, o_mode,
- a_oid, a_mode, b_oid, b_mode))
+ if (handle_modify_delete(opt, path, o, a, b))
clean_merge = -1;
}
- } else if ((!o_oid && a_oid && !b_oid) ||
- (!o_oid && !a_oid && b_oid)) {
+ } else if ((!o_valid && a_valid && !b_valid) ||
+ (!o_valid && !a_valid && b_valid)) {
/* Case B: Added in one. */
/* [nothing|directory] -> ([nothing|directory], file) */
const char *add_branch;
const char *other_branch;
- unsigned mode;
- const struct object_id *oid;
const char *conf;
+ const struct diff_filespec *contents;
- if (a_oid) {
- add_branch = o->branch1;
- other_branch = o->branch2;
- mode = a_mode;
- oid = a_oid;
+ if (a_valid) {
+ add_branch = opt->branch1;
+ other_branch = opt->branch2;
+ contents = a;
conf = _("file/directory");
} else {
- add_branch = o->branch2;
- other_branch = o->branch1;
- mode = b_mode;
- oid = b_oid;
+ add_branch = opt->branch2;
+ other_branch = opt->branch1;
+ contents = b;
conf = _("directory/file");
}
- if (dir_in_way(o->repo->index, path,
- !o->call_depth && !S_ISGITLINK(a_mode),
+ if (dir_in_way(opt->repo->index, path,
+ !opt->call_depth && !S_ISGITLINK(a->mode),
0)) {
- char *new_path = unique_path(o, path, add_branch);
+ char *new_path = unique_path(opt, path, add_branch);
clean_merge = 0;
- output(o, 1, _("CONFLICT (%s): There is a directory with name %s in %s. "
+ output(opt, 1, _("CONFLICT (%s): There is a directory with name %s in %s. "
"Adding %s as %s"),
conf, path, other_branch, path, new_path);
- if (update_file(o, 0, oid, mode, new_path))
+ if (update_file(opt, 0, contents, new_path))
clean_merge = -1;
- else if (o->call_depth)
- remove_file_from_index(o->repo->index, path);
+ else if (opt->call_depth)
+ remove_file_from_index(opt->repo->index, path);
free(new_path);
} else {
- output(o, 2, _("Adding %s"), path);
+ output(opt, 2, _("Adding %s"), path);
/* do not overwrite file if already present */
- if (update_file_flags(o, oid, mode, path, 1, !a_oid))
+ if (update_file_flags(opt, contents, path, 1, !a_valid))
clean_merge = -1;
}
- } else if (a_oid && b_oid) {
- if (!o_oid) {
+ } else if (a_valid && b_valid) {
+ if (!o_valid) {
/* Case C: Added in both (check for same permissions) */
- output(o, 1,
+ output(opt, 1,
_("CONFLICT (add/add): Merge conflict in %s"),
path);
- clean_merge = handle_file_collision(o,
+ clean_merge = handle_file_collision(opt,
path, NULL, NULL,
- o->branch1,
- o->branch2,
- a_oid, a_mode,
- b_oid, b_mode);
+ opt->branch1,
+ opt->branch2,
+ a, b);
} else {
/* case D: Modified in both, but differently. */
+ struct merge_file_info mfi;
int is_dirty = 0; /* unpack_trees would have bailed if dirty */
- clean_merge = handle_content_merge(o, path,
+ clean_merge = handle_content_merge(&mfi, opt, path,
is_dirty,
- o_oid, o_mode,
- a_oid, a_mode,
- b_oid, b_mode,
- NULL);
+ o, a, b, NULL);
}
- } else if (!o_oid && !a_oid && !b_oid) {
+ } else if (!o_valid && !a_valid && !b_valid) {
/*
* this entry was deleted altogether. a_mode == 0 means
* we had that path and want to actively remove it.
*/
- remove_file(o, 1, path, !a_mode);
+ remove_file(opt, 1, path, !a->mode);
} else
BUG("fatal merge failure, shouldn't happen.");
return clean_merge;
}
-int merge_trees(struct merge_options *o,
+int merge_trees(struct merge_options *opt,
struct tree *head,
struct tree *merge,
struct tree *common,
struct tree **result)
{
- struct index_state *istate = o->repo->index;
+ struct index_state *istate = opt->repo->index;
int code, clean;
struct strbuf sb = STRBUF_INIT;
- if (!o->call_depth && repo_index_has_changes(o->repo, head, &sb)) {
- err(o, _("Your local changes to the following files would be overwritten by merge:\n %s"),
+ if (!opt->call_depth && repo_index_has_changes(opt->repo, head, &sb)) {
+ err(opt, _("Your local changes to the following files would be overwritten by merge:\n %s"),
sb.buf);
return -1;
}
- if (o->subtree_shift) {
- merge = shift_tree_object(o->repo, head, merge, o->subtree_shift);
- common = shift_tree_object(o->repo, head, common, o->subtree_shift);
+ if (opt->subtree_shift) {
+ merge = shift_tree_object(opt->repo, head, merge, opt->subtree_shift);
+ common = shift_tree_object(opt->repo, head, common, opt->subtree_shift);
}
if (oid_eq(&common->object.oid, &merge->object.oid)) {
- output(o, 0, _("Already up to date!"));
+ output(opt, 0, _("Already up to date!"));
*result = head;
return 1;
}
- code = unpack_trees_start(o, common, head, merge);
+ code = unpack_trees_start(opt, common, head, merge);
if (code != 0) {
- if (show(o, 4) || o->call_depth)
- err(o, _("merging of trees %s and %s failed"),
+ if (show(opt, 4) || opt->call_depth)
+ err(opt, _("merging of trees %s and %s failed"),
oid_to_hex(&head->object.oid),
oid_to_hex(&merge->object.oid));
- unpack_trees_finish(o);
+ unpack_trees_finish(opt);
return -1;
}
* opposed to declaring a local hashmap is for convenience
* so that we don't have to pass it around.
*/
- hashmap_init(&o->current_file_dir_set, path_hashmap_cmp, NULL, 512);
- get_files_dirs(o, head);
- get_files_dirs(o, merge);
+ hashmap_init(&opt->current_file_dir_set, path_hashmap_cmp, NULL, 512);
+ get_files_dirs(opt, head);
+ get_files_dirs(opt, merge);
- entries = get_unmerged(o->repo->index);
- clean = detect_and_process_renames(o, common, head, merge,
+ entries = get_unmerged(opt->repo->index);
+ clean = detect_and_process_renames(opt, common, head, merge,
entries, &re_info);
- record_df_conflict_files(o, entries);
+ record_df_conflict_files(opt, entries);
if (clean < 0)
goto cleanup;
for (i = entries->nr-1; 0 <= i; i--) {
const char *path = entries->items[i].string;
struct stage_data *e = entries->items[i].util;
if (!e->processed) {
- int ret = process_entry(o, path, e);
+ int ret = process_entry(opt, path, e);
if (!ret)
clean = 0;
else if (ret < 0) {
string_list_clear(entries, 1);
free(entries);
- hashmap_free(&o->current_file_dir_set, 1);
+ hashmap_free(&opt->current_file_dir_set, 1);
if (clean < 0) {
- unpack_trees_finish(o);
+ unpack_trees_finish(opt);
return clean;
}
}
else
clean = 1;
- unpack_trees_finish(o);
+ unpack_trees_finish(opt);
- if (o->call_depth && !(*result = write_tree_from_memory(o)))
+ if (opt->call_depth && !(*result = write_tree_from_memory(opt)))
return -1;
return clean;
* Merge the commits h1 and h2, return the resulting virtual
* commit object and a flag indicating the cleanness of the merge.
*/
-int merge_recursive(struct merge_options *o,
+int merge_recursive(struct merge_options *opt,
struct commit *h1,
struct commit *h2,
struct commit_list *ca,
struct tree *mrtree;
int clean;
- if (show(o, 4)) {
- output(o, 4, _("Merging:"));
- output_commit_title(o, h1);
- output_commit_title(o, h2);
+ if (show(opt, 4)) {
+ output(opt, 4, _("Merging:"));
+ output_commit_title(opt, h1);
+ output_commit_title(opt, h2);
}
if (!ca) {
ca = reverse_commit_list(ca);
}
- if (show(o, 5)) {
+ if (show(opt, 5)) {
unsigned cnt = commit_list_count(ca);
- output(o, 5, Q_("found %u common ancestor:",
+ output(opt, 5, Q_("found %u common ancestor:",
"found %u common ancestors:", cnt), cnt);
for (iter = ca; iter; iter = iter->next)
- output_commit_title(o, iter->item);
+ output_commit_title(opt, iter->item);
}
merged_common_ancestors = pop_commit(&ca);
/* if there is no common ancestor, use an empty tree */
struct tree *tree;
- tree = lookup_tree(o->repo, o->repo->hash_algo->empty_tree);
- merged_common_ancestors = make_virtual_commit(o->repo, tree, "ancestor");
+ tree = lookup_tree(opt->repo, opt->repo->hash_algo->empty_tree);
+ merged_common_ancestors = make_virtual_commit(opt->repo, tree, "ancestor");
}
for (iter = ca; iter; iter = iter->next) {
const char *saved_b1, *saved_b2;
- o->call_depth++;
+ opt->call_depth++;
/*
* When the merge fails, the result contains files
* with conflict markers. The cleanness flag is
* overwritten it: the committed "conflicts" were
* already resolved.
*/
- discard_index(o->repo->index);
- saved_b1 = o->branch1;
- saved_b2 = o->branch2;
- o->branch1 = "Temporary merge branch 1";
- o->branch2 = "Temporary merge branch 2";
- if (merge_recursive(o, merged_common_ancestors, iter->item,
+ discard_index(opt->repo->index);
+ saved_b1 = opt->branch1;
+ saved_b2 = opt->branch2;
+ opt->branch1 = "Temporary merge branch 1";
+ opt->branch2 = "Temporary merge branch 2";
+ if (merge_recursive(opt, merged_common_ancestors, iter->item,
NULL, &merged_common_ancestors) < 0)
return -1;
- o->branch1 = saved_b1;
- o->branch2 = saved_b2;
- o->call_depth--;
+ opt->branch1 = saved_b1;
+ opt->branch2 = saved_b2;
+ opt->call_depth--;
if (!merged_common_ancestors)
- return err(o, _("merge returned no commit"));
+ return err(opt, _("merge returned no commit"));
}
- discard_index(o->repo->index);
- if (!o->call_depth)
- repo_read_index(o->repo);
+ discard_index(opt->repo->index);
+ if (!opt->call_depth)
+ repo_read_index(opt->repo);
- o->ancestor = "merged common ancestors";
- clean = merge_trees(o, get_commit_tree(h1), get_commit_tree(h2),
+ opt->ancestor = "merged common ancestors";
+ clean = merge_trees(opt, get_commit_tree(h1), get_commit_tree(h2),
get_commit_tree(merged_common_ancestors),
&mrtree);
if (clean < 0) {
- flush_output(o);
+ flush_output(opt);
return clean;
}
- if (o->call_depth) {
- *result = make_virtual_commit(o->repo, mrtree, "merged tree");
+ if (opt->call_depth) {
+ *result = make_virtual_commit(opt->repo, mrtree, "merged tree");
commit_list_insert(h1, &(*result)->parents);
commit_list_insert(h2, &(*result)->parents->next);
}
- flush_output(o);
- if (!o->call_depth && o->buffer_output < 2)
- strbuf_release(&o->obuf);
- if (show(o, 2))
+ flush_output(opt);
+ if (!opt->call_depth && opt->buffer_output < 2)
+ strbuf_release(&opt->obuf);
+ if (show(opt, 2))
diff_warn_rename_limit("merge.renamelimit",
- o->needed_rename_limit, 0);
+ opt->needed_rename_limit, 0);
return clean;
}
return (struct commit *)object;
}
-int merge_recursive_generic(struct merge_options *o,
+int merge_recursive_generic(struct merge_options *opt,
const struct object_id *head,
const struct object_id *merge,
int num_base_list,
{
int clean;
struct lock_file lock = LOCK_INIT;
- struct commit *head_commit = get_ref(o->repo, head, o->branch1);
- struct commit *next_commit = get_ref(o->repo, merge, o->branch2);
+ struct commit *head_commit = get_ref(opt->repo, head, opt->branch1);
+ struct commit *next_commit = get_ref(opt->repo, merge, opt->branch2);
struct commit_list *ca = NULL;
if (base_list) {
int i;
for (i = 0; i < num_base_list; ++i) {
struct commit *base;
- if (!(base = get_ref(o->repo, base_list[i], oid_to_hex(base_list[i]))))
- return err(o, _("Could not parse object '%s'"),
+ if (!(base = get_ref(opt->repo, base_list[i], oid_to_hex(base_list[i]))))
+ return err(opt, _("Could not parse object '%s'"),
oid_to_hex(base_list[i]));
commit_list_insert(base, &ca);
}
}
- repo_hold_locked_index(o->repo, &lock, LOCK_DIE_ON_ERROR);
- clean = merge_recursive(o, head_commit, next_commit, ca,
+ repo_hold_locked_index(opt->repo, &lock, LOCK_DIE_ON_ERROR);
+ clean = merge_recursive(opt, head_commit, next_commit, ca,
result);
if (clean < 0) {
rollback_lock_file(&lock);
return clean;
}
- if (write_locked_index(o->repo->index, &lock,
+ if (write_locked_index(opt->repo->index, &lock,
COMMIT_LOCK | SKIP_IF_UNCHANGED))
- return err(o, _("Unable to write index."));
+ return err(opt, _("Unable to write index."));
return clean ? 0 : 1;
}
-static void merge_recursive_config(struct merge_options *o)
+static void merge_recursive_config(struct merge_options *opt)
{
char *value = NULL;
- git_config_get_int("merge.verbosity", &o->verbosity);
- git_config_get_int("diff.renamelimit", &o->diff_rename_limit);
- git_config_get_int("merge.renamelimit", &o->merge_rename_limit);
+ git_config_get_int("merge.verbosity", &opt->verbosity);
+ git_config_get_int("diff.renamelimit", &opt->diff_rename_limit);
+ git_config_get_int("merge.renamelimit", &opt->merge_rename_limit);
if (!git_config_get_string("diff.renames", &value)) {
- o->diff_detect_rename = git_config_rename("diff.renames", value);
+ opt->diff_detect_rename = git_config_rename("diff.renames", value);
free(value);
}
if (!git_config_get_string("merge.renames", &value)) {
- o->merge_detect_rename = git_config_rename("merge.renames", value);
+ opt->merge_detect_rename = git_config_rename("merge.renames", value);
+ free(value);
+ }
+ if (!git_config_get_string("merge.directoryrenames", &value)) {
+ int boolval = git_parse_maybe_bool(value);
+ if (0 <= boolval) {
+ opt->detect_directory_renames = boolval ? 2 : 0;
+ } else if (!strcasecmp(value, "conflict")) {
+ opt->detect_directory_renames = 1;
+ } /* avoid erroring on values from future versions of git */
free(value);
}
git_config(git_xmerge_config, NULL);
}
-void init_merge_options(struct merge_options *o,
+void init_merge_options(struct merge_options *opt,
struct repository *repo)
{
const char *merge_verbosity;
- memset(o, 0, sizeof(struct merge_options));
- o->repo = repo;
- o->verbosity = 2;
- o->buffer_output = 1;
- o->diff_rename_limit = -1;
- o->merge_rename_limit = -1;
- o->renormalize = 0;
- o->diff_detect_rename = -1;
- o->merge_detect_rename = -1;
- o->detect_directory_renames = 1;
- merge_recursive_config(o);
+ memset(opt, 0, sizeof(struct merge_options));
+ opt->repo = repo;
+ opt->verbosity = 2;
+ opt->buffer_output = 1;
+ opt->diff_rename_limit = -1;
+ opt->merge_rename_limit = -1;
+ opt->renormalize = 0;
+ opt->diff_detect_rename = -1;
+ opt->merge_detect_rename = -1;
+ opt->detect_directory_renames = 1;
+ merge_recursive_config(opt);
merge_verbosity = getenv("GIT_MERGE_VERBOSITY");
if (merge_verbosity)
- o->verbosity = strtol(merge_verbosity, NULL, 10);
- if (o->verbosity >= 5)
- o->buffer_output = 0;
- strbuf_init(&o->obuf, 0);
- string_list_init(&o->df_conflict_file_set, 1);
+ opt->verbosity = strtol(merge_verbosity, NULL, 10);
+ if (opt->verbosity >= 5)
+ opt->buffer_output = 0;
+ strbuf_init(&opt->obuf, 0);
+ string_list_init(&opt->df_conflict_file_set, 1);
}
-int parse_merge_opt(struct merge_options *o, const char *s)
+int parse_merge_opt(struct merge_options *opt, const char *s)
{
const char *arg;
if (!s || !*s)
return -1;
if (!strcmp(s, "ours"))
- o->recursive_variant = MERGE_RECURSIVE_OURS;
+ opt->recursive_variant = MERGE_RECURSIVE_OURS;
else if (!strcmp(s, "theirs"))
- o->recursive_variant = MERGE_RECURSIVE_THEIRS;
+ opt->recursive_variant = MERGE_RECURSIVE_THEIRS;
else if (!strcmp(s, "subtree"))
- o->subtree_shift = "";
+ opt->subtree_shift = "";
else if (skip_prefix(s, "subtree=", &arg))
- o->subtree_shift = arg;
+ opt->subtree_shift = arg;
else if (!strcmp(s, "patience"))
- o->xdl_opts = DIFF_WITH_ALG(o, PATIENCE_DIFF);
+ opt->xdl_opts = DIFF_WITH_ALG(opt, PATIENCE_DIFF);
else if (!strcmp(s, "histogram"))
- o->xdl_opts = DIFF_WITH_ALG(o, HISTOGRAM_DIFF);
+ opt->xdl_opts = DIFF_WITH_ALG(opt, HISTOGRAM_DIFF);
else if (skip_prefix(s, "diff-algorithm=", &arg)) {
long value = parse_algorithm_value(arg);
if (value < 0)
return -1;
/* clear out previous settings */
- DIFF_XDL_CLR(o, NEED_MINIMAL);
- o->xdl_opts &= ~XDF_DIFF_ALGORITHM_MASK;
- o->xdl_opts |= value;
+ DIFF_XDL_CLR(opt, NEED_MINIMAL);
+ opt->xdl_opts &= ~XDF_DIFF_ALGORITHM_MASK;
+ opt->xdl_opts |= value;
}
else if (!strcmp(s, "ignore-space-change"))
- DIFF_XDL_SET(o, IGNORE_WHITESPACE_CHANGE);
+ DIFF_XDL_SET(opt, IGNORE_WHITESPACE_CHANGE);
else if (!strcmp(s, "ignore-all-space"))
- DIFF_XDL_SET(o, IGNORE_WHITESPACE);
+ DIFF_XDL_SET(opt, IGNORE_WHITESPACE);
else if (!strcmp(s, "ignore-space-at-eol"))
- DIFF_XDL_SET(o, IGNORE_WHITESPACE_AT_EOL);
+ DIFF_XDL_SET(opt, IGNORE_WHITESPACE_AT_EOL);
else if (!strcmp(s, "ignore-cr-at-eol"))
- DIFF_XDL_SET(o, IGNORE_CR_AT_EOL);
+ DIFF_XDL_SET(opt, IGNORE_CR_AT_EOL);
else if (!strcmp(s, "renormalize"))
- o->renormalize = 1;
+ opt->renormalize = 1;
else if (!strcmp(s, "no-renormalize"))
- o->renormalize = 0;
+ opt->renormalize = 0;
else if (!strcmp(s, "no-renames"))
- o->merge_detect_rename = 0;
+ opt->merge_detect_rename = 0;
else if (!strcmp(s, "find-renames")) {
- o->merge_detect_rename = 1;
- o->rename_score = 0;
+ opt->merge_detect_rename = 1;
+ opt->rename_score = 0;
}
else if (skip_prefix(s, "find-renames=", &arg) ||
skip_prefix(s, "rename-threshold=", &arg)) {
- if ((o->rename_score = parse_rename_score(&arg)) == -1 || *arg != 0)
+ if ((opt->rename_score = parse_rename_score(&arg)) == -1 || *arg != 0)
return -1;
- o->merge_detect_rename = 1;
+ opt->merge_detect_rename = 1;
}
+ /*
+ * Please update $__git_merge_strategy_options in
+ * git-completion.bash when you add new options
+ */
else
return -1;
return 0;
--- /dev/null
+diff_cmd () {
+ "$merge_tool_path" mergetool "$LOCAL" "$REMOTE" -o "$MERGED"
+}
+
+merge_cmd () {
+ if $base_present
+ then
+ "$merge_tool_path" mergetool "$BASE" "$LOCAL" "$REMOTE" -o "$MERGED"
+ else
+ "$merge_tool_path" mergetool "$LOCAL" "$REMOTE" -o "$MERGED"
+ fi
+}
#include "sha1-lookup.h"
#include "midx.h"
#include "progress.h"
+#include "trace2.h"
#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
#define MIDX_VERSION 1
midx_map = xmmap(NULL, midx_size, PROT_READ, MAP_PRIVATE, fd, 0);
- FLEX_ALLOC_MEM(m, object_dir, object_dir, strlen(object_dir));
+ FLEX_ALLOC_STR(m, object_dir, object_dir);
m->fd = fd;
m->data = midx_map;
m->data_len = midx_size;
m->pack_names[i]);
}
+ trace2_data_intmax("midx", the_repository, "load/num_packs", m->num_packs);
+ trace2_data_intmax("midx", the_repository, "load/num_objects", m->num_objects);
+
return m;
cleanup_fail:
m->fd = -1;
for (i = 0; i < m->num_packs; i++) {
- if (m->packs[i]) {
- close_pack(m->packs[i]);
- free(m->packs[i]);
- }
+ if (m->packs[i])
+ m->packs[i]->multi_pack_index = 0;
}
FREE_AND_NULL(m->packs);
FREE_AND_NULL(m->pack_names);
}
-int prepare_midx_pack(struct multi_pack_index *m, uint32_t pack_int_id)
+int prepare_midx_pack(struct repository *r, struct multi_pack_index *m, uint32_t pack_int_id)
{
struct strbuf pack_name = STRBUF_INIT;
+ struct packed_git *p;
if (pack_int_id >= m->num_packs)
die(_("bad pack-int-id: %u (%u total packs)"),
strbuf_addf(&pack_name, "%s/pack/%s", m->object_dir,
m->pack_names[pack_int_id]);
- m->packs[pack_int_id] = add_packed_git(pack_name.buf, pack_name.len, m->local);
+ p = add_packed_git(pack_name.buf, pack_name.len, m->local);
strbuf_release(&pack_name);
- return !m->packs[pack_int_id];
+
+ if (!p)
+ return 1;
+
+ p->multi_pack_index = 1;
+ m->packs[pack_int_id] = p;
+ install_packed_git(r, p);
+ list_add_tail(&p->mru, &r->objects->packed_git_mru);
+
+ return 0;
}
int bsearch_midx(const struct object_id *oid, struct multi_pack_index *m, uint32_t *result)
return get_be32(m->chunk_object_offsets + pos * MIDX_CHUNK_OFFSET_WIDTH);
}
-static int nth_midxed_pack_entry(struct multi_pack_index *m, struct pack_entry *e, uint32_t pos)
+static int nth_midxed_pack_entry(struct repository *r,
+ struct multi_pack_index *m,
+ struct pack_entry *e,
+ uint32_t pos)
{
uint32_t pack_int_id;
struct packed_git *p;
pack_int_id = nth_midxed_pack_int_id(m, pos);
- if (prepare_midx_pack(m, pack_int_id))
+ if (prepare_midx_pack(r, m, pack_int_id))
die(_("error preparing packfile from multi-pack-index"));
p = m->packs[pack_int_id];
return 1;
}
-int fill_midx_entry(const struct object_id *oid, struct pack_entry *e, struct multi_pack_index *m)
+int fill_midx_entry(struct repository * r,
+ const struct object_id *oid,
+ struct pack_entry *e,
+ struct multi_pack_index *m)
{
uint32_t pos;
if (!bsearch_midx(oid, m, &pos))
return 0;
- return nth_midxed_pack_entry(m, e, pos);
+ return nth_midxed_pack_entry(r, m, e, pos);
+}
+
+/* Match "foo.idx" against either "foo.pack" _or_ "foo.idx". */
+static int cmp_idx_or_pack_name(const char *idx_or_pack_name,
+ const char *idx_name)
+{
+ /* Skip past any initial matching prefix. */
+ while (*idx_name && *idx_name == *idx_or_pack_name) {
+ idx_name++;
+ idx_or_pack_name++;
+ }
+
+ /*
+ * If we didn't match completely, we may have matched "pack-1234." and
+ * be left with "idx" and "pack" respectively, which is also OK. We do
+ * not have to check for "idx" and "idx", because that would have been
+ * a complete match (and in that case these strcmps will be false, but
+ * we'll correctly return 0 from the final strcmp() below).
+ *
+ * Technically this matches "fooidx" and "foopack", but we'd never have
+ * such names in the first place.
+ */
+ if (!strcmp(idx_name, "idx") && !strcmp(idx_or_pack_name, "pack"))
+ return 0;
+
+ /*
+ * This not only checks for a complete match, but also orders based on
+ * the first non-identical character, which means our ordering will
+ * match a raw strcmp(). That makes it OK to use this to binary search
+ * a naively-sorted list.
+ */
+ return strcmp(idx_or_pack_name, idx_name);
}
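For readers skimming the patch, here is a minimal standalone sketch (not part of the change itself) of how the prefix-plus-extension comparison above behaves on typical names; demo_cmp() is a hypothetical stand-in that simply mirrors the logic of cmp_idx_or_pack_name().

/* Standalone illustration only; compile with any C compiler and run. */
#include <stdio.h>
#include <string.h>

static int demo_cmp(const char *idx_or_pack_name, const char *idx_name)
{
	/* Skip past any initial matching prefix. */
	while (*idx_name && *idx_name == *idx_or_pack_name) {
		idx_name++;
		idx_or_pack_name++;
	}
	/* "pack-1234.pack" queried against stored "pack-1234.idx" also matches. */
	if (!strcmp(idx_name, "idx") && !strcmp(idx_or_pack_name, "pack"))
		return 0;
	return strcmp(idx_or_pack_name, idx_name);
}

int main(void)
{
	/* 0 (match): same pack, queried by its .pack name */
	printf("%d\n", demo_cmp("pack-1234.pack", "pack-1234.idx"));
	/* 0 (match): exact .idx name */
	printf("%d\n", demo_cmp("pack-1234.idx", "pack-1234.idx"));
	/* non-zero: different pack; sign follows a raw strcmp() ordering */
	printf("%d\n", demo_cmp("pack-9999.idx", "pack-1234.idx"));
	return 0;
}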
-int midx_contains_pack(struct multi_pack_index *m, const char *idx_name)
+int midx_contains_pack(struct multi_pack_index *m, const char *idx_or_pack_name)
{
uint32_t first = 0, last = m->num_packs;
int cmp;
current = m->pack_names[mid];
- cmp = strcmp(idx_name, current);
+ cmp = cmp_idx_or_pack_name(idx_or_pack_name, current);
if (!cmp)
return 1;
if (cmp > 0) {
va_end(ap);
}
-int verify_midx_file(const char *object_dir)
+struct pair_pos_vs_id
+{
+ uint32_t pos;
+ uint32_t pack_int_id;
+};
+
+static int compare_pair_pos_vs_id(const void *_a, const void *_b)
+{
+ struct pair_pos_vs_id *a = (struct pair_pos_vs_id *)_a;
+ struct pair_pos_vs_id *b = (struct pair_pos_vs_id *)_b;
+
+ return b->pack_int_id - a->pack_int_id;
+}
+
+/*
+ * Limit calls to display_progress() for performance reasons.
+ * The interval here was arbitrarily chosen.
+ */
+#define SPARSE_PROGRESS_INTERVAL (1 << 12)
+#define midx_display_sparse_progress(progress, n) \
+ do { \
+ uint64_t _n = (n); \
+ if ((_n & (SPARSE_PROGRESS_INTERVAL - 1)) == 0) \
+ display_progress(progress, _n); \
+ } while (0)
+
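A short aside on the throttling macro above: because SPARSE_PROGRESS_INTERVAL is a power of two, the bit-mask test is equivalent to a modulo test, so only every 4096th iteration reaches display_progress(). The following is a minimal standalone sketch (not part of the patch) demonstrating that equivalence with made-up loop counts.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_INTERVAL (1 << 12)

int main(void)
{
	uint64_t n;
	unsigned shown = 0;

	for (n = 0; n < 100000; n++) {
		/* mask test and modulo test agree for a power-of-two interval */
		assert(((n & (DEMO_INTERVAL - 1)) == 0) == ((n % DEMO_INTERVAL) == 0));
		if ((n & (DEMO_INTERVAL - 1)) == 0)
			shown++; /* here the real code would call display_progress() */
	}
	printf("%u of 100000 iterations update the progress meter\n", shown);
	return 0;
}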
+int verify_midx_file(struct repository *r, const char *object_dir)
{
+ struct pair_pos_vs_id *pairs = NULL;
uint32_t i;
struct progress *progress;
struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
if (!m)
return 0;
+ progress = start_progress(_("Looking for referenced packfiles"),
+ m->num_packs);
for (i = 0; i < m->num_packs; i++) {
- if (prepare_midx_pack(m, i))
+ if (prepare_midx_pack(r, m, i))
midx_report("failed to load pack in position %d", i);
+
+ display_progress(progress, i + 1);
}
+ stop_progress(&progress);
for (i = 0; i < 255; i++) {
uint32_t oid_fanout1 = ntohl(m->chunk_oid_fanout[i]);
i, oid_fanout1, oid_fanout2, i + 1);
}
+ progress = start_sparse_progress(_("Verifying OID order in MIDX"),
+ m->num_objects - 1);
for (i = 0; i < m->num_objects - 1; i++) {
struct object_id oid1, oid2;
if (oidcmp(&oid1, &oid2) >= 0)
midx_report(_("oid lookup out of order: oid[%d] = %s >= %s = oid[%d]"),
i, oid_to_hex(&oid1), oid_to_hex(&oid2), i + 1);
+
+ midx_display_sparse_progress(progress, i + 1);
+ }
+ stop_progress(&progress);
+
+ /*
+ * Create an array mapping each object to its packfile id. Sort it
+ * to group the objects by packfile. Use this permutation to visit
+ * each of the objects and only require 1 packfile to be open at a
+ * time.
+ */
+ ALLOC_ARRAY(pairs, m->num_objects);
+ for (i = 0; i < m->num_objects; i++) {
+ pairs[i].pos = i;
+ pairs[i].pack_int_id = nth_midxed_pack_int_id(m, i);
}
- progress = start_progress(_("Verifying object offsets"), m->num_objects);
+ progress = start_sparse_progress(_("Sorting objects by packfile"),
+ m->num_objects);
+ display_progress(progress, 0); /* TODO: Measure QSORT() progress */
+ QSORT(pairs, m->num_objects, compare_pair_pos_vs_id);
+ stop_progress(&progress);
+
+ progress = start_sparse_progress(_("Verifying object offsets"), m->num_objects);
for (i = 0; i < m->num_objects; i++) {
struct object_id oid;
struct pack_entry e;
off_t m_offset, p_offset;
- nth_midxed_object_oid(&oid, m, i);
- if (!fill_midx_entry(&oid, &e, m)) {
+ if (i > 0 && pairs[i-1].pack_int_id != pairs[i].pack_int_id &&
+ m->packs[pairs[i-1].pack_int_id])
+ {
+ close_pack_fd(m->packs[pairs[i-1].pack_int_id]);
+ close_pack_index(m->packs[pairs[i-1].pack_int_id]);
+ }
+
+ nth_midxed_object_oid(&oid, m, pairs[i].pos);
+
+ if (!fill_midx_entry(r, &oid, &e, m)) {
midx_report(_("failed to load pack entry for oid[%d] = %s"),
- i, oid_to_hex(&oid));
+ pairs[i].pos, oid_to_hex(&oid));
continue;
}
if (m_offset != p_offset)
midx_report(_("incorrect object offset for oid[%d] = %s: %"PRIx64" != %"PRIx64),
- i, oid_to_hex(&oid), m_offset, p_offset);
+ pairs[i].pos, oid_to_hex(&oid), m_offset, p_offset);
- display_progress(progress, i + 1);
+ midx_display_sparse_progress(progress, i + 1);
}
stop_progress(&progress);
+ free(pairs);
+
return verify_midx_error;
}
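The "sort objects by packfile" comment above is the key idea of the reworked verification loop: build (position, pack id) pairs, sort them by pack id, and walk the permutation so only one pack needs to be open at a time. Below is a minimal standalone sketch (not part of the patch) of that permutation trick with made-up values; the comparator is an overflow-safe variant, whereas compare_pair_pos_vs_id() in the patch subtracts the ids directly.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct pair { uint32_t pos; uint32_t pack_int_id; };

static int by_pack(const void *a_, const void *b_)
{
	const struct pair *a = a_, *b = b_;
	if (a->pack_int_id != b->pack_int_id)
		return a->pack_int_id < b->pack_int_id ? -1 : 1;
	return a->pos < b->pos ? -1 : (a->pos > b->pos);
}

int main(void)
{
	struct pair pairs[] = {
		{ 0, 2 }, { 1, 0 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
	};
	size_t i, n = sizeof(pairs) / sizeof(pairs[0]);

	qsort(pairs, n, sizeof(pairs[0]), by_pack);
	for (i = 0; i < n; i++) {
		/* "switch packs" only when the pack id changes */
		if (i == 0 || pairs[i - 1].pack_int_id != pairs[i].pack_int_id)
			printf("open pack %u\n", pairs[i].pack_int_id);
		printf("  verify object at midx position %u\n", pairs[i].pos);
	}
	return 0;
}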
struct object_id;
struct pack_entry;
+struct repository;
#define GIT_TEST_MULTI_PACK_INDEX "GIT_TEST_MULTI_PACK_INDEX"
};
struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local);
-int prepare_midx_pack(struct multi_pack_index *m, uint32_t pack_int_id);
+int prepare_midx_pack(struct repository *r, struct multi_pack_index *m, uint32_t pack_int_id);
int bsearch_midx(const struct object_id *oid, struct multi_pack_index *m, uint32_t *result);
struct object_id *nth_midxed_object_oid(struct object_id *oid,
struct multi_pack_index *m,
uint32_t n);
-int fill_midx_entry(const struct object_id *oid, struct pack_entry *e, struct multi_pack_index *m);
-int midx_contains_pack(struct multi_pack_index *m, const char *idx_name);
+int fill_midx_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e, struct multi_pack_index *m);
+int midx_contains_pack(struct multi_pack_index *m, const char *idx_or_pack_name);
int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, int local);
int write_midx_file(const char *object_dir);
void clear_midx_file(struct repository *r);
-int verify_midx_file(const char *object_dir);
+int verify_midx_file(struct repository *r, const char *object_dir);
void close_midx(struct multi_pack_index *m);
static int path_to_oid(const char *path, struct object_id *oid)
{
- char hex_oid[GIT_SHA1_HEXSZ];
+ char hex_oid[GIT_MAX_HEXSZ];
int i = 0;
- while (*path && i < GIT_SHA1_HEXSZ) {
+ while (*path && i < the_hash_algo->hexsz) {
if (*path != '/')
hex_oid[i++] = *path;
path++;
}
- if (*path || i != GIT_SHA1_HEXSZ)
+ if (*path || i != the_hash_algo->hexsz)
return -1;
return get_oid_hex(hex_oid, oid);
}
#define GET_NIBBLE(n, sha1) ((((sha1)[(n) >> 1]) >> ((~(n) & 0x01) << 2)) & 0x0f)
-#define KEY_INDEX (GIT_SHA1_RAWSZ - 1)
-#define FANOUT_PATH_SEPARATORS ((GIT_SHA1_HEXSZ / 2) - 1)
+#define KEY_INDEX (the_hash_algo->rawsz - 1)
+#define FANOUT_PATH_SEPARATORS (the_hash_algo->rawsz - 1)
+#define FANOUT_PATH_SEPARATORS_MAX ((GIT_MAX_HEXSZ / 2) - 1)
#define SUBTREE_SHA1_PREFIXCMP(key_sha1, subtree_sha1) \
(memcmp(key_sha1, subtree_sha1, subtree_sha1[KEY_INDEX]))
struct leaf_node *entry)
{
struct leaf_node *l;
- struct int_node *parent_stack[GIT_SHA1_RAWSZ];
+ struct int_node *parent_stack[GIT_MAX_RAWSZ];
unsigned char i, j;
void **p = note_tree_search(t, &tree, &n, entry->key_oid.hash);
void *buf;
struct tree_desc desc;
struct name_entry entry;
+ const unsigned hashsz = the_hash_algo->rawsz;
buf = fill_tree_descriptor(&desc, &subtree->val_oid);
if (!buf)
oid_to_hex(&subtree->val_oid));
prefix_len = subtree->key_oid.hash[KEY_INDEX];
- if (prefix_len >= GIT_SHA1_RAWSZ)
+ if (prefix_len >= hashsz)
BUG("prefix_len (%"PRIuMAX") is out of range", (uintmax_t)prefix_len);
if (prefix_len * 2 < n)
BUG("prefix_len (%"PRIuMAX") is too small", (uintmax_t)prefix_len);
struct leaf_node *l;
size_t path_len = strlen(entry.path);
- if (path_len == 2 * (GIT_SHA1_RAWSZ - prefix_len)) {
+ if (path_len == 2 * (hashsz - prefix_len)) {
/* This is potentially the remainder of the SHA-1 */
if (!S_ISREG(entry.mode))
goto handle_non_note;
if (hex_to_bytes(object_oid.hash + prefix_len, entry.path,
- GIT_SHA1_RAWSZ - prefix_len))
+ hashsz - prefix_len))
goto handle_non_note; /* entry.path is not a SHA1 */
type = PTR_TYPE_NOTE;
* except for the last byte, where we write
* the length:
*/
- memset(object_oid.hash + len, 0, GIT_SHA1_RAWSZ - len - 1);
+ memset(object_oid.hash + len, 0, hashsz - len - 1);
object_oid.hash[KEY_INDEX] = (unsigned char)len;
type = PTR_TYPE_SUBTREE;
return fanout + 1;
}
-/* hex SHA1 + 19 * '/' + NUL */
-#define FANOUT_PATH_MAX GIT_SHA1_HEXSZ + FANOUT_PATH_SEPARATORS + 1
+/* hex oid + '/' between each pair of hex digits + NUL */
+#define FANOUT_PATH_MAX GIT_MAX_HEXSZ + FANOUT_PATH_SEPARATORS_MAX + 1
-static void construct_path_with_fanout(const unsigned char *sha1,
+static void construct_path_with_fanout(const unsigned char *hash,
unsigned char fanout, char *path)
{
unsigned int i = 0, j = 0;
- const char *hex_sha1 = sha1_to_hex(sha1);
- assert(fanout < GIT_SHA1_RAWSZ);
+ const char *hex_hash = hash_to_hex(hash);
+ assert(fanout < the_hash_algo->rawsz);
while (fanout) {
- path[i++] = hex_sha1[j++];
- path[i++] = hex_sha1[j++];
+ path[i++] = hex_hash[j++];
+ path[i++] = hex_hash[j++];
path[i++] = '/';
fanout--;
}
- xsnprintf(path + i, FANOUT_PATH_MAX - i, "%s", hex_sha1 + j);
+ xsnprintf(path + i, FANOUT_PATH_MAX - i, "%s", hex_hash + j);
}
static int for_each_note_helper(struct notes_tree *t, struct int_node *tree,
static void write_tree_entry(struct strbuf *buf, unsigned int mode,
const char *path, unsigned int path_len, const
- unsigned char *sha1)
+ unsigned char *hash)
{
strbuf_addf(buf, "%o %.*s%c", mode, path_len, path, '\0');
- strbuf_add(buf, sha1, GIT_SHA1_RAWSZ);
+ strbuf_add(buf, hash, the_hash_algo->rawsz);
}
static void tree_write_stack_init_subtree(struct tree_write_stack *tws,
n = (struct tree_write_stack *)
xmalloc(sizeof(struct tree_write_stack));
n->next = NULL;
- strbuf_init(&n->buf, 256 * (32 + GIT_SHA1_HEXSZ)); /* assume 256 entries per tree */
+ strbuf_init(&n->buf, 256 * (32 + the_hash_algo->hexsz)); /* assume 256 entries per tree */
n->path[0] = n->path[1] = '\0';
tws->next = n;
tws->path[0] = path[0];
note_path[note_path_len] = '\0';
mode = 040000;
}
- assert(note_path_len <= GIT_SHA1_HEXSZ + FANOUT_PATH_SEPARATORS);
+ assert(note_path_len <= GIT_MAX_HEXSZ + FANOUT_PATH_SEPARATORS);
/* Weave non-note entries into note entries */
return write_each_non_note_until(note_path, d) ||
combine_notes_fn combine_notes, int flags)
{
struct object_id oid, object_oid;
- unsigned mode;
+ unsigned short mode;
struct leaf_node root_tree;
if (!t)
/* Prepare for traversal of current notes tree */
root.next = NULL; /* last forward entry in list is grounded */
- strbuf_init(&root.buf, 256 * (32 + GIT_SHA1_HEXSZ)); /* assume 256 entries */
+ strbuf_init(&root.buf, 256 * (32 + the_hash_algo->hexsz)); /* assume 256 entries */
root.path[0] = root.path[1] = '\0';
cb_data.root = &root;
cb_data.next_non_note = t->first_non_note;
while (l) {
if (flags & NOTES_PRUNE_VERBOSE)
- printf("%s\n", sha1_to_hex(l->sha1));
+ printf("%s\n", hash_to_hex(l->sha1));
if (!(flags & NOTES_PRUNE_DRYRUN))
remove_note(t, l->sha1);
l = l->next;
pack_keep_in_core:1,
freshened:1,
do_not_close:1,
- pack_promisor:1;
- unsigned char sha1[20];
+ pack_promisor:1,
+ multi_pack_index:1;
+ unsigned char hash[GIT_MAX_RAWSZ];
struct revindex_entry *revindex;
/* something like ".git/objects/pack/xxxxx.pack" */
char pack_name[FLEX_ARRAY]; /* more */
/* A most-recently-used ordered version of the packed_git list. */
struct list_head packed_git_mru;
- /*
- * A linked list containing all packfiles, starting with those
- * contained in the multi_pack_index.
- */
- struct packed_git *all_packs;
-
/*
* A fast, rough count of the number of objects in the repository.
* These two fields are not meant for direct access. Use
void *map_loose_object(struct repository *r, const struct object_id *oid,
unsigned long *size);
-extern void *read_object_file_extended(struct repository *r,
- const struct object_id *oid,
- enum object_type *type,
- unsigned long *size, int lookup_replace);
+void *read_object_file_extended(struct repository *r,
+ const struct object_id *oid,
+ enum object_type *type,
+ unsigned long *size, int lookup_replace);
static inline void *repo_read_object_file(struct repository *r,
const struct object_id *oid,
enum object_type *type,
/* Read and unpack an object file into memory, write memory to an object file */
int oid_object_info(struct repository *r, const struct object_id *, unsigned long *);
-extern int hash_object_file(const void *buf, unsigned long len,
- const char *type, struct object_id *oid);
+int hash_object_file(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid);
-extern int write_object_file(const void *buf, unsigned long len,
- const char *type, struct object_id *oid);
+int write_object_file(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid);
-extern int hash_object_file_literally(const void *buf, unsigned long len,
- const char *type, struct object_id *oid,
- unsigned flags);
+int hash_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags);
-extern int pretend_object_file(void *, unsigned long, enum object_type,
- struct object_id *oid);
+int pretend_object_file(void *, unsigned long, enum object_type,
+ struct object_id *oid);
-extern int force_object_loose(const struct object_id *oid, time_t mtime);
+int force_object_loose(const struct object_id *oid, time_t mtime);
/*
* Open the loose object at path, check its hash, and return the contents,
* with the specified name. This function does not respect replace
* references.
*/
-extern int has_loose_object_nonlocal(const struct object_id *);
+int has_loose_object_nonlocal(const struct object_id *);
-extern void assert_oid_type(const struct object_id *oid, enum object_type expect);
+void assert_oid_type(const struct object_id *oid, enum object_type expect);
struct object_info {
/* Request */
#define OBJECT_INFO_QUICK 8
/* Do not check loose object */
#define OBJECT_INFO_IGNORE_LOOSE 16
+/*
+ * Do not attempt to fetch the object if missing (even if fetch_if_missing is
+ * nonzero). This is meant for bulk prefetching of missing blobs in a partial
+ * clone. Implies OBJECT_INFO_QUICK.
+ */
+#define OBJECT_INFO_FOR_PREFETCH (32 + OBJECT_INFO_QUICK)
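Since the flag bits are disjoint, adding 32 to OBJECT_INFO_QUICK (8) yields a value with both bits set, which is what "Implies OBJECT_INFO_QUICK" means in practice. A tiny standalone sketch (not part of the patch; the DEMO_* macros are hypothetical stand-ins for the values shown above):

#include <assert.h>

#define DEMO_QUICK        8
#define DEMO_IGNORE_LOOSE 16
#define DEMO_FOR_PREFETCH (32 + DEMO_QUICK)

int main(void)
{
	assert(DEMO_FOR_PREFETCH & DEMO_QUICK);           /* QUICK is implied */
	assert(!(DEMO_FOR_PREFETCH & DEMO_IGNORE_LOOSE)); /* other bits untouched */
	return 0;
}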
int oid_object_info_extended(struct repository *r,
const struct object_id *,
struct object_id oid;
};
-extern const char *type_name(unsigned int type);
-extern int type_from_string_gently(const char *str, ssize_t, int gentle);
+const char *type_name(unsigned int type);
+int type_from_string_gently(const char *str, ssize_t, int gentle);
#define type_from_string(str) type_from_string_gently(str, -1, 0)
/*
* Return the current number of buckets in the object hashmap.
*/
-extern unsigned int get_max_object_index(void);
+unsigned int get_max_object_index(void);
/*
* Return the object from the specified bucket in the object hashmap.
*/
-extern struct object *get_indexed_object(unsigned int);
+struct object *get_indexed_object(unsigned int);
/*
* This can be used to see if we have heard of the object before, but
*/
struct object *lookup_object(struct repository *r, const unsigned char *sha1);
-extern void *create_object(struct repository *r, const unsigned char *sha1, void *obj);
+void *create_object(struct repository *r, const unsigned char *sha1, void *obj);
void *object_as_type(struct repository *r, struct object *obj, enum object_type type, int quiet);
/*
* Clear the specified object flags from all in-core commit objects.
*/
-extern void clear_commit_marks_all(unsigned int flags);
+void clear_commit_marks_all(unsigned int flags);
#endif /* OBJECT_H */
* parameter may be used to preallocate a sufficiently large table and thus
* prevent expensive resizing. If 0, the table is dynamically resized.
*/
-extern void oidmap_init(struct oidmap *map, size_t initial_size);
+void oidmap_init(struct oidmap *map, size_t initial_size);
/*
* Frees an oidmap structure and allocated memory.
* If `free_entries` is true, each oidmap_entry in the map is freed as well
* using stdlibs free().
*/
-extern void oidmap_free(struct oidmap *map, int free_entries);
+void oidmap_free(struct oidmap *map, int free_entries);
/*
* Returns the oidmap entry for the specified oid, or NULL if not found.
*/
-extern void *oidmap_get(const struct oidmap *map,
- const struct object_id *key);
+void *oidmap_get(const struct oidmap *map,
+ const struct object_id *key);
/*
* Adds or replaces an oidmap entry.
*
* Returns the replaced entry, or NULL if not found (i.e. the entry was added).
*/
-extern void *oidmap_put(struct oidmap *map, void *entry);
+void *oidmap_put(struct oidmap *map, void *entry);
/*
* Removes an oidmap entry matching the specified oid.
*
* Returns the removed entry, or NULL if not found.
*/
-extern void *oidmap_remove(struct oidmap *map, const struct object_id *key);
+void *oidmap_remove(struct oidmap *map, const struct object_id *key);
struct oidmap_iter {
* table overhead.
*/
-static inline unsigned int oid_hash(struct object_id oid)
-{
- return sha1hash(oid.hash);
-}
-
-static inline int oid_equal(struct object_id a, struct object_id b)
-{
- return oideq(&a, &b);
-}
-
-KHASH_INIT(oid, struct object_id, int, 0, oid_hash, oid_equal)
-
/**
* A single oidset; should be zero-initialized (or use OIDSET_INIT).
*/
seen_objects_nr = 0;
}
-static uint32_t find_object_pos(const unsigned char *sha1)
+static uint32_t find_object_pos(const unsigned char *hash)
{
- struct object_entry *entry = packlist_find(writer.to_pack, sha1, NULL);
+ struct object_entry *entry = packlist_find(writer.to_pack, hash, NULL);
if (!entry) {
die("Failed to write bitmap index. Packfile doesn't have full closure "
- "(object %s is missing)", sha1_to_hex(sha1));
+ "(object %s is missing)", hash_to_hex(hash));
}
return oe_in_pack_pos(writer.to_pack, entry);
header.entry_count = htonl(writer.selected_nr);
hashcpy(header.checksum, writer.pack_checksum);
- hashwrite(f, &header, sizeof(header));
+ hashwrite(f, &header, sizeof(header) - GIT_MAX_RAWSZ + the_hash_algo->rawsz);
dump_bitmap(f, writer.commits);
dump_bitmap(f, writer.trees);
dump_bitmap(f, writer.blobs);
* commit.
*/
struct stored_bitmap {
- unsigned char sha1[20];
+ struct object_id oid;
struct ewah_bitmap *root;
struct stored_bitmap *xor;
int flags;
struct ewah_bitmap *blobs;
struct ewah_bitmap *tags;
- /* Map from SHA1 -> `stored_bitmap` for all the bitmapped commits */
- khash_sha1 *bitmaps;
+ /* Map from object ID -> `stored_bitmap` for all the bitmapped commits */
+ kh_oid_map_t *bitmaps;
/* Number of bitmapped commits */
uint32_t entry_count;
struct object **objects;
uint32_t *hashes;
uint32_t count, alloc;
- khash_sha1_pos *positions;
+ kh_oid_pos_t *positions;
} ext_index;
/* Bitmap result of the last performed walk */
{
struct bitmap_disk_header *header = (void *)index->map;
- if (index->map_size < sizeof(*header) + 20)
+ if (index->map_size < sizeof(*header) + the_hash_algo->rawsz)
return error("Corrupted bitmap index (missing header data)");
if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0)
"(Git requires BITMAP_OPT_FULL_DAG)");
if (flags & BITMAP_OPT_HASH_CACHE) {
- unsigned char *end = index->map + index->map_size - 20;
+ unsigned char *end = index->map + index->map_size - the_hash_algo->rawsz;
index->hashes = ((uint32_t *)end) - index->pack->num_objects;
}
}
index->entry_count = ntohl(header->entry_count);
- index->map_pos += sizeof(*header);
+ index->map_pos += sizeof(*header) - GIT_MAX_RAWSZ + the_hash_algo->rawsz;
return 0;
}
static struct stored_bitmap *store_bitmap(struct bitmap_index *index,
struct ewah_bitmap *root,
- const unsigned char *sha1,
+ const unsigned char *hash,
struct stored_bitmap *xor_with,
int flags)
{
stored->root = root;
stored->xor = xor_with;
stored->flags = flags;
- hashcpy(stored->sha1, sha1);
+ oidread(&stored->oid, hash);
- hash_pos = kh_put_sha1(index->bitmaps, stored->sha1, &ret);
+ hash_pos = kh_put_oid_map(index->bitmaps, stored->oid, &ret);
/* a 0 return code means the insertion succeeded with no changes,
* because the SHA1 already existed on the map. this is bad, there
* shouldn't be duplicated commits in the index */
if (ret == 0) {
- error("Duplicate entry in bitmap index: %s", sha1_to_hex(sha1));
+ error("Duplicate entry in bitmap index: %s", hash_to_hex(hash));
return NULL;
}
{
assert(bitmap_git->map);
- bitmap_git->bitmaps = kh_init_sha1();
- bitmap_git->ext_index.positions = kh_init_sha1_pos();
- load_pack_revindex(bitmap_git->pack);
+ bitmap_git->bitmaps = kh_init_oid_map();
+ bitmap_git->ext_index.positions = kh_init_oid_pos();
+ if (load_pack_revindex(bitmap_git->pack))
+ goto failed;
if (!(bitmap_git->commits = read_bitmap_1(bitmap_git)) ||
!(bitmap_git->trees = read_bitmap_1(bitmap_git)) ||
};
static inline int bitmap_position_extended(struct bitmap_index *bitmap_git,
- const unsigned char *sha1)
+ const struct object_id *oid)
{
- khash_sha1_pos *positions = bitmap_git->ext_index.positions;
- khiter_t pos = kh_get_sha1_pos(positions, sha1);
+ kh_oid_pos_t *positions = bitmap_git->ext_index.positions;
+ khiter_t pos = kh_get_oid_pos(positions, *oid);
if (pos < kh_end(positions)) {
int bitmap_pos = kh_value(positions, pos);
}
static inline int bitmap_position_packfile(struct bitmap_index *bitmap_git,
- const unsigned char *sha1)
+ const struct object_id *oid)
{
- off_t offset = find_pack_entry_one(sha1, bitmap_git->pack);
+ off_t offset = find_pack_entry_one(oid->hash, bitmap_git->pack);
if (!offset)
return -1;
}
static int bitmap_position(struct bitmap_index *bitmap_git,
- const unsigned char *sha1)
+ const struct object_id *oid)
{
- int pos = bitmap_position_packfile(bitmap_git, sha1);
- return (pos >= 0) ? pos : bitmap_position_extended(bitmap_git, sha1);
+ int pos = bitmap_position_packfile(bitmap_git, oid);
+ return (pos >= 0) ? pos : bitmap_position_extended(bitmap_git, oid);
}
static int ext_index_add_object(struct bitmap_index *bitmap_git,
int hash_ret;
int bitmap_pos;
- hash_pos = kh_put_sha1_pos(eindex->positions, object->oid.hash, &hash_ret);
+ hash_pos = kh_put_oid_pos(eindex->positions, object->oid, &hash_ret);
if (hash_ret > 0) {
if (eindex->count >= eindex->alloc) {
eindex->alloc = (eindex->alloc + 16) * 3 / 2;
struct bitmap_show_data *data = data_;
int bitmap_pos;
- bitmap_pos = bitmap_position(data->bitmap_git, object->oid.hash);
+ bitmap_pos = bitmap_position(data->bitmap_git, &object->oid);
if (bitmap_pos < 0)
bitmap_pos = ext_index_add_object(data->bitmap_git, object,
static int add_to_include_set(struct bitmap_index *bitmap_git,
struct include_data *data,
- const unsigned char *sha1,
+ const struct object_id *oid,
int bitmap_pos)
{
khiter_t hash_pos;
if (bitmap_get(data->base, bitmap_pos))
return 0;
- hash_pos = kh_get_sha1(bitmap_git->bitmaps, sha1);
+ hash_pos = kh_get_oid_map(bitmap_git->bitmaps, *oid);
if (hash_pos < kh_end(bitmap_git->bitmaps)) {
struct stored_bitmap *st = kh_value(bitmap_git->bitmaps, hash_pos);
bitmap_or_ewah(data->base, lookup_stored_bitmap(st));
struct include_data *data = _data;
int bitmap_pos;
- bitmap_pos = bitmap_position(data->bitmap_git, commit->object.oid.hash);
+ bitmap_pos = bitmap_position(data->bitmap_git, &commit->object.oid);
if (bitmap_pos < 0)
bitmap_pos = ext_index_add_object(data->bitmap_git,
(struct object *)commit,
NULL);
- if (!add_to_include_set(data->bitmap_git, data, commit->object.oid.hash,
+ if (!add_to_include_set(data->bitmap_git, data, &commit->object.oid,
bitmap_pos)) {
struct commit_list *parent = commit->parents;
roots = roots->next;
if (object->type == OBJ_COMMIT) {
- khiter_t pos = kh_get_sha1(bitmap_git->bitmaps, object->oid.hash);
+ khiter_t pos = kh_get_oid_map(bitmap_git->bitmaps, object->oid);
if (pos < kh_end(bitmap_git->bitmaps)) {
struct stored_bitmap *st = kh_value(bitmap_git->bitmaps, pos);
int pos;
roots = roots->next;
- pos = bitmap_position(bitmap_git, object->oid.hash);
+ pos = bitmap_position(bitmap_git, &object->oid);
if (pos < 0 || base == NULL || !bitmap_get(base, pos)) {
object->flags &= ~UNINTERESTING;
fprintf(stderr, "Failed to reuse at %d (%016llx)\n",
reuse_objects, result->words[i]);
- fprintf(stderr, " %s\n", sha1_to_hex(sha1));
+ fprintf(stderr, " %s\n", hash_to_hex(sha1));
}
#endif
struct bitmap_test_data *tdata = data;
int bitmap_pos;
- bitmap_pos = bitmap_position(tdata->bitmap_git, object->oid.hash);
+ bitmap_pos = bitmap_position(tdata->bitmap_git, &object->oid);
if (bitmap_pos < 0)
die("Object not in bitmap: %s\n", oid_to_hex(&object->oid));
int bitmap_pos;
bitmap_pos = bitmap_position(tdata->bitmap_git,
- commit->object.oid.hash);
+ &commit->object.oid);
if (bitmap_pos < 0)
die("Object not in bitmap: %s\n", oid_to_hex(&commit->object.oid));
bitmap_git->version, bitmap_git->entry_count);
root = revs->pending.objects[0].item;
- pos = kh_get_sha1(bitmap_git->bitmaps, root->oid.hash);
+ pos = kh_get_oid_map(bitmap_git->bitmaps, root->oid);
if (pos < kh_end(bitmap_git->bitmaps)) {
struct stored_bitmap *st = kh_value(bitmap_git->bitmaps, pos);
lookup_stored_bitmap(stored),
rebuild)) {
hash_pos = kh_put_sha1(reused_bitmaps,
- stored->sha1,
+ stored->oid.hash,
&hash_ret);
kh_value(reused_bitmaps, hash_pos) =
bitmap_to_ewah(rebuild);
ewah_pool_free(b->trees);
ewah_pool_free(b->blobs);
ewah_pool_free(b->tags);
- kh_destroy_sha1(b->bitmaps);
+ kh_destroy_oid_map(b->bitmaps);
free(b->ext_index.objects);
free(b->ext_index.hashes);
bitmap_free(b->result);
free(b);
}
-int bitmap_has_sha1_in_uninteresting(struct bitmap_index *bitmap_git,
- const unsigned char *sha1)
+int bitmap_has_oid_in_uninteresting(struct bitmap_index *bitmap_git,
+ const struct object_id *oid)
{
int pos;
if (!bitmap_git->haves)
return 0; /* walk had no "haves" */
- pos = bitmap_position_packfile(bitmap_git, sha1);
+ pos = bitmap_position_packfile(bitmap_git, oid);
if (pos < 0)
return 0;
uint16_t version;
uint16_t options;
uint32_t entry_count;
- unsigned char checksum[20];
+ unsigned char checksum[GIT_MAX_RAWSZ];
};
static const char BITMAP_IDX_SIGNATURE[] = {'B', 'I', 'T', 'M'};
* queried to see if a particular object was reachable from any of the
* objects flagged as UNINTERESTING.
*/
-int bitmap_has_sha1_in_uninteresting(struct bitmap_index *, const unsigned char *sha1);
+int bitmap_has_oid_in_uninteresting(struct bitmap_index *, const struct object_id *oid);
void bitmap_writer_show_progress(int show);
void bitmap_writer_set_checksum(unsigned char *sha1);
* this fall back code, just stay simple and fall back to using
* in_pack[] array.
*/
-void oe_map_new_pack(struct packing_data *pack,
- struct packed_git *p)
+void oe_map_new_pack(struct packing_data *pack)
{
uint32_t i;
return pack->in_pack[e - pack->objects];
}
-void oe_map_new_pack(struct packing_data *pack,
- struct packed_git *p);
+void oe_map_new_pack(struct packing_data *pack);
+
static inline void oe_set_in_pack(struct packing_data *pack,
struct object_entry *e,
struct packed_git *p)
{
if (!p->index)
- oe_map_new_pack(pack, p);
+ oe_map_new_pack(pack);
if (pack->in_pack_by_idx)
e->in_pack_idx = p->index;
else
#include "cache.h"
#include "pack-revindex.h"
#include "object-store.h"
+#include "packfile.h"
/*
* Pack index for existing packs give us easy access to the offsets into
sort_revindex(p->revindex, num_ent, p->pack_size);
}
-void load_pack_revindex(struct packed_git *p)
+int load_pack_revindex(struct packed_git *p)
{
- if (!p->revindex)
+ if (!p->revindex) {
+ if (open_pack_index(p))
+ return -1;
create_pack_revindex(p);
+ }
+ return 0;
}
int find_revindex_position(struct packed_git *p, off_t ofs)
{
int pos;
- load_pack_revindex(p);
+ if (load_pack_revindex(p))
+ return NULL;
+
pos = find_revindex_position(p, ofs);
if (pos < 0)
unsigned int nr;
};
-void load_pack_revindex(struct packed_git *p);
+int load_pack_revindex(struct packed_git *p);
int find_revindex_position(struct packed_git *p, off_t ofs);
struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs);
uint32_t *anomaly;
};
-extern void reset_pack_idx_option(struct pack_idx_option *);
+void reset_pack_idx_option(struct pack_idx_option *);
/*
* Packed object index header
/* Note, the data argument could be NULL if object type is blob */
typedef int (*verify_fn)(const struct object_id *, enum object_type, unsigned long, void*, int*);
-extern const char *write_idx_file(const char *index_name, struct pack_idx_entry **objects, int nr_objects, const struct pack_idx_option *, const unsigned char *sha1);
-extern int check_pack_crc(struct packed_git *p, struct pack_window **w_curs, off_t offset, off_t len, unsigned int nr);
-extern int verify_pack_index(struct packed_git *);
-extern int verify_pack(struct repository *, struct packed_git *, verify_fn fn, struct progress *, uint32_t);
-extern off_t write_pack_header(struct hashfile *f, uint32_t);
-extern void fixup_pack_header_footer(int, unsigned char *, const char *, uint32_t, unsigned char *, off_t);
-extern char *index_pack_lockfile(int fd);
+const char *write_idx_file(const char *index_name, struct pack_idx_entry **objects, int nr_objects, const struct pack_idx_option *, const unsigned char *sha1);
+int check_pack_crc(struct packed_git *p, struct pack_window **w_curs, off_t offset, off_t len, unsigned int nr);
+int verify_pack_index(struct packed_git *);
+int verify_pack(struct repository *, struct packed_git *, verify_fn fn, struct progress *, uint32_t);
+off_t write_pack_header(struct hashfile *f, uint32_t);
+void fixup_pack_header_footer(int, unsigned char *, const char *, uint32_t, unsigned char *, off_t);
+char *index_pack_lockfile(int fd);
/*
* The "hdr" output buffer should be at least this big, which will handle sizes
* up to 2^67.
*/
#define MAX_PACK_OBJECT_HEADER 10
-extern int encode_in_pack_object_header(unsigned char *hdr, int hdr_len,
- enum object_type, uintmax_t);
+int encode_in_pack_object_header(unsigned char *hdr, int hdr_len,
+ enum object_type, uintmax_t);
#define PH_ERROR_EOF (-1)
#define PH_ERROR_PACK_SIGNATURE (-2)
#define PH_ERROR_PROTOCOL (-3)
-extern int read_pack_header(int fd, struct pack_header *);
+int read_pack_header(int fd, struct pack_header *);
-extern struct hashfile *create_tmp_packfile(char **pack_tmp_name);
-extern void finish_tmp_packfile(struct strbuf *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option *pack_idx_opts, unsigned char sha1[]);
+struct hashfile *create_tmp_packfile(char **pack_tmp_name);
+void finish_tmp_packfile(struct strbuf *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option *pack_idx_opts, unsigned char sha1[]);
#endif
struct packed_git *p = alloc_packed_git(alloc);
memcpy(p->pack_name, path, alloc); /* includes NUL */
- hashcpy(p->sha1, sha1);
+ hashcpy(p->hash, sha1);
if (check_packed_git_idx(idx_path, p)) {
free(p);
return NULL;
}
}
-static int close_pack_fd(struct packed_git *p)
+int close_pack_fd(struct packed_git *p)
{
if (p->pack_fd < 0)
return 0;
#endif
}
+const char *pack_basename(struct packed_git *p)
+{
+ const char *ret = strrchr(p->pack_name, '/');
+ if (ret)
+ ret = ret + 1; /* skip past slash */
+ else
+ ret = p->pack_name; /* we only have a base */
+ return ret;
+}
+
/*
* Do not call this directly as this leaks p->pack_fd on error return;
* call open_packed_git() instead.
if (!p->index_data) {
struct multi_pack_index *m;
- const char *pack_name = strrchr(p->pack_name, '/');
+ const char *pack_name = pack_basename(p);
for (m = the_repository->objects->multi_pack_index;
m; m = m->next) {
p->pack_local = local;
p->mtime = st.st_mtime;
if (path_len < the_hash_algo->hexsz ||
- get_sha1_hex(path + path_len - the_hash_algo->hexsz, p->sha1))
- hashclr(p->sha1);
+ get_sha1_hex(path + path_len - the_hash_algo->hexsz, p->hash))
+ hashclr(p->hash);
return p;
}
* all unreachable objects about to be pruned, in which case they're not really
* interesting as a measure of repo size in the first place.
*/
-unsigned long approximate_object_count(void)
+unsigned long repo_approximate_object_count(struct repository *r)
{
- if (!the_repository->objects->approximate_object_count_valid) {
+ if (!r->objects->approximate_object_count_valid) {
unsigned long count;
struct multi_pack_index *m;
struct packed_git *p;
- prepare_packed_git(the_repository);
+ prepare_packed_git(r);
count = 0;
- for (m = get_multi_pack_index(the_repository); m; m = m->next)
+ for (m = get_multi_pack_index(r); m; m = m->next)
count += m->num_objects;
- for (p = the_repository->objects->packed_git; p; p = p->next) {
+ for (p = r->objects->packed_git; p; p = p->next) {
if (open_pack_index(p))
continue;
count += p->num_objects;
}
- the_repository->objects->approximate_object_count = count;
+ r->objects->approximate_object_count = count;
}
- return the_repository->objects->approximate_object_count;
+ return r->objects->approximate_object_count;
}
static void *get_next_packed_git(const void *p)
}
rearrange_packed_git(r);
- r->objects->all_packs = NULL;
-
prepare_packed_git_mru(r);
r->objects->packed_git_initialized = 1;
}
struct packed_git *get_all_packs(struct repository *r)
{
- prepare_packed_git(r);
-
- if (!r->objects->all_packs) {
- struct packed_git *p = r->objects->packed_git;
- struct multi_pack_index *m;
-
- for (m = r->objects->multi_pack_index; m; m = m->next) {
- uint32_t i;
- for (i = 0; i < m->num_packs; i++) {
- if (!prepare_midx_pack(m, i)) {
- m->packs[i]->next = p;
- p = m->packs[i];
- }
- }
- }
+ struct multi_pack_index *m;
- r->objects->all_packs = p;
+ prepare_packed_git(r);
+ for (m = r->objects->multi_pack_index; m; m = m->next) {
+ uint32_t i;
+ for (i = 0; i < m->num_packs; i++)
+ prepare_midx_pack(r, m, i);
}
- return r->objects->all_packs;
+ return r->objects->packed_git;
}
struct list_head *get_packed_git_mru(struct repository *r)
return 0;
for (m = r->objects->multi_pack_index; m; m = m->next) {
- if (fill_midx_entry(oid, e, m))
+ if (fill_midx_entry(r, oid, e, m))
return 1;
}
list_for_each(pos, &r->objects->packed_git_mru) {
struct packed_git *p = list_entry(pos, struct packed_git, mru);
- if (fill_pack_entry(oid, e, p)) {
+ if (!p->multi_pack_index && fill_pack_entry(oid, e, p)) {
list_move(&p->mru, &r->objects->packed_git_mru);
return 1;
}
uint32_t i;
int r = 0;
- if (flags & FOR_EACH_OBJECT_PACK_ORDER)
- load_pack_revindex(p);
+ if (flags & FOR_EACH_OBJECT_PACK_ORDER) {
+ if (load_pack_revindex(p))
+ return -1;
+ }
for (i = 0; i < p->num_objects; i++) {
uint32_t pos;
*
* Example: odb_pack_name(out, sha1, "idx") => ".git/objects/pack/pack-1234..idx"
*/
-extern char *odb_pack_name(struct strbuf *buf, const unsigned char *sha1, const char *ext);
+char *odb_pack_name(struct strbuf *buf, const unsigned char *sha1, const char *ext);
/*
* Return the name of the (local) packfile with the specified sha1 in
* its name. The return value is a pointer to memory that is
* overwritten each time this function is called.
*/
-extern char *sha1_pack_name(const unsigned char *sha1);
+char *sha1_pack_name(const unsigned char *sha1);
/*
* Return the name of the (local) pack index file with the specified
* sha1 in its name. The return value is a pointer to memory that is
* overwritten each time this function is called.
*/
-extern char *sha1_pack_index_name(const unsigned char *sha1);
+char *sha1_pack_index_name(const unsigned char *sha1);
-extern struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_path);
+/*
+ * Return the basename of the packfile, omitting any containing directory
+ * (e.g., "pack-1234abcd[...].pack").
+ */
+const char *pack_basename(struct packed_git *p);
+
+struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_path);
typedef void each_file_in_pack_dir_fn(const char *full_path, size_t full_path_len,
const char *file_pach, void *data);
#define PACKDIR_FILE_GARBAGE 4
extern void (*report_garbage)(unsigned seen_bits, const char *path);
-extern void reprepare_packed_git(struct repository *r);
-extern void install_packed_git(struct repository *r, struct packed_git *pack);
+void reprepare_packed_git(struct repository *r);
+void install_packed_git(struct repository *r, struct packed_git *pack);
struct packed_git *get_packed_git(struct repository *r);
struct list_head *get_packed_git_mru(struct repository *r);
* Give a rough count of objects in the repository. This sacrifices accuracy
* for speed.
*/
-unsigned long approximate_object_count(void);
+unsigned long repo_approximate_object_count(struct repository *r);
+#define approximate_object_count() repo_approximate_object_count(the_repository)
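The pattern above (an explicit repository parameter plus a macro that keeps existing callers compiling against the default repository) recurs throughout this series. A minimal standalone sketch (not part of the patch) of the same convention, with simplified hypothetical names:

#include <stdio.h>

struct repository { unsigned long object_count; };

static struct repository default_repo = { 1234 };
#define the_repo (&default_repo)

/* new-style API: takes the repository explicitly */
static unsigned long repo_count_objects(struct repository *r)
{
	return r->object_count;
}
/* compatibility macro: old call sites keep working unchanged */
#define count_objects() repo_count_objects(the_repo)

int main(void)
{
	printf("%lu\n", count_objects());
	return 0;
}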
-extern struct packed_git *find_sha1_pack(const unsigned char *sha1,
- struct packed_git *packs);
+struct packed_git *find_sha1_pack(const unsigned char *sha1,
+ struct packed_git *packs);
-extern void pack_report(void);
+void pack_report(void);
/*
* mmap the index file for the specified packfile (if it is not
* already mmapped). Return 0 on success.
*/
-extern int open_pack_index(struct packed_git *);
+int open_pack_index(struct packed_git *);
/*
* munmap the index file for the specified packfile (if it is
* currently mmapped).
*/
-extern void close_pack_index(struct packed_git *);
+void close_pack_index(struct packed_git *);
+
+int close_pack_fd(struct packed_git *p);
-extern uint32_t get_pack_fanout(struct packed_git *p, uint32_t value);
+uint32_t get_pack_fanout(struct packed_git *p, uint32_t value);
-extern unsigned char *use_pack(struct packed_git *, struct pack_window **, off_t, unsigned long *);
-extern void close_pack_windows(struct packed_git *);
-extern void close_pack(struct packed_git *);
-extern void close_all_packs(struct raw_object_store *o);
-extern void unuse_pack(struct pack_window **);
-extern void clear_delta_base_cache(void);
-extern struct packed_git *add_packed_git(const char *path, size_t path_len, int local);
+unsigned char *use_pack(struct packed_git *, struct pack_window **, off_t, unsigned long *);
+void close_pack_windows(struct packed_git *);
+void close_pack(struct packed_git *);
+void close_all_packs(struct raw_object_store *o);
+void unuse_pack(struct pack_window **);
+void clear_delta_base_cache(void);
+struct packed_git *add_packed_git(const char *path, size_t path_len, int local);
/*
* Make sure that a pointer access into an mmap'd index file is within bounds,
* (like the 64-bit extended offset table), as we compare the size to the
* fixed-length parts when we open the file.
*/
-extern void check_pack_index_ptr(const struct packed_git *p, const void *ptr);
+void check_pack_index_ptr(const struct packed_git *p, const void *ptr);
/*
* Perform binary search on a pack-index for a given oid. Packfile is expected to
* at the SHA-1 within the mmapped index. Return NULL if there is an
* error.
*/
-extern const unsigned char *nth_packed_object_sha1(struct packed_git *, uint32_t n);
+const unsigned char *nth_packed_object_sha1(struct packed_git *, uint32_t n);
/*
* Like nth_packed_object_sha1, but write the data into the object specified by
* the first argument. Returns the first argument on success, and NULL on
* error.
*/
-extern const struct object_id *nth_packed_object_oid(struct object_id *, struct packed_git *, uint32_t n);
+const struct object_id *nth_packed_object_oid(struct object_id *, struct packed_git *, uint32_t n);
/*
* Return the offset of the nth object within the specified packfile.
* The index must already be opened.
*/
-extern off_t nth_packed_object_offset(const struct packed_git *, uint32_t n);
+off_t nth_packed_object_offset(const struct packed_git *, uint32_t n);
/*
* If the object named sha1 is present in the specified packfile,
* return its offset within the packfile; otherwise, return 0.
*/
-extern off_t find_pack_entry_one(const unsigned char *sha1, struct packed_git *);
+off_t find_pack_entry_one(const unsigned char *sha1, struct packed_git *);
-extern int is_pack_valid(struct packed_git *);
-extern void *unpack_entry(struct repository *r, struct packed_git *, off_t, enum object_type *, unsigned long *);
-extern unsigned long unpack_object_header_buffer(const unsigned char *buf, unsigned long len, enum object_type *type, unsigned long *sizep);
-extern unsigned long get_size_from_delta(struct packed_git *, struct pack_window **, off_t);
-extern int unpack_object_header(struct packed_git *, struct pack_window **, off_t *, unsigned long *);
+int is_pack_valid(struct packed_git *);
+void *unpack_entry(struct repository *r, struct packed_git *, off_t, enum object_type *, unsigned long *);
+unsigned long unpack_object_header_buffer(const unsigned char *buf, unsigned long len, enum object_type *type, unsigned long *sizep);
+unsigned long get_size_from_delta(struct packed_git *, struct pack_window **, off_t);
+int unpack_object_header(struct packed_git *, struct pack_window **, off_t *, unsigned long *);
-extern void release_pack_memory(size_t);
+void release_pack_memory(size_t);
/* global flag to enable extra checks when accessing packed objects */
extern int do_check_packed_object_crc;
-extern int packed_object_info(struct repository *r,
- struct packed_git *pack,
- off_t offset, struct object_info *);
+int packed_object_info(struct repository *r,
+ struct packed_git *pack,
+ off_t offset, struct object_info *);
-extern void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1);
-extern const struct packed_git *has_packed_and_bad(struct repository *r, const unsigned char *sha1);
+void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1);
+const struct packed_git *has_packed_and_bad(struct repository *r, const unsigned char *sha1);
/*
* Iff a pack file in the given repository contains the object named by sha1,
* return true and store its location to e.
*/
-extern int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e);
+int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e);
-extern int has_object_pack(const struct object_id *oid);
+int has_object_pack(const struct object_id *oid);
-extern int has_pack_index(const unsigned char *sha1);
+int has_pack_index(const unsigned char *sha1);
/*
* Return 1 if an object in a promisor packfile is or refers to the given
* object, 0 otherwise.
*/
-extern int is_promisor_object(const struct object_id *oid);
+int is_promisor_object(const struct object_id *oid);
/*
* Expose a function for fuzz testing.
* have a convenient entry-point for fuzz testing. For real uses, you should
* probably use open_pack_index() or parse_pack_index() instead.
*/
-extern int load_idx(const char *path, const unsigned int hashsz, void *idx_map,
- size_t idx_size, struct packed_git *p);
+int load_idx(const char *path, const unsigned int hashsz, void *idx_map,
+ size_t idx_size, struct packed_git *p);
#endif
argv_array_push(&pager_process->args, pager);
pager_process->use_shell = 1;
setup_pager_env(&pager_process->env_array);
+ pager_process->trace2_child_class = "pager";
}
void setup_pager(void)
opt->long_name);
if (v && v < MINIMUM_ABBREV)
v = MINIMUM_ABBREV;
- else if (v > 40)
- v = 40;
+ else if (v > the_hash_algo->hexsz)
+ v = the_hash_algo->hexsz;
}
*(int *)(opt->value) = v;
return 0;
return 0;
}
+int parse_opt_commit(const struct option *opt, const char *arg, int unset)
+{
+ struct object_id oid;
+ struct commit *commit;
+ struct commit **target = opt->value;
+
+ if (!arg)
+ return -1;
+ if (get_oid(arg, &oid))
+ return error("malformed object name %s", arg);
+ commit = lookup_commit_reference(the_repository, &oid);
+ if (!commit)
+ return error("no such commit %s", arg);
+ *target = commit;
+ return 0;
+}
+
int parse_opt_object_name(const struct option *opt, const char *arg, int unset)
{
struct object_id oid;
return 0;
}
+int parse_opt_object_id(const struct option *opt, const char *arg, int unset)
+{
+ struct object_id oid;
+ struct object_id *target = opt->value;
+
+ if (unset) {
+ *target = null_oid;
+ return 0;
+ }
+ if (!arg)
+ return -1;
+ if (get_oid(arg, &oid))
+ return error(_("malformed object name '%s'"), arg);
+ *target = oid;
+ return 0;
+}
+
int parse_opt_tertiary(const struct option *opt, const char *arg, int unset)
{
int *target = opt->value;
* "-h" output even if it's not being handled directly by
* parse_options().
*/
-int parse_opt_unknown_cb(const struct option *opt, const char *arg, int unset)
+enum parse_opt_result parse_opt_unknown_cb(struct parse_opt_ctx_t *ctx,
+ const struct option *opt,
+ const char *arg, int unset)
{
- return -2;
+ BUG_ON_OPT_ARG(arg);
+ return PARSE_OPT_UNKNOWN;
}
/**
#include "color.h"
#include "utf8.h"
+static int disallow_abbreviated_options;
+
#define OPT_SHORT 1
#define OPT_UNSET 2
return error("BUG: switch '%c' %s", opt->short_name, reason);
}
-static int get_arg(struct parse_opt_ctx_t *p, const struct option *opt,
- int flags, const char **arg)
+static enum parse_opt_result get_arg(struct parse_opt_ctx_t *p,
+ const struct option *opt,
+ int flags, const char **arg)
{
if (p->opt) {
*arg = p->opt;
*file = prefix_filename(prefix, *file);
}
-static int opt_command_mode_error(const struct option *opt,
- const struct option *all_opts,
- int flags)
+static enum parse_opt_result opt_command_mode_error(
+ const struct option *opt,
+ const struct option *all_opts,
+ int flags)
{
const struct option *that;
struct strbuf that_name = STRBUF_INIT;
error(_("%s is incompatible with %s"),
optname(opt, flags), that_name.buf);
strbuf_release(&that_name);
- return -1;
+ return PARSE_OPT_ERROR;
}
return error(_("%s : incompatible with something else"),
optname(opt, flags));
}
-static int get_value(struct parse_opt_ctx_t *p,
- const struct option *opt,
- const struct option *all_opts,
- int flags)
+static enum parse_opt_result get_value(struct parse_opt_ctx_t *p,
+ const struct option *opt,
+ const struct option *all_opts,
+ int flags)
{
const char *s, *arg;
const int unset = flags & OPT_UNSET;
switch (opt->type) {
case OPTION_LOWLEVEL_CALLBACK:
- return (*(parse_opt_ll_cb *)opt->callback)(p, opt, unset);
+ return opt->ll_callback(p, opt, NULL, unset);
case OPTION_BIT:
if (unset)
*(int *)opt->value &= ~opt->defval;
return 0;
+ case OPTION_BITOP:
+ if (unset)
+ BUG("BITOP can't have unset form");
+ *(int *)opt->value &= ~opt->extra;
+ *(int *)opt->value |= opt->defval;
+ return 0;
+
case OPTION_COUNTUP:
if (*(int *)opt->value < 0)
*(int *)opt->value = 0;
return err;
case OPTION_CALLBACK:
+ {
+ const char *p_arg = NULL;
+ int p_unset;
+
if (unset)
- return (*opt->callback)(opt, NULL, 1) ? (-1) : 0;
- if (opt->flags & PARSE_OPT_NOARG)
- return (*opt->callback)(opt, NULL, 0) ? (-1) : 0;
- if (opt->flags & PARSE_OPT_OPTARG && !p->opt)
- return (*opt->callback)(opt, NULL, 0) ? (-1) : 0;
- if (get_arg(p, opt, flags, &arg))
+ p_unset = 1;
+ else if (opt->flags & PARSE_OPT_NOARG)
+ p_unset = 0;
+ else if (opt->flags & PARSE_OPT_OPTARG && !p->opt)
+ p_unset = 0;
+ else if (get_arg(p, opt, flags, &arg))
return -1;
- return (*opt->callback)(opt, arg, 0) ? (-1) : 0;
-
+ else {
+ p_unset = 0;
+ p_arg = arg;
+ }
+ if (opt->callback)
+ return (*opt->callback)(opt, p_arg, p_unset) ? (-1) : 0;
+ else
+ return (*opt->ll_callback)(p, opt, p_arg, p_unset);
+ }
case OPTION_INTEGER:
if (unset) {
*(int *)opt->value = 0;
}
}
-static int parse_short_opt(struct parse_opt_ctx_t *p, const struct option *options)
+static enum parse_opt_result parse_short_opt(struct parse_opt_ctx_t *p,
+ const struct option *options)
{
const struct option *all_opts = options;
const struct option *numopt = NULL;
len++;
arg = xmemdupz(p->opt, len);
p->opt = p->opt[len] ? p->opt + len : NULL;
- rc = (*numopt->callback)(numopt, arg, 0) ? (-1) : 0;
+ if (numopt->callback)
+ rc = (*numopt->callback)(numopt, arg, 0) ? (-1) : 0;
+ else
+ rc = (*numopt->ll_callback)(p, numopt, arg, 0);
free(arg);
return rc;
}
- return -2;
+ return PARSE_OPT_UNKNOWN;
+}
+
+static int has_string(const char *it, const char **array)
+{
+ while (*array)
+ if (!strcmp(it, *(array++)))
+ return 1;
+ return 0;
}
-static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg,
- const struct option *options)
+static int is_alias(struct parse_opt_ctx_t *ctx,
+ const struct option *one_opt,
+ const struct option *another_opt)
+{
+ const char **group;
+
+ if (!ctx->alias_groups)
+ return 0;
+
+ if (!one_opt->long_name || !another_opt->long_name)
+ return 0;
+
+ for (group = ctx->alias_groups; *group; group += 3) {
+		/* are one_opt and another_opt from the same alias group? */
+ if (has_string(one_opt->long_name, group) &&
+ has_string(another_opt->long_name, group))
+ return 1;
+ }
+ return 0;
+}
+
+static enum parse_opt_result parse_long_opt(
+ struct parse_opt_ctx_t *p, const char *arg,
+ const struct option *options)
{
const struct option *all_opts = options;
const char *arg_end = strchrnul(arg, '=');
optname(options, flags));
if (*rest)
continue;
+ if (options->value)
+ *(int *)options->value = options->defval;
p->out[p->cpidx++] = arg - 2;
- return 0;
+ return PARSE_OPT_DONE;
}
if (!rest) {
/* abbreviated? */
- if (!strncmp(long_name, arg, arg_end - arg)) {
+ if (!(p->flags & PARSE_OPT_KEEP_UNKNOWN) &&
+ !strncmp(long_name, arg, arg_end - arg)) {
is_abbreviated:
- if (abbrev_option) {
+ if (abbrev_option &&
+ !is_alias(p, abbrev_option, options)) {
/*
* If this is abbreviated, it is
* ambiguous. So when there is no
return get_value(p, options, all_opts, flags ^ opt_flags);
}
+ if (disallow_abbreviated_options && (ambiguous_option || abbrev_option))
+ die("disallowed abbreviated or ambiguous option '%.*s'",
+ (int)(arg_end - arg), arg);
+
if (ambiguous_option) {
error(_("ambiguous option: %s "
"(could be --%s%s or --%s%s)"),
ambiguous_option->long_name,
(abbrev_flags & OPT_UNSET) ? "no-" : "",
abbrev_option->long_name);
- return -3;
+ return PARSE_OPT_HELP;
}
if (abbrev_option)
return get_value(p, abbrev_option, all_opts, abbrev_flags);
- return -2;
+ return PARSE_OPT_UNKNOWN;
}
static int parse_nodash_opt(struct parse_opt_ctx_t *p, const char *arg,
if ((opts->flags & PARSE_OPT_OPTARG) ||
!(opts->flags & PARSE_OPT_NOARG))
err |= optbug(opts, "should not accept an argument");
+ break;
+ case OPTION_CALLBACK:
+ if (!opts->callback && !opts->ll_callback)
+ BUG("OPTION_CALLBACK needs one callback");
+ if (opts->callback && opts->ll_callback)
+ BUG("OPTION_CALLBACK can't have two callbacks");
+ break;
+ case OPTION_LOWLEVEL_CALLBACK:
+ if (!opts->ll_callback)
+ BUG("OPTION_LOWLEVEL_CALLBACK needs a callback");
+ if (opts->callback)
+ BUG("OPTION_LOWLEVEL_CALLBACK needs no high level callback");
+ break;
+ case OPTION_ALIAS:
+ BUG("OPT_ALIAS() should not remain at this point. "
+ "Are you using parse_options_step() directly?\n"
+ "That case is not supported yet.");
default:
; /* ok. (usually accepts an argument) */
}
exit(128);
}
-void parse_options_start(struct parse_opt_ctx_t *ctx,
- int argc, const char **argv, const char *prefix,
- const struct option *options, int flags)
+static void parse_options_start_1(struct parse_opt_ctx_t *ctx,
+ int argc, const char **argv, const char *prefix,
+ const struct option *options, int flags)
{
- memset(ctx, 0, sizeof(*ctx));
- ctx->argc = ctx->total = argc - 1;
- ctx->argv = argv + 1;
- ctx->out = argv;
+ ctx->argc = argc;
+ ctx->argv = argv;
+ if (!(flags & PARSE_OPT_ONE_SHOT)) {
+ ctx->argc--;
+ ctx->argv++;
+ }
+ ctx->total = ctx->argc;
+ ctx->out = argv;
ctx->prefix = prefix;
ctx->cpidx = ((flags & PARSE_OPT_KEEP_ARGV0) != 0);
ctx->flags = flags;
if ((flags & PARSE_OPT_KEEP_UNKNOWN) &&
- (flags & PARSE_OPT_STOP_AT_NON_OPTION))
+ (flags & PARSE_OPT_STOP_AT_NON_OPTION) &&
+ !(flags & PARSE_OPT_ONE_SHOT))
BUG("STOP_AT_NON_OPTION and KEEP_UNKNOWN don't go together");
+ if ((flags & PARSE_OPT_ONE_SHOT) &&
+ (flags & PARSE_OPT_KEEP_ARGV0))
+ BUG("Can't keep argv0 if you don't have it");
parse_options_check(options);
}
+void parse_options_start(struct parse_opt_ctx_t *ctx,
+ int argc, const char **argv, const char *prefix,
+ const struct option *options, int flags)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ parse_options_start_1(ctx, argc, argv, prefix, options, flags);
+}
+
static void show_negated_gitcomp(const struct option *opts, int nr_noopts)
{
int printed_dashdash = 0;
}
}
-static int show_gitcomp(struct parse_opt_ctx_t *ctx,
- const struct option *opts)
+static int show_gitcomp(const struct option *opts)
{
const struct option *original_opts = opts;
int nr_noopts = 0;
return PARSE_OPT_COMPLETE;
}
+/*
+ * Scan the options and, if any aliases are present, produce a new
+ * option[] array, which should be used instead of the original
+ * 'options'.
+ *
+ * Right now this is only used to preprocess and substitute
+ * OPTION_ALIAS.
+ */
+static struct option *preprocess_options(struct parse_opt_ctx_t *ctx,
+ const struct option *options)
+{
+ struct option *newopt;
+ int i, nr, alias;
+ int nr_aliases = 0;
+
+ for (nr = 0; options[nr].type != OPTION_END; nr++) {
+ if (options[nr].type == OPTION_ALIAS)
+ nr_aliases++;
+ }
+
+ if (!nr_aliases)
+ return NULL;
+
+ ALLOC_ARRAY(newopt, nr + 1);
+ COPY_ARRAY(newopt, options, nr + 1);
+
+ /* each alias has two string pointers and NULL */
+ CALLOC_ARRAY(ctx->alias_groups, 3 * (nr_aliases + 1));
+
+ for (alias = 0, i = 0; i < nr; i++) {
+ int short_name;
+ const char *long_name;
+ const char *source;
+ int j;
+
+ if (newopt[i].type != OPTION_ALIAS)
+ continue;
+
+ short_name = newopt[i].short_name;
+ long_name = newopt[i].long_name;
+ source = newopt[i].value;
+
+ if (!long_name)
+ BUG("An alias must have long option name");
+
+ for (j = 0; j < nr; j++) {
+ const char *name = options[j].long_name;
+
+ if (!name || strcmp(name, source))
+ continue;
+
+ if (options[j].type == OPTION_ALIAS)
+ BUG("No please. Nested aliases are not supported.");
+
+ /*
+ * NEEDSWORK: this is a bit inconsistent because
+			 * usage_with_options() on the original options[] will print
+			 * the help string as "alias of %s", but "git cmd -h" will
+			 * print the original help string.
+ */
+ memcpy(newopt + i, options + j, sizeof(*newopt));
+ newopt[i].short_name = short_name;
+ newopt[i].long_name = long_name;
+ break;
+ }
+
+ if (j == nr)
+ BUG("could not find source option '%s' of alias '%s'",
+ source, newopt[i].long_name);
+ ctx->alias_groups[alias * 3 + 0] = newopt[i].long_name;
+ ctx->alias_groups[alias * 3 + 1] = options[j].long_name;
+ ctx->alias_groups[alias * 3 + 2] = NULL;
+ alias++;
+ }
+
+ return newopt;
+}
+
static int usage_with_options_internal(struct parse_opt_ctx_t *,
const char * const *,
const struct option *, int, int);
for (; ctx->argc; ctx->argc--, ctx->argv++) {
const char *arg = ctx->argv[0];
+ if (ctx->flags & PARSE_OPT_ONE_SHOT &&
+ ctx->argc != ctx->total)
+ break;
+
if (*arg != '-' || !arg[1]) {
if (parse_nodash_opt(ctx, arg, options) == 0)
continue;
/* lone --git-completion-helper is asked by git-completion.bash */
if (ctx->total == 1 && !strcmp(arg + 1, "-git-completion-helper"))
- return show_gitcomp(ctx, options);
+ return show_gitcomp(options);
if (arg[1] != '-') {
ctx->opt = arg + 1;
switch (parse_short_opt(ctx, options)) {
- case -1:
+ case PARSE_OPT_ERROR:
return PARSE_OPT_ERROR;
- case -2:
+ case PARSE_OPT_UNKNOWN:
if (ctx->opt)
check_typos(arg + 1, options);
if (internal_help && *ctx->opt == 'h')
goto show_usage;
goto unknown;
+ case PARSE_OPT_NON_OPTION:
+ case PARSE_OPT_HELP:
+ case PARSE_OPT_COMPLETE:
+ BUG("parse_short_opt() cannot return these");
+ case PARSE_OPT_DONE:
+ break;
}
if (ctx->opt)
check_typos(arg + 1, options);
while (ctx->opt) {
switch (parse_short_opt(ctx, options)) {
- case -1:
+ case PARSE_OPT_ERROR:
return PARSE_OPT_ERROR;
- case -2:
+ case PARSE_OPT_UNKNOWN:
if (internal_help && *ctx->opt == 'h')
goto show_usage;
ctx->argv[0] = xstrdup(ctx->opt - 1);
*(char *)ctx->argv[0] = '-';
goto unknown;
+ case PARSE_OPT_NON_OPTION:
+ case PARSE_OPT_COMPLETE:
+ case PARSE_OPT_HELP:
+ BUG("parse_short_opt() cannot return these");
+ case PARSE_OPT_DONE:
+ break;
}
}
continue;
if (internal_help && !strcmp(arg + 2, "help"))
goto show_usage;
switch (parse_long_opt(ctx, arg + 2, options)) {
- case -1:
+ case PARSE_OPT_ERROR:
return PARSE_OPT_ERROR;
- case -2:
+ case PARSE_OPT_UNKNOWN:
goto unknown;
- case -3:
+ case PARSE_OPT_HELP:
goto show_usage;
+ case PARSE_OPT_NON_OPTION:
+ case PARSE_OPT_COMPLETE:
+ BUG("parse_long_opt() cannot return these");
+ case PARSE_OPT_DONE:
+ break;
}
continue;
unknown:
+ if (ctx->flags & PARSE_OPT_ONE_SHOT)
+ break;
if (!(ctx->flags & PARSE_OPT_KEEP_UNKNOWN))
return PARSE_OPT_UNKNOWN;
ctx->out[ctx->cpidx++] = ctx->argv[0];
int parse_options_end(struct parse_opt_ctx_t *ctx)
{
+ if (ctx->flags & PARSE_OPT_ONE_SHOT)
+ return ctx->total - ctx->argc;
+
MOVE_ARRAY(ctx->out + ctx->cpidx, ctx->argv, ctx->argc);
ctx->out[ctx->cpidx + ctx->argc] = NULL;
return ctx->cpidx + ctx->argc;
int flags)
{
struct parse_opt_ctx_t ctx;
+ struct option *real_options;
+
+ disallow_abbreviated_options =
+ git_env_bool("GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS", 0);
- parse_options_start(&ctx, argc, argv, prefix, options, flags);
+ memset(&ctx, 0, sizeof(ctx));
+ real_options = preprocess_options(&ctx, options);
+ if (real_options)
+ options = real_options;
+ parse_options_start_1(&ctx, argc, argv, prefix, options, flags);
switch (parse_options_step(&ctx, options, usagestr)) {
case PARSE_OPT_HELP:
case PARSE_OPT_ERROR:
}
precompose_argv(argc, argv);
+ free(real_options);
+ free(ctx.alias_groups);
return parse_options_end(&ctx);
}
fputc('\n', outfile);
pad = USAGE_OPTS_WIDTH;
}
+ if (opts->type == OPTION_ALIAS) {
+ fprintf(outfile, "%*s", pad + USAGE_GAP, "");
+ fprintf_ln(outfile, _("alias of --%s"),
+ (const char *)opts->value);
+ continue;
+ }
fprintf(outfile, "%*s%s\n", pad + USAGE_GAP, "", _(opts->help));
}
fputc('\n', outfile);
OPTION_ARGUMENT,
OPTION_GROUP,
OPTION_NUMBER,
+ OPTION_ALIAS,
/* options with no arguments */
OPTION_BIT,
OPTION_NEGBIT,
+ OPTION_BITOP,
OPTION_COUNTUP,
OPTION_SET_INT,
OPTION_CMDMODE,
PARSE_OPT_STOP_AT_NON_OPTION = 2,
PARSE_OPT_KEEP_ARGV0 = 4,
PARSE_OPT_KEEP_UNKNOWN = 8,
- PARSE_OPT_NO_INTERNAL_HELP = 16
+ PARSE_OPT_NO_INTERNAL_HELP = 16,
+ PARSE_OPT_ONE_SHOT = 32
};
enum parse_opt_option_flags {
typedef int parse_opt_cb(const struct option *, const char *arg, int unset);
struct parse_opt_ctx_t;
-typedef int parse_opt_ll_cb(struct parse_opt_ctx_t *ctx,
- const struct option *opt, int unset);
+typedef enum parse_opt_result parse_opt_ll_cb(struct parse_opt_ctx_t *ctx,
+ const struct option *opt,
+ const char *arg, int unset);
/*
* `type`::
* the option takes optional argument.
*
* `callback`::
- * pointer to the callback to use for OPTION_CALLBACK or
- * OPTION_LOWLEVEL_CALLBACK.
+ * pointer to the callback to use for OPTION_CALLBACK
*
* `defval`::
* default value to fill (*->value) with for PARSE_OPT_OPTARG.
* OPTION_{BIT,SET_INT} store the {mask,integer} to put in the value when met.
* CALLBACKS can use it like they want.
+ *
+ * `ll_callback`::
+ * pointer to the callback to use for OPTION_LOWLEVEL_CALLBACK
+ *
*/
struct option {
enum parse_opt_type type;
int flags;
parse_opt_cb *callback;
intptr_t defval;
+ parse_opt_ll_cb *ll_callback;
+ intptr_t extra;
};
#define OPT_BIT_F(s, l, v, h, b, f) { OPTION_BIT, (s), (l), (v), NULL, (h), \
#define OPT_SET_INT_F(s, l, v, h, i, f) { OPTION_SET_INT, (s), (l), (v), NULL, \
(h), PARSE_OPT_NOARG | (f), NULL, (i) }
#define OPT_BOOL_F(s, l, v, h, f) OPT_SET_INT_F(s, l, v, h, 1, f)
+#define OPT_CALLBACK_F(s, l, v, a, h, f, cb) \
+ { OPTION_CALLBACK, (s), (l), (v), (a), (h), (f), (cb) }
+#define OPT_STRING_F(s, l, v, a, h, f) { OPTION_STRING, (s), (l), (v), (a), (h), (f) }
+#define OPT_INTEGER_F(s, l, v, h, f) { OPTION_INTEGER, (s), (l), (v), N_("n"), (h), (f) }
#define OPT_END() { OPTION_END }
-#define OPT_ARGUMENT(l, h) { OPTION_ARGUMENT, 0, (l), NULL, NULL, \
- (h), PARSE_OPT_NOARG}
+#define OPT_ARGUMENT(l, v, h) { OPTION_ARGUMENT, 0, (l), (v), NULL, \
+ (h), PARSE_OPT_NOARG, NULL, 1 }
#define OPT_GROUP(h) { OPTION_GROUP, 0, NULL, NULL, NULL, (h) }
#define OPT_BIT(s, l, v, h, b) OPT_BIT_F(s, l, v, h, b, 0)
+#define OPT_BITOP(s, l, v, h, set, clear) { OPTION_BITOP, (s), (l), (v), NULL, (h), \
+ PARSE_OPT_NOARG|PARSE_OPT_NONEG, NULL, \
+ (set), NULL, (clear) }
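For illustration, OPT_BITOP lets a single option set some bits and clear others in the same flag word; the mode names below are invented for this sketch:

#define MODE_FAST  (1 << 0)
#define MODE_EXACT (1 << 1)

static int mode;

static struct option bitop_opts[] = {
	/* --fast turns MODE_FAST on and MODE_EXACT off in one go */
	OPT_BITOP('f', "fast", &mode, N_("prefer speed over exactness"),
		  MODE_FAST, MODE_EXACT),
	OPT_END()
};
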
#define OPT_NEGBIT(s, l, v, h, b) { OPTION_NEGBIT, (s), (l), (v), NULL, \
(h), PARSE_OPT_NOARG, NULL, (b) }
#define OPT_COUNTUP(s, l, v, h) OPT_COUNTUP_F(s, l, v, h, 0)
(h), PARSE_OPT_NOARG | PARSE_OPT_HIDDEN, NULL, 1}
#define OPT_CMDMODE(s, l, v, h, i) { OPTION_CMDMODE, (s), (l), (v), NULL, \
(h), PARSE_OPT_NOARG|PARSE_OPT_NONEG, NULL, (i) }
-#define OPT_INTEGER(s, l, v, h) { OPTION_INTEGER, (s), (l), (v), N_("n"), (h) }
+#define OPT_INTEGER(s, l, v, h) OPT_INTEGER_F(s, l, v, h, 0)
#define OPT_MAGNITUDE(s, l, v, h) { OPTION_MAGNITUDE, (s), (l), (v), \
N_("n"), (h), PARSE_OPT_NONEG }
-#define OPT_STRING(s, l, v, a, h) { OPTION_STRING, (s), (l), (v), (a), (h) }
+#define OPT_STRING(s, l, v, a, h) OPT_STRING_F(s, l, v, a, h, 0)
#define OPT_STRING_LIST(s, l, v, a, h) \
{ OPTION_CALLBACK, (s), (l), (v), (a), \
(h), 0, &parse_opt_string_list }
#define OPT_EXPIRY_DATE(s, l, v, h) \
{ OPTION_CALLBACK, (s), (l), (v), N_("expiry-date"),(h), 0, \
parse_opt_expiry_date_cb }
-#define OPT_CALLBACK(s, l, v, a, h, f) \
- { OPTION_CALLBACK, (s), (l), (v), (a), (h), 0, (f) }
+#define OPT_CALLBACK(s, l, v, a, h, f) OPT_CALLBACK_F(s, l, v, a, h, 0, f)
#define OPT_NUMBER_CALLBACK(v, h, f) \
{ OPTION_NUMBER, 0, NULL, (v), NULL, (h), \
PARSE_OPT_NOARG | PARSE_OPT_NONEG, (f) }
N_("no-op (backward compatibility)"), \
PARSE_OPT_HIDDEN | PARSE_OPT_NOARG, parse_opt_noop_cb }
-/* parse_options() will filter out the processed options and leave the
- * non-option arguments in argv[]. usagestr strings should be marked
- * for translation with N_().
+#define OPT_ALIAS(s, l, source_long_name) \
+ { OPTION_ALIAS, (s), (l), (source_long_name) }
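A small sketch of how OPT_ALIAS is meant to be used; the options shown are invented for the example:

static const char *message;

static struct option alias_opts[] = {
	OPT_STRING('m', "message", &message, N_("message"),
		   N_("use the given message")),
	/* "--notes" is accepted as an alias of "--message" */
	OPT_ALIAS(0, "notes", "message"),
	OPT_END()
};
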
+
+/*
+ * parse_options() will filter out the processed options and leave the
+ * non-option arguments in argv[]. argv[0] is assumed to be the program
+ * name and is skipped.
+ *
+ * usagestr strings should be marked for translation with N_().
+ *
* Returns the number of arguments left in argv[].
+ *
+ * In one-shot mode, argv[0] is not treated as a program name, argv[] is
+ * left untouched, and parse_options() returns the number of options
+ * processed.
*/
-extern int parse_options(int argc, const char **argv, const char *prefix,
- const struct option *options,
- const char * const usagestr[], int flags);
+int parse_options(int argc, const char **argv, const char *prefix,
+ const struct option *options,
+ const char * const usagestr[], int flags);
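A minimal, self-contained sketch of the usual calling pattern; the command name and usage string are invented for the example:

int cmd_example(int argc, const char **argv, const char *prefix)
{
	static const char * const example_usage[] = {
		N_("git example [-v] [<path>...]"),
		NULL
	};
	int verbose = 0;
	struct option opts[] = {
		OPT__VERBOSE(&verbose, N_("be verbose")),
		OPT_END()
	};

	argc = parse_options(argc, argv, prefix, opts, example_usage, 0);
	/* argv[0..argc-1] now contains only the non-option arguments */
	return 0;
}
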
-extern NORETURN void usage_with_options(const char * const *usagestr,
- const struct option *options);
+NORETURN void usage_with_options(const char * const *usagestr,
+ const struct option *options);
-extern NORETURN void usage_msg_opt(const char *msg,
- const char * const *usagestr,
- const struct option *options);
+NORETURN void usage_msg_opt(const char *msg,
+ const char * const *usagestr,
+ const struct option *options);
-extern int optbug(const struct option *opt, const char *reason);
+int optbug(const struct option *opt, const char *reason);
const char *optname(const struct option *opt, int flags);
/*
BUG("option callback does not expect an argument"); \
} while (0)
+/*
+ * Similar to the assertions above, but checks that "arg" is always non-NULL.
+ * This assertion also implies BUG_ON_OPT_NEG(), letting you declare both
+ * assertions in a single line.
+ */
+#define BUG_ON_OPT_NEG_NOARG(unset, arg) do { \
+ BUG_ON_OPT_NEG(unset); \
+	if (!(arg)) \
+		BUG("option callback expects an argument"); \
+} while (0)
+
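A small illustrative callback using the new assertion; the callback itself is not part of this patch:

static int set_label_cb(const struct option *opt, const char *arg, int unset)
{
	BUG_ON_OPT_NEG_NOARG(unset, arg);
	*(const char **)opt->value = arg;
	return 0;
}
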
/*----- incremental advanced APIs -----*/
-enum {
- PARSE_OPT_COMPLETE = -2,
- PARSE_OPT_HELP = -1,
- PARSE_OPT_DONE,
+enum parse_opt_result {
+ PARSE_OPT_COMPLETE = -3,
+ PARSE_OPT_HELP = -2,
+ PARSE_OPT_ERROR = -1, /* must be the same as error() */
+ PARSE_OPT_DONE = 0, /* fixed so that "return 0" works */
PARSE_OPT_NON_OPTION,
- PARSE_OPT_ERROR,
PARSE_OPT_UNKNOWN
};
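For illustration, a low-level callback returns one of these values directly and is attached via the ll_callback slot of struct option (the field order below follows the OPT_BITOP initializer above); everything named here is invented for the sketch:

static enum parse_opt_result collect_rest_cb(struct parse_opt_ctx_t *ctx,
					     const struct option *opt,
					     const char *arg, int unset)
{
	BUG_ON_OPT_NEG(unset);
	BUG_ON_OPT_ARG(arg);
	/* a low-level callback may inspect ctx->argc / ctx->argv itself */
	return PARSE_OPT_DONE;
}

static struct option ll_opts[] = {
	{ OPTION_LOWLEVEL_CALLBACK, 0, "collect-rest", NULL, NULL,
	  N_("handle the remaining arguments directly"),
	  PARSE_OPT_NOARG, NULL, 0, collect_rest_cb },
	OPT_END()
};
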
const char *opt;
int flags;
const char *prefix;
+ const char **alias_groups; /* must be in groups of 3 elements! */
+ struct option *updated_options;
};
-extern void parse_options_start(struct parse_opt_ctx_t *ctx,
- int argc, const char **argv, const char *prefix,
- const struct option *options, int flags);
+void parse_options_start(struct parse_opt_ctx_t *ctx,
+ int argc, const char **argv, const char *prefix,
+ const struct option *options, int flags);
-extern int parse_options_step(struct parse_opt_ctx_t *ctx,
- const struct option *options,
- const char * const usagestr[]);
+int parse_options_step(struct parse_opt_ctx_t *ctx,
+ const struct option *options,
+ const char * const usagestr[]);
-extern int parse_options_end(struct parse_opt_ctx_t *ctx);
+int parse_options_end(struct parse_opt_ctx_t *ctx);
-extern struct option *parse_options_concat(struct option *a, struct option *b);
+struct option *parse_options_concat(struct option *a, struct option *b);
/*----- some often used options -----*/
-extern int parse_opt_abbrev_cb(const struct option *, const char *, int);
-extern int parse_opt_expiry_date_cb(const struct option *, const char *, int);
-extern int parse_opt_color_flag_cb(const struct option *, const char *, int);
-extern int parse_opt_verbosity_cb(const struct option *, const char *, int);
-extern int parse_opt_object_name(const struct option *, const char *, int);
-extern int parse_opt_commits(const struct option *, const char *, int);
-extern int parse_opt_tertiary(const struct option *, const char *, int);
-extern int parse_opt_string_list(const struct option *, const char *, int);
-extern int parse_opt_noop_cb(const struct option *, const char *, int);
-extern int parse_opt_unknown_cb(const struct option *, const char *, int);
-extern int parse_opt_passthru(const struct option *, const char *, int);
-extern int parse_opt_passthru_argv(const struct option *, const char *, int);
+int parse_opt_abbrev_cb(const struct option *, const char *, int);
+int parse_opt_expiry_date_cb(const struct option *, const char *, int);
+int parse_opt_color_flag_cb(const struct option *, const char *, int);
+int parse_opt_verbosity_cb(const struct option *, const char *, int);
+/* value is struct oid_array* */
+int parse_opt_object_name(const struct option *, const char *, int);
+/* value is struct object_id* */
+int parse_opt_object_id(const struct option *, const char *, int);
+int parse_opt_commits(const struct option *, const char *, int);
+int parse_opt_commit(const struct option *, const char *, int);
+int parse_opt_tertiary(const struct option *, const char *, int);
+int parse_opt_string_list(const struct option *, const char *, int);
+int parse_opt_noop_cb(const struct option *, const char *, int);
+int parse_opt_unknown_cb(struct parse_opt_ctx_t *ctx, const struct option *, const char *, int);
+int parse_opt_passthru(const struct option *, const char *, int);
+int parse_opt_passthru_argv(const struct option *, const char *, int);
#define OPT__VERBOSE(var, h) OPT_COUNTUP('v', "verbose", (var), (h))
#define OPT__QUIET(var, h) OPT_COUNTUP('q', "quiet", (var), (h))
#define OPT_NO_CONTAINS(v, h) _OPT_CONTAINS_OR_WITH("no-contains", v, h, PARSE_OPT_NONEG)
#define OPT_WITH(v, h) _OPT_CONTAINS_OR_WITH("with", v, h, PARSE_OPT_HIDDEN | PARSE_OPT_NONEG)
#define OPT_WITHOUT(v, h) _OPT_CONTAINS_OR_WITH("without", v, h, PARSE_OPT_HIDDEN | PARSE_OPT_NONEG)
+#define OPT_CLEANUP(v) OPT_STRING(0, "cleanup", v, N_("mode"), N_("how to strip spaces and #comments from message"))
#endif
{ 1, 1, 0, "logs" },
{ 1, 1, 1, "logs/HEAD" },
{ 0, 1, 1, "logs/refs/bisect" },
+ { 0, 1, 1, "logs/refs/rewritten" },
+ { 0, 1, 1, "logs/refs/worktree" },
{ 0, 1, 0, "lost-found" },
{ 0, 1, 0, "objects" },
{ 0, 1, 0, "refs" },
{ 0, 1, 1, "refs/bisect" },
+ { 0, 1, 1, "refs/rewritten" },
{ 0, 1, 1, "refs/worktree" },
{ 0, 1, 0, "remotes" },
{ 0, 1, 0, "worktrees" },
/*
* Return a statically allocated path.
*/
-extern const char *mkpath(const char *fmt, ...)
+const char *mkpath(const char *fmt, ...)
__attribute__((format (printf, 1, 2)));
/*
* Return a path.
*/
-extern char *mkpathdup(const char *fmt, ...)
+char *mkpathdup(const char *fmt, ...)
__attribute__((format (printf, 1, 2)));
/*
* Construct a path and place the result in the provided buffer `buf`.
*/
-extern char *mksnpath(char *buf, size_t n, const char *fmt, ...)
+char *mksnpath(char *buf, size_t n, const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
/*
* Constructs a path into the common git directory of repository `repo` and
* append it in the provided buffer `sb`.
*/
-extern void strbuf_git_common_path(struct strbuf *sb,
- const struct repository *repo,
- const char *fmt, ...)
+void strbuf_git_common_path(struct strbuf *sb,
+ const struct repository *repo,
+ const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
/*
* Return a statically allocated path into the main repository's
* (the_repository) common git directory.
*/
-extern const char *git_common_path(const char *fmt, ...)
+const char *git_common_path(const char *fmt, ...)
__attribute__((format (printf, 1, 2)));
/*
* Return a path into the git directory of repository `repo`.
*/
-extern char *repo_git_path(const struct repository *repo,
- const char *fmt, ...)
+char *repo_git_path(const struct repository *repo,
+ const char *fmt, ...)
__attribute__((format (printf, 2, 3)));
/*
* Construct a path into the git directory of repository `repo` and append it
* to the provided buffer `sb`.
*/
-extern void strbuf_repo_git_path(struct strbuf *sb,
- const struct repository *repo,
- const char *fmt, ...)
+void strbuf_repo_git_path(struct strbuf *sb,
+ const struct repository *repo,
+ const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
/*
* Return a statically allocated path into the main repository's
* (the_repository) git directory.
*/
-extern const char *git_path(const char *fmt, ...)
+const char *git_path(const char *fmt, ...)
__attribute__((format (printf, 1, 2)));
/*
* Return a path into the main repository's (the_repository) git directory.
*/
-extern char *git_pathdup(const char *fmt, ...)
+char *git_pathdup(const char *fmt, ...)
__attribute__((format (printf, 1, 2)));
/*
* and place it in the provided buffer `buf`, the contents of the buffer will
* be overridden.
*/
-extern char *git_path_buf(struct strbuf *buf, const char *fmt, ...)
+char *git_path_buf(struct strbuf *buf, const char *fmt, ...)
__attribute__((format (printf, 2, 3)));
/*
* Construct a path into the main repository's (the_repository) git directory
* and append it to the provided buffer `sb`.
*/
-extern void strbuf_git_path(struct strbuf *sb, const char *fmt, ...)
+void strbuf_git_path(struct strbuf *sb, const char *fmt, ...)
__attribute__((format (printf, 2, 3)));
/*
*
* If the repository doesn't have a worktree NULL is returned.
*/
-extern char *repo_worktree_path(const struct repository *repo,
+char *repo_worktree_path(const struct repository *repo,
const char *fmt, ...)
__attribute__((format (printf, 2, 3)));
*
* If the repository doesn't have a worktree nothing will be appended to `sb`.
*/
-extern void strbuf_repo_worktree_path(struct strbuf *sb,
+void strbuf_repo_worktree_path(struct strbuf *sb,
const struct repository *repo,
const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
* Return a path into a submodule's git directory located at `path`. `path`
* must only reference a submodule of the main repository (the_repository).
*/
-extern char *git_pathdup_submodule(const char *path, const char *fmt, ...)
+char *git_pathdup_submodule(const char *path, const char *fmt, ...)
__attribute__((format (printf, 2, 3)));
/*
* append it to the provided buffer `sb`. `path` must only reference a
* submodule of the main repository (the_repository).
*/
-extern int strbuf_git_path_submodule(struct strbuf *sb, const char *path,
+int strbuf_git_path_submodule(struct strbuf *sb, const char *path,
const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
-extern void report_linked_checkout_garbage(void);
+void report_linked_checkout_garbage(void);
/*
* You can define a static memoized git path like:
return -1;
}
- if ($description !~ /^[0-9a-fA-F]{40} \S+ (\d+)$/) {
+ if ($description !~ /^[0-9a-fA-F]{40}(?:[0-9a-fA-F]{24})? \S+ (\d+)$/) {
carp "Unexpected result returned from git cat-file";
return -1;
}
void packet_flush(int fd)
{
packet_trace("0000", 4, 1);
- write_or_die(fd, "0000", 4);
+ if (write_in_full(fd, "0000", 4) < 0)
+ die_errno(_("unable to write flush packet"));
}
void packet_delim(int fd)
{
packet_trace("0001", 4, 1);
- write_or_die(fd, "0001", 4);
+ if (write_in_full(fd, "0001", 4) < 0)
+ die_errno(_("unable to write delim packet"));
}
int packet_flush_gently(int fd)
strbuf_add(buf, "0001", 4);
}
-static void set_packet_header(char *buf, const int size)
+void set_packet_header(char *buf, const int size)
{
static char hexchar[] = "0123456789abcdef";
return PACKET_READ_EOF;
}
- if ((options & PACKET_READ_DIE_ON_ERR_PACKET) &&
- starts_with(buffer, "ERR "))
- die(_("remote error: %s"), buffer + 4);
-
if ((options & PACKET_READ_CHOMP_NEWLINE) &&
len && buffer[len-1] == '\n')
len--;
buffer[len] = 0;
packet_trace(buffer, len, 0);
+
+ if ((options & PACKET_READ_DIE_ON_ERR_PACKET) &&
+ starts_with(buffer, "ERR "))
+ die(_("remote error: %s"), buffer + 4);
+
*pktlen = len;
return PACKET_READ_NORMAL;
}
void packet_write_fmt(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
void packet_buf_flush(struct strbuf *buf);
void packet_buf_delim(struct strbuf *buf);
+void set_packet_header(char *buf, int size);
void packet_write(int fd_out, const char *buf, size_t size);
void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len);
* Initialize a 'struct packet_reader' object which is an
* abstraction around the 'packet_read_with_status()' function.
*/
-extern void packet_reader_init(struct packet_reader *reader, int fd,
- char *src_buffer, size_t src_len,
- int options);
+void packet_reader_init(struct packet_reader *reader, int fd,
+ char *src_buffer, size_t src_len,
+ int options);
/*
* Perform a packet read and return the status of the read.
* 'line' is set to point at the read line
* PACKET_READ_FLUSH: 'pktlen' is set to '0' and 'line' is set to NULL
*/
-extern enum packet_read_status packet_reader_read(struct packet_reader *reader);
+enum packet_read_status packet_reader_read(struct packet_reader *reader);
/*
* Peek the next packet line without consuming it and return the status.
* Peeking multiple times without calling 'packet_reader_read()' will return
* the same result.
*/
-extern enum packet_read_status packet_reader_peek(struct packet_reader *reader);
+enum packet_read_status packet_reader_peek(struct packet_reader *reader);
#define DEFAULT_PACKET_MAX 1000
#define LARGE_PACKET_MAX 65520
#include <string.h>
#include "sha1.h"
-extern void ppc_sha1_core(uint32_t *hash, const unsigned char *p,
- unsigned int nblocks);
+void ppc_sha1_core(uint32_t *hash, const unsigned char *p,
+ unsigned int nblocks);
int ppc_SHA1_Init(ppc_SHA_CTX *c)
{
{ "fuller", CMIT_FMT_FULLER, 0, 8 },
{ "full", CMIT_FMT_FULL, 0, 8 },
{ "oneline", CMIT_FMT_ONELINE, 1, 0 }
+ /*
+ * Please update $__git_log_pretty_formats in
+ * git-completion.bash when you add new formats.
+ */
};
commit_formats_len = ARRAY_SIZE(builtin_formats);
builtin_formats_len = commit_formats_len;
return !(isalnum(ch) || ch == '!' || ch == '*' || ch == '+' || ch == '-' || ch == '/');
}
-static int needs_rfc2047_encoding(const char *line, int len,
- enum rfc2047_type type)
+static int needs_rfc2047_encoding(const char *line, int len)
{
int i;
}
strbuf_addstr(sb, "From: ");
- if (needs_rfc2047_encoding(namebuf, namelen, RFC2047_ADDRESS)) {
+ if (needs_rfc2047_encoding(namebuf, namelen)) {
add_rfc2047(sb, namebuf, namelen,
encoding, RFC2047_ADDRESS);
max_length = 76; /* per rfc2047 */
return rest - placeholder;
}
-static size_t parse_padding_placeholder(struct strbuf *sb,
- const char *placeholder,
+static size_t parse_padding_placeholder(const char *placeholder,
struct format_commit_context *c)
{
const char *ch = placeholder;
return 0;
}
-static int match_placeholder_arg(const char *to_parse, const char *candidate,
- const char **end)
+static int match_placeholder_arg_value(const char *to_parse, const char *candidate,
+ const char **end, const char **valuestart,
+ size_t *valuelen)
{
const char *p;
if (!(skip_prefix(to_parse, candidate, &p)))
return 0;
+ if (valuestart) {
+ if (*p == '=') {
+ *valuestart = p + 1;
+ *valuelen = strcspn(*valuestart, ",)");
+ p = *valuestart + *valuelen;
+ } else {
+ if (*p != ',' && *p != ')')
+ return 0;
+ *valuestart = NULL;
+ *valuelen = 0;
+ }
+ }
if (*p == ',') {
*end = p + 1;
return 1;
return 0;
}
+static int match_placeholder_bool_arg(const char *to_parse, const char *candidate,
+ const char **end, int *val)
+{
+ const char *argval;
+ char *strval;
+ size_t arglen;
+ int v;
+
+ if (!match_placeholder_arg_value(to_parse, candidate, end, &argval, &arglen))
+ return 0;
+
+ if (!argval) {
+ *val = 1;
+ return 1;
+ }
+
+ strval = xstrndup(argval, arglen);
+ v = git_parse_maybe_bool(strval);
+ free(strval);
+
+ if (v == -1)
+ return 0;
+
+ *val = v;
+
+ return 1;
+}
+
+static int format_trailer_match_cb(const struct strbuf *key, void *ud)
+{
+ const struct string_list *list = ud;
+ const struct string_list_item *item;
+
+ for_each_string_list_item (item, list) {
+ if (key->len == (uintptr_t)item->util &&
+ !strncasecmp(item->string, key->buf, key->len))
+ return 1;
+ }
+ return 0;
+}
+
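Taken together with the argument parsing below, this filter enables pretty-format strings along the lines of `%(trailers:key=Signed-off-by,valueonly,separator=%x2C)`, which keeps only Signed-off-by trailers, prints just their values, and joins them with commas; the separator has to be spelled as a `%xNN` literal because a raw comma would end the argument.
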
static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
const char *placeholder,
void *context)
const char *msg = c->message;
struct commit_list *p;
const char *arg;
- int ch;
+ size_t res;
char **slot;
/* these are independent of the commit */
+ res = strbuf_expand_literal_cb(sb, placeholder, NULL);
+ if (res)
+ return res;
+
switch (placeholder[0]) {
case 'C':
if (starts_with(placeholder + 1, "(auto)")) {
*/
return ret;
}
- case 'n': /* newline */
- strbuf_addch(sb, '\n');
- return 1;
- case 'x':
- /* %x00 == NUL, %x0a == LF, etc. */
- ch = hex2chr(placeholder + 1);
- if (ch < 0)
- return 0;
- strbuf_addch(sb, ch);
- return 3;
case 'w':
if (placeholder[1] == '(') {
unsigned long width = 0, indent1 = 0, indent2 = 0;
case '<':
case '>':
- return parse_padding_placeholder(sb, placeholder, c);
+ return parse_padding_placeholder(placeholder, c);
}
/* these depend on the commit */
if (skip_prefix(placeholder, "(trailers", &arg)) {
struct process_trailer_options opts = PROCESS_TRAILER_OPTIONS_INIT;
+ struct string_list filter_list = STRING_LIST_INIT_NODUP;
+ struct strbuf sepbuf = STRBUF_INIT;
+ size_t ret = 0;
opts.no_divider = 1;
if (*arg == ':') {
arg++;
for (;;) {
- if (match_placeholder_arg(arg, "only", &arg))
+ const char *argval;
+ size_t arglen;
+
+ if (match_placeholder_arg_value(arg, "key", &arg, &argval, &arglen)) {
+ uintptr_t len = arglen;
+
+ if (!argval)
+ goto trailer_out;
+
+ if (len && argval[len - 1] == ':')
+ len--;
+ string_list_append(&filter_list, argval)->util = (char *)len;
+
+ opts.filter = format_trailer_match_cb;
+ opts.filter_data = &filter_list;
opts.only_trailers = 1;
- else if (match_placeholder_arg(arg, "unfold", &arg))
- opts.unfold = 1;
- else
+ } else if (match_placeholder_arg_value(arg, "separator", &arg, &argval, &arglen)) {
+ char *fmt;
+
+ strbuf_reset(&sepbuf);
+ fmt = xstrndup(argval, arglen);
+ strbuf_expand(&sepbuf, fmt, strbuf_expand_literal_cb, NULL);
+ free(fmt);
+ opts.separator = &sepbuf;
+ } else if (!match_placeholder_bool_arg(arg, "only", &arg, &opts.only_trailers) &&
+ !match_placeholder_bool_arg(arg, "unfold", &arg, &opts.unfold) &&
+ !match_placeholder_bool_arg(arg, "valueonly", &arg, &opts.value_only))
break;
}
}
if (*arg == ')') {
format_trailers_from_commit(sb, msg + c->subject_off, &opts);
- return arg - placeholder + 1;
+ ret = arg - placeholder + 1;
}
+ trailer_out:
+ string_list_clear(&filter_list, 0);
+ strbuf_release(&sepbuf);
+ return ret;
}
return 0; /* unknown placeholder */
if (pp->print_email_subject) {
if (pp->rev)
fmt_output_email_subject(sb, pp->rev);
- if (needs_rfc2047_encoding(title.buf, title.len, RFC2047_SUBJECT))
+ if (needs_rfc2047_encoding(title.buf, title.len))
add_rfc2047(sb, title.buf, title.len,
encoding, RFC2047_SUBJECT);
else
/*
* Add the "thing" to the queue.
*/
-extern void prio_queue_put(struct prio_queue *, void *thing);
+void prio_queue_put(struct prio_queue *, void *thing);
/*
* Extract the "thing" that compares the smallest out of the queue,
* or NULL. If compare function is NULL, the queue acts as a LIFO
* stack.
*/
-extern void *prio_queue_get(struct prio_queue *);
+void *prio_queue_get(struct prio_queue *);
/*
* Gain access to the "thing" that would be returned by
* prio_queue_get, but do not remove it from the queue.
*/
-extern void *prio_queue_peek(struct prio_queue *);
+void *prio_queue_peek(struct prio_queue *);
-extern void clear_prio_queue(struct prio_queue *);
+void clear_prio_queue(struct prio_queue *);
/* Reverse the LIFO elements */
-extern void prio_queue_reverse(struct prio_queue *);
+void prio_queue_reverse(struct prio_queue *);
#endif /* PRIO_QUEUE_H */
* published by the Free Software Foundation.
*/
-#include "git-compat-util.h"
+#include "cache.h"
#include "gettext.h"
#include "progress.h"
#include "strbuf.h"
#include "trace.h"
+#include "utf8.h"
#define TP_IDX_MAX 8
uint64_t total;
unsigned last_percent;
unsigned delay;
+ unsigned sparse;
struct throughput *throughput;
uint64_t start_ns;
+ struct strbuf counters_sb;
+ int title_len;
+ int split;
};
static volatile sig_atomic_t progress_update;
return tpgrp < 0 || tpgrp == getpgid(0);
}
-static int display(struct progress *progress, uint64_t n, const char *done)
+static void display(struct progress *progress, uint64_t n, const char *done)
{
- const char *eol, *tp;
+ const char *tp;
+ struct strbuf *counters_sb = &progress->counters_sb;
+ int show_update = 0;
+ int last_count_len = counters_sb->len;
if (progress->delay && (!progress_update || --progress->delay))
- return 0;
+ return;
progress->last_value = n;
tp = (progress->throughput) ? progress->throughput->display.buf : "";
- eol = done ? done : " \r";
if (progress->total) {
unsigned percent = n * 100 / progress->total;
if (percent != progress->last_percent || progress_update) {
progress->last_percent = percent;
- if (is_foreground_fd(fileno(stderr)) || done) {
- fprintf(stderr, "%s: %3u%% (%"PRIuMAX"/%"PRIuMAX")%s%s",
- progress->title, percent,
- (uintmax_t)n, (uintmax_t)progress->total,
- tp, eol);
- fflush(stderr);
- }
- progress_update = 0;
- return 1;
+
+ strbuf_reset(counters_sb);
+ strbuf_addf(counters_sb,
+ "%3u%% (%"PRIuMAX"/%"PRIuMAX")%s", percent,
+ (uintmax_t)n, (uintmax_t)progress->total,
+ tp);
+ show_update = 1;
}
} else if (progress_update) {
+ strbuf_reset(counters_sb);
+ strbuf_addf(counters_sb, "%"PRIuMAX"%s", (uintmax_t)n, tp);
+ show_update = 1;
+ }
+
+ if (show_update) {
if (is_foreground_fd(fileno(stderr)) || done) {
- fprintf(stderr, "%s: %"PRIuMAX"%s%s",
- progress->title, (uintmax_t)n, tp, eol);
+ const char *eol = done ? done : "\r";
+ size_t clear_len = counters_sb->len < last_count_len ?
+ last_count_len - counters_sb->len + 1 :
+ 0;
+ size_t progress_line_len = progress->title_len +
+ counters_sb->len + 2;
+ int cols = term_columns();
+
+ if (progress->split) {
+ fprintf(stderr, " %s%*s", counters_sb->buf,
+ (int) clear_len, eol);
+ } else if (!done && cols < progress_line_len) {
+ clear_len = progress->title_len + 1 < cols ?
+ cols - progress->title_len : 0;
+ fprintf(stderr, "%s:%*s\n %s%s",
+ progress->title, (int) clear_len, "",
+ counters_sb->buf, eol);
+ progress->split = 1;
+ } else {
+ fprintf(stderr, "%s: %s%*s", progress->title,
+ counters_sb->buf, (int) clear_len, eol);
+ }
fflush(stderr);
}
progress_update = 0;
- return 1;
}
-
- return 0;
}
static void throughput_string(struct strbuf *buf, uint64_t total,
now_ns = getnanotime();
if (!tp) {
- progress->throughput = tp = calloc(1, sizeof(*tp));
- if (tp) {
- tp->prev_total = tp->curr_total = total;
- tp->prev_ns = now_ns;
- strbuf_init(&tp->display, 0);
- }
+ progress->throughput = tp = xcalloc(1, sizeof(*tp));
+ tp->prev_total = tp->curr_total = total;
+ tp->prev_ns = now_ns;
+ strbuf_init(&tp->display, 0);
return;
}
tp->curr_total = total;
display(progress, progress->last_value, NULL);
}
-int display_progress(struct progress *progress, uint64_t n)
+void display_progress(struct progress *progress, uint64_t n)
{
- return progress ? display(progress, n, NULL) : 0;
+ if (progress)
+ display(progress, n, NULL);
}
static struct progress *start_progress_delay(const char *title, uint64_t total,
- unsigned delay)
+ unsigned delay, unsigned sparse)
{
- struct progress *progress = malloc(sizeof(*progress));
- if (!progress) {
- /* unlikely, but here's a good fallback */
- fprintf(stderr, "%s...\n", title);
- fflush(stderr);
- return NULL;
- }
+ struct progress *progress = xmalloc(sizeof(*progress));
progress->title = title;
progress->total = total;
progress->last_value = -1;
progress->last_percent = -1;
progress->delay = delay;
+ progress->sparse = sparse;
progress->throughput = NULL;
progress->start_ns = getnanotime();
+ strbuf_init(&progress->counters_sb, 0);
+ progress->title_len = utf8_strwidth(title);
+ progress->split = 0;
set_progress_signal();
return progress;
}
struct progress *start_delayed_progress(const char *title, uint64_t total)
{
- return start_progress_delay(title, total, 2);
+ return start_progress_delay(title, total, 2, 0);
}
struct progress *start_progress(const char *title, uint64_t total)
{
- return start_progress_delay(title, total, 0);
+ return start_progress_delay(title, total, 0, 0);
+}
+
+/*
+ * Here "sparse" means that the caller might use some sampling criteria to
+ * decide when to call display_progress() rather than calling it for every
+ * integer value in [0 .. total).  In particular, the caller might not call
+ * display_progress() for the last value in the range.
+ *
+ * When "sparse" is set, stop_progress() will automatically force the done
+ * message to show 100%.
+ */
+struct progress *start_sparse_progress(const char *title, uint64_t total)
+{
+ return start_progress_delay(title, total, 0, 1);
+}
+
+struct progress *start_delayed_sparse_progress(const char *title,
+ uint64_t total)
+{
+ return start_progress_delay(title, total, 2, 1);
+}
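A hedged sketch of the intended calling pattern; process_item() and the sampling interval are invented for the example:

static void scan_items(uint64_t nr_items)
{
	struct progress *progress = start_sparse_progress("Scanning items",
							  nr_items);
	uint64_t i;

	for (i = 0; i < nr_items; i++) {
		process_item(i);	/* invented helper */
		if (!(i % 1024))	/* arbitrary sampling interval */
			display_progress(progress, i);
	}
	stop_progress(&progress);	/* forces the final 100% for sparse progress */
}
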
+
+static void finish_if_sparse(struct progress *progress)
+{
+ if (progress &&
+ progress->sparse &&
+ progress->last_value != progress->total)
+ display_progress(progress, progress->total);
}
void stop_progress(struct progress **p_progress)
{
+ finish_if_sparse(*p_progress);
+
stop_progress_msg(p_progress, _("done"));
}
free(buf);
}
clear_progress_signal();
+ strbuf_release(&progress->counters_sb);
if (progress->throughput)
strbuf_release(&progress->throughput->display);
free(progress->throughput);
struct progress;
void display_throughput(struct progress *progress, uint64_t total);
-int display_progress(struct progress *progress, uint64_t n);
+void display_progress(struct progress *progress, uint64_t n);
struct progress *start_progress(const char *title, uint64_t total);
+struct progress *start_sparse_progress(const char *title, uint64_t total);
struct progress *start_delayed_progress(const char *title, uint64_t total);
+struct progress *start_delayed_sparse_progress(const char *title,
+ uint64_t total);
void stop_progress(struct progress **progress);
void stop_progress_msg(struct progress **progress, const char *msg);
enum protocol_version get_protocol_version_config(void)
{
const char *value;
+ enum protocol_version retval = protocol_v0;
+ const char *git_test_k = "GIT_TEST_PROTOCOL_VERSION";
+ const char *git_test_v = getenv(git_test_k);
+
if (!git_config_get_string_const("protocol.version", &value)) {
enum protocol_version version = parse_protocol_version(value);
die("unknown value for config 'protocol.version': %s",
value);
- return version;
+ retval = version;
+ }
+
+ if (git_test_v && *git_test_v) {
+ enum protocol_version env = parse_protocol_version(git_test_v);
+
+ if (env == protocol_unknown_version)
+ die("unknown value for %s: %s", git_test_k, git_test_v);
+ if (retval < env)
+ retval = env;
}
- return protocol_v0;
+ return retval;
}
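For example, with `protocol.version` set to `0` in the configuration but `GIT_TEST_PROTOCOL_VERSION=2` exported in the environment, the larger of the two wins and the function returns `protocol_v2`; an unparsable value in either place still dies.
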
enum protocol_version determine_protocol_version_server(void)
* 'protocol.version' config. If unconfigured, a value of 'protocol_v0' is
* returned.
*/
-extern enum protocol_version get_protocol_version_config(void);
+enum protocol_version get_protocol_version_config(void);
/*
* Used by a server to determine which protocol version should be used based on
* request a particular protocol version, a default of 'protocol_v0' will be
* used.
*/
-extern enum protocol_version determine_protocol_version_server(void);
+enum protocol_version determine_protocol_version_server(void);
/*
* Used by a client to determine which protocol version the server is speaking
* based on the server's initial response.
*/
-extern enum protocol_version determine_protocol_version_client(const char *server_response);
+enum protocol_version determine_protocol_version_client(const char *server_response);
#endif /* PROTOCOL_H */
* sq_quotef() quotes the entire formatted string as a single result.
*/
-extern void sq_quote_buf(struct strbuf *, const char *src);
-extern void sq_quote_argv(struct strbuf *, const char **argv);
-extern void sq_quotef(struct strbuf *, const char *fmt, ...);
+void sq_quote_buf(struct strbuf *, const char *src);
+void sq_quote_argv(struct strbuf *, const char **argv);
+void sq_quotef(struct strbuf *, const char *fmt, ...);
/*
* These match their non-pretty variants, except that they avoid
* NULL if the input does not look like what sq_quote would have
* produced.
*/
-extern char *sq_dequote(char *);
+char *sq_dequote(char *);
/*
* Same as the above, but can be used to unwrap many arguments in the
* same string separated by space. Like sq_quote, it works in place,
* modifying arg and appending pointers into it to argv.
*/
-extern int sq_dequote_to_argv(char *arg, const char ***argv, int *nr, int *alloc);
+int sq_dequote_to_argv(char *arg, const char ***argv, int *nr, int *alloc);
/*
* Same as above, but store the unquoted strings in an argv_array. We will
* will duplicate and take ownership of the strings.
*/
struct argv_array;
-extern int sq_dequote_to_argv_array(char *arg, struct argv_array *);
+int sq_dequote_to_argv_array(char *arg, struct argv_array *);
-extern int unquote_c_style(struct strbuf *, const char *quoted, const char **endp);
-extern size_t quote_c_style(const char *name, struct strbuf *, FILE *, int no_dq);
-extern void quote_two_c_style(struct strbuf *, const char *, const char *, int);
+int unquote_c_style(struct strbuf *, const char *quoted, const char **endp);
+size_t quote_c_style(const char *name, struct strbuf *, FILE *, int no_dq);
+void quote_two_c_style(struct strbuf *, const char *, const char *, int);
-extern void write_name_quoted(const char *name, FILE *, int terminator);
-extern void write_name_quoted_relative(const char *name, const char *prefix,
- FILE *fp, int terminator);
+void write_name_quoted(const char *name, FILE *, int terminator);
+void write_name_quoted_relative(const char *name, const char *prefix,
+ FILE *fp, int terminator);
/* quote path as relative to the given prefix */
-extern char *quote_path_relative(const char *in, const char *prefix,
+char *quote_path_relative(const char *in, const char *prefix,
struct strbuf *out);
/* quoting as a string literal for other languages */
-extern void perl_quote_buf(struct strbuf *sb, const char *src);
-extern void python_quote_buf(struct strbuf *sb, const char *src);
-extern void tcl_quote_buf(struct strbuf *sb, const char *src);
-extern void basic_regex_quote_buf(struct strbuf *sb, const char *src);
+void perl_quote_buf(struct strbuf *sb, const char *src);
+void python_quote_buf(struct strbuf *sb, const char *src);
+void tcl_quote_buf(struct strbuf *sb, const char *src);
+void basic_regex_quote_buf(struct strbuf *sb, const char *src);
#endif
#include "packfile.h"
#include "worktree.h"
#include "object-store.h"
+#include "pack-bitmap.h"
struct connectivity_progress {
struct progress *progress;
FOR_EACH_OBJECT_LOCAL_ONLY);
}
+static void *lookup_object_by_type(struct repository *r,
+ const struct object_id *oid,
+ enum object_type type)
+{
+ switch (type) {
+ case OBJ_COMMIT:
+ return lookup_commit(r, oid);
+ case OBJ_TREE:
+ return lookup_tree(r, oid);
+ case OBJ_TAG:
+ return lookup_tag(r, oid);
+ case OBJ_BLOB:
+ return lookup_blob(r, oid);
+ default:
+ die("BUG: unknown object type %d", type);
+ }
+}
+
+static int mark_object_seen(const struct object_id *oid,
+ enum object_type type,
+ int exclude,
+ uint32_t name_hash,
+ struct packed_git *found_pack,
+ off_t found_offset)
+{
+ struct object *obj = lookup_object_by_type(the_repository, oid, type);
+ if (!obj)
+ die("unable to create object '%s'", oid_to_hex(oid));
+
+ obj->flags |= SEEN;
+ return 0;
+}
+
void mark_reachable_objects(struct rev_info *revs, int mark_reflog,
timestamp_t mark_recent, struct progress *progress)
{
struct connectivity_progress cp;
+ struct bitmap_index *bitmap_git;
/*
* Set up revision parsing, and mark us as being interested
cp.progress = progress;
cp.count = 0;
+ bitmap_git = prepare_bitmap_walk(revs);
+ if (bitmap_git) {
+ traverse_bitmap_commit_list(bitmap_git, mark_object_seen);
+ free_bitmap_index(bitmap_git);
+ return;
+ }
+
/*
* Set up the revision walk - this will move all commits
* from the pending list to the commit walking list.
struct progress;
struct rev_info;
-extern int add_unseen_recent_objects_to_traversal(struct rev_info *revs,
- timestamp_t timestamp);
-extern void mark_reachable_objects(struct rev_info *revs, int mark_reflog,
- timestamp_t mark_recent, struct progress *);
+int add_unseen_recent_objects_to_traversal(struct rev_info *revs,
+ timestamp_t timestamp);
+void mark_reachable_objects(struct rev_info *revs, int mark_reflog,
+ timestamp_t mark_recent, struct progress *);
#endif
#include "commit.h"
#include "blob.h"
#include "resolve-undo.h"
+#include "run-command.h"
#include "strbuf.h"
#include "varint.h"
#include "split-index.h"
* CE_REMOVE is set in ce_flags. This is much more effective than
* calling remove_index_entry_at() for each entry to be removed.
*/
-void remove_marked_cache_entries(struct index_state *istate)
+void remove_marked_cache_entries(struct index_state *istate, int invalidate)
{
struct cache_entry **ce_array = istate->cache;
unsigned int i, j;
for (i = j = 0; i < istate->cache_nr; i++) {
if (ce_array[i]->ce_flags & CE_REMOVE) {
+ if (invalidate) {
+ cache_tree_invalidate_path(istate,
+ ce_array[i]->name);
+ untracked_cache_remove_from_index(istate,
+ ce_array[i]->name);
+ }
remove_name_hash(istate, ce_array[i]);
save_or_free_index_entry(istate, ce_array[i]);
}
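An illustrative caller (not from this patch) that marks entries and then compacts the index in one pass, asking for the cache-tree and untracked cache to be invalidated for the removed paths:

static void drop_old_dir(struct index_state *istate)
{
	unsigned int i;

	for (i = 0; i < istate->cache_nr; i++)
		if (starts_with(istate->cache[i]->name, "old-dir/"))
			istate->cache[i]->ce_flags |= CE_REMOVE;

	/* compact in one pass; '1' invalidates cache-tree and untracked cache */
	remove_marked_cache_entries(istate, 1);
}
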
int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|
(intent_only ? ADD_CACHE_NEW_ONLY : 0));
int hash_flags = HASH_WRITE_OBJECT;
+ struct object_id oid;
if (flags & ADD_CACHE_RENORMALIZE)
hash_flags |= HASH_RENORMALIZE;
namelen = strlen(path);
if (S_ISDIR(st_mode)) {
+ if (resolve_gitlink_ref(path, "HEAD", &oid) < 0)
+ return error(_("'%s' does not have a commit checked out"), path);
while (namelen && path[namelen-1] == '/')
namelen--;
}
uint32_t uid;
uint32_t gid;
uint32_t size;
- unsigned char sha1[20];
- uint16_t flags;
- char name[FLEX_ARRAY]; /* more */
-};
-
-/*
- * This struct is used when CE_EXTENDED bit is 1
- * The struct must match ondisk_cache_entry exactly from
- * ctime till flags
- */
-struct ondisk_cache_entry_extended {
- struct cache_time ctime;
- struct cache_time mtime;
- uint32_t dev;
- uint32_t ino;
- uint32_t mode;
- uint32_t uid;
- uint32_t gid;
- uint32_t size;
- unsigned char sha1[20];
- uint16_t flags;
- uint16_t flags2;
- char name[FLEX_ARRAY]; /* more */
+ /*
+ * unsigned char hash[hashsz];
+ * uint16_t flags;
+ * if (flags & CE_EXTENDED)
+ * uint16_t flags2;
+ */
+ unsigned char data[GIT_MAX_RAWSZ + 2 * sizeof(uint16_t)];
+ char name[FLEX_ARRAY];
};
/* These are only used for v3 or lower */
#define align_padding_size(size, len) ((size + (len) + 8) & ~7) - (size + len)
-#define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,name) + (len) + 8) & ~7)
+#define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,data) + (len) + 8) & ~7)
#define ondisk_cache_entry_size(len) align_flex_name(ondisk_cache_entry,len)
-#define ondisk_cache_entry_extended_size(len) align_flex_name(ondisk_cache_entry_extended,len)
-#define ondisk_ce_size(ce) (((ce)->ce_flags & CE_EXTENDED) ? \
- ondisk_cache_entry_extended_size(ce_namelen(ce)) : \
- ondisk_cache_entry_size(ce_namelen(ce)))
+#define ondisk_data_size(flags, len) (the_hash_algo->rawsz + \
+ ((flags & CE_EXTENDED) ? 2 : 1) * sizeof(uint16_t) + len)
+#define ondisk_data_size_max(len) (ondisk_data_size(CE_EXTENDED, len))
+#define ondisk_ce_size(ce) (ondisk_cache_entry_size(ondisk_data_size((ce)->ce_flags, ce_namelen(ce))))
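A worked example, assuming SHA-1 (rawsz = 20) and the usual packing in which the fixed-width members before `data` add up to 40 bytes:

/*
 * Non-extended entry for the name "Makefile" (8 bytes):
 *   ondisk_data_size = 20 (hash) + 2 (flags) + 8 (name) = 30
 *   ondisk_ce_size   = ((40 + 30 + 8) & ~7) = 72 bytes on disk,
 * i.e. the name is followed by 1-8 NUL bytes to pad the entry to a
 * multiple of eight (index v3 and older).
 */
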
/* Allow fsck to force verification of the index checksum. */
int verify_index_checksum;
struct cache_entry *ce;
size_t len;
const char *name;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ const uint16_t *flagsp = (const uint16_t *)(ondisk->data + hashsz);
unsigned int flags;
size_t copy_len = 0;
/*
int expand_name_field = version == 4;
/* On-disk flags are just 16 bits */
- flags = get_be16(&ondisk->flags);
+ flags = get_be16(flagsp);
len = flags & CE_NAMEMASK;
if (flags & CE_EXTENDED) {
- struct ondisk_cache_entry_extended *ondisk2;
int extended_flags;
- ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
- extended_flags = get_be16(&ondisk2->flags2) << 16;
+ extended_flags = get_be16(flagsp + 1) << 16;
/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
if (extended_flags & ~CE_EXTENDED_FLAGS)
die(_("unknown index entry format 0x%08x"), extended_flags);
flags |= extended_flags;
- name = ondisk2->name;
+ name = (const char *)(flagsp + 2);
}
else
- name = ondisk->name;
+ name = (const char *)(flagsp + 1);
if (expand_name_field) {
const unsigned char *cp = (const unsigned char *)name;
ce->ce_flags = flags & ~CE_NAMEMASK;
ce->ce_namelen = len;
ce->index = 0;
- hashcpy(ce->oid.hash, ondisk->sha1);
+ hashcpy(ce->oid.hash, ondisk->data);
+ memcpy(ce->name, name, len);
+ ce->name[len] = '\0';
if (expand_name_field) {
if (copy_len)
load_index_extensions(&p);
}
munmap((void *)mmap, mmap_size);
+
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_data_intmax("index", the_repository, "read/version",
+ istate->version);
+ trace2_data_intmax("index", the_repository, "read/cache_nr",
+ istate->cache_nr);
+
return istate->cache_nr;
unmap:
if (istate->initialized)
return istate->cache_nr;
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_region_enter_printf("index", "do_read_index", the_repository,
+ "%s", path);
trace_performance_enter();
ret = do_read_index(istate, path, 0);
trace_performance_leave("read cache %s", path);
+ trace2_region_leave_printf("index", "do_read_index", the_repository,
+ "%s", path);
split_index = istate->split_index;
if (!split_index || is_null_oid(&split_index->base_oid)) {
base_oid_hex = oid_to_hex(&split_index->base_oid);
base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
+ trace2_region_enter_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path);
ret = do_read_index(split_index->base, base_path, 1);
+ trace2_region_leave_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path);
if (!oideq(&split_index->base_oid, &split_index->base->oid))
die(_("broken index, expect %s in %s, got %s"),
base_oid_hex, base_path,
struct cache_entry *ce)
{
short flags;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ uint16_t *flagsp = (uint16_t *)(ondisk->data + hashsz);
ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);
ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);
ondisk->uid = htonl(ce->ce_stat_data.sd_uid);
ondisk->gid = htonl(ce->ce_stat_data.sd_gid);
ondisk->size = htonl(ce->ce_stat_data.sd_size);
- hashcpy(ondisk->sha1, ce->oid.hash);
+ hashcpy(ondisk->data, ce->oid.hash);
flags = ce->ce_flags & ~CE_NAMEMASK;
flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce));
- ondisk->flags = htons(flags);
+ flagsp[0] = htons(flags);
if (ce->ce_flags & CE_EXTENDED) {
- struct ondisk_cache_entry_extended *ondisk2;
- ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
- ondisk2->flags2 = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
+ flagsp[1] = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
}
}
stripped_name = 1;
}
- if (ce->ce_flags & CE_EXTENDED)
- size = offsetof(struct ondisk_cache_entry_extended, name);
- else
- size = offsetof(struct ondisk_cache_entry, name);
+ size = offsetof(struct ondisk_cache_entry,data) + ondisk_data_size(ce->ce_flags, 0);
if (!previous_name) {
int len = ce_namelen(ce);
struct cache_entry **cache = istate->cache;
int entries = istate->cache_nr;
struct stat st;
- struct ondisk_cache_entry_extended ondisk;
+ struct ondisk_cache_entry ondisk;
struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
int drop_cache_tree = istate->drop_cache_tree;
off_t offset;
return -1;
}
- if (!strip_extensions && istate->split_index) {
+ if (!strip_extensions && istate->split_index &&
+ !is_null_oid(&istate->split_index->base_oid)) {
struct strbuf sb = STRBUF_INIT;
err = write_link_extension(&sb, istate) < 0 ||
istate->timestamp.sec = (unsigned int)st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
+
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_data_intmax("index", the_repository, "write/version",
+ istate->version);
+ trace2_data_intmax("index", the_repository, "write/cache_nr",
+ istate->cache_nr);
+
return 0;
}
static int do_write_locked_index(struct index_state *istate, struct lock_file *lock,
unsigned flags)
{
- int ret = do_write_index(istate, lock->tempfile, 0);
+ int ret;
+
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_region_enter_printf("index", "do_write_index", the_repository,
+ "%s", lock->tempfile->filename.buf);
+ ret = do_write_index(istate, lock->tempfile, 0);
+ trace2_region_leave_printf("index", "do_write_index", the_repository,
+ "%s", lock->tempfile->filename.buf);
+
if (ret)
return ret;
if (flags & COMMIT_LOCK)
- return commit_locked_index(lock);
- return close_lock_file_gently(lock);
+ ret = commit_locked_index(lock);
+ else
+ ret = close_lock_file_gently(lock);
+
+ run_hook_le(NULL, "post-index-change",
+ istate->updated_workdir ? "1" : "0",
+ istate->updated_skipworktree ? "1" : "0", NULL);
+ istate->updated_workdir = 0;
+ istate->updated_skipworktree = 0;
+
+ return ret;
}
static int write_split_index(struct index_state *istate,
int ret;
move_cache_to_base_index(istate);
+
+ trace2_region_enter_printf("index", "shared/do_write_index",
+ the_repository, "%s", (*temp)->filename.buf);
ret = do_write_index(si->base, *temp, 1);
+ trace2_region_leave_printf("index", "shared/do_write_index",
+ the_repository, "%s", (*temp)->filename.buf);
+
if (ret)
return ret;
ret = adjust_shared_perm(get_tempfile_path(*temp));
ret = write_split_index(istate, lock, flags);
/* Freshen the shared index only if the split-index was written */
- if (!ret && !new_shared_index) {
+ if (!ret && !new_shared_index && !is_null_oid(&si->base_oid)) {
const char *shared_index = git_path("sharedindex.%s",
oid_to_hex(&si->base_oid));
freshen_shared_index(shared_index, 1);
#include "cache.h"
#include "commit.h"
-#include "rebase-interactive.h"
#include "sequencer.h"
+#include "rebase-interactive.h"
#include "strbuf.h"
+#include "commit-slab.h"
+#include "config.h"
+
+enum missing_commit_check_level {
+ MISSING_COMMIT_CHECK_IGNORE = 0,
+ MISSING_COMMIT_CHECK_WARN,
+ MISSING_COMMIT_CHECK_ERROR
+};
+
+static enum missing_commit_check_level get_missing_commit_check_level(void)
+{
+ const char *value;
+
+ if (git_config_get_value("rebase.missingcommitscheck", &value) ||
+ !strcasecmp("ignore", value))
+ return MISSING_COMMIT_CHECK_IGNORE;
+ if (!strcasecmp("warn", value))
+ return MISSING_COMMIT_CHECK_WARN;
+ if (!strcasecmp("error", value))
+ return MISSING_COMMIT_CHECK_ERROR;
+ warning(_("unrecognized setting %s for option "
+ "rebase.missingCommitsCheck. Ignoring."), value);
+ return MISSING_COMMIT_CHECK_IGNORE;
+}
-void append_todo_help(unsigned edit_todo, unsigned keep_empty,
+void append_todo_help(unsigned keep_empty, int command_count,
+ const char *shortrevisions, const char *shortonto,
struct strbuf *buf)
{
const char *msg = _("\nCommands:\n"
". specified). Use -c <commit> to reword the commit message.\n"
"\n"
"These lines can be re-ordered; they are executed from top to bottom.\n");
+ unsigned edit_todo = !(shortrevisions && shortonto);
+
+ if (!edit_todo) {
+ strbuf_addch(buf, '\n');
+ strbuf_commented_addf(buf, Q_("Rebase %s onto %s (%d command)",
+ "Rebase %s onto %s (%d commands)",
+ command_count),
+ shortrevisions, shortonto, command_count);
+ }
strbuf_add_commented_lines(buf, msg, strlen(msg));
}
}
-int edit_todo_list(struct repository *r, unsigned flags)
+int edit_todo_list(struct repository *r, struct todo_list *todo_list,
+ struct todo_list *new_todo, const char *shortrevisions,
+ const char *shortonto, unsigned flags)
{
- struct strbuf buf = STRBUF_INIT;
const char *todo_file = rebase_path_todo();
+ unsigned initial = shortrevisions && shortonto;
- if (strbuf_read_file(&buf, todo_file, 0) < 0)
- return error_errno(_("could not read '%s'."), todo_file);
+	/*
+	 * If the user is editing the todo list, we first try to parse
+	 * it. If there is an error, we do not return, because the user
+	 * might want to fix it in the first place.
+	 */
+ if (!initial)
+ todo_list_parse_insn_buffer(r, todo_list->buf.buf, todo_list);
- strbuf_stripspace(&buf, 1);
- if (write_message(buf.buf, buf.len, todo_file, 0)) {
- strbuf_release(&buf);
- return -1;
- }
+ if (todo_list_write_to_file(r, todo_list, todo_file, shortrevisions, shortonto,
+ -1, flags | TODO_LIST_SHORTEN_IDS | TODO_LIST_APPEND_TODO_HELP))
+ return error_errno(_("could not write '%s'"), todo_file);
+
+ if (initial && copy_file(rebase_path_todo_backup(), todo_file, 0666))
+ return error(_("could not copy '%s' to '%s'."), todo_file,
+ rebase_path_todo_backup());
- strbuf_release(&buf);
+ if (launch_sequence_editor(todo_file, &new_todo->buf, NULL))
+ return -2;
- transform_todos(r, flags | TODO_LIST_SHORTEN_IDS);
+ strbuf_stripspace(&new_todo->buf, 1);
+ if (initial && new_todo->buf.len == 0)
+ return -3;
- if (strbuf_read_file(&buf, todo_file, 0) < 0)
- return error_errno(_("could not read '%s'."), todo_file);
+	/*
+	 * For the initial edit, the todo list gets parsed in
+	 * complete_action().
+	 */
+ if (!initial)
+ return todo_list_parse_insn_buffer(r, new_todo->buf.buf, new_todo);
- append_todo_help(1, 0, &buf);
- if (write_message(buf.buf, buf.len, todo_file, 0)) {
- strbuf_release(&buf);
- return -1;
+ return 0;
+}
+
+define_commit_slab(commit_seen, unsigned char);
+/*
+ * Check if the user dropped some commits by mistake; the behaviour is
+ * determined by rebase.missingCommitsCheck. Also check whether there is
+ * an unrecognized command or a bad SHA-1 in a command.
+ */
+int todo_list_check(struct todo_list *old_todo, struct todo_list *new_todo)
+{
+ enum missing_commit_check_level check_level = get_missing_commit_check_level();
+ struct strbuf missing = STRBUF_INIT;
+ int res = 0, i;
+ struct commit_seen commit_seen;
+
+ init_commit_seen(&commit_seen);
+
+ if (check_level == MISSING_COMMIT_CHECK_IGNORE)
+ goto leave_check;
+
+ /* Mark the commits in git-rebase-todo as seen */
+ for (i = 0; i < new_todo->nr; i++) {
+ struct commit *commit = new_todo->items[i].commit;
+ if (commit)
+ *commit_seen_at(&commit_seen, commit) = 1;
}
- strbuf_release(&buf);
+	/* Find commits in git-rebase-todo.backup that are not yet seen */
+ for (i = old_todo->nr - 1; i >= 0; i--) {
+ struct todo_item *item = old_todo->items + i;
+ struct commit *commit = item->commit;
+ if (commit && !*commit_seen_at(&commit_seen, commit)) {
+ strbuf_addf(&missing, " - %s %.*s\n",
+ find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV),
+ item->arg_len,
+ todo_item_get_arg(old_todo, item));
+ *commit_seen_at(&commit_seen, commit) = 1;
+ }
+ }
- if (launch_sequence_editor(todo_file, NULL, NULL))
- return -1;
+ /* Warn about missing commits */
+ if (!missing.len)
+ goto leave_check;
- transform_todos(r, flags & ~(TODO_LIST_SHORTEN_IDS));
+ if (check_level == MISSING_COMMIT_CHECK_ERROR)
+ res = 1;
- return 0;
+ fprintf(stderr,
+ _("Warning: some commits may have been dropped accidentally.\n"
+ "Dropped commits (newer to older):\n"));
+
+ /* Make the list user-friendly and display */
+ fputs(missing.buf, stderr);
+ strbuf_release(&missing);
+
+ fprintf(stderr, _("To avoid this message, use \"drop\" to "
+ "explicitly remove a commit.\n\n"
+ "Use 'git config rebase.missingCommitsCheck' to change "
+ "the level of warnings.\n"
+ "The possible behaviours are: ignore, warn, error.\n\n"));
+
+leave_check:
+ clear_commit_seen(&commit_seen);
+ return res;
}
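The commit_seen slab above comes from Git's commit-slab facility. The exact expansion lives in commit-slab.h, but as a rough sketch (not the verbatim macro output), define_commit_slab(commit_seen, unsigned char) generates chunked, zero-initialized per-commit storage together with helpers of roughly this shape:

    struct commit_seen;                                     /* chunked array, one unsigned char per commit */
    void init_commit_seen(struct commit_seen *s);           /* set up an empty slab */
    unsigned char *commit_seen_at(struct commit_seen *s,
                                  const struct commit *c);  /* slot for c, allocated on demand, zeroed */
    void clear_commit_seen(struct commit_seen *s);          /* release all slab memory */

todo_list_check() uses the per-commit slot as a "seen" flag: it marks every commit still present in the new todo list, then reports any commit from the backup list whose slot is still zero as accidentally dropped.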
struct strbuf;
struct repository;
+struct todo_list;
-void append_todo_help(unsigned edit_todo, unsigned keep_empty,
+void append_todo_help(unsigned keep_empty, int command_count,
+ const char *shortrevisions, const char *shortonto,
struct strbuf *buf);
-int edit_todo_list(struct repository *r, unsigned flags);
+int edit_todo_list(struct repository *r, struct todo_list *todo_list,
+ struct todo_list *new_todo, const char *shortrevisions,
+ const char *shortonto, unsigned flags);
+int todo_list_check(struct todo_list *old_todo, struct todo_list *new_todo);
#endif
{ "if", SOURCE_NONE, FIELD_STR, if_atom_parser },
{ "then", SOURCE_NONE },
{ "else", SOURCE_NONE },
+ /*
+ * Please update $__git_ref_fieldlist in git-completion.bash
+ * when you add new atoms
+ */
};
#define REF_FORMATTING_STATE_INIT { 0, NULL }
}
/* See grab_values */
-static void grab_tag_values(struct atom_value *val, int deref, struct object *obj, void *buf, unsigned long sz)
+static void grab_tag_values(struct atom_value *val, int deref, struct object *obj)
{
int i;
struct tag *tag = (struct tag *) obj;
}
/* See grab_values */
-static void grab_commit_values(struct atom_value *val, int deref, struct object *obj, void *buf, unsigned long sz)
+static void grab_commit_values(struct atom_value *val, int deref, struct object *obj)
{
int i;
struct commit *commit = (struct commit *) obj;
}
}
-static const char *find_wholine(const char *who, int wholen, const char *buf, unsigned long sz)
+static const char *find_wholine(const char *who, int wholen, const char *buf)
{
const char *eol;
while (*buf) {
}
/* See grab_values */
-static void grab_person(const char *who, struct atom_value *val, int deref, struct object *obj, void *buf, unsigned long sz)
+static void grab_person(const char *who, struct atom_value *val, int deref, void *buf)
{
int i;
int wholen = strlen(who);
!starts_with(name + wholen, "date"))
continue;
if (!wholine)
- wholine = find_wholine(who, wholen, buf, sz);
+ wholine = find_wholine(who, wholen, buf);
if (!wholine)
return; /* no point looking for it */
if (name[wholen] == 0)
if (strcmp(who, "tagger") && strcmp(who, "committer"))
return; /* "author" for commit object is not wanted */
if (!wholine)
- wholine = find_wholine(who, wholen, buf, sz);
+ wholine = find_wholine(who, wholen, buf);
if (!wholine)
return;
for (i = 0; i < used_atom_cnt; i++) {
}
}
-static void find_subpos(const char *buf, unsigned long sz,
+static void find_subpos(const char *buf,
const char **sub, unsigned long *sublen,
const char **body, unsigned long *bodylen,
unsigned long *nonsiglen,
}
/* See grab_values */
-static void grab_sub_body_contents(struct atom_value *val, int deref, struct object *obj, void *buf, unsigned long sz)
+static void grab_sub_body_contents(struct atom_value *val, int deref, void *buf)
{
int i;
const char *subpos = NULL, *bodypos = NULL, *sigpos = NULL;
!starts_with(name, "contents"))
continue;
if (!subpos)
- find_subpos(buf, sz,
+ find_subpos(buf,
&subpos, &sublen,
&bodypos, &bodylen, &nonsiglen,
&sigpos, &siglen);
* pointed at by the ref itself; otherwise it is the object the
* ref (which is a tag) refers to.
*/
-static void grab_values(struct atom_value *val, int deref, struct object *obj, void *buf, unsigned long sz)
+static void grab_values(struct atom_value *val, int deref, struct object *obj, void *buf)
{
switch (obj->type) {
case OBJ_TAG:
- grab_tag_values(val, deref, obj, buf, sz);
- grab_sub_body_contents(val, deref, obj, buf, sz);
- grab_person("tagger", val, deref, obj, buf, sz);
+ grab_tag_values(val, deref, obj);
+ grab_sub_body_contents(val, deref, buf);
+ grab_person("tagger", val, deref, buf);
break;
case OBJ_COMMIT:
- grab_commit_values(val, deref, obj, buf, sz);
- grab_sub_body_contents(val, deref, obj, buf, sz);
- grab_person("author", val, deref, obj, buf, sz);
- grab_person("committer", val, deref, obj, buf, sz);
+ grab_commit_values(val, deref, obj);
+ grab_sub_body_contents(val, deref, buf);
+ grab_person("author", val, deref, buf);
+ grab_person("committer", val, deref, buf);
break;
case OBJ_TREE:
/* grab_tree_values(val, deref, obj, buf, sz); */
*s = show_ref(&atom->u.remote_ref.refname, refname);
else if (atom->u.remote_ref.option == RR_TRACK) {
if (stat_tracking_info(branch, &num_ours, &num_theirs,
- NULL, AHEAD_BEHIND_FULL) < 0) {
+ NULL, atom->u.remote_ref.push,
+ AHEAD_BEHIND_FULL) < 0) {
*s = xstrdup(msgs.gone);
} else if (!num_ours && !num_theirs)
*s = xstrdup("");
}
} else if (atom->u.remote_ref.option == RR_TRACKSHORT) {
if (stat_tracking_info(branch, &num_ours, &num_theirs,
- NULL, AHEAD_BEHIND_FULL) < 0) {
+ NULL, atom->u.remote_ref.push,
+ AHEAD_BEHIND_FULL) < 0) {
*s = xstrdup("");
return;
}
return strbuf_addf_ret(err, -1, _("parse_object_buffer failed on %s for %s"),
oid_to_hex(&oi->oid), ref->refname);
}
- grab_values(ref->value, deref, *obj, oi->content, oi->size);
+ grab_values(ref->value, deref, *obj, oi->content);
}
grab_common_values(ref->value, deref, oi);
int parse_opt_ref_sorting(const struct option *opt, const char *arg, int unset)
{
- if (!arg) /* should --no-sort void the list ? */
- return -1;
+ /*
+ * NEEDSWORK: We should probably clear the list in this case, but we've
+ * already munged the global used_atoms list, which would need to be
+ * undone.
+ */
+ BUG_ON_OPT_NEG(unset);
+
parse_ref_sorting(opt->value, arg);
return 0;
}
#define OPT_MERGED(f, h) _OPT_MERGED_NO_MERGED("merged", f, h)
#define OPT_NO_MERGED(f, h) _OPT_MERGED_NO_MERGED("no-merged", f, h)
+#define OPT_REF_SORT(var) \
+ OPT_CALLBACK_F(0, "sort", (var), \
+ N_("key"), N_("field name to sort on"), \
+ PARSE_OPT_NONEG, parse_opt_ref_sorting)
+
/*
* API for filtering a set of refs. Based on the type of refs the user
* has requested, we iterate through those refs and apply filters
struct commit;
struct reflog_walk_info;
-extern void init_reflog_walk(struct reflog_walk_info **info);
-extern int add_reflog_for_walk(struct reflog_walk_info *info,
- struct commit *commit, const char *name);
-extern void show_reflog_message(struct reflog_walk_info *info, int,
- const struct date_mode *, int force_date);
-extern void get_reflog_message(struct strbuf *sb,
- struct reflog_walk_info *reflog_info);
-extern const char *get_reflog_ident(struct reflog_walk_info *reflog_info);
-extern timestamp_t get_reflog_timestamp(struct reflog_walk_info *reflog_info);
-extern void get_reflog_selector(struct strbuf *sb,
- struct reflog_walk_info *reflog_info,
- const struct date_mode *dmode, int force_date,
- int shorten);
+void init_reflog_walk(struct reflog_walk_info **info);
+int add_reflog_for_walk(struct reflog_walk_info *info,
+ struct commit *commit, const char *name);
+void show_reflog_message(struct reflog_walk_info *info, int,
+ const struct date_mode *, int force_date);
+void get_reflog_message(struct strbuf *sb,
+ struct reflog_walk_info *reflog_info);
+const char *get_reflog_ident(struct reflog_walk_info *reflog_info);
+timestamp_t get_reflog_timestamp(struct reflog_walk_info *reflog_info);
+void get_reflog_selector(struct strbuf *sb,
+ struct reflog_walk_info *reflog_info,
+ const struct date_mode *dmode, int force_date,
+ int shorten);
-extern int reflog_walk_empty(struct reflog_walk_info *walk);
+int reflog_walk_empty(struct reflog_walk_info *walk);
struct commit *next_reflog_entry(struct reflog_walk_info *reflog_info);
return read_ref_full(refname, RESOLVE_REF_READING, oid, NULL);
}
+static int refs_ref_exists(struct ref_store *refs, const char *refname)
+{
+ return !!refs_resolve_ref_unsafe(refs, refname, RESOLVE_REF_READING, NULL, NULL);
+}
+
int ref_exists(const char *refname)
{
- return !!resolve_ref_unsafe(refname, RESOLVE_REF_READING, NULL, NULL);
+ return refs_ref_exists(get_main_ref_store(the_repository), refname);
}
static int match_ref_pattern(const char *refname,
* later free()ing) if the string passed in is a magic short-hand form
* to name a branch.
*/
-static char *substitute_branch_name(const char **string, int *len)
+static char *substitute_branch_name(struct repository *r,
+ const char **string, int *len)
{
struct strbuf buf = STRBUF_INIT;
- int ret = interpret_branch_name(*string, *len, &buf, 0);
+ int ret = repo_interpret_branch_name(r, *string, *len, &buf, 0);
if (ret == *len) {
size_t size;
return NULL;
}
-int dwim_ref(const char *str, int len, struct object_id *oid, char **ref)
+int repo_dwim_ref(struct repository *r, const char *str, int len,
+ struct object_id *oid, char **ref)
{
- char *last_branch = substitute_branch_name(&str, &len);
- int refs_found = expand_ref(str, len, oid, ref);
+ char *last_branch = substitute_branch_name(r, &str, &len);
+ int refs_found = expand_ref(r, str, len, oid, ref);
free(last_branch);
return refs_found;
}
-int expand_ref(const char *str, int len, struct object_id *oid, char **ref)
+int dwim_ref(const char *str, int len, struct object_id *oid, char **ref)
+{
+ return repo_dwim_ref(the_repository, str, len, oid, ref);
+}
+
+int expand_ref(struct repository *repo, const char *str, int len,
+ struct object_id *oid, char **ref)
{
const char **p, *r;
int refs_found = 0;
this_result = refs_found ? &oid_from_ref : oid;
strbuf_reset(&fullref);
strbuf_addf(&fullref, *p, len, str);
- r = resolve_ref_unsafe(fullref.buf, RESOLVE_REF_READING,
- this_result, &flag);
+ r = refs_resolve_ref_unsafe(get_main_ref_store(repo),
+ fullref.buf, RESOLVE_REF_READING,
+ this_result, &flag);
if (r) {
if (!refs_found++)
*ref = xstrdup(r);
return refs_found;
}
-int dwim_log(const char *str, int len, struct object_id *oid, char **log)
+int repo_dwim_log(struct repository *r, const char *str, int len,
+ struct object_id *oid, char **log)
{
- char *last_branch = substitute_branch_name(&str, &len);
+ struct ref_store *refs = get_main_ref_store(r);
+ char *last_branch = substitute_branch_name(r, &str, &len);
const char **p;
int logs_found = 0;
struct strbuf path = STRBUF_INIT;
strbuf_reset(&path);
strbuf_addf(&path, *p, len, str);
- ref = resolve_ref_unsafe(path.buf, RESOLVE_REF_READING,
- &hash, NULL);
+ ref = refs_resolve_ref_unsafe(refs, path.buf,
+ RESOLVE_REF_READING,
+ &hash, NULL);
if (!ref)
continue;
- if (reflog_exists(path.buf))
+ if (refs_reflog_exists(refs, path.buf))
it = path.buf;
- else if (strcmp(ref, path.buf) && reflog_exists(ref))
+ else if (strcmp(ref, path.buf) &&
+ refs_reflog_exists(refs, ref))
it = ref;
else
continue;
return logs_found;
}
+int dwim_log(const char *str, int len, struct object_id *oid, char **log)
+{
+ return repo_dwim_log(the_repository, str, len, oid, log);
+}
+
static int is_per_worktree_ref(const char *refname)
{
return !strcmp(refname, "HEAD") ||
return 1;
}
-int read_ref_at(const char *refname, unsigned int flags, timestamp_t at_time, int cnt,
+int read_ref_at(struct ref_store *refs, const char *refname,
+ unsigned int flags, timestamp_t at_time, int cnt,
struct object_id *oid, char **msg,
timestamp_t *cutoff_time, int *cutoff_tz, int *cutoff_cnt)
{
cb.cutoff_cnt = cutoff_cnt;
cb.oid = oid;
- for_each_reflog_ent_reverse(refname, read_ref_at_ent, &cb);
+ refs_for_each_reflog_ent_reverse(refs, refname, read_ref_at_ent, &cb);
if (!cb.reccnt) {
if (flags & GET_OID_QUIETLY)
if (cb.found_it)
return 0;
- for_each_reflog_ent(refname, read_ref_at_ent_oldest, &cb);
+ refs_for_each_reflog_ent(refs, refname, read_ref_at_ent_oldest, &cb);
return 1;
}
old_oid, flags, onerr);
}
-char *shorten_unambiguous_ref(const char *refname, int strict)
+char *refs_shorten_unambiguous_ref(struct ref_store *refs,
+ const char *refname, int strict)
{
int i;
static char **scanf_fmts;
strbuf_reset(&resolved_buf);
strbuf_addf(&resolved_buf, rule,
short_name_len, short_name);
- if (ref_exists(resolved_buf.buf))
+ if (refs_ref_exists(refs, resolved_buf.buf))
break;
}
return xstrdup(refname);
}
+char *shorten_unambiguous_ref(const char *refname, int strict)
+{
+ return refs_shorten_unambiguous_ref(get_main_ref_store(the_repository),
+ refname, strict);
+}
+
static struct string_list *hide_refs;
int parse_hide_refs_config(const char *var, const char *value, const char *section)
int is_branch(const char *refname);
-extern int refs_init_db(struct strbuf *err);
+int refs_init_db(struct strbuf *err);
/*
* If refname is a non-symbolic reference that refers to a tag object,
struct argv_array;
void expand_ref_prefix(struct argv_array *prefixes, const char *prefix);
-int expand_ref(const char *str, int len, struct object_id *oid, char **ref);
+int expand_ref(struct repository *r, const char *str, int len, struct object_id *oid, char **ref);
+int repo_dwim_ref(struct repository *r, const char *str, int len, struct object_id *oid, char **ref);
+int repo_dwim_log(struct repository *r, const char *str, int len, struct object_id *oid, char **ref);
int dwim_ref(const char *str, int len, struct object_id *oid, char **ref);
int dwim_log(const char *str, int len, struct object_id *oid, char **ref);
int safe_create_reflog(const char *refname, int force_create, struct strbuf *err);
/** Reads log for the value of ref during at_time. **/
-int read_ref_at(const char *refname, unsigned int flags,
+int read_ref_at(struct ref_store *refs,
+ const char *refname, unsigned int flags,
timestamp_t at_time, int cnt,
struct object_id *oid, char **msg,
timestamp_t *cutoff_time, int *cutoff_tz, int *cutoff_cnt);
const char *prettify_refname(const char *refname);
+char *refs_shorten_unambiguous_ref(struct ref_store *refs,
+ const char *refname, int strict);
char *shorten_unambiguous_ref(const char *refname, int strict);
/** rename ref, return 0 on success **/
}
}
+/*
+ * Manually add refs/bisect, refs/rewritten and refs/worktree, which, being
+ * per-worktree, might not appear in the directory listing for
+ * refs/ in the main repo.
+ */
+static void add_per_worktree_entries_to_dir(struct ref_dir *dir, const char *dirname)
+{
+ const char *prefixes[] = { "refs/bisect/", "refs/worktree/", "refs/rewritten/" };
+ int ip;
+
+ if (strcmp(dirname, "refs/"))
+ return;
+
+ for (ip = 0; ip < ARRAY_SIZE(prefixes); ip++) {
+ const char *prefix = prefixes[ip];
+ int prefix_len = strlen(prefix);
+ struct ref_entry *child_entry;
+ int pos;
+
+ pos = search_ref_dir(dir, prefix, prefix_len);
+ if (pos >= 0)
+ continue;
+ child_entry = create_dir_entry(dir->cache, prefix, prefix_len, 1);
+ add_entry_to_dir(dir, child_entry);
+ }
+}
+
/*
* Read the loose references from the namespace dirname into dir
* (without recursing). dirname must end with '/'. dir must be the
strbuf_release(&path);
closedir(d);
- /*
- * Manually add refs/bisect and refs/worktree, which, being
- * per-worktree, might not appear in the directory listing for
- * refs/ in the main repo.
- */
- if (!strcmp(dirname, "refs/")) {
- int pos = search_ref_dir(dir, "refs/bisect/", 12);
-
- if (pos < 0) {
- struct ref_entry *child_entry = create_dir_entry(
- dir->cache, "refs/bisect/", 12, 1);
- add_entry_to_dir(dir, child_entry);
- }
-
- pos = search_ref_dir(dir, "refs/worktree/", 11);
-
- if (pos < 0) {
- struct ref_entry *child_entry = create_dir_entry(
- dir->cache, "refs/worktree/", 11, 1);
- add_entry_to_dir(dir, child_entry);
- }
- }
+ add_per_worktree_entries_to_dir(dir, dirname);
}
static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs)
* Note that the new update will itself be subject to splitting when
* the iteration gets to it.
*/
-static int split_symref_update(struct files_ref_store *refs,
- struct ref_update *update,
+static int split_symref_update(struct ref_update *update,
const char *referent,
struct ref_transaction *transaction,
struct string_list *affected_refnames,
* of processing the split-off update, so we
* don't have to do it here.
*/
- ret = split_symref_update(refs, update,
+ ret = split_symref_update(update,
referent.buf, transaction,
affected_refnames, err);
if (ret)
if (is_packed_transaction_needed(refs->packed_ref_store,
packed_transaction)) {
ret = ref_transaction_prepare(packed_transaction, err);
+ /*
+ * A failure during the prepare step will abort
+ * itself, but not free. Do that now, and disconnect
+ * from the files_transaction so it does not try to
+ * abort us when we hit the cleanup code below.
+ */
+ if (ret) {
+ ref_transaction_free(packed_transaction);
+ backend_data->packed_transaction = NULL;
+ }
} else {
/*
* We can skip rewriting the `packed-refs`
* file. But we do need to leave it locked, so
* that somebody else doesn't pack a reference
* that we are trying to delete.
+ *
+ * We need to disconnect our transaction from
+ * backend_data, since the abort (whether successful or
+ * not) will free it.
*/
+ backend_data->packed_transaction = NULL;
if (ref_transaction_abort(packed_transaction, err)) {
ret = TRANSACTION_GENERIC_ERROR;
goto cleanup;
}
- backend_data->packed_transaction = NULL;
}
}
/* LHS */
if (!*item->src)
; /* empty is ok; it means "HEAD" */
- else if (llen == GIT_SHA1_HEXSZ && !get_oid_hex(item->src, &unused))
+ else if (llen == the_hash_algo->hexsz && !get_oid_hex(item->src, &unused))
item->exact_sha1 = 1; /* ok */
else if (!check_refname_format(item->src, flags))
; /* valid looking ref is ok */
#include "send-pack.h"
#include "protocol.h"
#include "quote.h"
+#include "transport.h"
static struct remote *remote;
/* always ends with a trailing slash */
else {
struct strbuf unquoted = STRBUF_INIT;
if (unquote_c_style(&unquoted, value, NULL) < 0)
- die("invalid quoting in push-option value");
+ die(_("invalid quoting in push-option value: '%s'"), value);
string_list_append_nodup(&options.push_options,
strbuf_detach(&unquoted, NULL));
}
if (data[i] == '\t')
mid = &data[i];
if (data[i] == '\n') {
- if (mid - start != 40)
- die("%sinfo/refs not valid: is this a git repository?",
- url.buf);
+ if (mid - start != the_hash_algo->hexsz)
+ die(_("%sinfo/refs not valid: is this a git repository?"),
+ transport_anonymize_url(url.buf));
data[i] = 0;
ref_name = mid + 1;
ref = alloc_ref(ref_name);
PACKET_READ_CHOMP_NEWLINE |
PACKET_READ_DIE_ON_ERR_PACKET);
if (packet_reader_read(&reader) != PACKET_READ_NORMAL)
- die("invalid server response; expected service, got flush packet");
+ die(_("invalid server response; expected service, got flush packet"));
if (skip_prefix(reader.line, "# service=", &p) && !strcmp(p, service)) {
/*
d->proto_git = 1;
} else {
- die("invalid server response; got '%s'", reader.line);
+ die(_("invalid server response; got '%s'"), reader.line);
}
}
break;
case HTTP_MISSING_TARGET:
show_http_message(&type, &charset, &buffer);
- die("repository '%s' not found", url.buf);
+ die(_("repository '%s' not found"),
+ transport_anonymize_url(url.buf));
case HTTP_NOAUTH:
show_http_message(&type, &charset, &buffer);
- die("Authentication failed for '%s'", url.buf);
+ die(_("Authentication failed for '%s'"),
+ transport_anonymize_url(url.buf));
default:
show_http_message(&type, &charset, &buffer);
- die("unable to access '%s': %s", url.buf, curl_errorstr);
+ die(_("unable to access '%s': %s"),
+ transport_anonymize_url(url.buf), curl_errorstr);
}
- if (options.verbosity && !starts_with(refs_url.buf, url.buf))
- warning(_("redirecting to %s"), url.buf);
+ if (options.verbosity && !starts_with(refs_url.buf, url.buf)) {
+ char *u = transport_anonymize_url(url.buf);
+ warning(_("redirecting to %s"), u);
+ free(u);
+ }
last= xcalloc(1, sizeof(*last_discovery));
last->service = xstrdup(service);
struct rpc_state {
const char *service_name;
- const char **argv;
- struct strbuf *stdin_preamble;
char *service_url;
char *hdr_content_type;
char *hdr_accept;
int in;
int out;
int any_written;
- struct strbuf result;
unsigned gzip_request : 1;
unsigned initial_buffer : 1;
+
+ /*
+ * Whenever a pkt-line is read into buf, append the 4 characters
+ * denoting its length before appending the payload.
+ */
+ unsigned write_line_lengths : 1;
+
+ /*
+ * Used by rpc_out; initialize to 0. This is true if a flush has been
+ * read, but the corresponding line length (if write_line_lengths is
+ * true) and EOF have not been sent to libcurl. Since each flush marks
+ * the end of a request, each flush must be completely sent before any
+ * further reading occurs.
+ */
+ unsigned flush_read_but_not_sent : 1;
};
+/*
+ * Appends the result of reading from rpc->out to the string represented by
+ * rpc->buf and rpc->len if there is enough space. Returns 1 if there was
+ * enough space, 0 otherwise.
+ *
+ * If rpc->write_line_lengths is true, appends the line length as a 4-byte
+ * hexadecimal string before appending the result described above.
+ *
+ * Writes the total number of bytes appended into appended.
+ */
+static int rpc_read_from_out(struct rpc_state *rpc, int options,
+ size_t *appended,
+			     enum packet_read_status *status)
+{
+ size_t left;
+ char *buf;
+ int pktlen_raw;
+
+ if (rpc->write_line_lengths) {
+ left = rpc->alloc - rpc->len - 4;
+ buf = rpc->buf + rpc->len + 4;
+ } else {
+ left = rpc->alloc - rpc->len;
+ buf = rpc->buf + rpc->len;
+ }
+
+ if (left < LARGE_PACKET_MAX)
+ return 0;
+
+ *status = packet_read_with_status(rpc->out, NULL, NULL, buf,
+ left, &pktlen_raw, options);
+ if (*status != PACKET_READ_EOF) {
+ *appended = pktlen_raw + (rpc->write_line_lengths ? 4 : 0);
+ rpc->len += *appended;
+ }
+
+ if (rpc->write_line_lengths) {
+ switch (*status) {
+ case PACKET_READ_EOF:
+ if (!(options & PACKET_READ_GENTLE_ON_EOF))
+ die(_("shouldn't have EOF when not gentle on EOF"));
+ break;
+ case PACKET_READ_NORMAL:
+ set_packet_header(buf - 4, *appended);
+ break;
+ case PACKET_READ_DELIM:
+ memcpy(buf - 4, "0001", 4);
+ break;
+ case PACKET_READ_FLUSH:
+ memcpy(buf - 4, "0000", 4);
+ break;
+ }
+ }
+
+ return 1;
+}
+
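When write_line_lengths is set, the four bytes reserved in front of each payload are filled with the standard pkt-line length header: the total length (payload plus the four header bytes themselves) as four lowercase hex digits, with "0000" and "0001" reserved for flush and delim packets. A minimal sketch of that encoding, assuming lengths below 65536 as enforced by LARGE_PACKET_MAX (the real code uses set_packet_header() from pkt-line.c; the helper name and signature here are illustrative):

    static void encode_pkt_length(char *header, size_t payload_len)
    {
    	static const char hex[] = "0123456789abcdef";
    	size_t total = payload_len + 4;	/* the length field counts itself */

    	header[0] = hex[(total >> 12) & 0xf];
    	header[1] = hex[(total >> 8) & 0xf];
    	header[2] = hex[(total >> 4) & 0xf];
    	header[3] = hex[total & 0xf];
    }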
static size_t rpc_out(void *ptr, size_t eltsize,
size_t nmemb, void *buffer_)
{
size_t max = eltsize * nmemb;
struct rpc_state *rpc = buffer_;
size_t avail = rpc->len - rpc->pos;
+ enum packet_read_status status;
if (!avail) {
rpc->initial_buffer = 0;
- avail = packet_read(rpc->out, NULL, NULL, rpc->buf, rpc->alloc, 0);
- if (!avail)
- return 0;
+ rpc->len = 0;
rpc->pos = 0;
- rpc->len = avail;
+ if (!rpc->flush_read_but_not_sent) {
+ if (!rpc_read_from_out(rpc, 0, &avail, &status))
+ BUG("The entire rpc->buf should be larger than LARGE_PACKET_MAX");
+ if (status == PACKET_READ_FLUSH)
+ rpc->flush_read_but_not_sent = 1;
+ }
+ /*
+ * If flush_read_but_not_sent is true, we have already read one
+ * full request but have not fully sent it + EOF, which is why
+ * we need to refrain from reading.
+ */
+ }
+ if (rpc->flush_read_but_not_sent) {
+ if (!avail) {
+ /*
+ * The line length either does not need to be sent at
+ * all or has already been completely sent. Now we can
+ * return 0, indicating EOF, meaning that the flush has
+ * been fully sent.
+ */
+ rpc->flush_read_but_not_sent = 0;
+ return 0;
+ }
+ /*
+		 * If avail is non-zero, the line length for the flush still
+ * hasn't been fully sent. Proceed with sending the line
+ * length.
+ */
}
if (max < avail)
rpc->pos = 0;
return CURLIOE_OK;
}
- error("unable to rewind rpc post data - try increasing http.postBuffer");
+ error(_("unable to rewind rpc post data - try increasing http.postBuffer"));
return CURLIOE_FAILRESTART;
default:
strbuf_addstr(&msg, curl_errorstr);
}
}
- error("RPC failed; %s", msg.buf);
+ error(_("RPC failed; %s"), msg.buf);
strbuf_release(&msg);
}
{
uintmax_t size = len;
if (size > maximum_signed_value_of_type(curl_off_t))
- die("cannot handle pushes this big");
+ die(_("cannot handle pushes this big"));
return (curl_off_t)size;
}
-static int post_rpc(struct rpc_state *rpc)
+/*
+ * If flush_received is true, do not attempt to read any more; just use what's
+ * in rpc->buf.
+ */
+static int post_rpc(struct rpc_state *rpc, int flush_received)
{
struct active_request_slot *slot;
struct curl_slist *headers = http_copy_default_headers();
* allocated buffer space we can use HTTP/1.0 and avoid the
* chunked encoding mess.
*/
- while (1) {
- size_t left = rpc->alloc - rpc->len;
- char *buf = rpc->buf + rpc->len;
- int n;
-
- if (left < LARGE_PACKET_MAX) {
- large_request = 1;
- use_gzip = 0;
- break;
+ if (!flush_received) {
+ while (1) {
+ size_t n;
+ enum packet_read_status status;
+
+ if (!rpc_read_from_out(rpc, 0, &n, &status)) {
+ large_request = 1;
+ use_gzip = 0;
+ break;
+ }
+ if (status == PACKET_READ_FLUSH)
+ break;
}
-
- n = packet_read(rpc->out, NULL, NULL, buf, left, 0);
- if (!n)
- break;
- rpc->len += n;
}
if (large_request) {
ret = git_deflate(&stream, Z_FINISH);
if (ret != Z_STREAM_END)
- die("cannot deflate request; zlib deflate error %d", ret);
+ die(_("cannot deflate request; zlib deflate error %d"), ret);
ret = git_deflate_end_gently(&stream);
if (ret != Z_OK)
- die("cannot deflate request; zlib end error %d", ret);
+ die(_("cannot deflate request; zlib end error %d"), ret);
gzip_size = stream.total_out;
return err;
}
-static int rpc_service(struct rpc_state *rpc, struct discovery *heads)
+static int rpc_service(struct rpc_state *rpc, struct discovery *heads,
+ const char **client_argv, const struct strbuf *preamble,
+ struct strbuf *rpc_result)
{
const char *svc = rpc->service_name;
struct strbuf buf = STRBUF_INIT;
- struct strbuf *preamble = rpc->stdin_preamble;
struct child_process client = CHILD_PROCESS_INIT;
int err = 0;
client.in = -1;
client.out = -1;
client.git_cmd = 1;
- client.argv = rpc->argv;
+ client.argv = client_argv;
if (start_command(&client))
exit(1);
- if (preamble)
- write_or_die(client.in, preamble->buf, preamble->len);
+ write_or_die(client.in, preamble->buf, preamble->len);
if (heads)
write_or_die(client.in, heads->buf, heads->len);
rpc->buf = xmalloc(rpc->alloc);
rpc->in = client.in;
rpc->out = client.out;
- strbuf_init(&rpc->result, 0);
strbuf_addf(&buf, "%s%s", url.buf, svc);
rpc->service_url = strbuf_detach(&buf, NULL);
break;
rpc->pos = 0;
rpc->len = n;
- err |= post_rpc(rpc);
+ err |= post_rpc(rpc, 0);
}
close(client.in);
client.in = -1;
if (!err) {
- strbuf_read(&rpc->result, client.out, 0);
+ strbuf_read(rpc_result, client.out, 0);
} else {
char buf[4096];
for (;;)
ALLOC_ARRAY(targets, nr_heads);
if (options.depth || options.deepen_since)
- die("dumb http transport does not support shallow capabilities");
+ die(_("dumb http transport does not support shallow capabilities"));
for (i = 0; i < nr_heads; i++)
targets[i] = xstrdup(oid_to_hex(&to_fetch[i]->old_oid));
free(targets[i]);
free(targets);
- return ret ? error("fetch failed.") : 0;
+ return ret ? error(_("fetch failed.")) : 0;
}
static int fetch_git(struct discovery *heads,
struct strbuf preamble = STRBUF_INIT;
int i, err;
struct argv_array args = ARGV_ARRAY_INIT;
+ struct strbuf rpc_result = STRBUF_INIT;
argv_array_pushl(&args, "fetch-pack", "--stateless-rpc",
"--stdin", "--lock-pack", NULL);
for (i = 0; i < nr_heads; i++) {
struct ref *ref = to_fetch[i];
if (!*ref->name)
- die("cannot fetch by sha1 over smart http");
+ die(_("cannot fetch by sha1 over smart http"));
packet_buf_write(&preamble, "%s %s\n",
oid_to_hex(&ref->old_oid), ref->name);
}
memset(&rpc, 0, sizeof(rpc));
rpc.service_name = "git-upload-pack",
- rpc.argv = args.argv;
- rpc.stdin_preamble = &preamble;
rpc.gzip_request = 1;
- err = rpc_service(&rpc, heads);
- if (rpc.result.len)
- write_or_die(1, rpc.result.buf, rpc.result.len);
- strbuf_release(&rpc.result);
+ err = rpc_service(&rpc, heads, args.argv, &preamble, &rpc_result);
+ if (rpc_result.len)
+ write_or_die(1, rpc_result.buf, rpc_result.len);
+ strbuf_release(&rpc_result);
strbuf_release(&preamble);
argv_array_clear(&args);
return err;
const char *name;
struct ref *ref;
struct object_id old_oid;
+ const char *q;
- if (get_oid_hex(p, &old_oid))
- die("protocol error: expected sha/ref, got %s'", p);
- if (p[GIT_SHA1_HEXSZ] == ' ')
- name = p + GIT_SHA1_HEXSZ + 1;
- else if (!p[GIT_SHA1_HEXSZ])
+ if (parse_oid_hex(p, &old_oid, &q))
+				die(_("protocol error: expected sha/ref, got '%s'"), p);
+ if (*q == ' ')
+ name = q + 1;
+ else if (!*q)
name = "";
else
- die("protocol error: expected sha/ref, got %s'", p);
+				die(_("protocol error: expected sha/ref, got '%s'"), p);
ref = alloc_ref(name);
oidcpy(&ref->old_oid, &old_oid);
to_fetch[nr_heads++] = ref;
}
else
- die("http transport does not support %s", buf->buf);
+ die(_("http transport does not support %s"), buf->buf);
strbuf_reset(buf);
if (strbuf_getline_lf(buf, stdin) == EOF)
argv_array_push(&child.args, specs[i]);
if (run_command(&child))
- die("git-http-push failed");
+ die(_("git-http-push failed"));
return 0;
}
struct argv_array args;
struct string_list_item *cas_option;
struct strbuf preamble = STRBUF_INIT;
+ struct strbuf rpc_result = STRBUF_INIT;
argv_array_init(&args);
argv_array_pushl(&args, "send-pack", "--stateless-rpc", "--helper-status",
memset(&rpc, 0, sizeof(rpc));
rpc.service_name = "git-receive-pack",
- rpc.argv = args.argv;
- rpc.stdin_preamble = &preamble;
- err = rpc_service(&rpc, heads);
- if (rpc.result.len)
- write_or_die(1, rpc.result.buf, rpc.result.len);
- strbuf_release(&rpc.result);
+ err = rpc_service(&rpc, heads, args.argv, &preamble, &rpc_result);
+ if (rpc_result.len)
+ write_or_die(1, rpc_result.buf, rpc_result.len);
+ strbuf_release(&rpc_result);
strbuf_release(&preamble);
argv_array_clear(&args);
return err;
specs[nr_spec++] = xstrdup(buf->buf + 5);
}
else
- die("http transport does not support %s", buf->buf);
+ die(_("http transport does not support %s"), buf->buf);
strbuf_reset(buf);
if (strbuf_getline_lf(buf, stdin) == EOF)
free(specs);
}
-/*
- * Used to represent the state of a connection to an HTTP server when
- * communicating using git's wire-protocol version 2.
- */
-struct proxy_state {
- char *service_name;
- char *service_url;
- struct curl_slist *headers;
- struct strbuf request_buffer;
- int in;
- int out;
- struct packet_reader reader;
- size_t pos;
- int seen_flush;
-};
-
-static void proxy_state_init(struct proxy_state *p, const char *service_name,
- enum protocol_version version)
-{
- struct strbuf buf = STRBUF_INIT;
-
- memset(p, 0, sizeof(*p));
- p->service_name = xstrdup(service_name);
-
- p->in = 0;
- p->out = 1;
- strbuf_init(&p->request_buffer, 0);
-
- strbuf_addf(&buf, "%s%s", url.buf, p->service_name);
- p->service_url = strbuf_detach(&buf, NULL);
-
- p->headers = http_copy_default_headers();
-
- strbuf_addf(&buf, "Content-Type: application/x-%s-request", p->service_name);
- p->headers = curl_slist_append(p->headers, buf.buf);
- strbuf_reset(&buf);
-
- strbuf_addf(&buf, "Accept: application/x-%s-result", p->service_name);
- p->headers = curl_slist_append(p->headers, buf.buf);
- strbuf_reset(&buf);
-
- p->headers = curl_slist_append(p->headers, "Transfer-Encoding: chunked");
-
- /* Add the Git-Protocol header */
- if (get_protocol_http_header(version, &buf))
- p->headers = curl_slist_append(p->headers, buf.buf);
-
- packet_reader_init(&p->reader, p->in, NULL, 0,
- PACKET_READ_GENTLE_ON_EOF |
- PACKET_READ_DIE_ON_ERR_PACKET);
-
- strbuf_release(&buf);
-}
-
-static void proxy_state_clear(struct proxy_state *p)
-{
- free(p->service_name);
- free(p->service_url);
- curl_slist_free_all(p->headers);
- strbuf_release(&p->request_buffer);
-}
-
-/*
- * CURLOPT_READFUNCTION callback function.
- * Attempts to copy over a single packet-line at a time into the
- * curl provided buffer.
- */
-static size_t proxy_in(char *buffer, size_t eltsize,
- size_t nmemb, void *userdata)
-{
- size_t max;
- struct proxy_state *p = userdata;
- size_t avail = p->request_buffer.len - p->pos;
-
-
- if (eltsize != 1)
- BUG("curl read callback called with size = %"PRIuMAX" != 1",
- (uintmax_t)eltsize);
- max = nmemb;
-
- if (!avail) {
- if (p->seen_flush) {
- p->seen_flush = 0;
- return 0;
- }
-
- strbuf_reset(&p->request_buffer);
- switch (packet_reader_read(&p->reader)) {
- case PACKET_READ_EOF:
- die("unexpected EOF when reading from parent process");
- case PACKET_READ_NORMAL:
- packet_buf_write_len(&p->request_buffer, p->reader.line,
- p->reader.pktlen);
- break;
- case PACKET_READ_DELIM:
- packet_buf_delim(&p->request_buffer);
- break;
- case PACKET_READ_FLUSH:
- packet_buf_flush(&p->request_buffer);
- p->seen_flush = 1;
- break;
- }
- p->pos = 0;
- avail = p->request_buffer.len;
- }
-
- if (max < avail)
- avail = max;
- memcpy(buffer, p->request_buffer.buf + p->pos, avail);
- p->pos += avail;
- return avail;
-}
-
-static size_t proxy_out(char *buffer, size_t eltsize,
- size_t nmemb, void *userdata)
-{
- size_t size;
- struct proxy_state *p = userdata;
-
- if (eltsize != 1)
- BUG("curl read callback called with size = %"PRIuMAX" != 1",
- (uintmax_t)eltsize);
- size = nmemb;
-
- write_or_die(p->out, buffer, size);
- return size;
-}
-
-/* Issues a request to the HTTP server configured in `p` */
-static int proxy_request(struct proxy_state *p)
-{
- struct active_request_slot *slot;
-
- slot = get_active_slot();
-
- curl_easy_setopt(slot->curl, CURLOPT_ENCODING, "");
- curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 0);
- curl_easy_setopt(slot->curl, CURLOPT_POST, 1);
- curl_easy_setopt(slot->curl, CURLOPT_URL, p->service_url);
- curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, p->headers);
-
- /* Setup function to read request from client */
- curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, proxy_in);
- curl_easy_setopt(slot->curl, CURLOPT_READDATA, p);
-
- /* Setup function to write server response to client */
- curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, proxy_out);
- curl_easy_setopt(slot->curl, CURLOPT_WRITEDATA, p);
-
- if (run_slot(slot, NULL) != HTTP_OK)
- return -1;
-
- return 0;
-}
-
static int stateless_connect(const char *service_name)
{
struct discovery *discover;
- struct proxy_state p;
+ struct rpc_state rpc;
+ struct strbuf buf = STRBUF_INIT;
/*
* Run the info/refs request and see if the server supports protocol
fflush(stdout);
}
- proxy_state_init(&p, service_name, discover->version);
+ rpc.service_name = service_name;
+ rpc.service_url = xstrfmt("%s%s", url.buf, rpc.service_name);
+ rpc.hdr_content_type = xstrfmt("Content-Type: application/x-%s-request", rpc.service_name);
+ rpc.hdr_accept = xstrfmt("Accept: application/x-%s-result", rpc.service_name);
+ if (get_protocol_http_header(discover->version, &buf)) {
+ rpc.protocol_header = strbuf_detach(&buf, NULL);
+ } else {
+ rpc.protocol_header = NULL;
+ strbuf_release(&buf);
+ }
+ rpc.buf = xmalloc(http_post_buffer);
+ rpc.alloc = http_post_buffer;
+ rpc.len = 0;
+ rpc.pos = 0;
+ rpc.in = 1;
+ rpc.out = 0;
+ rpc.any_written = 0;
+ rpc.gzip_request = 1;
+ rpc.initial_buffer = 0;
+ rpc.write_line_lengths = 1;
+ rpc.flush_read_but_not_sent = 0;
/*
* Dump the capability listing that we got from the server earlier
* during the info/refs request.
*/
- write_or_die(p.out, discover->buf, discover->len);
+ write_or_die(rpc.in, discover->buf, discover->len);
- /* Peek the next packet line. Until we see EOF keep sending POSTs */
- while (packet_reader_peek(&p.reader) != PACKET_READ_EOF) {
- if (proxy_request(&p)) {
+ /* Until we see EOF keep sending POSTs */
+ while (1) {
+ size_t avail;
+ enum packet_read_status status;
+
+ if (!rpc_read_from_out(&rpc, PACKET_READ_GENTLE_ON_EOF, &avail,
+ &status))
+ BUG("The entire rpc->buf should be larger than LARGE_PACKET_MAX");
+ if (status == PACKET_READ_EOF)
+ break;
+ if (post_rpc(&rpc, status == PACKET_READ_FLUSH))
/* We would have an err here */
break;
- }
+ /* Reset the buffer for next request */
+ rpc.len = 0;
}
- proxy_state_clear(&p);
+ free(rpc.service_url);
+ free(rpc.hdr_content_type);
+ free(rpc.hdr_accept);
+ free(rpc.protocol_header);
+ free(rpc.buf);
+ strbuf_release(&buf);
+
return 0;
}
setup_git_directory_gently(&nongit);
if (argc < 2) {
- error("remote-curl: usage: git remote-curl <remote> [<url>]");
+ error(_("remote-curl: usage: git remote-curl <remote> [<url>]"));
return 1;
}
string_list_init(&options.deepen_not, 1);
string_list_init(&options.push_options, 1);
+ /*
+	 * Just report "remote-curl" here, folding all the various aliases
+	 * ("git-remote-http", "git-remote-https", etc.) into it, since they
+	 * are all just copies of the same actual executable.
+ */
+ trace2_cmd_name("remote-curl");
+
remote = remote_get(argv[1]);
if (argc > 2) {
if (strbuf_getline_lf(&buf, stdin) == EOF) {
if (ferror(stdin))
- error("remote-curl: error reading command stream from git");
+ error(_("remote-curl: error reading command stream from git"));
return 1;
}
if (buf.len == 0)
break;
if (starts_with(buf.buf, "fetch ")) {
if (nongit)
- die("remote-curl: fetch attempted without a local repo");
+ die(_("remote-curl: fetch attempted without a local repo"));
parse_fetch(&buf);
} else if (!strcmp(buf.buf, "list") || starts_with(buf.buf, "list ")) {
if (!stateless_connect(arg))
break;
} else {
- error("remote-curl: unknown command '%s' from git", buf.buf);
+ error(_("remote-curl: unknown command '%s' from git"), buf.buf);
return 1;
}
strbuf_reset(&buf);
return ret;
}
-static void free_ref(struct ref *ref)
+void free_one_ref(struct ref *ref)
{
if (!ref)
return;
- free_ref(ref->peer_ref);
+ free_one_ref(ref->peer_ref);
free(ref->remote_status);
free(ref->symref);
free(ref);
struct ref *next;
while (ref) {
next = ref->next;
- free_ref(ref);
+ free_one_ref(ref);
ref = next;
}
}
}
/*
- * Lookup the upstream branch for the given branch and if present, optionally
- * compute the commit ahead/behind values for the pair.
+ * Compute the commit ahead/behind values for the pair branch_name, base.
*
* If abf is AHEAD_BEHIND_FULL, compute the full ahead/behind and return the
* counts in *num_ours and *num_theirs. If abf is AHEAD_BEHIND_QUICK, skip
* the (potentially expensive) a/b computation (*num_ours and *num_theirs are
* set to zero).
*
- * The name of the upstream branch (or NULL if no upstream is defined) is
- * returned via *upstream_name, if it is not itself NULL.
- *
- * Returns -1 if num_ours and num_theirs could not be filled in (e.g., no
- * upstream defined, or ref does not exist). Returns 0 if the commits are
- * identical. Returns 1 if commits are different.
+ * Returns -1 if num_ours and num_theirs could not be filled in (e.g., ref
+ * does not exist). Returns 0 if the commits are identical. Returns 1 if
+ * commits are different.
*/
-int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
- const char **upstream_name, enum ahead_behind_flags abf)
+
+static int stat_branch_pair(const char *branch_name, const char *base,
+ int *num_ours, int *num_theirs,
+ enum ahead_behind_flags abf)
{
struct object_id oid;
struct commit *ours, *theirs;
struct rev_info revs;
- const char *base;
struct argv_array argv = ARGV_ARRAY_INIT;
- /* Cannot stat unless we are marked to build on top of somebody else. */
- base = branch_get_upstream(branch, NULL);
- if (upstream_name)
- *upstream_name = base;
- if (!base)
- return -1;
-
/* Cannot stat if what we used to build on no longer exists */
if (read_ref(base, &oid))
return -1;
if (!theirs)
return -1;
- if (read_ref(branch->refname, &oid))
+ if (read_ref(branch_name, &oid))
return -1;
ours = lookup_commit_reference(the_repository, &oid);
if (!ours)
if (abf == AHEAD_BEHIND_QUICK)
return 1;
if (abf != AHEAD_BEHIND_FULL)
- BUG("stat_tracking_info: invalid abf '%d'", abf);
+ BUG("stat_branch_pair: invalid abf '%d'", abf);
/* Run "rev-list --left-right ours...theirs" internally... */
argv_array_push(&argv, ""); /* ignored */
return 1;
}
+/*
+ * Lookup the tracking branch for the given branch and if present, optionally
+ * compute the commit ahead/behind values for the pair.
+ *
+ * If for_push is true, the tracking branch refers to the push branch,
+ * otherwise it refers to the upstream branch.
+ *
+ * The name of the tracking branch (or NULL if it is not defined) is
+ * returned via *tracking_name, if it is not itself NULL.
+ *
+ * If abf is AHEAD_BEHIND_FULL, compute the full ahead/behind and return the
+ * counts in *num_ours and *num_theirs. If abf is AHEAD_BEHIND_QUICK, skip
+ * the (potentially expensive) a/b computation (*num_ours and *num_theirs are
+ * set to zero).
+ *
+ * Returns -1 if num_ours and num_theirs could not be filled in (e.g., no
+ * upstream defined, or ref does not exist). Returns 0 if the commits are
+ * identical. Returns 1 if commits are different.
+ */
+int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
+ const char **tracking_name, int for_push,
+ enum ahead_behind_flags abf)
+{
+ const char *base;
+
+ /* Cannot stat unless we are marked to build on top of somebody else. */
+ base = for_push ? branch_get_push(branch, NULL) :
+ branch_get_upstream(branch, NULL);
+ if (tracking_name)
+ *tracking_name = base;
+ if (!base)
+ return -1;
+
+ return stat_branch_pair(branch->refname, base, num_ours, num_theirs, abf);
+}
+
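A hypothetical caller fragment (the variable names and the printed message are illustrative, not taken from an existing caller) showing the new for_push argument: passing 1 compares against the push destination resolved by branch_get_push(), while 0 keeps the historical upstream comparison:

    struct branch *branch = branch_get(NULL);	/* currently checked-out branch */
    const char *push_dest;
    int ours, theirs;

    /* ahead/behind relative to where "git push" would go (for_push = 1) */
    if (stat_tracking_info(branch, &ours, &theirs, &push_dest, 1,
    			   AHEAD_BEHIND_FULL) > 0)
    	printf("ahead %d, behind %d of %s\n", ours, theirs, push_dest);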
/*
* Return true when there is anything to report, otherwise false.
*/
char *base;
int upstream_is_gone = 0;
- sti = stat_tracking_info(branch, &ours, &theirs, &full_base, abf);
+ sti = stat_tracking_info(branch, &ours, &theirs, &full_base, 0, abf);
if (sti < 0) {
if (!full_base)
return 0;
#define REF_HEADS (1u << 1)
#define REF_TAGS (1u << 2)
-extern struct ref *find_ref_by_name(const struct ref *list, const char *name);
+struct ref *find_ref_by_name(const struct ref *list, const char *name);
struct ref *alloc_ref(const char *name);
struct ref *copy_ref(const struct ref *ref);
struct ref *copy_ref_list(const struct ref *ref);
void sort_ref_list(struct ref **, int (*cmp)(const void *, const void *));
-extern int count_refspec_match(const char *, struct ref *refs, struct ref **matched_ref);
+int count_refspec_match(const char *, struct ref *refs, struct ref **matched_ref);
int ref_compare_name(const void *, const void *);
int check_ref_type(const struct ref *ref, int flags);
/*
- * Frees the entire list and peers of elements.
+ * Free a single ref and its peer, or an entire list of refs and their peers,
+ * respectively.
*/
+void free_one_ref(struct ref *ref);
void free_refs(struct ref *ref);
struct oid_array;
struct packet_reader;
struct argv_array;
struct string_list;
-extern struct ref **get_remote_heads(struct packet_reader *reader,
- struct ref **list, unsigned int flags,
- struct oid_array *extra_have,
- struct oid_array *shallow_points);
+struct ref **get_remote_heads(struct packet_reader *reader,
+ struct ref **list, unsigned int flags,
+ struct oid_array *extra_have,
+ struct oid_array *shallow_points);
/* Used for protocol v2 in order to retrieve refs from a remote */
-extern struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
- struct ref **list, int for_push,
- const struct argv_array *ref_prefixes,
- const struct string_list *server_options);
+struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
+ struct ref **list, int for_push,
+ const struct argv_array *ref_prefixes,
+ const struct string_list *server_options);
int resolve_remote_symref(struct ref *ref, struct ref *list);
/* Reporting of tracking info */
int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
- const char **upstream_name, enum ahead_behind_flags abf);
+ const char **upstream_name, int for_push,
+ enum ahead_behind_flags abf);
int format_tracking_info(struct branch *branch, struct strbuf *sb,
enum ahead_behind_flags abf);
int alloc;
};
-extern int parseopt_push_cas_option(const struct option *, const char *arg, int unset);
+int parseopt_push_cas_option(const struct option *, const char *arg, int unset);
-extern int is_empty_cas(const struct push_cas_option *);
+int is_empty_cas(const struct push_cas_option *);
void apply_push_cas(struct push_cas_option *, struct remote *, struct ref *);
#endif
* This internal function is only declared here for the benefit of
* lookup_replace_object(). Please do not call it directly.
*/
-extern const struct object_id *do_lookup_replace_object(struct repository *r,
- const struct object_id *oid);
+const struct object_id *do_lookup_replace_object(struct repository *r,
+ const struct object_id *oid);
/*
* If object sha1 should be replaced, return the replacement object's
void repo_set_worktree(struct repository *repo, const char *path)
{
repo->worktree = real_pathdup(path, 1);
+
+ trace2_def_repo(repo);
}
static int read_and_verify_repository_format(struct repository_format *format,
const char *gitdir,
const char *worktree)
{
- struct repository_format format;
+ struct repository_format format = REPOSITORY_FORMAT_INIT;
memset(repo, 0, sizeof(*repo));
repo->objects = raw_object_store_new();
if (worktree)
repo_set_worktree(repo, worktree);
+ clear_repository_format(&format);
return 0;
error:
/* Repository's current hash algorithm, as serialized on disk. */
const struct git_hash_algo *hash_algo;
+ /* A unique-id for tracing purposes. */
+ int trace2_repo_id;
+
/* Configurations */
/* Indicate if a repository has a different 'commondir' from 'gitdir' */
struct object_id oid[3];
};
-extern void record_resolve_undo(struct index_state *, struct cache_entry *);
-extern void resolve_undo_write(struct strbuf *, struct string_list *);
-extern struct string_list *resolve_undo_read(const char *, unsigned long);
-extern void resolve_undo_clear_index(struct index_state *);
-extern int unmerge_index_entry_at(struct index_state *, int);
-extern void unmerge_index(struct index_state *, const struct pathspec *);
-extern void unmerge_marked_index(struct index_state *);
+void record_resolve_undo(struct index_state *, struct cache_entry *);
+void resolve_undo_write(struct strbuf *, struct string_list *);
+struct string_list *resolve_undo_read(const char *, unsigned long);
+void resolve_undo_clear_index(struct index_state *);
+int unmerge_index_entry_at(struct index_state *, int);
+void unmerge_index(struct index_state *, const struct pathspec *);
+void unmerge_marked_index(struct index_state *);
#endif
commit->object.flags |= TREESAME;
}
-static void commit_list_insert_by_date_cached(struct commit *p, struct commit_list **head,
- struct commit_list *cached_base, struct commit_list **cache)
-{
- struct commit_list *new_entry;
-
- if (cached_base && p->date < cached_base->item->date)
- new_entry = commit_list_insert_by_date(p, &cached_base->next);
- else
- new_entry = commit_list_insert_by_date(p, head);
-
- if (cache && (!*cache || p->date < (*cache)->item->date))
- *cache = new_entry;
-}
-
static int process_parents(struct rev_info *revs, struct commit *commit,
- struct commit_list **list, struct commit_list **cache_ptr)
+ struct commit_list **list, struct prio_queue *queue)
{
struct commit_list *parent = commit->parents;
unsigned left_flag;
- struct commit_list *cached_base = cache_ptr ? *cache_ptr : NULL;
if (commit->object.flags & ADDED)
return 0;
continue;
p->object.flags |= SEEN;
if (list)
- commit_list_insert_by_date_cached(p, list, cached_base, cache_ptr);
+ commit_list_insert_by_date(p, list);
+ if (queue)
+ prio_queue_put(queue, p);
}
return 0;
}
if (!(p->object.flags & SEEN)) {
p->object.flags |= SEEN;
if (list)
- commit_list_insert_by_date_cached(p, list, cached_base, cache_ptr);
+ commit_list_insert_by_date(p, list);
+ if (queue)
+ prio_queue_put(queue, p);
}
if (revs->first_parent_only)
break;
return 0;
}
-static void read_pathspec_from_stdin(struct rev_info *revs, struct strbuf *sb,
+static void read_pathspec_from_stdin(struct strbuf *sb,
struct argv_array *prune)
{
while (strbuf_getline(sb, stdin) != EOF)
die("bad revision '%s'", sb.buf);
}
if (seen_dashdash)
- read_pathspec_from_stdin(revs, &sb, prune);
+ read_pathspec_from_stdin(&sb, prune);
strbuf_release(&sb);
warn_on_object_refname_ambiguity = save_warning;
revs->diff = 1;
revs->dense_combined_merges = 0;
revs->combine_merges = 1;
+ } else if (!strcmp(arg, "--combined-all-paths")) {
+ revs->diff = 1;
+ revs->combined_all_paths = 1;
} else if (!strcmp(arg, "--cc")) {
revs->diff = 1;
revs->dense_combined_merges = 1;
}
if (revs->combine_merges)
revs->ignore_merges = 0;
+ if (revs->combined_all_paths && !revs->combine_merges)
+ die("--combined-all-paths makes no sense without -c or --cc");
+
revs->diffopt.abbrev = revs->abbrev;
if (revs->line_level_traverse) {
if (revs->first_parent_only && revs->bisect)
die(_("--first-parent is incompatible with --bisect"));
+ if (revs->line_level_traverse &&
+ (revs->diffopt.output_format & ~(DIFF_FORMAT_PATCH | DIFF_FORMAT_NO_OUTPUT)))
+ die(_("-L does not yet support diff formats besides -p and -s"));
+
if (revs->expand_tabs_in_log < 0)
revs->expand_tabs_in_log = revs->expand_tabs_in_log_default;
return st;
}
-static int mark_redundant_parents(struct rev_info *revs, struct commit *commit)
+static int mark_redundant_parents(struct commit *commit)
{
struct commit_list *h = reduce_heads(commit->parents);
int i = 0, marked = 0;
return marked;
}
-static int mark_treesame_root_parents(struct rev_info *revs, struct commit *commit)
+static int mark_treesame_root_parents(struct commit *commit)
{
struct commit_list *p;
int marked = 0;
* Detect and simplify both cases.
*/
if (1 < cnt) {
- int marked = mark_redundant_parents(revs, commit);
- marked += mark_treesame_root_parents(revs, commit);
+ int marked = mark_redundant_parents(commit);
+ marked += mark_treesame_root_parents(commit);
if (marked)
marked -= leave_one_treesame_to_parent(revs, commit);
if (marked)
return 0;
}
-static enum rewrite_result rewrite_one(struct rev_info *revs, struct commit **pp)
+static enum rewrite_result rewrite_one_1(struct rev_info *revs,
+ struct commit **pp,
+ struct prio_queue *queue)
{
- struct commit_list *cache = NULL;
-
for (;;) {
struct commit *p = *pp;
if (!revs->limited)
- if (process_parents(revs, p, &revs->commits, &cache) < 0)
+ if (process_parents(revs, p, NULL, queue) < 0)
return rewrite_one_error;
if (p->object.flags & UNINTERESTING)
return rewrite_one_ok;
}
}
+static void merge_queue_into_list(struct prio_queue *q, struct commit_list **list)
+{
+ while (q->nr) {
+ struct commit *item = prio_queue_peek(q);
+ struct commit_list *p = *list;
+
+ if (p && p->item->date >= item->date)
+ list = &p->next;
+ else {
+ p = commit_list_insert(item, list);
+ list = &p->next; /* skip newly added item */
+ prio_queue_get(q); /* pop item */
+ }
+ }
+}
+
+static enum rewrite_result rewrite_one(struct rev_info *revs, struct commit **pp)
+{
+ struct prio_queue queue = { compare_commits_by_commit_date };
+ enum rewrite_result ret = rewrite_one_1(revs, pp, &queue);
+ merge_queue_into_list(&queue, &revs->commits);
+ clear_prio_queue(&queue);
+ return ret;
+}
+
int rewrite_parents(struct rev_info *revs, struct commit *commit,
rewrite_parent_fn_t rewrite_parent)
{
verbose_header:1,
ignore_merges:1,
combine_merges:1,
+ combined_all_paths:1,
dense_combined_merges:1,
always_show_header:1;
int sane_execvp(const char *file, char * const argv[])
{
+#ifndef GIT_WINDOWS_NATIVE
+ /*
+	 * execvp() doesn't return, so all we can do is tell trace2
+ * what we are about to do and let it leave a hint in the log
+ * (unless of course the execvp() fails).
+ *
+	 * We skip this for Windows because the compat layer already
+ * has to emulate the execvp() call anyway.
+ */
+ int exec_id = trace2_exec(file, (const char **)argv);
+#endif
+
if (!execvp(file, argv))
return 0; /* cannot happen ;-) */
+#ifndef GIT_WINDOWS_NATIVE
+ {
+ int ec = errno;
+ trace2_exec_result(exec_id, ec);
+ errno = ec;
+ }
+#endif
+
/*
* When a command can't be found because one of the directories
* listed in $PATH is unsearchable, execvp reports EACCES, but
cmd->err = fderr[0];
}
+ trace2_child_start(cmd);
trace_run_command(cmd);
fflush(NULL);
#endif
if (cmd->pid < 0) {
+ trace2_child_exit(cmd, -1);
+
if (need_in)
close_pair(fdin);
else if (cmd->in)
int finish_command(struct child_process *cmd)
{
int ret = wait_or_whine(cmd->pid, cmd->argv[0], 0);
+ trace2_child_exit(cmd, ret);
child_process_clear(cmd);
return ret;
}
int finish_command_in_signal(struct child_process *cmd)
{
- return wait_or_whine(cmd->pid, cmd->argv[0], 1);
+ int ret = wait_or_whine(cmd->pid, cmd->argv[0], 1);
+ trace2_child_exit(cmd, ret);
+ return ret;
}
return run_command_v_opt_cd_env(argv, opt, NULL, NULL);
}
+int run_command_v_opt_tr2(const char **argv, int opt, const char *tr2_class)
+{
+ return run_command_v_opt_cd_env_tr2(argv, opt, NULL, NULL, tr2_class);
+}
+
int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env)
+{
+ return run_command_v_opt_cd_env_tr2(argv, opt, dir, env, NULL);
+}
+
+int run_command_v_opt_cd_env_tr2(const char **argv, int opt, const char *dir,
+ const char *const *env, const char *tr2_class)
{
struct child_process cmd = CHILD_PROCESS_INIT;
cmd.argv = argv;
cmd.clean_on_exit = opt & RUN_CLEAN_ON_EXIT ? 1 : 0;
cmd.dir = dir;
cmd.env = env;
+ cmd.trace2_child_class = tr2_class;
return run_command(&cmd);
}
hook.env = env;
hook.no_stdin = 1;
hook.stdout_to_stderr = 1;
+ hook.trace2_hook_name = name;
return run_command(&hook);
}
pp_cleanup(&pp);
return 0;
}
+
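+/*
+ * Same as run_processes_parallel(), but wraps the whole run in a
+ * trace2 region using the given category and label.
+ */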
+int run_processes_parallel_tr2(int n, get_next_task_fn get_next_task,
+ start_failure_fn start_failure,
+ task_finished_fn task_finished, void *pp_cb,
+ const char *tr2_category, const char *tr2_label)
+{
+ int result;
+
+ trace2_region_enter_printf(tr2_category, tr2_label, NULL, "max:%d",
+ ((n < 1) ? online_cpus() : n));
+
+ result = run_processes_parallel(n, get_next_task, start_failure,
+ task_finished, pp_cb);
+
+ trace2_region_leave(tr2_category, tr2_label, NULL);
+
+ return result;
+}
struct argv_array args;
struct argv_array env_array;
pid_t pid;
+
+ int trace2_child_id;
+ uint64_t trace2_child_us_start;
+ const char *trace2_child_class;
+ const char *trace2_hook_name;
+
/*
* Using .in, .out, .err:
* - Specify 0 for no redirections (child inherits stdin, stdout,
#define CHILD_PROCESS_INIT { NULL, ARGV_ARRAY_INIT, ARGV_ARRAY_INIT }
void child_process_init(struct child_process *);
void child_process_clear(struct child_process *);
-extern int is_executable(const char *name);
+int is_executable(const char *name);
int start_command(struct child_process *);
int finish_command(struct child_process *);
* or disabled. Note that this points to static storage that will be
* overwritten by further calls to find_hook and run_hook_*.
*/
-extern const char *find_hook(const char *name);
+const char *find_hook(const char *name);
LAST_ARG_MUST_BE_NULL
-extern int run_hook_le(const char *const *env, const char *name, ...);
-extern int run_hook_ve(const char *const *env, const char *name, va_list args);
+int run_hook_le(const char *const *env, const char *name, ...);
+int run_hook_ve(const char *const *env, const char *name, va_list args);
#define RUN_COMMAND_NO_STDIN 1
#define RUN_GIT_CMD 2 /* If this is to be a git sub-command */
#define RUN_USING_SHELL 16
#define RUN_CLEAN_ON_EXIT 32
int run_command_v_opt(const char **argv, int opt);
-
+int run_command_v_opt_tr2(const char **argv, int opt, const char *tr2_class);
/*
* env (the environment) is to be formatted like environ: "VAR=VALUE".
* To unset an environment variable use just "VAR".
*/
int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env);
+int run_command_v_opt_cd_env_tr2(const char **argv, int opt, const char *dir,
+ const char *const *env, const char *tr2_class);
/**
* Execute the given command, sending "in" to its stdin, and capturing its
start_failure_fn,
task_finished_fn,
void *pp_cb);
+int run_processes_parallel_tr2(int n, get_next_task_fn, start_failure_fn,
+ task_finished_fn, void *pp_cb,
+ const char *tr2_category, const char *tr2_label);
#endif
* file and written to the tail of 'done'.
*/
GIT_PATH_FUNC(rebase_path_todo, "rebase-merge/git-rebase-todo")
-static GIT_PATH_FUNC(rebase_path_todo_backup,
- "rebase-merge/git-rebase-todo.backup")
+GIT_PATH_FUNC(rebase_path_todo_backup, "rebase-merge/git-rebase-todo.backup")
/*
* The rebase command lines that have already been processed. A line
if (status)
return status;
- if (!strcmp(s, "verbatim"))
+ if (!strcmp(s, "verbatim")) {
opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_NONE;
- else if (!strcmp(s, "whitespace"))
+ opts->explicit_cleanup = 1;
+ } else if (!strcmp(s, "whitespace")) {
opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE;
- else if (!strcmp(s, "strip"))
+ opts->explicit_cleanup = 1;
+ } else if (!strcmp(s, "strip")) {
opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_ALL;
- else if (!strcmp(s, "scissors"))
- opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE;
- else
+ opts->explicit_cleanup = 1;
+ } else if (!strcmp(s, "scissors")) {
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SCISSORS;
+ opts->explicit_cleanup = 1;
+ } else {
warning(_("invalid commit message cleanup mode '%s'"),
s);
+ }
free((char *)s);
return status;
}
}
-int write_message(const void *buf, size_t len, const char *filename,
- int append_eol)
+static int write_message(const void *buf, size_t len, const char *filename,
+ int append_eol)
{
struct lock_file msg_file = LOCK_INIT;
return 0;
}
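+/*
+ * Map a --cleanup=<mode> argument to a commit_msg_cleanup_mode;
+ * "default" and "scissors" resolve differently depending on whether
+ * an editor will be launched.
+ */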
+enum commit_msg_cleanup_mode get_cleanup_mode(const char *cleanup_arg,
+ int use_editor)
+{
+ if (!cleanup_arg || !strcmp(cleanup_arg, "default"))
+ return use_editor ? COMMIT_MSG_CLEANUP_ALL :
+ COMMIT_MSG_CLEANUP_SPACE;
+ else if (!strcmp(cleanup_arg, "verbatim"))
+ return COMMIT_MSG_CLEANUP_NONE;
+ else if (!strcmp(cleanup_arg, "whitespace"))
+ return COMMIT_MSG_CLEANUP_SPACE;
+ else if (!strcmp(cleanup_arg, "strip"))
+ return COMMIT_MSG_CLEANUP_ALL;
+ else if (!strcmp(cleanup_arg, "scissors"))
+ return use_editor ? COMMIT_MSG_CLEANUP_SCISSORS :
+ COMMIT_MSG_CLEANUP_SPACE;
+ else
+ die(_("Invalid cleanup mode %s"), cleanup_arg);
+}
+
+/*
+ * NB using int rather than enum cleanup_mode to stop clang's
+ * -Wtautological-constant-out-of-range-compare complaining that the comparison
+ * is always true.
+ */
+static const char *describe_cleanup_mode(int cleanup_mode)
+{
+ static const char *modes[] = { "whitespace",
+ "verbatim",
+ "scissors",
+ "strip" };
+
+ if (cleanup_mode < ARRAY_SIZE(modes))
+ return modes[cleanup_mode];
+
+ BUG("invalid cleanup_mode provided (%d)", cleanup_mode);
+}
+
void append_conflicts_hint(struct index_state *istate,
- struct strbuf *msgbuf)
+ struct strbuf *msgbuf, enum commit_msg_cleanup_mode cleanup_mode)
{
int i;
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS) {
+ strbuf_addch(msgbuf, '\n');
+ wt_status_append_cut_line(msgbuf);
+ strbuf_addch(msgbuf, comment_line_char);
+ }
+
strbuf_addch(msgbuf, '\n');
strbuf_commented_addf(msgbuf, "Conflicts:\n");
for (i = 0; i < istate->cache_nr;) {
_(action_name(opts)));
if (!clean)
- append_conflicts_hint(r->index, msgbuf);
+ append_conflicts_hint(r->index, msgbuf,
+ opts->default_msg_cleanup);
return !clean;
}
}
strbuf_reset(&out);
- strbuf_addstr(&out, fmt_ident(name, email, date, 0));
+ strbuf_addstr(&out, fmt_ident(name, email, WANT_AUTHOR_IDENT, date, 0));
strbuf_swap(buf, &out);
strbuf_release(&out);
free(name);
unsigned int flags)
{
struct child_process cmd = CHILD_PROCESS_INIT;
- const char *value;
if ((flags & CREATE_ROOT_COMMIT) && !(flags & AMEND_MSG)) {
struct strbuf msg = STRBUF_INIT, script = STRBUF_INIT;
argv_array_push(&cmd.args, "-e");
else if (!(flags & CLEANUP_MSG) &&
!opts->signoff && !opts->record_origin &&
- git_config_get_value("commit.cleanup", &value))
+ !opts->explicit_cleanup)
argv_array_push(&cmd.args, "--cleanup=verbatim");
if ((flags & ALLOW_EMPTY))
return 1;
}
+void cleanup_message(struct strbuf *msgbuf,
+ enum commit_msg_cleanup_mode cleanup_mode, int verbose)
+{
+ if (verbose || /* Truncate the message just before the diff, if any. */
+ cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS)
+ strbuf_setlen(msgbuf, wt_status_locate_end(msgbuf->buf, msgbuf->len));
+ if (cleanup_mode != COMMIT_MSG_CLEANUP_NONE)
+ strbuf_stripspace(msgbuf, cleanup_mode == COMMIT_MSG_CLEANUP_ALL);
+}
+
/*
* Find out if the message in the strbuf contains only whitespace and
* Signed-off-by lines.
proc.argv = argv;
proc.in = -1;
proc.stdout_to_stderr = 1;
+ proc.trace2_hook_name = "post-rewrite";
code = start_command(&proc);
if (code)
msg = &commit_msg;
}
- cleanup = (flags & CLEANUP_MSG) ? COMMIT_MSG_CLEANUP_ALL :
- opts->default_msg_cleanup;
+ if (flags & CLEANUP_MSG)
+ cleanup = COMMIT_MSG_CLEANUP_ALL;
+ else if ((opts->signoff || opts->record_origin) &&
+ !opts->explicit_cleanup)
+ cleanup = COMMIT_MSG_CLEANUP_SPACE;
+ else
+ cleanup = opts->default_msg_cleanup;
if (cleanup != COMMIT_MSG_CLEANUP_NONE)
strbuf_stripspace(msg, cleanup == COMMIT_MSG_CLEANUP_ALL);
return 1;
}
-/*
- * Note that ordering matters in this enum. Not only must it match the mapping
- * below, it is also divided into several sections that matter. When adding
- * new commands, make sure you add it in the right section.
- */
-enum todo_command {
- /* commands that handle commits */
- TODO_PICK = 0,
- TODO_REVERT,
- TODO_EDIT,
- TODO_REWORD,
- TODO_FIXUP,
- TODO_SQUASH,
- /* commands that do something else than handling a single commit */
- TODO_EXEC,
- TODO_BREAK,
- TODO_LABEL,
- TODO_RESET,
- TODO_MERGE,
- /* commands that do nothing but are counted for reporting progress */
- TODO_NOOP,
- TODO_DROP,
- /* comments (not counted for reporting progress) */
- TODO_COMMENT
-};
-
static struct {
char c;
const char *str;
TODO_EDIT_MERGE_MSG = 1
};
-struct todo_item {
- enum todo_command command;
- struct commit *commit;
- unsigned int flags;
- const char *arg;
- int arg_len;
- size_t offset_in_buf;
-};
-
-struct todo_list {
- struct strbuf buf;
- struct todo_item *items;
- int nr, alloc, current;
- int done_nr, total_nr;
- struct stat_data stat;
-};
-
-#define TODO_LIST_INIT { STRBUF_INIT }
-
-static void todo_list_release(struct todo_list *todo_list)
+void todo_list_release(struct todo_list *todo_list)
{
strbuf_release(&todo_list->buf);
FREE_AND_NULL(todo_list->items);
return todo_list->items + todo_list->nr++;
}
+const char *todo_item_get_arg(struct todo_list *todo_list,
+ struct todo_item *item)
+{
+ return todo_list->buf.buf + item->arg_offset;
+}
+
static int parse_insn_line(struct repository *r, struct todo_item *item,
- const char *bol, char *eol)
+ const char *buf, const char *bol, char *eol)
{
struct object_id commit_oid;
char *end_of_object_name;
if (bol == eol || *bol == '\r' || *bol == comment_line_char) {
item->command = TODO_COMMENT;
item->commit = NULL;
- item->arg = bol;
+ item->arg_offset = bol - buf;
item->arg_len = eol - bol;
return 0;
}
return error(_("%s does not accept arguments: '%s'"),
command_to_string(item->command), bol);
item->commit = NULL;
- item->arg = bol;
+ item->arg_offset = bol - buf;
item->arg_len = eol - bol;
return 0;
}
if (item->command == TODO_EXEC || item->command == TODO_LABEL ||
item->command == TODO_RESET) {
item->commit = NULL;
- item->arg = bol;
+ item->arg_offset = bol - buf;
item->arg_len = (int)(eol - bol);
return 0;
}
} else {
item->flags |= TODO_EDIT_MERGE_MSG;
item->commit = NULL;
- item->arg = bol;
+ item->arg_offset = bol - buf;
item->arg_len = (int)(eol - bol);
return 0;
}
status = get_oid(bol, &commit_oid);
*end_of_object_name = saved;
- item->arg = end_of_object_name + strspn(end_of_object_name, " \t");
- item->arg_len = (int)(eol - item->arg);
+ bol = end_of_object_name + strspn(end_of_object_name, " \t");
+ item->arg_offset = bol - buf;
+ item->arg_len = (int)(eol - bol);
if (status < 0)
- return -1;
+ return error(_("could not parse '%.*s'"),
+ (int)(end_of_object_name - bol), bol);
item->commit = lookup_commit_reference(r, &commit_oid);
return !item->commit;
}
-static int parse_insn_buffer(struct repository *r, char *buf,
- struct todo_list *todo_list)
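+/*
+ * Read the first line of the sequencer's todo file to find out whether
+ * the operation in progress is a cherry-pick or a revert.
+ */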
+int sequencer_get_last_command(struct repository *r, enum replay_action *action)
+{
+ struct todo_item item;
+ char *eol;
+ const char *todo_file;
+ struct strbuf buf = STRBUF_INIT;
+ int ret = -1;
+
+ todo_file = git_path_todo_file();
+ if (strbuf_read_file(&buf, todo_file, 0) < 0) {
+ if (errno == ENOENT)
+ return -1;
+ else
+ return error_errno("unable to open '%s'", todo_file);
+ }
+ eol = strchrnul(buf.buf, '\n');
+ if (buf.buf != eol && eol[-1] == '\r')
+ eol--; /* strip Carriage Return */
+ if (parse_insn_line(r, &item, buf.buf, buf.buf, eol))
+ goto fail;
+ if (item.command == TODO_PICK)
+ *action = REPLAY_PICK;
+ else if (item.command == TODO_REVERT)
+ *action = REPLAY_REVERT;
+ else
+ goto fail;
+
+ ret = 0;
+
+ fail:
+ strbuf_release(&buf);
+
+ return ret;
+}
+
+int todo_list_parse_insn_buffer(struct repository *r, char *buf,
+ struct todo_list *todo_list)
{
struct todo_item *item;
char *p = buf, *next_p;
int i, res = 0, fixup_okay = file_exists(rebase_path_done());
+ todo_list->current = todo_list->nr = 0;
+
for (i = 1; *p; i++, p = next_p) {
char *eol = strchrnul(p, '\n');
item = append_new_todo(todo_list);
item->offset_in_buf = p - todo_list->buf.buf;
- if (parse_insn_line(r, item, p, eol)) {
+ if (parse_insn_line(r, item, buf, p, eol)) {
res = error(_("invalid line %d: %.*s"),
i, (int)(eol - p), p);
- item->command = TODO_NOOP;
+ item->command = TODO_COMMENT + 1;
+ item->arg_offset = p - buf;
+ item->arg_len = (int)(eol - p);
+ item->commit = NULL;
}
if (fixup_okay)
return len;
}
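+/*
+ * Return 1 when the sequencer's todo file holds at most one line,
+ * i.e. the command that just completed was the last one.
+ */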
+static int have_finished_the_last_pick(void)
+{
+ struct strbuf buf = STRBUF_INIT;
+ const char *eol;
+ const char *todo_path = git_path_todo_file();
+ int ret = 0;
+
+ if (strbuf_read_file(&buf, todo_path, 0) < 0) {
+ if (errno == ENOENT) {
+ return 0;
+ } else {
+ error_errno("unable to open '%s'", todo_path);
+ return 0;
+ }
+ }
+ /* If there is only one line then we are done */
+ eol = strchr(buf.buf, '\n');
+ if (!eol || !eol[1])
+ ret = 1;
+
+ strbuf_release(&buf);
+
+ return ret;
+}
+
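+/*
+ * Called after creating a commit: drop a leftover CHERRY_PICK_HEAD or
+ * REVERT_HEAD and, if no further commands remain in the todo list,
+ * remove the sequencer state altogether.
+ */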
+void sequencer_post_commit_cleanup(struct repository *r)
+{
+ struct replay_opts opts = REPLAY_OPTS_INIT;
+ int need_cleanup = 0;
+
+ if (file_exists(git_path_cherry_pick_head(r))) {
+ unlink(git_path_cherry_pick_head(r));
+ opts.action = REPLAY_PICK;
+ need_cleanup = 1;
+ }
+
+ if (file_exists(git_path_revert_head(r))) {
+ unlink(git_path_revert_head(r));
+ opts.action = REPLAY_REVERT;
+ need_cleanup = 1;
+ }
+
+ if (!need_cleanup)
+ return;
+
+ if (!have_finished_the_last_pick())
+ return;
+
+ sequencer_remove_state(&opts);
+}
+
static int read_populate_todo(struct repository *r,
struct todo_list *todo_list,
struct replay_opts *opts)
return error(_("could not stat '%s'"), todo_file);
fill_stat_data(&todo_list->stat, &st);
- res = parse_insn_buffer(r, todo_list->buf.buf, todo_list);
+ res = todo_list_parse_insn_buffer(r, todo_list->buf.buf, todo_list);
if (res) {
if (is_rebase_i(opts))
return error(_("please fix this using "
FILE *f = fopen_or_warn(rebase_path_msgtotal(), "w");
if (strbuf_read_file(&done.buf, rebase_path_done(), 0) > 0 &&
- !parse_insn_buffer(r, done.buf.buf, &done))
+ !todo_list_parse_insn_buffer(r, done.buf.buf, &done))
todo_list->done_nr = count_commands(&done);
else
todo_list->done_nr = 0;
opts->no_commit = git_config_bool_or_int(key, value, &error_flag);
else if (!strcmp(key, "options.edit"))
opts->edit = git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.allow-empty"))
+ opts->allow_empty =
+ git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.allow-empty-message"))
+ opts->allow_empty_message =
+ git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.keep-redundant-commits"))
+ opts->keep_redundant_commits =
+ git_config_bool_or_int(key, value, &error_flag);
else if (!strcmp(key, "options.signoff"))
opts->signoff = git_config_bool_or_int(key, value, &error_flag);
else if (!strcmp(key, "options.record-origin"))
opts->allow_rerere_auto =
git_config_bool_or_int(key, value, &error_flag) ?
RERERE_AUTOUPDATE : RERERE_NOAUTOUPDATE;
- else
+ else if (!strcmp(key, "options.default-msg-cleanup")) {
+ opts->explicit_cleanup = 1;
+ opts->default_msg_cleanup = get_cleanup_mode(value, 1);
+ } else
return error(_("invalid key: %s"), key);
if (!error_flag)
}
int write_basic_state(struct replay_opts *opts, const char *head_name,
- const char *onto, const char *orig_head)
+ struct commit *onto, const char *orig_head)
{
const char *quiet = getenv("GIT_QUIET");
if (head_name)
write_file(rebase_path_head_name(), "%s\n", head_name);
if (onto)
- write_file(rebase_path_onto(), "%s\n", onto);
+ write_file(rebase_path_onto(), "%s\n",
+ oid_to_hex(&onto->object.oid));
if (orig_head)
write_file(rebase_path_orig_head(), "%s\n", orig_head);
item->command = command;
item->commit = commit;
- item->arg = NULL;
+ item->arg_offset = 0;
item->arg_len = 0;
item->offset_in_buf = todo_list->buf.len;
subject_len = find_commit_subject(commit_buffer, &subject);
int res = 0;
if (opts->no_commit)
- res |= git_config_set_in_file_gently(opts_file, "options.no-commit", "true");
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.no-commit", "true");
if (opts->edit)
- res |= git_config_set_in_file_gently(opts_file, "options.edit", "true");
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.edit", "true");
+ if (opts->allow_empty)
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.allow-empty", "true");
+ if (opts->allow_empty_message)
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.allow-empty-message", "true");
+ if (opts->keep_redundant_commits)
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.keep-redundant-commits", "true");
if (opts->signoff)
- res |= git_config_set_in_file_gently(opts_file, "options.signoff", "true");
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.signoff", "true");
if (opts->record_origin)
- res |= git_config_set_in_file_gently(opts_file, "options.record-origin", "true");
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.record-origin", "true");
if (opts->allow_ff)
- res |= git_config_set_in_file_gently(opts_file, "options.allow-ff", "true");
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.allow-ff", "true");
if (opts->mainline) {
struct strbuf buf = STRBUF_INIT;
strbuf_addf(&buf, "%d", opts->mainline);
- res |= git_config_set_in_file_gently(opts_file, "options.mainline", buf.buf);
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.mainline", buf.buf);
strbuf_release(&buf);
}
if (opts->strategy)
- res |= git_config_set_in_file_gently(opts_file, "options.strategy", opts->strategy);
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.strategy", opts->strategy);
if (opts->gpg_sign)
- res |= git_config_set_in_file_gently(opts_file, "options.gpg-sign", opts->gpg_sign);
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.gpg-sign", opts->gpg_sign);
if (opts->xopts) {
int i;
for (i = 0; i < opts->xopts_nr; i++)
res |= git_config_set_multivar_in_file_gently(opts_file,
- "options.strategy-option",
- opts->xopts[i], "^$", 0);
+ "options.strategy-option",
+ opts->xopts[i], "^$", 0);
}
if (opts->allow_rerere_auto)
- res |= git_config_set_in_file_gently(opts_file, "options.allow-rerere-auto",
- opts->allow_rerere_auto == RERERE_AUTOUPDATE ?
- "true" : "false");
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.allow-rerere-auto",
+ opts->allow_rerere_auto == RERERE_AUTOUPDATE ?
+ "true" : "false");
+
+ if (opts->explicit_cleanup)
+ res |= git_config_set_in_file_gently(opts_file,
+ "options.default-msg-cleanup",
+ describe_cleanup_mode(opts->default_msg_cleanup));
return res;
}
return buf.buf;
}
-static int run_git_checkout(struct replay_opts *opts, const char *commit,
- const char *action)
+static int run_git_checkout(struct repository *r, struct replay_opts *opts,
+ const char *commit, const char *action)
{
struct child_process cmd = CHILD_PROCESS_INIT;
+ int ret;
cmd.git_cmd = 1;
argv_array_pushf(&cmd.env_array, GIT_REFLOG_ACTION "=%s", action);
if (opts->verbose)
- return run_command(&cmd);
+ ret = run_command(&cmd);
else
- return run_command_silent_on_success(&cmd);
+ ret = run_command_silent_on_success(&cmd);
+
+ if (!ret)
+ discard_index(r->index);
+
+ return ret;
}
-int prepare_branch_to_be_rebased(struct replay_opts *opts, const char *commit)
+int prepare_branch_to_be_rebased(struct repository *r, struct replay_opts *opts,
+ const char *commit)
{
const char *action;
if (commit && *commit) {
action = reflog_message(opts, "start", "checkout %s", commit);
- if (run_git_checkout(opts, commit, action))
+ if (run_git_checkout(r, opts, commit, action))
return error(_("could not checkout %s"), commit);
}
return 0;
}
-static int checkout_onto(struct replay_opts *opts,
- const char *onto_name, const char *onto,
+static int checkout_onto(struct repository *r, struct replay_opts *opts,
+ const char *onto_name, const struct object_id *onto,
const char *orig_head)
{
struct object_id oid;
if (get_oid(orig_head, &oid))
return error(_("%s: not a valid OID"), orig_head);
- if (run_git_checkout(opts, onto, action)) {
+ if (run_git_checkout(r, opts, oid_to_hex(onto), action)) {
apply_autostash(opts);
sequencer_remove_state(opts);
return error(_("could not detach HEAD"));
while (todo_list->current < todo_list->nr) {
struct todo_item *item = todo_list->items + todo_list->current;
+ const char *arg = todo_item_get_arg(todo_list, item);
+
if (save_todo(todo_list, opts))
return -1;
if (is_rebase_i(opts)) {
fprintf(stderr,
_("Stopped at %s... %.*s\n"),
short_commit_name(commit),
- item->arg_len, item->arg);
+ item->arg_len, arg);
return error_with_patch(r, commit,
- item->arg, item->arg_len, opts, res,
- !res);
+ arg, item->arg_len, opts, res, !res);
}
if (is_rebase_i(opts) && !res)
record_in_rewritten(&item->commit->object.oid,
if (res == 1)
intend_to_amend();
return error_failed_squash(r, item->commit, opts,
- item->arg_len, item->arg);
+ item->arg_len, arg);
} else if (res && is_rebase_i(opts) && item->commit) {
int to_amend = 0;
struct object_id oid;
to_amend = 1;
return res | error_with_patch(r, item->commit,
- item->arg, item->arg_len, opts,
+ arg, item->arg_len, opts,
res, to_amend);
}
} else if (item->command == TODO_EXEC) {
- char *end_of_arg = (char *)(item->arg + item->arg_len);
+ char *end_of_arg = (char *)(arg + item->arg_len);
int saved = *end_of_arg;
struct stat st;
*end_of_arg = '\0';
- res = do_exec(r, item->arg);
+ res = do_exec(r, arg);
*end_of_arg = saved;
- /* Reread the todo file if it has changed. */
if (res) {
if (opts->reschedule_failed_exec)
reschedule = 1;
res = error_errno(_("could not stat '%s'"),
get_todo_path(opts));
else if (match_stat_data(&todo_list->stat, &st)) {
+ /* Reread the todo file if it has changed. */
todo_list_release(todo_list);
if (read_populate_todo(r, todo_list, opts))
res = -1; /* message was printed */
todo_list->current = -1;
}
} else if (item->command == TODO_LABEL) {
- if ((res = do_label(r, item->arg, item->arg_len)))
+ if ((res = do_label(r, arg, item->arg_len)))
reschedule = 1;
} else if (item->command == TODO_RESET) {
- if ((res = do_reset(r, item->arg, item->arg_len, opts)))
+ if ((res = do_reset(r, arg, item->arg_len, opts)))
reschedule = 1;
} else if (item->command == TODO_MERGE) {
if ((res = do_merge(r, item->commit,
- item->arg, item->arg_len,
+ arg, item->arg_len,
item->flags, opts)) < 0)
reschedule = 1;
else if (item->commit)
if (res > 0)
/* failed with merge conflicts */
return error_with_patch(r, item->commit,
- item->arg,
- item->arg_len, opts,
- res, 0);
+ arg, item->arg_len,
+ opts, res, 0);
} else if (!is_noop(item->command))
return error(_("unknown command %d"), item->command);
if (item->commit)
return error_with_patch(r,
item->commit,
- item->arg,
- item->arg_len, opts,
- res, 0);
+ arg, item->arg_len,
+ opts, res, 0);
}
todo_list->current++;
hook.in = open(rebase_path_rewritten_list(),
O_RDONLY);
hook.stdout_to_stderr = 1;
+ hook.trace2_hook_name = "post-rewrite";
argv_array_push(&hook.args, post_rewrite_hook);
argv_array_push(&hook.args, "rebase");
/* we don't care if this hook failed */
int has_footer;
strbuf_addstr(&sob, sign_off_header);
- strbuf_addstr(&sob, fmt_name(getenv("GIT_COMMITTER_NAME"),
- getenv("GIT_COMMITTER_EMAIL")));
+ strbuf_addstr(&sob, fmt_name(WANT_COMMITTER_IDENT));
strbuf_addch(&sob, '\n');
if (!ignore_footer)
}
static int make_script_with_merges(struct pretty_print_context *pp,
- struct rev_info *revs, FILE *out,
+ struct rev_info *revs, struct strbuf *out,
unsigned flags)
{
int keep_empty = flags & TODO_LIST_KEEP_EMPTY;
* gathering commits not yet shown, reversing the list on the fly,
* then outputting that list (labeling revisions as needed).
*/
- fprintf(out, "%s onto\n", cmd_label);
+ strbuf_addf(out, "%s onto\n", cmd_label);
for (iter = tips; iter; iter = iter->next) {
struct commit_list *list = NULL, *iter2;
entry = oidmap_get(&state.commit2label, &commit->object.oid);
if (entry)
- fprintf(out, "\n%c Branch %s\n", comment_line_char, entry->string);
+ strbuf_addf(out, "\n%c Branch %s\n", comment_line_char, entry->string);
else
- fprintf(out, "\n");
+ strbuf_addch(out, '\n');
while (oidset_contains(&interesting, &commit->object.oid) &&
!oidset_contains(&shown, &commit->object.oid)) {
}
if (!commit)
- fprintf(out, "%s %s\n", cmd_reset,
- rebase_cousins ? "onto" : "[new root]");
+ strbuf_addf(out, "%s %s\n", cmd_reset,
+ rebase_cousins ? "onto" : "[new root]");
else {
const char *to = NULL;
&state);
if (!to || !strcmp(to, "onto"))
- fprintf(out, "%s onto\n", cmd_reset);
+ strbuf_addf(out, "%s onto\n", cmd_reset);
else {
strbuf_reset(&oneline);
pretty_print_commit(pp, commit, &oneline);
- fprintf(out, "%s %s # %s\n",
- cmd_reset, to, oneline.buf);
+ strbuf_addf(out, "%s %s # %s\n",
+ cmd_reset, to, oneline.buf);
}
}
entry = oidmap_get(&commit2todo, oid);
/* only show if not already upstream */
if (entry)
- fprintf(out, "%s\n", entry->string);
+ strbuf_addf(out, "%s\n", entry->string);
entry = oidmap_get(&state.commit2label, oid);
if (entry)
- fprintf(out, "%s %s\n",
- cmd_label, entry->string);
+ strbuf_addf(out, "%s %s\n",
+ cmd_label, entry->string);
oidset_insert(&shown, oid);
}
return 0;
}
-int sequencer_make_script(struct repository *r, FILE *out,
- int argc, const char **argv,
- unsigned flags)
+int sequencer_make_script(struct repository *r, struct strbuf *out, int argc,
+ const char **argv, unsigned flags)
{
char *format = NULL;
struct pretty_print_context pp = {0};
- struct strbuf buf = STRBUF_INIT;
struct rev_info revs;
struct commit *commit;
int keep_empty = flags & TODO_LIST_KEEP_EMPTY;
if (!is_empty && (commit->object.flags & PATCHSAME))
continue;
- strbuf_reset(&buf);
if (!keep_empty && is_empty)
- strbuf_addf(&buf, "%c ", comment_line_char);
- strbuf_addf(&buf, "%s %s ", insn,
+ strbuf_addf(out, "%c ", comment_line_char);
+ strbuf_addf(out, "%s %s ", insn,
oid_to_hex(&commit->object.oid));
- pretty_print_commit(&pp, commit, &buf);
- strbuf_addch(&buf, '\n');
- fputs(buf.buf, out);
+ pretty_print_commit(&pp, commit, out);
+ strbuf_addch(out, '\n');
}
- strbuf_release(&buf);
return 0;
}
* Add commands after pick and (series of) squash/fixup commands
* in the todo list.
*/
-int sequencer_add_exec_commands(struct repository *r,
- const char *commands)
+void todo_list_add_exec_commands(struct todo_list *todo_list,
+ struct string_list *commands)
{
- const char *todo_file = rebase_path_todo();
- struct todo_list todo_list = TODO_LIST_INIT;
- struct strbuf *buf = &todo_list.buf;
- size_t offset = 0, commands_len = strlen(commands);
- int i, insert;
+ struct strbuf *buf = &todo_list->buf;
+ size_t base_offset = buf->len;
+ int i, insert, nr = 0, alloc = 0;
+ struct todo_item *items = NULL, *base_items = NULL;
+
+ base_items = xcalloc(commands->nr, sizeof(struct todo_item));
+ for (i = 0; i < commands->nr; i++) {
+ size_t command_len = strlen(commands->items[i].string);
+
+ strbuf_addstr(buf, commands->items[i].string);
+ strbuf_addch(buf, '\n');
- if (strbuf_read_file(&todo_list.buf, todo_file, 0) < 0)
- return error(_("could not read '%s'."), todo_file);
+ base_items[i].command = TODO_EXEC;
+ base_items[i].offset_in_buf = base_offset;
+ base_items[i].arg_offset = base_offset + strlen("exec ");
+ base_items[i].arg_len = command_len - strlen("exec ");
- if (parse_insn_buffer(r, todo_list.buf.buf, &todo_list)) {
- todo_list_release(&todo_list);
- return error(_("unusable todo list: '%s'"), todo_file);
+ base_offset += command_len + 1;
}
/*
* Insert <commands> after every pick. Here, fixup/squash chains
* are considered part of the pick, so we insert the commands *after*
* those chains if there are any.
+ *
+ * As we insert the exec commands immediately after rearranging
+ * any fixups and before the user edits the list, a fixup chain
+ * can never contain comments (any comments are empty picks that
+ * have been commented out because the user did not specify
+ * --keep-empty). So, it is safe to insert an exec command
+ * without looking at the command following a comment.
*/
- insert = -1;
- for (i = 0; i < todo_list.nr; i++) {
- enum todo_command command = todo_list.items[i].command;
-
- if (insert >= 0) {
- /* skip fixup/squash chains */
- if (command == TODO_COMMENT)
- continue;
- else if (is_fixup(command)) {
- insert = i + 1;
- continue;
- }
- strbuf_insert(buf,
- todo_list.items[insert].offset_in_buf +
- offset, commands, commands_len);
- offset += commands_len;
- insert = -1;
+ insert = 0;
+ for (i = 0; i < todo_list->nr; i++) {
+ enum todo_command command = todo_list->items[i].command;
+ if (insert && !is_fixup(command)) {
+ ALLOC_GROW(items, nr + commands->nr, alloc);
+ COPY_ARRAY(items + nr, base_items, commands->nr);
+ nr += commands->nr;
+
+ insert = 0;
}
+ ALLOC_GROW(items, nr + 1, alloc);
+ items[nr++] = todo_list->items[i];
+
if (command == TODO_PICK || command == TODO_MERGE)
- insert = i + 1;
+ insert = 1;
}
/* insert or append final <commands> */
- if (insert >= 0 && insert < todo_list.nr)
- strbuf_insert(buf, todo_list.items[insert].offset_in_buf +
- offset, commands, commands_len);
- else if (insert >= 0 || !offset)
- strbuf_add(buf, commands, commands_len);
+ if (insert || nr == todo_list->nr) {
+ ALLOC_GROW(items, nr + commands->nr, alloc);
+ COPY_ARRAY(items + nr, base_items, commands->nr);
+ nr += commands->nr;
+ }
- i = write_message(buf->buf, buf->len, todo_file, 0);
- todo_list_release(&todo_list);
- return i;
+ free(base_items);
+ FREE_AND_NULL(todo_list->items);
+ todo_list->items = items;
+ todo_list->nr = nr;
+ todo_list->alloc = alloc;
}
-int transform_todos(struct repository *r, unsigned flags)
+static void todo_list_to_strbuf(struct repository *r, struct todo_list *todo_list,
+ struct strbuf *buf, int num, unsigned flags)
{
- const char *todo_file = rebase_path_todo();
- struct todo_list todo_list = TODO_LIST_INIT;
- struct strbuf buf = STRBUF_INIT;
struct todo_item *item;
- int i;
-
- if (strbuf_read_file(&todo_list.buf, todo_file, 0) < 0)
- return error(_("could not read '%s'."), todo_file);
+ int i, max = todo_list->nr;
- if (parse_insn_buffer(r, todo_list.buf.buf, &todo_list)) {
- todo_list_release(&todo_list);
- return error(_("unusable todo list: '%s'"), todo_file);
- }
+ if (num > 0 && num < max)
+ max = num;
- for (item = todo_list.items, i = 0; i < todo_list.nr; i++, item++) {
+ for (item = todo_list->items, i = 0; i < max; i++, item++) {
/* if the item is not a command write it and continue */
if (item->command >= TODO_COMMENT) {
- strbuf_addf(&buf, "%.*s\n", item->arg_len, item->arg);
+ strbuf_addf(buf, "%.*s\n", item->arg_len,
+ todo_item_get_arg(todo_list, item));
continue;
}
/* add command to the buffer */
if (flags & TODO_LIST_ABBREVIATE_CMDS)
- strbuf_addch(&buf, command_to_char(item->command));
+ strbuf_addch(buf, command_to_char(item->command));
else
- strbuf_addstr(&buf, command_to_string(item->command));
+ strbuf_addstr(buf, command_to_string(item->command));
/* add commit id */
if (item->commit) {
if (item->command == TODO_MERGE) {
if (item->flags & TODO_EDIT_MERGE_MSG)
- strbuf_addstr(&buf, " -c");
+ strbuf_addstr(buf, " -c");
else
- strbuf_addstr(&buf, " -C");
+ strbuf_addstr(buf, " -C");
}
- strbuf_addf(&buf, " %s", oid);
+ strbuf_addf(buf, " %s", oid);
}
/* add all the rest */
if (!item->arg_len)
- strbuf_addch(&buf, '\n');
+ strbuf_addch(buf, '\n');
else
- strbuf_addf(&buf, " %.*s\n", item->arg_len, item->arg);
+ strbuf_addf(buf, " %.*s\n", item->arg_len,
+ todo_item_get_arg(todo_list, item));
}
-
- i = write_message(buf.buf, buf.len, todo_file, 0);
- todo_list_release(&todo_list);
- return i;
}
-enum missing_commit_check_level get_missing_commit_check_level(void)
+int todo_list_write_to_file(struct repository *r, struct todo_list *todo_list,
+ const char *file, const char *shortrevisions,
+ const char *shortonto, int num, unsigned flags)
{
- const char *value;
-
- if (git_config_get_value("rebase.missingcommitscheck", &value) ||
- !strcasecmp("ignore", value))
- return MISSING_COMMIT_CHECK_IGNORE;
- if (!strcasecmp("warn", value))
- return MISSING_COMMIT_CHECK_WARN;
- if (!strcasecmp("error", value))
- return MISSING_COMMIT_CHECK_ERROR;
- warning(_("unrecognized setting %s for option "
- "rebase.missingCommitsCheck. Ignoring."), value);
- return MISSING_COMMIT_CHECK_IGNORE;
-}
+ int res;
+ struct strbuf buf = STRBUF_INIT;
-define_commit_slab(commit_seen, unsigned char);
-/*
- * Check if the user dropped some commits by mistake
- * Behaviour determined by rebase.missingCommitsCheck.
- * Check if there is an unrecognized command or a
- * bad SHA-1 in a command.
- */
-int check_todo_list(struct repository *r)
-{
- enum missing_commit_check_level check_level = get_missing_commit_check_level();
- struct strbuf todo_file = STRBUF_INIT;
- struct todo_list todo_list = TODO_LIST_INIT;
- struct strbuf missing = STRBUF_INIT;
- int advise_to_edit_todo = 0, res = 0, i;
- struct commit_seen commit_seen;
+ todo_list_to_strbuf(r, todo_list, &buf, num, flags);
+ if (flags & TODO_LIST_APPEND_TODO_HELP)
+ append_todo_help(flags & TODO_LIST_KEEP_EMPTY, count_commands(todo_list),
+ shortrevisions, shortonto, &buf);
- init_commit_seen(&commit_seen);
+ res = write_message(buf.buf, buf.len, file, 0);
+ strbuf_release(&buf);
- strbuf_addstr(&todo_file, rebase_path_todo());
- if (strbuf_read_file_or_whine(&todo_list.buf, todo_file.buf) < 0) {
- res = -1;
- goto leave_check;
- }
- advise_to_edit_todo = res =
- parse_insn_buffer(r, todo_list.buf.buf, &todo_list);
+ return res;
+}
- if (res || check_level == MISSING_COMMIT_CHECK_IGNORE)
- goto leave_check;
+static const char edit_todo_list_advice[] =
+N_("You can fix this with 'git rebase --edit-todo' "
+"and then run 'git rebase --continue'.\n"
+"Or you can abort the rebase with 'git rebase"
+" --abort'.\n");
- /* Mark the commits in git-rebase-todo as seen */
- for (i = 0; i < todo_list.nr; i++) {
- struct commit *commit = todo_list.items[i].commit;
- if (commit)
- *commit_seen_at(&commit_seen, commit) = 1;
- }
+int check_todo_list_from_file(struct repository *r)
+{
+ struct todo_list old_todo = TODO_LIST_INIT, new_todo = TODO_LIST_INIT;
+ int res = 0;
- todo_list_release(&todo_list);
- strbuf_addstr(&todo_file, ".backup");
- if (strbuf_read_file_or_whine(&todo_list.buf, todo_file.buf) < 0) {
+ if (strbuf_read_file_or_whine(&new_todo.buf, rebase_path_todo()) < 0) {
res = -1;
- goto leave_check;
- }
- strbuf_release(&todo_file);
- res = !!parse_insn_buffer(r, todo_list.buf.buf, &todo_list);
-
- /* Find commits in git-rebase-todo.backup yet unseen */
- for (i = todo_list.nr - 1; i >= 0; i--) {
- struct todo_item *item = todo_list.items + i;
- struct commit *commit = item->commit;
- if (commit && !*commit_seen_at(&commit_seen, commit)) {
- strbuf_addf(&missing, " - %s %.*s\n",
- short_commit_name(commit),
- item->arg_len, item->arg);
- *commit_seen_at(&commit_seen, commit) = 1;
- }
+ goto out;
}
- /* Warn about missing commits */
- if (!missing.len)
- goto leave_check;
-
- if (check_level == MISSING_COMMIT_CHECK_ERROR)
- advise_to_edit_todo = res = 1;
-
- fprintf(stderr,
- _("Warning: some commits may have been dropped accidentally.\n"
- "Dropped commits (newer to older):\n"));
-
- /* Make the list user-friendly and display */
- fputs(missing.buf, stderr);
- strbuf_release(&missing);
-
- fprintf(stderr, _("To avoid this message, use \"drop\" to "
- "explicitly remove a commit.\n\n"
- "Use 'git config rebase.missingCommitsCheck' to change "
- "the level of warnings.\n"
- "The possible behaviours are: ignore, warn, error.\n\n"));
-
-leave_check:
- clear_commit_seen(&commit_seen);
- strbuf_release(&todo_file);
- todo_list_release(&todo_list);
+ if (strbuf_read_file_or_whine(&old_todo.buf, rebase_path_todo_backup()) < 0) {
+ res = -1;
+ goto out;
+ }
- if (advise_to_edit_todo)
- fprintf(stderr,
- _("You can fix this with 'git rebase --edit-todo' "
- "and then run 'git rebase --continue'.\n"
- "Or you can abort the rebase with 'git rebase"
- " --abort'.\n"));
+ res = todo_list_parse_insn_buffer(r, old_todo.buf.buf, &old_todo);
+ if (!res)
+ res = todo_list_parse_insn_buffer(r, new_todo.buf.buf, &new_todo);
+ if (!res)
+ res = todo_list_check(&old_todo, &new_todo);
+ if (res)
+ fprintf(stderr, _(edit_todo_list_advice));
+out:
+ todo_list_release(&old_todo);
+ todo_list_release(&new_todo);
return res;
}
-static int rewrite_file(const char *path, const char *buf, size_t len)
-{
- int rc = 0;
- int fd = open(path, O_WRONLY | O_TRUNC);
- if (fd < 0)
- return error_errno(_("could not open '%s' for writing"), path);
- if (write_in_full(fd, buf, len) < 0)
- rc = error_errno(_("could not write to '%s'"), path);
- if (close(fd) && !rc)
- rc = error_errno(_("could not close '%s'"), path);
- return rc;
-}
-
/* skip picking commits whose parents are unchanged */
-static int skip_unnecessary_picks(struct repository *r, struct object_id *output_oid)
+static int skip_unnecessary_picks(struct repository *r,
+ struct todo_list *todo_list,
+ struct object_id *base_oid)
{
- const char *todo_file = rebase_path_todo();
- struct strbuf buf = STRBUF_INIT;
- struct todo_list todo_list = TODO_LIST_INIT;
struct object_id *parent_oid;
- int fd, i;
-
- if (!read_oneliner(&buf, rebase_path_onto(), 0))
- return error(_("could not read 'onto'"));
- if (get_oid(buf.buf, output_oid)) {
- strbuf_release(&buf);
- return error(_("need a HEAD to fixup"));
- }
- strbuf_release(&buf);
-
- if (strbuf_read_file_or_whine(&todo_list.buf, todo_file) < 0)
- return -1;
- if (parse_insn_buffer(r, todo_list.buf.buf, &todo_list) < 0) {
- todo_list_release(&todo_list);
- return -1;
- }
+ int i;
- for (i = 0; i < todo_list.nr; i++) {
- struct todo_item *item = todo_list.items + i;
+ for (i = 0; i < todo_list->nr; i++) {
+ struct todo_item *item = todo_list->items + i;
if (item->command >= TODO_NOOP)
continue;
if (item->command != TODO_PICK)
break;
if (parse_commit(item->commit)) {
- todo_list_release(&todo_list);
return error(_("could not parse commit '%s'"),
oid_to_hex(&item->commit->object.oid));
}
if (item->commit->parents->next)
break; /* merge commit */
parent_oid = &item->commit->parents->item->object.oid;
- if (!oideq(parent_oid, output_oid))
+ if (!oideq(parent_oid, base_oid))
break;
- oidcpy(output_oid, &item->commit->object.oid);
+ oidcpy(base_oid, &item->commit->object.oid);
}
if (i > 0) {
- int offset = get_item_line_offset(&todo_list, i);
const char *done_path = rebase_path_done();
- fd = open(done_path, O_CREAT | O_WRONLY | O_APPEND, 0666);
- if (fd < 0) {
- error_errno(_("could not open '%s' for writing"),
- done_path);
- todo_list_release(&todo_list);
- return -1;
- }
- if (write_in_full(fd, todo_list.buf.buf, offset) < 0) {
+ if (todo_list_write_to_file(r, todo_list, done_path, NULL, NULL, i, 0)) {
error_errno(_("could not write to '%s'"), done_path);
- todo_list_release(&todo_list);
- close(fd);
return -1;
}
- close(fd);
- if (rewrite_file(rebase_path_todo(), todo_list.buf.buf + offset,
- todo_list.buf.len - offset) < 0) {
- todo_list_release(&todo_list);
- return -1;
- }
+ MOVE_ARRAY(todo_list->items, todo_list->items + i, todo_list->nr - i);
+ todo_list->nr -= i;
+ todo_list->current = 0;
- todo_list.current = i;
- if (is_fixup(peek_command(&todo_list, 0)))
- record_in_rewritten(output_oid, peek_command(&todo_list, 0));
+ if (is_fixup(peek_command(todo_list, 0)))
+ record_in_rewritten(base_oid, peek_command(todo_list, 0));
}
- todo_list_release(&todo_list);
-
return 0;
}
int complete_action(struct repository *r, struct replay_opts *opts, unsigned flags,
const char *shortrevisions, const char *onto_name,
- const char *onto, const char *orig_head, const char *cmd,
- unsigned autosquash)
+ struct commit *onto, const char *orig_head,
+ struct string_list *commands, unsigned autosquash,
+ struct todo_list *todo_list)
{
const char *shortonto, *todo_file = rebase_path_todo();
- struct todo_list todo_list = TODO_LIST_INIT;
- struct strbuf *buf = &(todo_list.buf);
- struct object_id oid;
- struct stat st;
+ struct todo_list new_todo = TODO_LIST_INIT;
+ struct strbuf *buf = &todo_list->buf;
+ struct object_id oid = onto->object.oid;
+ int res;
- get_oid(onto, &oid);
shortonto = find_unique_abbrev(&oid, DEFAULT_ABBREV);
- if (!lstat(todo_file, &st) && st.st_size == 0 &&
- write_message("noop\n", 5, todo_file, 0))
- return -1;
+ if (buf->len == 0) {
+ struct todo_item *item = append_new_todo(todo_list);
+ item->command = TODO_NOOP;
+ item->commit = NULL;
+ item->arg_len = item->arg_offset = item->flags = item->offset_in_buf = 0;
+ }
- if (autosquash && rearrange_squash(r))
+ if (autosquash && todo_list_rearrange_squash(todo_list))
return -1;
- if (cmd && *cmd)
- sequencer_add_exec_commands(r, cmd);
-
- if (strbuf_read_file(buf, todo_file, 0) < 0)
- return error_errno(_("could not read '%s'."), todo_file);
-
- if (parse_insn_buffer(r, buf->buf, &todo_list)) {
- todo_list_release(&todo_list);
- return error(_("unusable todo list: '%s'"), todo_file);
- }
+ if (commands->nr)
+ todo_list_add_exec_commands(todo_list, commands);
- if (count_commands(&todo_list) == 0) {
+ if (count_commands(todo_list) == 0) {
apply_autostash(opts);
sequencer_remove_state(opts);
- todo_list_release(&todo_list);
return error(_("nothing to do"));
}
- strbuf_addch(buf, '\n');
- strbuf_commented_addf(buf, Q_("Rebase %s onto %s (%d command)",
- "Rebase %s onto %s (%d commands)",
- count_commands(&todo_list)),
- shortrevisions, shortonto, count_commands(&todo_list));
- append_todo_help(0, flags & TODO_LIST_KEEP_EMPTY, buf);
-
- if (write_message(buf->buf, buf->len, todo_file, 0)) {
- todo_list_release(&todo_list);
+ res = edit_todo_list(r, todo_list, &new_todo, shortrevisions,
+ shortonto, flags);
+ if (res == -1)
return -1;
- }
-
- if (copy_file(rebase_path_todo_backup(), todo_file, 0666))
- return error(_("could not copy '%s' to '%s'."), todo_file,
- rebase_path_todo_backup());
-
- if (transform_todos(r, flags | TODO_LIST_SHORTEN_IDS))
- return error(_("could not transform the todo list"));
-
- strbuf_reset(buf);
-
- if (launch_sequence_editor(todo_file, buf, NULL)) {
+ else if (res == -2) {
apply_autostash(opts);
sequencer_remove_state(opts);
- todo_list_release(&todo_list);
return -1;
- }
-
- strbuf_stripspace(buf, 1);
- if (buf->len == 0) {
+ } else if (res == -3) {
apply_autostash(opts);
sequencer_remove_state(opts);
- todo_list_release(&todo_list);
+ todo_list_release(&new_todo);
return error(_("nothing to do"));
}
- todo_list_release(&todo_list);
+ if (todo_list_parse_insn_buffer(r, new_todo.buf.buf, &new_todo) ||
+ todo_list_check(todo_list, &new_todo)) {
+ fprintf(stderr, _(edit_todo_list_advice));
+ checkout_onto(r, opts, onto_name, &onto->object.oid, orig_head);
+ todo_list_release(&new_todo);
- if (check_todo_list(r)) {
- checkout_onto(opts, onto_name, onto, orig_head);
return -1;
}
- if (transform_todos(r, flags & ~(TODO_LIST_SHORTEN_IDS)))
- return error(_("could not transform the todo list"));
-
- if (opts->allow_ff && skip_unnecessary_picks(r, &oid))
+ if (opts->allow_ff && skip_unnecessary_picks(r, &new_todo, &oid)) {
+ todo_list_release(&new_todo);
return error(_("could not skip unnecessary pick commands"));
+ }
- if (checkout_onto(opts, onto_name, oid_to_hex(&oid), orig_head))
+ if (todo_list_write_to_file(r, &new_todo, todo_file, NULL, NULL, -1,
+ flags & ~(TODO_LIST_SHORTEN_IDS))) {
+ todo_list_release(&new_todo);
+ return error_errno(_("could not write '%s'"), todo_file);
+ }
+
+ todo_list_release(&new_todo);
+
+ if (checkout_onto(r, opts, onto_name, &oid, orig_head))
return -1;
if (require_clean_work_tree(r, "rebase", "", 1, 1))
* message will have to be retrieved from the commit (as the oneline in the
* script cannot be trusted) in order to normalize the autosquash arrangement.
*/
-int rearrange_squash(struct repository *r)
+int todo_list_rearrange_squash(struct todo_list *todo_list)
{
- const char *todo_file = rebase_path_todo();
- struct todo_list todo_list = TODO_LIST_INIT;
struct hashmap subject2item;
- int res = 0, rearranged = 0, *next, *tail, i;
+ int rearranged = 0, *next, *tail, i, nr = 0, alloc = 0;
char **subjects;
struct commit_todo_item commit_todo;
-
- if (strbuf_read_file_or_whine(&todo_list.buf, todo_file) < 0)
- return -1;
- if (parse_insn_buffer(r, todo_list.buf.buf, &todo_list) < 0) {
- todo_list_release(&todo_list);
- return -1;
- }
+ struct todo_item *items = NULL;
init_commit_todo_item(&commit_todo);
/*
* be moved to appear after the i'th.
*/
hashmap_init(&subject2item, (hashmap_cmp_fn) subject2item_cmp,
- NULL, todo_list.nr);
- ALLOC_ARRAY(next, todo_list.nr);
- ALLOC_ARRAY(tail, todo_list.nr);
- ALLOC_ARRAY(subjects, todo_list.nr);
- for (i = 0; i < todo_list.nr; i++) {
+ NULL, todo_list->nr);
+ ALLOC_ARRAY(next, todo_list->nr);
+ ALLOC_ARRAY(tail, todo_list->nr);
+ ALLOC_ARRAY(subjects, todo_list->nr);
+ for (i = 0; i < todo_list->nr; i++) {
struct strbuf buf = STRBUF_INIT;
- struct todo_item *item = todo_list.items + i;
+ struct todo_item *item = todo_list->items + i;
const char *commit_buffer, *subject, *p;
size_t subject_len;
int i2 = -1;
}
if (is_fixup(item->command)) {
- todo_list_release(&todo_list);
clear_commit_todo_item(&commit_todo);
return error(_("the script was already rearranged."));
}
*commit_todo_item_at(&commit_todo, commit2))
/* found by commit name */
i2 = *commit_todo_item_at(&commit_todo, commit2)
- - todo_list.items;
+ - todo_list->items;
else {
/* copy can be a prefix of the commit subject */
for (i2 = 0; i2 < i; i2++)
}
if (i2 >= 0) {
rearranged = 1;
- todo_list.items[i].command =
+ todo_list->items[i].command =
starts_with(subject, "fixup!") ?
TODO_FIXUP : TODO_SQUASH;
if (next[i2] < 0)
}
if (rearranged) {
- struct strbuf buf = STRBUF_INIT;
-
- for (i = 0; i < todo_list.nr; i++) {
- enum todo_command command = todo_list.items[i].command;
+ for (i = 0; i < todo_list->nr; i++) {
+ enum todo_command command = todo_list->items[i].command;
int cur = i;
/*
continue;
while (cur >= 0) {
- const char *bol =
- get_item_line(&todo_list, cur);
- const char *eol =
- get_item_line(&todo_list, cur + 1);
-
- /* replace 'pick', by 'fixup' or 'squash' */
- command = todo_list.items[cur].command;
- if (is_fixup(command)) {
- strbuf_addstr(&buf,
- todo_command_info[command].str);
- bol += strcspn(bol, " \t");
- }
-
- strbuf_add(&buf, bol, eol - bol);
-
+ ALLOC_GROW(items, nr + 1, alloc);
+ items[nr++] = todo_list->items[cur];
cur = next[cur];
}
}
- res = rewrite_file(todo_file, buf.buf, buf.len);
- strbuf_release(&buf);
+ FREE_AND_NULL(todo_list->items);
+ todo_list->items = items;
+ todo_list->nr = nr;
+ todo_list->alloc = alloc;
}
free(next);
free(tail);
- for (i = 0; i < todo_list.nr; i++)
+ for (i = 0; i < todo_list->nr; i++)
free(subjects[i]);
free(subjects);
hashmap_free(&subject2item, 1);
- todo_list_release(&todo_list);
clear_commit_todo_item(&commit_todo);
- return res;
+
+ return 0;
}
const char *git_path_commit_editmsg(void);
const char *git_path_seq_dir(void);
const char *rebase_path_todo(void);
+const char *rebase_path_todo_backup(void);
#define APPEND_SIGNOFF_DEDUP (1u << 0)
char *gpg_sign;
enum commit_msg_cleanup_mode default_msg_cleanup;
+ int explicit_cleanup;
/* Merge strategy */
char *strategy;
};
#define REPLAY_OPTS_INIT { .action = -1, .current_fixups = STRBUF_INIT }
-enum missing_commit_check_level {
- MISSING_COMMIT_CHECK_IGNORE = 0,
- MISSING_COMMIT_CHECK_WARN,
- MISSING_COMMIT_CHECK_ERROR
+/*
+ * Note that ordering matters in this enum. Not only must it match the mapping
+ * of todo_command_info (in sequencer.c), it is also divided into several
+ * sections that matter. When adding new commands, make sure you add it in the
+ * right section.
+ */
+enum todo_command {
+ /* commands that handle commits */
+ TODO_PICK = 0,
+ TODO_REVERT,
+ TODO_EDIT,
+ TODO_REWORD,
+ TODO_FIXUP,
+ TODO_SQUASH,
+ /* commands that do something else than handling a single commit */
+ TODO_EXEC,
+ TODO_BREAK,
+ TODO_LABEL,
+ TODO_RESET,
+ TODO_MERGE,
+ /* commands that do nothing but are counted for reporting progress */
+ TODO_NOOP,
+ TODO_DROP,
+ /* comments (not counted for reporting progress) */
+ TODO_COMMENT
+};
+
+struct todo_item {
+ enum todo_command command;
+ struct commit *commit;
+ unsigned int flags;
+ int arg_len;
+ /* The offset of the command and its argument in the strbuf */
+ size_t offset_in_buf, arg_offset;
+};
+
+struct todo_list {
+ struct strbuf buf;
+ struct todo_item *items;
+ int nr, alloc, current;
+ int done_nr, total_nr;
+ struct stat_data stat;
};
-int write_message(const void *buf, size_t len, const char *filename,
- int append_eol);
+#define TODO_LIST_INIT { STRBUF_INIT }
+
+int todo_list_parse_insn_buffer(struct repository *r, char *buf,
+ struct todo_list *todo_list);
+int todo_list_write_to_file(struct repository *r, struct todo_list *todo_list,
+ const char *file, const char *shortrevisions,
+ const char *shortonto, int num, unsigned flags);
+void todo_list_release(struct todo_list *todo_list);
+const char *todo_item_get_arg(struct todo_list *todo_list,
+ struct todo_item *item);
/* Call this to setup defaults before parsing command line options */
void sequencer_init_config(struct replay_opts *opts);
* commits should be rebased onto the new base, this flag needs to be passed.
*/
#define TODO_LIST_REBASE_COUSINS (1U << 4)
-int sequencer_make_script(struct repository *repo, FILE *out,
- int argc, const char **argv,
- unsigned flags);
-
-int sequencer_add_exec_commands(struct repository *r, const char *command);
-int transform_todos(struct repository *r, unsigned flags);
-enum missing_commit_check_level get_missing_commit_check_level(void);
-int check_todo_list(struct repository *r);
+#define TODO_LIST_APPEND_TODO_HELP (1U << 5)
+
+int sequencer_make_script(struct repository *r, struct strbuf *out, int argc,
+ const char **argv, unsigned flags);
+
+void todo_list_add_exec_commands(struct todo_list *todo_list,
+ struct string_list *commands);
+int check_todo_list_from_file(struct repository *r);
int complete_action(struct repository *r, struct replay_opts *opts, unsigned flags,
const char *shortrevisions, const char *onto_name,
- const char *onto, const char *orig_head, const char *cmd,
- unsigned autosquash);
-int rearrange_squash(struct repository *r);
+ struct commit *onto, const char *orig_head, struct string_list *commands,
+ unsigned autosquash, struct todo_list *todo_list);
+int todo_list_rearrange_squash(struct todo_list *todo_list);
/*
* Append a signoff to the commit message in "msgbuf". The ignore_footer
*/
void append_signoff(struct strbuf *msgbuf, size_t ignore_footer, unsigned flag);
-void append_conflicts_hint(struct index_state *istate, struct strbuf *msgbuf);
+void append_conflicts_hint(struct index_state *istate,
+ struct strbuf *msgbuf, enum commit_msg_cleanup_mode cleanup_mode);
+enum commit_msg_cleanup_mode get_cleanup_mode(const char *cleanup_arg,
+ int use_editor);
+
+void cleanup_message(struct strbuf *msgbuf,
+ enum commit_msg_cleanup_mode cleanup_mode, int verbose);
+
int message_is_empty(const struct strbuf *sb,
enum commit_msg_cleanup_mode cleanup_mode);
int template_untouched(const struct strbuf *sb, const char *template_file,
const struct commit *current_head,
const struct object_id *new_head);
-int prepare_branch_to_be_rebased(struct replay_opts *opts, const char *commit);
+int prepare_branch_to_be_rebased(struct repository *r, struct replay_opts *opts,
+ const char *commit);
#define SUMMARY_INITIAL_COMMIT (1 << 0)
#define SUMMARY_SHOW_AUTHOR_DATE (1 << 1)
void parse_strategy_opts(struct replay_opts *opts, char *raw_opts);
int write_basic_state(struct replay_opts *opts, const char *head_name,
- const char *onto, const char *orig_head);
+ struct commit *onto, const char *orig_head);
+void sequencer_post_commit_cleanup(struct repository *r);
+int sequencer_get_last_command(struct repository *r,
+ enum replay_action *action);
#define SERVE_H
struct argv_array;
-extern int has_capability(const struct argv_array *keys, const char *capability,
- const char **value);
+int has_capability(const struct argv_array *keys, const char *capability,
+ const char **value);
struct serve_options {
unsigned advertise_capabilities;
unsigned stateless_rpc;
};
#define SERVE_OPTIONS_INIT { 0 }
-extern void serve(struct serve_options *options);
+void serve(struct serve_options *options);
#endif /* SERVE_H */
return for_each_ref(add_info_ref, fp);
}
-static int update_info_refs(int force)
+static int update_info_refs(void)
{
char *path = git_pathdup("info/refs");
int ret = update_info_file(path, generate_info_refs);
struct packed_git *p;
int old_num;
int new_num;
- int nr_alloc;
} **info;
static int num_pack;
-static const char *objdir;
-static int objdirlen;
static struct pack_info *find_pack_by_name(const char *name)
{
int i;
for (i = 0; i < num_pack; i++) {
struct packed_git *p = info[i]->p;
- /* skip "/pack/" after ".git/objects" */
- if (!strcmp(p->pack_name + objdirlen + 6, name))
+ if (!strcmp(pack_basename(p), name))
return info[i];
}
return NULL;
/* Returns non-zero when we detect that the info in the
* old file is useless.
*/
-static int parse_pack_def(const char *line, int old_cnt)
+static int parse_pack_def(const char *packname, int old_cnt)
{
- struct pack_info *i = find_pack_by_name(line + 2);
+ struct pack_info *i = find_pack_by_name(packname);
if (i) {
i->old_num = old_cnt;
return 0;
static int read_pack_info_file(const char *infofile)
{
FILE *fp;
- char line[1000];
+ struct strbuf line = STRBUF_INIT;
int old_cnt = 0;
+ int stale = 1;
fp = fopen_or_warn(infofile, "r");
if (!fp)
return 1; /* nonexistent is not an error. */
- while (fgets(line, sizeof(line), fp)) {
- int len = strlen(line);
- if (len && line[len-1] == '\n')
- line[--len] = 0;
+ while (strbuf_getline(&line, fp) != EOF) {
+ const char *arg;
- if (!len)
+ if (!line.len)
continue;
- switch (line[0]) {
- case 'P': /* P name */
- if (parse_pack_def(line, old_cnt++))
+ if (skip_prefix(line.buf, "P ", &arg)) {
+ /* P name */
+ if (parse_pack_def(arg, old_cnt++))
goto out_stale;
- break;
- case 'D': /* we used to emit D but that was misguided. */
- case 'T': /* we used to emit T but nobody uses it. */
+ } else if (line.buf[0] == 'D') {
+ /* we used to emit D but that was misguided. */
goto out_stale;
- default:
- error("unrecognized: %s", line);
- break;
+ } else if (line.buf[0] == 'T') {
+ /* we used to emit T but nobody uses it. */
+ goto out_stale;
+ } else {
+ error("unrecognized: %s", line.buf);
}
}
- fclose(fp);
- return 0;
+ stale = 0;
+
out_stale:
+ strbuf_release(&line);
fclose(fp);
- return 1;
+ return stale;
}
static int compare_info(const void *a_, const void *b_)
int stale;
int i = 0;
- objdir = get_object_directory();
- objdirlen = strlen(objdir);
-
for (p = get_all_packs(the_repository); p; p = p->next) {
/* we ignore things on alternate path since they are
* not available to the pullers in general.
for (i = 0, p = get_all_packs(the_repository); p; p = p->next) {
if (!p->pack_local)
continue;
+ assert(i < num_pack);
info[i] = xcalloc(1, sizeof(struct pack_info));
info[i]->p = p;
info[i]->old_num = -1;
{
int i;
for (i = 0; i < num_pack; i++) {
- if (fprintf(fp, "P %s\n", info[i]->p->pack_name + objdirlen + 6) < 0)
+ if (fprintf(fp, "P %s\n", pack_basename(info[i]->p)) < 0)
return -1;
}
if (fputc('\n', fp) == EOF)
*/
int errs = 0;
- errs = errs | update_info_refs(force);
+ errs = errs | update_info_refs();
errs = errs | update_info_packs(force);
/* remove leftover rev-cache file if there is any */
die_errno(_("failed to stat '%s'"), arg);
}
-static void NORETURN die_verify_filename(const char *prefix,
+static void NORETURN die_verify_filename(struct repository *r,
+ const char *prefix,
const char *arg,
int diagnose_misspelt_rev)
{
* let maybe_die_on_misspelt_object_name() even trigger.
*/
if (!(arg[0] == ':' && !isalnum(arg[1])))
- maybe_die_on_misspelt_object_name(arg, prefix);
+ maybe_die_on_misspelt_object_name(r, arg, prefix);
/* ... or fall back the most general message. */
die(_("ambiguous argument '%s': unknown revision or path not in the working tree.\n"
die(_("option '%s' must come before non-option arguments"), arg);
if (looks_like_pathspec(arg) || check_filename(prefix, arg))
return;
- die_verify_filename(prefix, arg, diagnose_misspelt_rev);
+ die_verify_filename(the_repository, prefix, arg, diagnose_misspelt_rev);
}
/*
} else if (strcmp(var, "core.worktree") == 0) {
if (!value)
return config_error_nonbool(var);
+ free(data->work_tree);
data->work_tree = xstrdup(value);
}
return 0;
}
repository_format_precious_objects = candidate->precious_objects;
- repository_format_partial_clone = candidate->partial_clone;
+ repository_format_partial_clone = xstrdup_or_null(candidate->partial_clone);
repository_format_worktree_config = candidate->worktree_config;
string_list_clear(&candidate->unknown_extensions, 0);
}
if (candidate->work_tree) {
free(git_work_tree_cfg);
- git_work_tree_cfg = candidate->work_tree;
+ git_work_tree_cfg = xstrdup(candidate->work_tree);
inside_work_tree = -1;
}
- } else {
- free(candidate->work_tree);
}
return 0;
}
+static void init_repository_format(struct repository_format *format)
+{
+ const struct repository_format fresh = REPOSITORY_FORMAT_INIT;
+
+ memcpy(format, &fresh, sizeof(fresh));
+}
+
int read_repository_format(struct repository_format *format, const char *path)
{
- memset(format, 0, sizeof(*format));
- format->version = -1;
- format->is_bare = -1;
- format->hash_algo = GIT_HASH_SHA1;
- string_list_init(&format->unknown_extensions, 1);
+ clear_repository_format(format);
git_config_from_file(check_repo_format, path, format);
+ if (format->version == -1)
+ clear_repository_format(format);
return format->version;
}
+void clear_repository_format(struct repository_format *format)
+{
+ string_list_clear(&format->unknown_extensions, 0);
+ free(format->work_tree);
+ free(format->partial_clone);
+ init_repository_format(format);
+}
+
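A minimal caller sketch, assuming only the helpers declared in this patch (the config path and warning text are illustrative, not part of the change):

	struct repository_format candidate = REPOSITORY_FORMAT_INIT;

	if (read_repository_format(&candidate, "/tmp/repo/.git/config") < 0)
		warning("could not read a usable repository format");
	/* ... inspect candidate.version, candidate.hash_algo, ... */
	clear_repository_format(&candidate); /* frees work_tree, partial_clone, extensions */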
int verify_repository_format(const struct repository_format *format,
struct strbuf *err)
{
struct strbuf dir = STRBUF_INIT, err = STRBUF_INIT;
size_t gitdir_offset = gitdir->len, cwd_len;
size_t commondir_offset = commondir->len;
- struct repository_format candidate;
+ struct repository_format candidate = REPOSITORY_FORMAT_INIT;
if (strbuf_getcwd(&dir))
return -1;
strbuf_release(&err);
strbuf_setlen(commondir, commondir_offset);
strbuf_setlen(gitdir, gitdir_offset);
+ clear_repository_format(&candidate);
return -1;
}
+ clear_repository_format(&candidate);
return 0;
}
static struct strbuf cwd = STRBUF_INIT;
struct strbuf dir = STRBUF_INIT, gitdir = STRBUF_INIT;
const char *prefix = NULL;
- struct repository_format repo_fmt;
+ struct repository_format repo_fmt = REPOSITORY_FORMAT_INIT;
/*
* We may have read an incomplete configuration before
strbuf_release(&dir);
strbuf_release(&gitdir);
+ clear_repository_format(&repo_fmt);
return prefix;
}
void check_repository_format(void)
{
- struct repository_format repo_fmt;
+ struct repository_format repo_fmt = REPOSITORY_FORMAT_INIT;
check_repository_format_gently(get_git_dir(), &repo_fmt, NULL);
startup_info->have_repository = 1;
+ clear_repository_format(&repo_fmt);
}
/*
*/
#include "git-compat-util.h"
+#include "trace2.h"
/* Substitution of environment variables in shell format strings.
Copyright (C) 2003-2007 Free Software Foundation, Inc.
/* Default values for command line options. */
/* unsigned short int show_variables = 0; */
+ trace2_cmd_name("sh-i18n--envsubst");
+
switch (argc)
{
case 1:
return GIT_HASH_UNKNOWN;
}
+int hash_algo_by_length(int len)
+{
+ int i;
+ for (i = 1; i < GIT_HASH_NALGOS; i++)
+ if (len == hash_algos[i].rawsz)
+ return i;
+ return GIT_HASH_UNKNOWN;
+}
/*
* This is meant to hold a *small* number of objects that you would
/* Check if it is a missing object */
if (fetch_if_missing && repository_format_partial_clone &&
- !already_retried && r == the_repository) {
+ !already_retried && r == the_repository &&
+ !(flags & OBJECT_INFO_FOR_PREFETCH)) {
/*
* TODO Investigate having fetch_object() return
* TODO error/success and stopping the music here.
typedef const unsigned char *sha1_access_fn(size_t index, void *table);
-extern int sha1_pos(const unsigned char *sha1,
- void *table,
- size_t nr,
- sha1_access_fn fn);
+int sha1_pos(const unsigned char *sha1,
+ void *table,
+ size_t nr,
+ sha1_access_fn fn);
/*
* Searches for sha1 in table, using the given fanout table to determine the
#include "packfile.h"
#include "object-store.h"
#include "repository.h"
+#include "submodule.h"
#include "midx.h"
#include "commit-reach.h"
-static int get_oid_oneline(const char *, struct object_id *, struct commit_list *);
+static int get_oid_oneline(struct repository *r, const char *, struct object_id *, struct commit_list *);
-typedef int (*disambiguate_hint_fn)(const struct object_id *, void *);
+typedef int (*disambiguate_hint_fn)(struct repository *, const struct object_id *, void *);
struct disambiguate_state {
int len; /* length of prefix in hex chars */
char hex_pfx[GIT_MAX_HEXSZ + 1];
struct object_id bin_pfx;
+ struct repository *repo;
disambiguate_hint_fn fn;
void *cb_data;
struct object_id candidate;
static void update_candidates(struct disambiguate_state *ds, const struct object_id *current)
{
if (ds->always_call_fn) {
- ds->ambiguous = ds->fn(current, ds->cb_data) ? 1 : 0;
+ ds->ambiguous = ds->fn(ds->repo, current, ds->cb_data) ? 1 : 0;
return;
}
if (!ds->candidate_exists) {
}
if (!ds->candidate_checked) {
- ds->candidate_ok = ds->fn(&ds->candidate, ds->cb_data);
+ ds->candidate_ok = ds->fn(ds->repo, &ds->candidate, ds->cb_data);
ds->disambiguate_fn_used = 1;
ds->candidate_checked = 1;
}
}
/* if we reach this point, we know ds->candidate satisfies fn */
- if (ds->fn(current, ds->cb_data)) {
+ if (ds->fn(ds->repo, current, ds->cb_data)) {
/*
* if both current and candidate satisfy fn, we cannot
* disambiguate.
{
struct object_directory *odb;
- for (odb = the_repository->objects->odb;
- odb && !ds->ambiguous;
- odb = odb->next) {
+ for (odb = ds->repo->objects->odb; odb && !ds->ambiguous; odb = odb->next) {
int pos;
struct oid_array *loose_objects;
uint32_t num, i, first = 0;
const struct object_id *current = NULL;
+ if (p->multi_pack_index)
+ return;
+
if (open_pack_index(p) || !p->num_objects)
return;
struct multi_pack_index *m;
struct packed_git *p;
- for (m = get_multi_pack_index(the_repository); m && !ds->ambiguous;
+ for (m = get_multi_pack_index(ds->repo); m && !ds->ambiguous;
m = m->next)
unique_in_midx(m, ds);
- for (p = get_packed_git(the_repository); p && !ds->ambiguous;
+ for (p = get_packed_git(ds->repo); p && !ds->ambiguous;
p = p->next)
unique_in_pack(p, ds);
}
* same repository!
*/
ds->candidate_ok = (!ds->disambiguate_fn_used ||
- ds->fn(&ds->candidate, ds->cb_data));
+ ds->fn(ds->repo, &ds->candidate, ds->cb_data));
if (!ds->candidate_ok)
return SHORT_NAME_AMBIGUOUS;
return 0;
}
-static int disambiguate_commit_only(const struct object_id *oid, void *cb_data_unused)
+static int disambiguate_commit_only(struct repository *r,
+ const struct object_id *oid,
+ void *cb_data_unused)
{
- int kind = oid_object_info(the_repository, oid, NULL);
+ int kind = oid_object_info(r, oid, NULL);
return kind == OBJ_COMMIT;
}
-static int disambiguate_committish_only(const struct object_id *oid, void *cb_data_unused)
+static int disambiguate_committish_only(struct repository *r,
+ const struct object_id *oid,
+ void *cb_data_unused)
{
struct object *obj;
int kind;
- kind = oid_object_info(the_repository, oid, NULL);
+ kind = oid_object_info(r, oid, NULL);
if (kind == OBJ_COMMIT)
return 1;
if (kind != OBJ_TAG)
return 0;
/* We need to do this the hard way... */
- obj = deref_tag(the_repository, parse_object(the_repository, oid),
- NULL, 0);
+ obj = deref_tag(r, parse_object(r, oid), NULL, 0);
if (obj && obj->type == OBJ_COMMIT)
return 1;
return 0;
}
-static int disambiguate_tree_only(const struct object_id *oid, void *cb_data_unused)
+static int disambiguate_tree_only(struct repository *r,
+ const struct object_id *oid,
+ void *cb_data_unused)
{
- int kind = oid_object_info(the_repository, oid, NULL);
+ int kind = oid_object_info(r, oid, NULL);
return kind == OBJ_TREE;
}
-static int disambiguate_treeish_only(const struct object_id *oid, void *cb_data_unused)
+static int disambiguate_treeish_only(struct repository *r,
+ const struct object_id *oid,
+ void *cb_data_unused)
{
struct object *obj;
int kind;
- kind = oid_object_info(the_repository, oid, NULL);
+ kind = oid_object_info(r, oid, NULL);
if (kind == OBJ_TREE || kind == OBJ_COMMIT)
return 1;
if (kind != OBJ_TAG)
return 0;
/* We need to do this the hard way... */
- obj = deref_tag(the_repository, parse_object(the_repository, oid),
- NULL, 0);
+ obj = deref_tag(r, parse_object(r, oid), NULL, 0);
if (obj && (obj->type == OBJ_TREE || obj->type == OBJ_COMMIT))
return 1;
return 0;
}
-static int disambiguate_blob_only(const struct object_id *oid, void *cb_data_unused)
+static int disambiguate_blob_only(struct repository *r,
+ const struct object_id *oid,
+ void *cb_data_unused)
{
- int kind = oid_object_info(the_repository, oid, NULL);
+ int kind = oid_object_info(r, oid, NULL);
return kind == OBJ_BLOB;
}
return error("unknown hint type for '%s': %s", var, value);
}
-static int init_object_disambiguation(const char *name, int len,
+static int init_object_disambiguation(struct repository *r,
+ const char *name, int len,
struct disambiguate_state *ds)
{
int i;
ds->len = len;
ds->hex_pfx[len] = '\0';
- prepare_alt_odb(the_repository);
+ ds->repo = r;
+ prepare_alt_odb(r);
return 0;
}
struct strbuf desc = STRBUF_INIT;
int type;
- if (ds->fn && !ds->fn(oid, ds->cb_data))
+ if (ds->fn && !ds->fn(ds->repo, oid, ds->cb_data))
return 0;
- type = oid_object_info(the_repository, oid, NULL);
+ type = oid_object_info(ds->repo, oid, NULL);
if (type == OBJ_COMMIT) {
- struct commit *commit = lookup_commit(the_repository, oid);
+ struct commit *commit = lookup_commit(ds->repo, oid);
if (commit) {
struct pretty_print_context pp = {0};
pp.date_mode.type = DATE_SHORT;
format_commit_message(commit, " %ad - %s", &desc, &pp);
}
} else if (type == OBJ_TAG) {
- struct tag *tag = lookup_tag(the_repository, oid);
+ struct tag *tag = lookup_tag(ds->repo, oid);
if (!parse_tag(tag) && tag->tag)
strbuf_addf(&desc, " %s", tag->tag);
}
advise(" %s %s%s",
- find_unique_abbrev(oid, DEFAULT_ABBREV),
+ repo_find_unique_abbrev(ds->repo, oid, DEFAULT_ABBREV),
type_name(type) ? type_name(type) : "unknown type",
desc.buf);
return 0;
}
+static int repo_collect_ambiguous(struct repository *r,
+ const struct object_id *oid,
+ void *data)
+{
+ return collect_ambiguous(oid, data);
+}
+
+static struct repository *sort_ambiguous_repo;
static int sort_ambiguous(const void *a, const void *b)
{
- int a_type = oid_object_info(the_repository, a, NULL);
- int b_type = oid_object_info(the_repository, b, NULL);
+ int a_type = oid_object_info(sort_ambiguous_repo, a, NULL);
+ int b_type = oid_object_info(sort_ambiguous_repo, b, NULL);
int a_type_sort;
int b_type_sort;
return a_type_sort > b_type_sort ? 1 : -1;
}
-static enum get_oid_result get_short_oid(const char *name, int len,
+static void sort_ambiguous_oid_array(struct repository *r, struct oid_array *a)
+{
+ /* mutex will be needed if this code is to be made thread safe */
+ sort_ambiguous_repo = r;
+ QSORT(a->oid, a->nr, sort_ambiguous);
+ sort_ambiguous_repo = NULL;
+}
+
+static enum get_oid_result get_short_oid(struct repository *r,
+ const char *name, int len,
struct object_id *oid,
unsigned flags)
{
struct disambiguate_state ds;
int quietly = !!(flags & GET_OID_QUIETLY);
- if (init_object_disambiguation(name, len, &ds) < 0)
+ if (init_object_disambiguation(r, name, len, &ds) < 0)
return -1;
if (HAS_MULTI_BITS(flags & GET_OID_DISAMBIGUATORS))
find_short_packed_object(&ds);
status = finish_object_disambiguation(&ds, oid);
+ /*
+ * If we didn't find it, do the usual reprepare() slow-path,
+ * since the object may have recently been added to the repository
+ * or migrated from loose to packed.
+ */
+ if (status == MISSING_OBJECT) {
+ reprepare_packed_git(the_repository);
+ find_short_object_filename(&ds);
+ find_short_packed_object(&ds);
+ status = finish_object_disambiguation(&ds, oid);
+ }
+
if (!quietly && (status == SHORT_NAME_AMBIGUOUS)) {
struct oid_array collect = OID_ARRAY_INIT;
ds.fn = NULL;
advise(_("The candidates are:"));
- for_each_abbrev(ds.hex_pfx, collect_ambiguous, &collect);
- QSORT(collect.oid, collect.nr, sort_ambiguous);
+ repo_for_each_abbrev(r, ds.hex_pfx, collect_ambiguous, &collect);
+ sort_ambiguous_oid_array(r, &collect);
if (oid_array_for_each(&collect, show_ambiguous_object, &ds))
BUG("show_ambiguous_object shouldn't return non-zero");
return status;
}
-int for_each_abbrev(const char *prefix, each_abbrev_fn fn, void *cb_data)
+int repo_for_each_abbrev(struct repository *r, const char *prefix,
+ each_abbrev_fn fn, void *cb_data)
{
struct oid_array collect = OID_ARRAY_INIT;
struct disambiguate_state ds;
int ret;
- if (init_object_disambiguation(prefix, strlen(prefix), &ds) < 0)
+ if (init_object_disambiguation(r, prefix, strlen(prefix), &ds) < 0)
return -1;
ds.always_call_fn = 1;
- ds.fn = collect_ambiguous;
+ ds.fn = repo_collect_ambiguous;
ds.cb_data = &collect;
find_short_object_filename(&ds);
find_short_packed_object(&ds);
unsigned int init_len;
unsigned int cur_len;
char *hex;
+ struct repository *repo;
const struct object_id *oid;
};
return 0;
}
+static int repo_extend_abbrev_len(struct repository *r,
+ const struct object_id *oid,
+ void *cb_data)
+{
+ return extend_abbrev_len(oid, cb_data);
+}
+
static void find_abbrev_len_for_midx(struct multi_pack_index *m,
struct min_abbrev_data *mad)
{
struct object_id oid;
const struct object_id *mad_oid;
+ if (p->multi_pack_index)
+ return;
+
if (open_pack_index(p) || !p->num_objects)
return;
struct multi_pack_index *m;
struct packed_git *p;
- for (m = get_multi_pack_index(the_repository); m; m = m->next)
+ for (m = get_multi_pack_index(mad->repo); m; m = m->next)
find_abbrev_len_for_midx(m, mad);
- for (p = get_packed_git(the_repository); p; p = p->next)
+ for (p = get_packed_git(mad->repo); p; p = p->next)
find_abbrev_len_for_pack(p, mad);
}
-int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len)
+int repo_find_unique_abbrev_r(struct repository *r, char *hex,
+ const struct object_id *oid, int len)
{
struct disambiguate_state ds;
struct min_abbrev_data mad;
struct object_id oid_ret;
- const unsigned hexsz = the_hash_algo->hexsz;
+ const unsigned hexsz = r->hash_algo->hexsz;
if (len < 0) {
- unsigned long count = approximate_object_count();
+ unsigned long count = repo_approximate_object_count(r);
/*
* Add one because the MSB only tells us the highest bit set,
* not including the value of all the _other_ bits (so "15"
if (len == hexsz || !len)
return hexsz;
+ mad.repo = r;
mad.init_len = len;
mad.cur_len = len;
mad.hex = hex;
find_abbrev_len_packed(&mad);
- if (init_object_disambiguation(hex, mad.cur_len, &ds) < 0)
+ if (init_object_disambiguation(r, hex, mad.cur_len, &ds) < 0)
return -1;
- ds.fn = extend_abbrev_len;
+ ds.fn = repo_extend_abbrev_len;
ds.always_call_fn = 1;
ds.cb_data = (void *)&mad;
return mad.cur_len;
}
-const char *find_unique_abbrev(const struct object_id *oid, int len)
+const char *repo_find_unique_abbrev(struct repository *r,
+ const struct object_id *oid,
+ int len)
{
static int bufno;
static char hexbuffer[4][GIT_MAX_HEXSZ + 1];
char *hex = hexbuffer[bufno];
bufno = (bufno + 1) % ARRAY_SIZE(hexbuffer);
- find_unique_abbrev_r(hex, oid, len);
+ repo_find_unique_abbrev_r(r, hex, oid, len);
return hex;
}
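A sketch of how a caller might use the repository-scoped variant added here; `repo` and `oid` stand in for whatever handles the caller already holds:

	char hex[GIT_MAX_HEXSZ + 1];

	repo_find_unique_abbrev_r(repo, hex, &oid, DEFAULT_ABBREV);
	printf("shortest unique abbreviation: %s\n", hex);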
return at_mark(string, len, suffix, ARRAY_SIZE(suffix));
}
-static enum get_oid_result get_oid_1(const char *name, int len, struct object_id *oid, unsigned lookup_flags);
-static int interpret_nth_prior_checkout(const char *name, int namelen, struct strbuf *buf);
+static enum get_oid_result get_oid_1(struct repository *r, const char *name, int len, struct object_id *oid, unsigned lookup_flags);
+static int interpret_nth_prior_checkout(struct repository *r, const char *name, int namelen, struct strbuf *buf);
-static int get_oid_basic(const char *str, int len, struct object_id *oid,
- unsigned int flags)
+static int get_oid_basic(struct repository *r, const char *str, int len,
+ struct object_id *oid, unsigned int flags)
{
static const char *warn_msg = "refname '%.*s' is ambiguous.";
static const char *object_name_msg = N_(
int refs_found = 0;
int at, reflog_len, nth_prior = 0;
- if (len == the_hash_algo->hexsz && !get_oid_hex(str, oid)) {
+ if (len == r->hash_algo->hexsz && !get_oid_hex(str, oid)) {
if (warn_ambiguous_refs && warn_on_object_refname_ambiguity) {
- refs_found = dwim_ref(str, len, &tmp_oid, &real_ref);
+ refs_found = repo_dwim_ref(r, str, len, &tmp_oid, &real_ref);
if (refs_found > 0) {
warning(warn_msg, len, str);
if (advice_object_name_warning)
struct strbuf buf = STRBUF_INIT;
int detached;
- if (interpret_nth_prior_checkout(str, len, &buf) > 0) {
- detached = (buf.len == the_hash_algo->hexsz && !get_oid_hex(buf.buf, oid));
+ if (interpret_nth_prior_checkout(r, str, len, &buf) > 0) {
+ detached = (buf.len == r->hash_algo->hexsz && !get_oid_hex(buf.buf, oid));
strbuf_release(&buf);
if (detached)
return 0;
if (!len && reflog_len)
/* allow "@{...}" to mean the current branch reflog */
- refs_found = dwim_ref("HEAD", 4, oid, &real_ref);
+ refs_found = repo_dwim_ref(r, "HEAD", 4, oid, &real_ref);
else if (reflog_len)
- refs_found = dwim_log(str, len, oid, &real_ref);
+ refs_found = repo_dwim_log(r, str, len, oid, &real_ref);
else
- refs_found = dwim_ref(str, len, oid, &real_ref);
+ refs_found = repo_dwim_ref(r, str, len, oid, &real_ref);
if (!refs_found)
return -1;
if (warn_ambiguous_refs && !(flags & GET_OID_QUIETLY) &&
(refs_found > 1 ||
- !get_short_oid(str, len, &tmp_oid, GET_OID_QUIETLY)))
+ !get_short_oid(r, str, len, &tmp_oid, GET_OID_QUIETLY)))
warning(warn_msg, len, str);
if (reflog_len) {
return -1;
}
}
- if (read_ref_at(real_ref, flags, at_time, nth, oid, NULL,
+ if (read_ref_at(get_main_ref_store(r),
+ real_ref, flags, at_time, nth, oid, NULL,
&co_time, &co_tz, &co_cnt)) {
if (!len) {
if (starts_with(real_ref, "refs/heads/")) {
return 0;
}
-static enum get_oid_result get_parent(const char *name, int len,
+static enum get_oid_result get_parent(struct repository *r,
+ const char *name, int len,
struct object_id *result, int idx)
{
struct object_id oid;
- enum get_oid_result ret = get_oid_1(name, len, &oid,
+ enum get_oid_result ret = get_oid_1(r, name, len, &oid,
GET_OID_COMMITTISH);
struct commit *commit;
struct commit_list *p;
if (ret)
return ret;
- commit = lookup_commit_reference(the_repository, &oid);
+ commit = lookup_commit_reference(r, &oid);
if (parse_commit(commit))
return MISSING_OBJECT;
if (!idx) {
return MISSING_OBJECT;
}
-static enum get_oid_result get_nth_ancestor(const char *name, int len,
+static enum get_oid_result get_nth_ancestor(struct repository *r,
+ const char *name, int len,
struct object_id *result,
int generation)
{
struct commit *commit;
int ret;
- ret = get_oid_1(name, len, &oid, GET_OID_COMMITTISH);
+ ret = get_oid_1(r, name, len, &oid, GET_OID_COMMITTISH);
if (ret)
return ret;
- commit = lookup_commit_reference(the_repository, &oid);
+ commit = lookup_commit_reference(r, &oid);
if (!commit)
return MISSING_OBJECT;
return FOUND;
}
-struct object *peel_to_type(const char *name, int namelen,
- struct object *o, enum object_type expected_type)
+struct object *repo_peel_to_type(struct repository *r, const char *name, int namelen,
+ struct object *o, enum object_type expected_type)
{
if (name && !namelen)
namelen = strlen(name);
while (1) {
- if (!o || (!o->parsed && !parse_object(the_repository, &o->oid)))
+ if (!o || (!o->parsed && !parse_object(r, &o->oid)))
return NULL;
if (expected_type == OBJ_ANY || o->type == expected_type)
return o;
if (o->type == OBJ_TAG)
o = ((struct tag*) o)->tagged;
else if (o->type == OBJ_COMMIT)
- o = &(get_commit_tree(((struct commit *)o))->object);
+ o = &(repo_get_commit_tree(r, ((struct commit *)o))->object);
else {
if (name)
error("%.*s: expected %s type, but the object "
}
}
-static int peel_onion(const char *name, int len, struct object_id *oid,
- unsigned lookup_flags)
+static int peel_onion(struct repository *r, const char *name, int len,
+ struct object_id *oid, unsigned lookup_flags)
{
struct object_id outer;
const char *sp;
else if (expected_type == OBJ_TREE)
lookup_flags |= GET_OID_TREEISH;
- if (get_oid_1(name, sp - name - 2, &outer, lookup_flags))
+ if (get_oid_1(r, name, sp - name - 2, &outer, lookup_flags))
return -1;
- o = parse_object(the_repository, &outer);
+ o = parse_object(r, &outer);
if (!o)
return -1;
if (!expected_type) {
- o = deref_tag(the_repository, o, name, sp - name - 2);
- if (!o || (!o->parsed && !parse_object(the_repository, &o->oid)))
+ o = deref_tag(r, o, name, sp - name - 2);
+ if (!o || (!o->parsed && !parse_object(r, &o->oid)))
return -1;
oidcpy(oid, &o->oid);
return 0;
* if we do not get the needed object, we should
* barf.
*/
- o = peel_to_type(name, len, o, expected_type);
+ o = repo_peel_to_type(r, name, len, o, expected_type);
if (!o)
return -1;
prefix = xstrndup(sp + 1, name + len - 1 - (sp + 1));
commit_list_insert((struct commit *)o, &list);
- ret = get_oid_oneline(prefix, oid, list);
+ ret = get_oid_oneline(r, prefix, oid, list);
free(prefix);
return ret;
}
return 0;
}
-static int get_describe_name(const char *name, int len, struct object_id *oid)
+static int get_describe_name(struct repository *r,
+ const char *name, int len,
+ struct object_id *oid)
{
const char *cp;
unsigned flags = GET_OID_QUIETLY | GET_OID_COMMIT;
if (ch == 'g' && cp[-1] == '-') {
cp++;
len -= cp - name;
- return get_short_oid(cp, len, oid, flags);
+ return get_short_oid(r,
+ cp, len, oid, flags);
}
}
}
return -1;
}
-static enum get_oid_result get_oid_1(const char *name, int len,
+static enum get_oid_result get_oid_1(struct repository *r,
+ const char *name, int len,
struct object_id *oid,
unsigned lookup_flags)
{
if (!num && len1 == len - 1)
num = 1;
if (has_suffix == '^')
- return get_parent(name, len1, oid, num);
+ return get_parent(r, name, len1, oid, num);
/* else if (has_suffix == '~') -- goes without saying */
- return get_nth_ancestor(name, len1, oid, num);
+ return get_nth_ancestor(r, name, len1, oid, num);
}
- ret = peel_onion(name, len, oid, lookup_flags);
+ ret = peel_onion(r, name, len, oid, lookup_flags);
if (!ret)
return FOUND;
- ret = get_oid_basic(name, len, oid, lookup_flags);
+ ret = get_oid_basic(r, name, len, oid, lookup_flags);
if (!ret)
return FOUND;
/* It could be describe output that is "SOMETHING-gXXXX" */
- ret = get_describe_name(name, len, oid);
+ ret = get_describe_name(r, name, len, oid);
if (!ret)
return FOUND;
- return get_short_oid(name, len, oid, lookup_flags);
+ return get_short_oid(r, name, len, oid, lookup_flags);
}
/*
/* Remember to update object flag allocation in object.h */
#define ONELINE_SEEN (1u<<20)
+struct handle_one_ref_cb {
+ struct repository *repo;
+ struct commit_list **list;
+};
+
static int handle_one_ref(const char *path, const struct object_id *oid,
int flag, void *cb_data)
{
- struct commit_list **list = cb_data;
- struct object *object = parse_object(the_repository, oid);
+ struct handle_one_ref_cb *cb = cb_data;
+ struct commit_list **list = cb->list;
+ struct object *object = parse_object(cb->repo, oid);
if (!object)
return 0;
if (object->type == OBJ_TAG) {
- object = deref_tag(the_repository, object, path,
+ object = deref_tag(cb->repo, object, path,
strlen(path));
if (!object)
return 0;
return 0;
}
-static int get_oid_oneline(const char *prefix, struct object_id *oid,
- struct commit_list *list)
+static int get_oid_oneline(struct repository *r,
+ const char *prefix, struct object_id *oid,
+ struct commit_list *list)
{
struct commit_list *backup = NULL, *l;
int found = 0;
int matches;
commit = pop_most_recent_commit(&list, ONELINE_SEEN);
- if (!parse_object(the_repository, &commit->object.oid))
+ if (!parse_object(r, &commit->object.oid))
continue;
buf = get_commit_buffer(commit, NULL);
p = strstr(buf, "\n\n");
* Parse @{-N} syntax, return the number of characters parsed
* if successful; otherwise signal an error with negative value.
*/
-static int interpret_nth_prior_checkout(const char *name, int namelen,
+static int interpret_nth_prior_checkout(struct repository *r,
+ const char *name, int namelen,
struct strbuf *buf)
{
long nth;
cb.remaining = nth;
strbuf_init(&cb.buf, 20);
- retval = 0;
- if (0 < for_each_reflog_ent_reverse("HEAD", grab_nth_branch_switch, &cb)) {
+ retval = refs_for_each_reflog_ent_reverse(get_main_ref_store(r),
+ "HEAD", grab_nth_branch_switch, &cb);
+ if (0 < retval) {
strbuf_reset(buf);
strbuf_addbuf(buf, &cb.buf);
retval = brace - name + 1;
- }
+ } else
+ retval = 0;
strbuf_release(&cb.buf);
return retval;
}
-int get_oid_mb(const char *name, struct object_id *oid)
+int repo_get_oid_mb(struct repository *r,
+ const char *name,
+ struct object_id *oid)
{
struct commit *one, *two;
struct commit_list *mbs;
dots = strstr(name, "...");
if (!dots)
- return get_oid(name, oid);
+ return repo_get_oid(r, name, oid);
if (dots == name)
- st = get_oid("HEAD", &oid_tmp);
+ st = repo_get_oid(r, "HEAD", &oid_tmp);
else {
struct strbuf sb;
strbuf_init(&sb, dots - name);
strbuf_add(&sb, name, dots - name);
- st = get_oid_committish(sb.buf, &oid_tmp);
+ st = repo_get_oid_committish(r, sb.buf, &oid_tmp);
strbuf_release(&sb);
}
if (st)
return st;
- one = lookup_commit_reference_gently(the_repository, &oid_tmp, 0);
+ one = lookup_commit_reference_gently(r, &oid_tmp, 0);
if (!one)
return -1;
- if (get_oid_committish(dots[3] ? (dots + 3) : "HEAD", &oid_tmp))
+ if (repo_get_oid_committish(r, dots[3] ? (dots + 3) : "HEAD", &oid_tmp))
return -1;
- two = lookup_commit_reference_gently(the_repository, &oid_tmp, 0);
+ two = lookup_commit_reference_gently(r, &oid_tmp, 0);
if (!two)
return -1;
+ if (r != the_repository)
+ BUG("sorry get_merge_bases() can't take struct repository yet");
mbs = get_merge_bases(one, two);
if (!mbs || mbs->next)
st = -1;
return 1;
}
-static int reinterpret(const char *name, int namelen, int len,
+static int reinterpret(struct repository *r,
+ const char *name, int namelen, int len,
struct strbuf *buf, unsigned allowed)
{
/* we have extra data, which might need further processing */
int ret;
strbuf_add(buf, name + len, namelen - len);
- ret = interpret_branch_name(buf->buf, buf->len, &tmp, allowed);
+ ret = repo_interpret_branch_name(r, buf->buf, buf->len, &tmp, allowed);
/* that data was not interpreted, remove our cruft */
if (ret < 0) {
strbuf_setlen(buf, used);
return ret - used + len;
}
-static void set_shortened_ref(struct strbuf *buf, const char *ref)
+static void set_shortened_ref(struct repository *r, struct strbuf *buf, const char *ref)
{
- char *s = shorten_unambiguous_ref(ref, 0);
+ char *s = refs_shorten_unambiguous_ref(get_main_ref_store(r), ref, 0);
strbuf_reset(buf);
strbuf_addstr(buf, s);
free(s);
return 0;
}
-static int interpret_branch_mark(const char *name, int namelen,
+static int interpret_branch_mark(struct repository *r,
+ const char *name, int namelen,
int at, struct strbuf *buf,
int (*get_mark)(const char *, int),
const char *(*get_data)(struct branch *,
if (!branch_interpret_allowed(value, allowed))
return -1;
- set_shortened_ref(buf, value);
+ set_shortened_ref(r, buf, value);
return len + at;
}
-int interpret_branch_name(const char *name, int namelen, struct strbuf *buf,
- unsigned allowed)
+int repo_interpret_branch_name(struct repository *r,
+ const char *name, int namelen,
+ struct strbuf *buf,
+ unsigned allowed)
{
char *at;
const char *start;
namelen = strlen(name);
if (!allowed || (allowed & INTERPRET_BRANCH_LOCAL)) {
- len = interpret_nth_prior_checkout(name, namelen, buf);
+ len = interpret_nth_prior_checkout(r, name, namelen, buf);
if (!len) {
return len; /* syntax Ok, not enough switches */
} else if (len > 0) {
if (len == namelen)
return len; /* consumed all */
else
- return reinterpret(name, namelen, len, buf, allowed);
+ return reinterpret(r, name, namelen, len, buf, allowed);
}
}
if (!allowed || (allowed & INTERPRET_BRANCH_HEAD)) {
len = interpret_empty_at(name, namelen, at - name, buf);
if (len > 0)
- return reinterpret(name, namelen, len, buf,
+ return reinterpret(r, name, namelen, len, buf,
allowed);
}
- len = interpret_branch_mark(name, namelen, at - name, buf,
+ len = interpret_branch_mark(r, name, namelen, at - name, buf,
upstream_mark, branch_get_upstream,
allowed);
if (len > 0)
return len;
- len = interpret_branch_mark(name, namelen, at - name, buf,
+ len = interpret_branch_mark(r, name, namelen, at - name, buf,
push_mark, branch_get_push,
allowed);
if (len > 0)
* This is like "get_oid_basic()", except it allows "object ID expressions",
* notably "xyz^" for "parent of xyz"
*/
-int get_oid(const char *name, struct object_id *oid)
+int repo_get_oid(struct repository *r, const char *name, struct object_id *oid)
{
struct object_context unused;
- return get_oid_with_context(the_repository, name, 0, oid, &unused);
+ return get_oid_with_context(r, name, 0, oid, &unused);
}
+/*
+ * This returns a non-zero value if the string (built using printf
+ * format and the given arguments) is not a valid object.
+ */
+int get_oidf(struct object_id *oid, const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+ struct strbuf sb = STRBUF_INIT;
+
+ va_start(ap, fmt);
+ strbuf_vaddf(&sb, fmt, ap);
+ va_end(ap);
+
+ ret = get_oid(sb.buf, oid);
+ strbuf_release(&sb);
+
+ return ret;
+}
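For illustration, a caller can now build the object name with a format string instead of an intermediate strbuf; the "HEAD:Makefile" spelling is only an example:

	struct object_id oid;

	if (get_oidf(&oid, "%s:%s", "HEAD", "Makefile"))
		die("'HEAD:Makefile' does not name a valid object");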
/*
* Many callers know that the user meant to name a commit-ish by
* commit-ish. It is merely to give a hint to the disambiguation
* machinery.
*/
-int get_oid_committish(const char *name, struct object_id *oid)
+int repo_get_oid_committish(struct repository *r,
+ const char *name,
+ struct object_id *oid)
{
struct object_context unused;
- return get_oid_with_context(the_repository,
- name, GET_OID_COMMITTISH,
+ return get_oid_with_context(r, name, GET_OID_COMMITTISH,
oid, &unused);
}
-int get_oid_treeish(const char *name, struct object_id *oid)
+int repo_get_oid_treeish(struct repository *r,
+ const char *name,
+ struct object_id *oid)
{
struct object_context unused;
- return get_oid_with_context(the_repository,
- name, GET_OID_TREEISH,
+ return get_oid_with_context(r, name, GET_OID_TREEISH,
oid, &unused);
}
-int get_oid_commit(const char *name, struct object_id *oid)
+int repo_get_oid_commit(struct repository *r,
+ const char *name,
+ struct object_id *oid)
{
struct object_context unused;
- return get_oid_with_context(the_repository,
- name, GET_OID_COMMIT,
+ return get_oid_with_context(r, name, GET_OID_COMMIT,
oid, &unused);
}
-int get_oid_tree(const char *name, struct object_id *oid)
+int repo_get_oid_tree(struct repository *r,
+ const char *name,
+ struct object_id *oid)
{
struct object_context unused;
- return get_oid_with_context(the_repository,
- name, GET_OID_TREE,
+ return get_oid_with_context(r, name, GET_OID_TREE,
oid, &unused);
}
-int get_oid_blob(const char *name, struct object_id *oid)
+int repo_get_oid_blob(struct repository *r,
+ const char *name,
+ struct object_id *oid)
{
struct object_context unused;
- return get_oid_with_context(the_repository,
- name, GET_OID_BLOB,
+ return get_oid_with_context(r, name, GET_OID_BLOB,
oid, &unused);
}
int object_name_len)
{
struct object_id oid;
- unsigned mode;
+ unsigned short mode;
if (!prefix)
prefix = "";
}
/* Must be called only when :stage:filename doesn't exist. */
-static void diagnose_invalid_index_path(struct index_state *istate,
+static void diagnose_invalid_index_path(struct repository *r,
int stage,
const char *prefix,
const char *filename)
{
+ struct index_state *istate = r->index;
const struct cache_entry *ce;
int pos;
unsigned namelen = strlen(filename);
ce_stage(ce), filename);
}
- if (file_exists(filename))
+ if (repo_file_exists(r, filename))
die("Path '%s' exists on disk, but not in the index.", filename);
if (is_missing_file_error(errno))
die("Path '%s' does not exist (neither on disk nor in the index).",
}
-static char *resolve_relative_path(const char *rel)
+static char *resolve_relative_path(struct repository *r, const char *rel)
{
if (!starts_with(rel, "./") && !starts_with(rel, "../"))
return NULL;
- if (!is_inside_work_tree())
+ if (r != the_repository || !is_inside_work_tree())
die("relative path syntax can't be used outside working tree.");
/* die() inside prefix_path() if resolved path is outside worktree */
memset(oc, 0, sizeof(*oc));
oc->mode = S_IFINVALID;
strbuf_init(&oc->symlink_path, 0);
- ret = get_oid_1(name, namelen, oid, flags);
+ ret = get_oid_1(repo, name, namelen, oid, flags);
if (!ret)
return ret;
/*
char *new_path = NULL;
int pos;
if (!only_to_die && namelen > 2 && name[1] == '/') {
+ struct handle_one_ref_cb cb;
struct commit_list *list = NULL;
- for_each_ref(handle_one_ref, &list);
- head_ref(handle_one_ref, &list);
+ cb.repo = repo;
+ cb.list = &list;
+ refs_for_each_ref(repo->refs, handle_one_ref, &cb);
+ refs_head_ref(repo->refs, handle_one_ref, &cb);
commit_list_sort_by_date(&list);
- return get_oid_oneline(name + 2, oid, list);
+ return get_oid_oneline(repo, name + 2, oid, list);
}
if (namelen < 3 ||
name[2] != ':' ||
stage = name[1] - '0';
cp = name + 3;
}
- new_path = resolve_relative_path(cp);
+ new_path = resolve_relative_path(repo, cp);
if (!new_path) {
namelen = namelen - (cp - name);
} else {
oc->path = xstrdup(cp);
if (!repo->index->cache)
- repo_read_index(the_repository);
+ repo_read_index(repo);
pos = index_name_pos(repo->index, cp, namelen);
if (pos < 0)
pos = -pos - 1;
pos++;
}
if (only_to_die && name[1] && name[1] != '/')
- diagnose_invalid_index_path(repo->index, stage, prefix, cp);
+ diagnose_invalid_index_path(repo, stage, prefix, cp);
free(new_path);
return -1;
}
sub_flags &= ~GET_OID_DISAMBIGUATORS;
sub_flags |= GET_OID_TREEISH;
- if (!get_oid_1(name, len, &tree_oid, sub_flags)) {
+ if (!get_oid_1(repo, name, len, &tree_oid, sub_flags)) {
const char *filename = cp+1;
char *new_filename = NULL;
- new_filename = resolve_relative_path(filename);
+ new_filename = resolve_relative_path(repo, filename);
if (new_filename)
filename = new_filename;
+ /*
+ * NEEDSWORK: Eventually get_tree_entry*() should
+ * learn to take struct repository directly and we
+ * would not need to inject submodule odb to the
+ * in-core odb.
+ */
+ if (repo != the_repository)
+ add_to_alternates_memory(repo->objects->odb->path);
if (flags & GET_OID_FOLLOW_SYMLINKS) {
ret = get_tree_entry_follow_symlinks(&tree_oid,
filename, oid, &oc->symlink_path,
* exist in 'HEAD'" when given "HEAD:doc", or it may return in which case
* you have a chance to diagnose the error further.
*/
-void maybe_die_on_misspelt_object_name(const char *name, const char *prefix)
+void maybe_die_on_misspelt_object_name(struct repository *r,
+ const char *name,
+ const char *prefix)
{
struct object_context oc;
struct object_id oid;
- get_oid_with_context_1(the_repository, name, GET_OID_ONLY_TO_DIE,
+ get_oid_with_context_1(r, name, GET_OID_ONLY_TO_DIE,
prefix, &oid, &oc);
}
-Subproject commit 232357eb2ea0397388254a4b188333a227bf5b10
+Subproject commit 16033998da4b273aebd92c84b1e1b12e4aaf7009
#endif
/*ENDIANNESS SELECTION*/
+#ifndef SHA1DC_FORCE_ALIGNED_ACCESS
#if defined(SHA1DC_FORCE_UNALIGNED_ACCESS) || defined(SHA1DC_ON_INTEL_LIKE_PROCESSOR)
#define SHA1DC_ALLOW_UNALIGNED_ACCESS
-#endif /*UNALIGNMENT DETECTION*/
-
+#endif /*UNALIGNED ACCESS DETECTION*/
+#endif /*FORCE ALIGNED ACCESS*/
#define rotate_right(x,n) (((x)>>(n))|((x)<<(32-(n))))
#define rotate_left(x,n) (((x)<<(n))|((x)>>(32-(n))))
ewah_each_bit(si->replace_bitmap, replace_entry, istate);
ewah_each_bit(si->delete_bitmap, mark_entry_for_delete, istate);
if (si->nr_deletions)
- remove_marked_cache_entries(istate);
+ remove_marked_cache_entries(istate, 0);
for (i = si->nr_replacements; i < si->saved_cache_nr; i++) {
if (!ce_namelen(si->saved_cache[i]))
void remove_split_index(struct index_state *istate)
{
if (istate->split_index) {
- /*
- * When removing the split index, we need to move
- * ownership of the mem_pool associated with the
- * base index to the main index. There may be cache entries
- * allocated from the base's memory pool that are shared with
- * the_index.cache[].
- */
- mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
+ if (istate->split_index->base) {
+ /*
+ * When removing the split index, we need to move
+ * ownership of the mem_pool associated with the
+ * base index to the main index. There may be cache entries
+ * allocated from the base's memory pool that are shared with
+ * the_index.cache[].
+ */
+ mem_pool_combine(istate->ce_mem_pool,
+ istate->split_index->base->ce_mem_pool);
- /*
- * The split index no longer owns the mem_pool backing
- * its cache array. As we are discarding this index,
- * mark the index as having no cache entries, so it
- * will not attempt to clean up the cache entries or
- * validate them.
- */
- if (istate->split_index->base)
+ /*
+ * The split index no longer owns the mem_pool backing
+ * its cache array. As we are discarding this index,
+ * mark the index as having no cache entries, so it
+ * will not attempt to clean up the cache entries or
+ * validate them.
+ */
istate->split_index->base->cache_nr = 0;
+ }
/*
* We can discard the split index because its
strbuf_splice(sb, pos, 0, data, len);
}
+void strbuf_vinsertf(struct strbuf *sb, size_t pos, const char *fmt, va_list ap)
+{
+ int len, len2;
+ char save;
+ va_list cp;
+
+ if (pos > sb->len)
+ die("`pos' is too far after the end of the buffer");
+ va_copy(cp, ap);
+ len = vsnprintf(sb->buf + sb->len, 0, fmt, cp);
+ va_end(cp);
+ if (len < 0)
+ BUG("your vsnprintf is broken (returned %d)", len);
+ if (!len)
+ return; /* nothing to do */
+ if (unsigned_add_overflows(sb->len, len))
+ die("you want to use way too much memory");
+ strbuf_grow(sb, len);
+ memmove(sb->buf + pos + len, sb->buf + pos, sb->len - pos);
+ /* vsnprintf() will append a NUL, overwriting one of our characters */
+ save = sb->buf[pos + len];
+ len2 = vsnprintf(sb->buf + pos, len + 1, fmt, ap);
+ sb->buf[pos + len] = save;
+ if (len2 != len)
+ BUG("your vsnprintf is broken (returns inconsistent lengths)");
+ strbuf_setlen(sb, sb->len + len);
+}
+
+void strbuf_insertf(struct strbuf *sb, size_t pos, const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ strbuf_vinsertf(sb, pos, fmt, ap);
+ va_end(ap);
+}
+
void strbuf_remove(struct strbuf *sb, size_t pos, size_t len)
{
strbuf_splice(sb, pos, len, "", 0);
strbuf_setlen(sb, sb->len + sb2->len);
}
+const char *strbuf_join_argv(struct strbuf *buf,
+ int argc, const char **argv, char delim)
+{
+ if (!argc)
+ return buf->buf;
+
+ strbuf_addstr(buf, *argv);
+ while (--argc) {
+ strbuf_addch(buf, delim);
+ strbuf_addstr(buf, *(++argv));
+ }
+
+ return buf->buf;
+}
+
void strbuf_addchars(struct strbuf *sb, int c, size_t n)
{
strbuf_grow(sb, n);
}
}
+size_t strbuf_expand_literal_cb(struct strbuf *sb,
+ const char *placeholder,
+ void *context)
+{
+ int ch;
+
+ switch (placeholder[0]) {
+ case 'n': /* newline */
+ strbuf_addch(sb, '\n');
+ return 1;
+ case 'x':
+ /* %x00 == NUL, %x0a == LF, etc. */
+ ch = hex2chr(placeholder + 1);
+ if (ch < 0)
+ return 0;
+ strbuf_addch(sb, ch);
+ return 3;
+ }
+ return 0;
+}
+
size_t strbuf_expand_dict_cb(struct strbuf *sb, const char *placeholder,
void *context)
{
*/
void strbuf_insert(struct strbuf *sb, size_t pos, const void *, size_t);
+/**
+ * Insert data at the given position of the buffer, formatted with a printf
+ * format string. The contents will be shifted, not overwritten.
+ */
+void strbuf_vinsertf(struct strbuf *sb, size_t pos, const char *fmt,
+ va_list ap);
+
+void strbuf_insertf(struct strbuf *sb, size_t pos, const char *fmt, ...);
+
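A small sketch of the new insertion helper; the strings are illustrative:

	struct strbuf sb = STRBUF_INIT;

	strbuf_addstr(&sb, "world");
	strbuf_insertf(&sb, 0, "hello %s ", "there"); /* sb.buf == "hello there world" */
	strbuf_release(&sb);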
/**
* Remove given amount of data from a given position of the buffer.
*/
*/
void strbuf_addbuf(struct strbuf *sb, const struct strbuf *sb2);
+/**
+ * Join the arguments into a buffer. `delim` is put between every
+ * two arguments.
+ */
+const char *strbuf_join_argv(struct strbuf *buf, int argc,
+ const char **argv, char delim);
+
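For example, joining an argv-style array with spaces (the arguments are illustrative):

	const char *args[] = { "git", "log", "--oneline" };
	struct strbuf cmd = STRBUF_INIT;

	strbuf_join_argv(&cmd, 3, args, ' '); /* cmd.buf == "git log --oneline" */
	strbuf_release(&cmd);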
/**
* This function can be used to expand a format string containing
* placeholders. To that end, it parses the string and calls the specified
expand_fn_t fn,
void *context);
+/**
+ * Used as callback for `strbuf_expand` to only expand literals
+ * (i.e. %n and %xNN). The context argument is ignored.
+ */
+size_t strbuf_expand_literal_cb(struct strbuf *sb,
+ const char *placeholder,
+ void *context);
+
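A sketch of using this callback with `strbuf_expand()`; the format string is illustrative:

	struct strbuf out = STRBUF_INIT;

	strbuf_expand(&out, "one%ntwo%x21", strbuf_expand_literal_cb, NULL);
	/* out.buf == "one\ntwo!" */
	strbuf_release(&out);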
/**
* Used as callback for `strbuf_expand()`, expects an array of
* struct strbuf_expand_dict_entry as context, i.e. pairs of
/* opaque */
struct git_istream;
-extern struct git_istream *open_istream(const struct object_id *, enum object_type *, unsigned long *, struct stream_filter *);
-extern int close_istream(struct git_istream *);
-extern ssize_t read_istream(struct git_istream *, void *, size_t);
+struct git_istream *open_istream(const struct object_id *, enum object_type *, unsigned long *, struct stream_filter *);
+int close_istream(struct git_istream *);
+ssize_t read_istream(struct git_istream *, void *, size_t);
-extern int stream_blob_to_fd(int fd, const struct object_id *, struct stream_filter *, int can_seek);
+int stream_blob_to_fd(int fd, const struct object_id *, struct stream_filter *, int can_seek);
#endif /* STREAMING_H */
* Remove the given string from the sorted list. If the string
* doesn't exist, the list is not altered.
*/
-extern void string_list_remove(struct string_list *list, const char *string,
- int free_util);
+void string_list_remove(struct string_list *list, const char *string,
+ int free_util);
/**
* Check if the given string is part of a sorted list. If it is part of the list,
process->out = -1;
process->clean_on_exit = 1;
process->clean_on_exit_handler = subprocess_exit_handler;
+ process->trace2_child_class = "subprocess";
err = start_command(process);
if (err) {
/* subprocess functions */
/* Function to test two subprocess hashmap entries for equality. */
-extern int cmd2process_cmp(const void *unused_cmp_data,
- const void *e1,
- const void *e2,
- const void *unused_keydata);
+int cmd2process_cmp(const void *unused_cmp_data,
+ const void *e1,
+ const void *e2,
+ const void *unused_keydata);
/*
* User-supplied function to initialize the sub-process. This is
default:
if (!strcmp(arg, "on-demand"))
return RECURSE_SUBMODULES_ON_DEMAND;
-
+ /*
+ * Please update $__git_fetch_recurse_submodules in
+ * git-completion.bash when you add new options.
+ */
if (die_on_error)
die("bad %s argument: %s", opt, arg);
else
return RECURSE_SUBMODULES_CHECK;
else if (!strcmp(arg, "only"))
return RECURSE_SUBMODULES_ONLY;
+ /*
+ * Please update $__git_push_recurse_submodules in
+ * git-completion.bash when you add new modes.
+ */
else if (die_on_error)
die("bad %s argument: %s", opt, arg);
else
const struct config_options opts = { 0 };
struct object_id oid;
char *file;
+ char *oidstr = NULL;
file = repo_worktree_path(repo, GITMODULES_FILE);
if (file_exists(file)) {
config_source.file = file;
- } else if (repo->submodule_prefix) {
- /*
- * When get_oid and config_with_options, used below,
- * become able to work on a specific repository, this
- * warning branch can be removed.
- */
- warning("nested submodules without %s in the working tree are not supported yet",
- GITMODULES_FILE);
- goto out;
- } else if (get_oid(GITMODULES_INDEX, &oid) >= 0) {
- config_source.blob = GITMODULES_INDEX;
- } else if (get_oid(GITMODULES_HEAD, &oid) >= 0) {
- config_source.blob = GITMODULES_HEAD;
+ } else if (repo_get_oid(repo, GITMODULES_INDEX, &oid) >= 0 ||
+ repo_get_oid(repo, GITMODULES_HEAD, &oid) >= 0) {
+ config_source.blob = oidstr = xstrdup(oid_to_hex(&oid));
+ if (repo != the_repository)
+ add_to_alternates_memory(repo->objects->odb->path);
} else {
goto out;
}
config_with_options(fn, data, &config_source, &opts);
out:
+ free(oidstr);
free(file);
}
}
struct submodule_cache;
struct repository;
-extern void submodule_cache_free(struct submodule_cache *cache);
+void submodule_cache_free(struct submodule_cache *cache);
-extern int parse_submodule_fetchjobs(const char *var, const char *value);
-extern int parse_fetch_recurse_submodules_arg(const char *opt, const char *arg);
+int parse_submodule_fetchjobs(const char *var, const char *value);
+int parse_fetch_recurse_submodules_arg(const char *opt, const char *arg);
struct option;
-extern int option_fetch_parse_recurse_submodules(const struct option *opt,
- const char *arg, int unset);
-extern int parse_update_recurse_submodules_arg(const char *opt, const char *arg);
-extern int parse_push_recurse_submodules_arg(const char *opt, const char *arg);
-extern void repo_read_gitmodules(struct repository *repo);
-extern void gitmodules_config_oid(const struct object_id *commit_oid);
+int option_fetch_parse_recurse_submodules(const struct option *opt,
+ const char *arg, int unset);
+int parse_update_recurse_submodules_arg(const char *opt, const char *arg);
+int parse_push_recurse_submodules_arg(const char *opt, const char *arg);
+void repo_read_gitmodules(struct repository *repo);
+void gitmodules_config_oid(const struct object_id *commit_oid);
const struct submodule *submodule_from_name(struct repository *r,
const struct object_id *commit_or_tree,
const char *name);
* New helpers to retrieve arbitrary configuration from the '.gitmodules' file
* should NOT be added.
*/
-extern void fetch_config_from_gitmodules(int *max_children, int *recurse_submodules);
-extern void update_clone_config_from_gitmodules(int *max_jobs);
+void fetch_config_from_gitmodules(int *max_children, int *recurse_submodules);
+void update_clone_config_from_gitmodules(int *max_jobs);
#endif /* SUBMODULE_CONFIG_H */
diffopt->flags.ignore_dirty_submodules = 1;
else if (strcmp(arg, "none"))
die("bad --ignore-submodules argument: %s", arg);
+ /*
+ * Please update _git_status() in git-completion.bash when you
+ * add new options
+ */
}
static int prepare_submodule_summary(struct rev_info *rev, const char *path,
if (start_command(&cp))
die("Could not run 'git rev-list <commits> --not --remotes -n 1' command in submodule %s",
path);
- if (strbuf_read(&buf, cp.out, 41))
+ if (strbuf_read(&buf, cp.out, the_hash_algo->hexsz + 1))
needs_pushing = 1;
finish_command(&cp);
close(cp.out);
struct oid_array *commits;
if (retvalue)
+ /*
+ * NEEDSWORK: This indicates that the overall fetch
+ * failed, even though there may be a subsequent fetch
+ * by commit hash that might work. It may be a good
+ * idea to not indicate failure in this case, and only
+ * indicate failure if the subsequent fetch fails.
+ */
spf->result = 1;
if (!task || !task->sub)
calculate_changed_submodule_paths(r, &spf.changed_submodule_names);
string_list_sort(&spf.changed_submodule_names);
- run_processes_parallel(max_parallel_jobs,
- get_next_submodule,
- fetch_start_failure,
- fetch_finish,
- &spf);
+ run_processes_parallel_tr2(max_parallel_jobs,
+ get_next_submodule,
+ fetch_start_failure,
+ fetch_finish,
+ &spf,
+ "submodule", "parallel/fetch");
argv_array_clear(&spf.args);
out:
variable to "1" or "0", respectively.
--stress::
---stress=<N>::
Run the test script repeatedly in multiple parallel jobs until
one of them fails. Useful for reproducing rare failures in
flaky tests. The number of parallel jobs is, in order of
- precedence: <N>, or the value of the GIT_TEST_STRESS_LOAD
+ precedence: the value of the GIT_TEST_STRESS_LOAD
environment variable, or twice the number of available
processors (as shown by the 'getconf' utility), or 8.
Implies `--verbose -x --immediate` to get the most information
'.stress-<nr>' suffix, and the trash directory of the failed
test job is renamed to end with a '.stress-failed' suffix.
+--stress-jobs=<N>::
+ Override the number of parallel jobs. Implies `--stress`.
+
--stress-limit=<N>::
When combined with --stress run the test script repeatedly
this many times in each of the parallel jobs or until one of
- them fails, whichever comes first.
+ them fails, whichever comes first. Implies `--stress`.
You can also set the GIT_TEST_INSTALLED environment variable to
the bindir of an existing git installation to test that installation.
GIT_TEST_SPLIT_INDEX=<boolean> forces split-index mode on the whole
test suite. Accept any boolean values that are accepted by git-config.
+GIT_TEST_PROTOCOL_VERSION=<n>, when set, overrides the
+'protocol.version' setting to <n> whenever the configured value is lower than <n>.
+
GIT_TEST_FULL_IN_PACK_ARRAY=<boolean> exercises the uncommon
pack-objects code path where there are more than 1024 packs even if
the actual number of packs in repository is below this limit. Accept
GIT_TEST_PRELOAD_INDEX=<boolean> exercises the preload-index code path
by overriding the minimum number of cache entries required per thread.
-GIT_TEST_REBASE_USE_BUILTIN=<boolean>, when false, disables the
-builtin version of git-rebase. See 'rebase.useBuiltin' in
+GIT_TEST_STASH_USE_BUILTIN=<boolean>, when false, disables the
+built-in version of git-stash. See 'stash.useBuiltin' in
git-config(1).
GIT_TEST_INDEX_THREADS=<n> enables exercising the multi-threaded loading
fetch-pack to not request sideband-all (even if the server advertises
sideband-all).
+GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=<boolean>, when true (which is
+the default when running tests), errors out when an abbreviated option
+is used.
+
Naming Tests
------------
...
'
+ - test_atexit <script>
+
+ Prepend <script> to a list of commands to run unconditionally to
+ clean up before the test script exits, e.g. to stop a daemon:
+
+ test_expect_success 'test git daemon' '
+ git daemon &
+ daemon_pid=$! &&
+ test_atexit 'kill $daemon_pid' &&
+ hello world
+ '
+
+ The commands will be executed before the trash directory is removed,
+ i.e. the atexit commands will still be able to access any pidfiles or
+ socket files.
+
+ Note that these commands will be run even when a test script run
+ with '--immediate' fails. Be careful with your atexit commands to
+ minimize any changes to the failed state.
+
- test_write_lines <lines>
Write <lines> on standard output, one line per argument.
test_oid_init or test_oid_cache. Providing an unknown key is an
error.
+ - yes [<string>]
+
+ This is often seen in modern UNIX but some platforms lack it, so
+ the test harness overrides the platform implementation with a
+ more limited one. Use this only when feeding a handful of lines of
+ output to the downstream---unlike the real version, it generates
+ only up to 99 lines.
+
+
Prerequisites
-------------
check_count A 2
'
+test_expect_success 'blame in a bare repo without starting commit' '
+ git clone --bare . bare.git &&
+ (
+ cd bare.git &&
+ check_count A 2
+ )
+'
+
test_expect_success 'blame by tag objects' '
git tag -m "test tag" testTag &&
git tag -m "test tag #2" testTag2 testTag &&
}
}
-static void parse_dates(const char **argv, struct timeval *now)
+static void parse_dates(const char **argv)
{
struct strbuf result = STRBUF_INIT;
else if (skip_prefix(*argv, "show:", &x))
show_dates(argv+1, x);
else if (!strcmp(*argv, "parse"))
- parse_dates(argv+1, &now);
+ parse_dates(argv+1);
else if (!strcmp(*argv, "approxidate"))
parse_approxidate(argv+1, &now);
else if (!strcmp(*argv, "timestamp"))
#include "cache.h"
#include "parse-options.h"
#include "string-list.h"
+#include "trace2.h"
static int boolean = 0;
static int integer = 0;
OPT_NOOP_NOARG(0, "obsolete"),
OPT_STRING_LIST(0, "list", &list, "str", "add str to list"),
OPT_GROUP("Magic arguments"),
- OPT_ARGUMENT("quux", "means --quux"),
+ OPT_ARGUMENT("quux", NULL, "means --quux"),
OPT_NUMBER_CALLBACK(&integer, "set integer to NUM",
number_callback),
{ OPTION_COUNTUP, '+', NULL, &boolean, NULL, "same as -b",
OPT_CALLBACK(0, "expect", &expect, "string",
"expected output in the variable dump",
collect_expect),
+ OPT_GROUP("Alias"),
+ OPT_STRING('A', "alias-source", &string, "string", "get a string"),
+ OPT_ALIAS('Z', "alias-target", "alias-source"),
OPT_END(),
};
int i;
int ret = 0;
+ trace2_cmd_name("_parse_");
+
argc = parse_options(argc, (const char **)argv, prefix, options, usage, 0);
if (length_cb.called) {
} else if (!strcmp(*argv, "stack")) {
pq.compare = NULL;
} else {
- int *v = malloc(sizeof(*v));
+ int *v = xmalloc(sizeof(*v));
*v = atoi(*argv);
prio_queue_put(&pq, v);
}
--- /dev/null
+#include "test-tool.h"
+#include "cache.h"
+#include "parse-options.h"
+#include "serve.h"
+
+static char const * const serve_usage[] = {
+ N_("test-tool serve-v2 [<options>]"),
+ NULL
+};
+
+int cmd__serve_v2(int argc, const char **argv)
+{
+ struct serve_options opts = SERVE_OPTIONS_INIT;
+
+ struct option options[] = {
+ OPT_BOOL(0, "stateless-rpc", &opts.stateless_rpc,
+ N_("quit after a single request/response exchange")),
+ OPT_BOOL(0, "advertise-capabilities", &opts.advertise_capabilities,
+ N_("exit immediately after advertising capabilities")),
+ OPT_END()
+ };
+ const char *prefix = setup_git_directory();
+
+ /* ignore all unknown cmdline switches for now */
+ argc = parse_options(argc, argv, prefix, options, serve_usage,
+ PARSE_OPT_KEEP_DASHDASH |
+ PARSE_OPT_KEEP_UNKNOWN);
+ serve(&opts);
+
+ return 0;
+}
#include "git-compat-util.h"
#include "test-tool.h"
+#include "trace2.h"
+#include "parse-options.h"
+
+static const char * const test_tool_usage[] = {
+ "test-tool [-C <directory>] <command [<arguments>...]]",
+ NULL
+};
struct test_cmd {
const char *name;
{ "revision-walking", cmd__revision_walking },
{ "run-command", cmd__run_command },
{ "scrap-cache-tree", cmd__scrap_cache_tree },
+ { "serve-v2", cmd__serve_v2 },
{ "sha1", cmd__sha1 },
{ "sha1-array", cmd__sha1_array },
{ "sha256", cmd__sha256 },
{ "submodule-config", cmd__submodule_config },
{ "submodule-nested-repo-config", cmd__submodule_nested_repo_config },
{ "subprocess", cmd__subprocess },
+ { "trace2", cmd__trace2 },
{ "urlmatch-normalization", cmd__urlmatch_normalization },
{ "xml-encode", cmd__xml_encode },
{ "wildmatch", cmd__wildmatch },
int cmd_main(int argc, const char **argv)
{
int i;
+ const char *working_directory = NULL;
+ struct option options[] = {
+ OPT_STRING('C', NULL, &working_directory, "directory",
+ "change the working directory"),
+ OPT_END()
+ };
BUG_exit_code = 99;
+ argc = parse_options(argc, argv, NULL, options, test_tool_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION |
+ PARSE_OPT_KEEP_ARGV0);
+
if (argc < 2)
die_usage();
+ if (working_directory && chdir(working_directory) < 0)
+ die("Could not cd to '%s'", working_directory);
+
for (i = 0; i < ARRAY_SIZE(cmds); i++) {
if (!strcmp(cmds[i].name, argv[1])) {
argv++;
argc--;
+ trace2_cmd_name(cmds[i].name);
+ trace2_cmd_list_config();
return cmds[i].fn(argc, argv);
}
}
int cmd__revision_walking(int argc, const char **argv);
int cmd__run_command(int argc, const char **argv);
int cmd__scrap_cache_tree(int argc, const char **argv);
+int cmd__serve_v2(int argc, const char **argv);
int cmd__sha1(int argc, const char **argv);
int cmd__sha1_array(int argc, const char **argv);
int cmd__sha256(int argc, const char **argv);
int cmd__submodule_config(int argc, const char **argv);
int cmd__submodule_nested_repo_config(int argc, const char **argv);
int cmd__subprocess(int argc, const char **argv);
+int cmd__trace2(int argc, const char **argv);
int cmd__urlmatch_normalization(int argc, const char **argv);
int cmd__xml_encode(int argc, const char **argv);
int cmd__wildmatch(int argc, const char **argv);
--- /dev/null
+#include "test-tool.h"
+#include "cache.h"
+#include "argv-array.h"
+#include "run-command.h"
+#include "exec-cmd.h"
+#include "config.h"
+
+typedef int(fn_unit_test)(int argc, const char **argv);
+
+struct unit_test {
+ fn_unit_test *ut_fn;
+ const char *ut_name;
+ const char *ut_usage;
+};
+
+#define MyOk 0
+#define MyError 1
+
+static int get_i(int *p_value, const char *data)
+{
+ char *endptr;
+
+ if (!data || !*data)
+ return MyError;
+
+ *p_value = strtol(data, &endptr, 10);
+ if (*endptr || errno == ERANGE)
+ return MyError;
+
+ return MyOk;
+}
+
+/*
+ * Cause process to exit with the requested value via "return".
+ *
+ * Rely on test-tool.c:cmd_main() to call trace2_cmd_exit()
+ * with our result.
+ *
+ * Test harness can confirm:
+ * [] the process-exit value.
+ * [] the "code" field in the "exit" trace2 event.
+ * [] the "code" field in the "atexit" trace2 event.
+ * [] the "name" field in the "cmd_name" trace2 event.
+ * [] "def_param" events for all of the "interesting" pre-defined
+ * config settings.
+ */
+static int ut_001return(int argc, const char **argv)
+{
+ int rc;
+
+ if (get_i(&rc, argv[0]))
+ die("expect <exit_code>");
+
+ return rc;
+}
+
+/*
+ * Cause the process to exit with the requested value via "exit()".
+ *
+ * Test harness can confirm:
+ * [] the "code" field in the "exit" trace2 event.
+ * [] the "code" field in the "atexit" trace2 event.
+ * [] the "name" field in the "cmd_name" trace2 event.
+ * [] "def_param" events for all of the "interesting" pre-defined
+ * config settings.
+ */
+static int ut_002exit(int argc, const char **argv)
+{
+ int rc;
+
+ if (get_i(&rc, argv[0]))
+ die("expect <exit_code>");
+
+ exit(rc);
+}
+
+/*
+ * Send an "error" event with each value in argv. Normally, git only issues
+ * a single "error" event immediately before issuing an "exit" event (such
+ * as in die() or BUG()), but multiple "error" events are allowed.
+ *
+ * Test harness can confirm:
+ * [] a trace2 "error" event for each value in argv.
+ * [] the "name" field in the "cmd_name" trace2 event.
+ * [] (optional) the file:line in the "exit" event refers to this function.
+ */
+static int ut_003error(int argc, const char **argv)
+{
+ int k;
+
+ if (!argv[0] || !*argv[0])
+ die("expect <error_message>");
+
+ for (k = 0; k < argc; k++)
+ error("%s", argv[k]);
+
+ return 0;
+}
+
+/*
+ * Run a child process and wait for it to finish and exit with its return code.
+ * test-tool trace2 004child [<child-command-line>]
+ *
+ * For example:
+ * test-tool trace2 004child git version
+ * test-tool trace2 004child test-tool trace2 001return 0
+ * test-tool trace2 004child test-tool trace2 004child test-tool trace2 004child
+ * test-tool trace2 004child git -c alias.xyz=version xyz
+ *
+ * Test harness can confirm:
+ * [] the "name" field in the "cmd_name" trace2 event.
+ * [] that the outer process has a single component SID (or depth "d0" in
+ * the PERF stream).
+ * [] that "child_start" and "child_exit" events are generated for the child.
+ * [] if the child process is an instrumented executable:
+ * [] that "version", "start", ..., "exit", and "atexit" events are
+ * generated by the child process.
+ * [] that the child process events have a multiple component SID (or
+ * depth "dN+1" in the PERF stream).
+ * [] that the child exit code is propagated to the parent process "exit"
+ * and "atexit" events.
+ * [] (optional) that the "t_abs" field in the child process "atexit" event
+ * is less than the "t_rel" field in the "child_exit" event of the parent
+ * process.
+ * [] if the child process is like the alias example above,
+ * [] (optional) the child process attempts to run "git-xyz" as a dashed
+ * command.
+ * [] the child process emits an "alias" event with "xyz" => "version".
+ * [] the child process runs "git version" as a child process.
+ * [] the child process has a 3 component SID (or depth "d2" in the PERF
+ * stream).
+ */
+static int ut_004child(int argc, const char **argv)
+{
+ int result;
+
+ /*
+ * Allow empty <child_command_line> so we can do arbitrarily deep
+ * command nesting and let the last one be null.
+ */
+ if (!argc)
+ return 0;
+
+ result = run_command_v_opt(argv, 0);
+ exit(result);
+}
+
+/*
+ * Exec a git command. This may either create a child process (Windows)
+ * or replace the existing process.
+ * test-tool trace2 005exec <git_command_args>
+ *
+ * For example:
+ * test-tool trace2 005exec version
+ *
+ * Test harness can confirm (on Windows):
+ * [] the "name" field in the "cmd_name" trace2 event.
+ * [] that the outer process has a single component SID (or depth "d0" in
+ * the PERF stream).
+ * [] that "exec" and "exec_result" events are generated for the child
+ * process (since the Windows compatibility layer fakes an exec() with
+ * a CreateProcess(), WaitForSingleObject(), and exit()).
+ * [] that the child process has a multiple component SID (or depth "dN+1"
+ * in the PERF stream).
+ *
+ * Test harness can confirm (on platforms with a real exec() function):
+ * [] TODO talk about process replacement and how it affects SID.
+ */
+static int ut_005exec(int argc, const char **argv)
+{
+ int result;
+
+ if (!argc)
+ return 0;
+
+ result = execv_git_cmd(argv);
+ return result;
+}
+
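+/*
+ * Write a trace2 "data" event for each <category> <key> <value> triple
+ * given on the command line.
+ *
+ * For example:
+ *     test-tool trace2 006data test_category k1 v1 test_category k2 v2
+ *
+ * Test harness can confirm:
+ * [] a "data" event for each triple.
+ */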
+static int ut_006data(int argc, const char **argv)
+{
+ const char *usage_error =
+ "expect <cat0> <k0> <v0> [<cat1> <k1> <v1> [...]]";
+
+ if (argc % 3 != 0)
+ die("%s", usage_error);
+
+ while (argc) {
+ if (!argv[0] || !*argv[0] || !argv[1] || !*argv[1] ||
+ !argv[2] || !*argv[2])
+ die("%s", usage_error);
+
+ trace2_data_string(argv[0], the_repository, argv[1], argv[2]);
+ argv += 3;
+ argc -= 3;
+ }
+
+ return 0;
+}
+
+/*
+ * Usage:
+ * test-tool trace2 <ut_name_1> <ut_usage_1>
+ * test-tool trace2 <ut_name_2> <ut_usage_2>
+ * ...
+ */
+#define USAGE_PREFIX "test-tool trace2"
+
+/* clang-format off */
+static struct unit_test ut_table[] = {
+ { ut_001return, "001return", "<exit_code>" },
+ { ut_002exit, "002exit", "<exit_code>" },
+ { ut_003error, "003error", "<error_message>+" },
+ { ut_004child, "004child", "[<child_command_line>]" },
+ { ut_005exec, "005exec", "<git_command_args>" },
+ { ut_006data, "006data", "[<category> <key> <value>]+" },
+};
+/* clang-format on */
+
+/* clang-format off */
+#define for_each_ut(k, ut_k) \
+ for (k = 0, ut_k = &ut_table[k]; \
+ k < ARRAY_SIZE(ut_table); \
+ k++, ut_k = &ut_table[k])
+/* clang-format on */
+
+static int print_usage(void)
+{
+ int k;
+ struct unit_test *ut_k;
+
+ fprintf(stderr, "usage:\n");
+ for_each_ut (k, ut_k)
+ fprintf(stderr, "\t%s %s %s\n", USAGE_PREFIX, ut_k->ut_name,
+ ut_k->ut_usage);
+
+ return 129;
+}
+
+/*
+ * Issue various trace2 events for testing.
+ *
+ * We assume that these trace2 routines have already been called:
+ * [] trace2_initialize() [common-main.c:main()]
+ * [] trace2_cmd_start() [common-main.c:main()]
+ * [] trace2_cmd_name() [test-tool.c:cmd_main()]
+ * [] trace2_cmd_list_config() [test-tool.c:cmd_main()]
+ * So that:
+ * [] the various trace2 streams are open.
+ * [] the process SID has been created.
+ * [] the "version" event has been generated.
+ * [] the "start" event has been generated.
+ * [] the "cmd_name" event has been generated.
+ * [] this writes various "def_param" events for interesting config values.
+ *
+ * We further assume that if we return (rather than exit()), trace2_cmd_exit()
+ * will be called by test-tool.c:cmd_main().
+ */
+int cmd__trace2(int argc, const char **argv)
+{
+ int k;
+ struct unit_test *ut_k;
+
+ argc--; /* skip over "trace2" arg */
+ argv++;
+
+ if (argc)
+ for_each_ut (k, ut_k)
+ if (!strcmp(argv[0], ut_k->ut_name))
+ return ut_k->ut_fn(argc - 1, argv + 1);
+
+ return print_usage();
+}
test_cmp expect actual
'
-stop_git_daemon
test_done
#
# test_expect_success ...
#
-# stop_git_daemon
# test_done
test_tristate GIT_TEST_GIT_DAEMON
test_set_port LIB_GIT_DAEMON_PORT
GIT_DAEMON_PID=
+GIT_DAEMON_PIDFILE="$PWD"/daemon.pid
GIT_DAEMON_DOCUMENT_ROOT_PATH="$PWD"/repo
GIT_DAEMON_HOST_PORT=127.0.0.1:$LIB_GIT_DAEMON_PORT
GIT_DAEMON_URL=git://$GIT_DAEMON_HOST_PORT
+registered_stop_git_daemon_atexit_handler=
start_git_daemon() {
if test -n "$GIT_DAEMON_PID"
then
mkdir -p "$GIT_DAEMON_DOCUMENT_ROOT_PATH"
- trap 'code=$?; stop_git_daemon; (exit $code); die' EXIT
+ # One of the test scripts stops and then re-starts 'git daemon'.
+ # Don't register and then run the same atexit handlers several times.
+ if test -z "$registered_stop_git_daemon_atexit_handler"
+ then
+ test_atexit 'stop_git_daemon'
+ registered_stop_git_daemon_atexit_handler=AlreadyDone
+ fi
say >&3 "Starting git daemon ..."
mkfifo git_daemon_output
${LIB_GIT_DAEMON_COMMAND:-git daemon} \
--listen=127.0.0.1 --port="$LIB_GIT_DAEMON_PORT" \
- --reuseaddr --verbose \
+ --reuseaddr --verbose --pid-file="$GIT_DAEMON_PIDFILE" \
--base-path="$GIT_DAEMON_DOCUMENT_ROOT_PATH" \
"$@" "$GIT_DAEMON_DOCUMENT_ROOT_PATH" \
>&3 2>git_daemon_output &
then
kill "$GIT_DAEMON_PID"
wait "$GIT_DAEMON_PID"
- trap 'die' EXIT
+ unset GIT_DAEMON_PID
test_skip_or_die $GIT_TEST_GIT_DAEMON \
"git daemon failed to start"
fi
return
fi
- trap 'die' EXIT
-
# kill git-daemon child of git
say >&3 "Stopping git daemon ..."
kill "$GIT_DAEMON_PID"
then
error "git daemon exited with status: $ret"
fi
+ kill "$(cat "$GIT_DAEMON_PIDFILE")" 2>/dev/null
GIT_DAEMON_PID=
- rm -f git_daemon_output
+ rm -f git_daemon_output "$GIT_DAEMON_PIDFILE"
}
# A stripped-down version of a netcat client, that connects to a "host:port"
echo "$path"
}
-# On Solaris the 'date +%s' function is not supported and therefore we
-# need this replacement.
-# Attention: This function is not safe again against time offset updates
-# at runtime (e.g. via NTP). The 'clock_gettime(CLOCK_MONOTONIC)'
-# function could fix that but it is not in Python until 3.3.
-time_in_seconds () {
- (cd / && "$PYTHON_PATH" -c 'import time; print(int(time.time()))')
-}
-
test_set_port P4DPORT
P4PORT=localhost:$P4DPORT
git="$TRASH_DIRECTORY/git"
pidfile="$TRASH_DIRECTORY/p4d.pid"
-# Sometimes "prove" seems to hang on exit because p4d is still running
-cleanup () {
- if test -f "$pidfile"
- then
- kill -9 $(cat "$pidfile") 2>/dev/null && exit 255
- fi
+stop_p4d_and_watchdog () {
+ kill -9 $p4d_pid $watchdog_pid
}
-trap cleanup EXIT
# git p4 submit generates a temp file, which will
# not get cleaned up if the submission fails. Don't
TMPDIR="$TRASH_DIRECTORY"
export TMPDIR
+registered_stop_p4d_atexit_handler=
start_p4d () {
+ # One of the test scripts stops and then re-starts p4d.
+ # Don't register and then run the same atexit handlers several times.
+ if test -z "$registered_stop_p4d_atexit_handler"
+ then
+ test_atexit 'stop_p4d_and_watchdog'
+ registered_stop_p4d_atexit_handler=AlreadyDone
+ fi
+
mkdir -p "$db" "$cli" "$git" &&
rm -f "$pidfile" &&
(
echo $! >"$pidfile"
}
) &&
+ p4d_pid=$(cat "$pidfile")
# This gives p4d a long time to start up, as it can be
# quite slow depending on the machine. Set this environment
# an automated test setup. If the p4d process dies, that
# will be caught with the "kill -0" check below.
i=${P4D_START_PATIENCE:-300}
- pid=$(cat "$pidfile")
- timeout=$(($(time_in_seconds) + $P4D_TIMEOUT))
+ nr_tries_left=$P4D_TIMEOUT
while true
do
- if test $(time_in_seconds) -gt $timeout
+ if test $nr_tries_left -eq 0
then
- kill -9 $pid
+ kill -9 $p4d_pid
exit 1
fi
sleep 1
- done &
+ nr_tries_left=$(($nr_tries_left - 1))
+ done 2>/dev/null 4>&2 &
watchdog_pid=$!
ready=
break
fi
# fail if p4d died
- kill -0 $pid 2>/dev/null || break
+ kill -0 $p4d_pid 2>/dev/null || break
echo waiting for p4d to start
sleep 1
i=$(( $i - 1 ))
}
retry_until_success () {
- timeout=$(($(time_in_seconds) + $RETRY_TIMEOUT))
- until "$@" 2>/dev/null || test $(time_in_seconds) -gt $timeout
- do
- sleep 1
- done
-}
-
-retry_until_fail () {
- timeout=$(($(time_in_seconds) + $RETRY_TIMEOUT))
- until ! "$@" 2>/dev/null || test $(time_in_seconds) -gt $timeout
+ nr_tries_left=$RETRY_TIMEOUT
+ until "$@" 2>/dev/null || test $nr_tries_left -eq 0
do
sleep 1
+ nr_tries_left=$(($nr_tries_left - 1))
done
}
-kill_p4d () {
- pid=$(cat "$pidfile")
- retry_until_fail kill $pid
- retry_until_fail kill -9 $pid
- # complain if it would not die
- test_must_fail kill $pid >/dev/null 2>&1 &&
- rm -rf "$db" "$cli" "$pidfile" &&
- retry_until_fail kill -9 $watchdog_pid
+stop_and_cleanup_p4d () {
+ kill -9 $p4d_pid $watchdog_pid
+ wait $p4d_pid
+ rm -rf "$db" "$cli" "$pidfile"
}
cleanup_git () {
LIB_HTTPD_SVN="$loc"
start_httpd
;;
- *)
- stop_httpd () {
- : noop
- }
- ;;
esac
}
#
# test_expect_success ...
#
-# stop_httpd
# test_done
#
# Can be configured using the following variables.
start_httpd() {
prepare_httpd >&3 2>&4
- trap 'code=$?; stop_httpd; (exit $code); die' EXIT
+ test_atexit stop_httpd
"$LIB_HTTPD_PATH" -d "$HTTPD_ROOT_PATH" \
-f "$TEST_PATH/apache.conf" $HTTPD_PARA \
>&3 2>&4
if test $? -ne 0
then
- trap 'die' EXIT
cat "$HTTPD_ROOT_PATH"/error.log >&4 2>/dev/null
test_skip_or_die $GIT_TEST_HTTPD "web server setup failed"
fi
}
stop_httpd() {
- trap 'die' EXIT
-
"$LIB_HTTPD_PATH" -d "$HTTPD_ROOT_PATH" \
-f "$TEST_PATH/apache.conf" $HTTPD_PARA -k stop
}
PassEnv GIT_VALGRIND_OPTIONS
PassEnv GNUPGHOME
PassEnv ASAN_OPTIONS
+PassEnv LSAN_OPTIONS
PassEnv GIT_TRACE
PassEnv GIT_CONFIG_NOSYSTEM
PassEnv GIT_TEST_SIDEBAND_ALL
git revert HEAD &&
git checkout -b invalid_sub1 add_sub1 &&
- git update-index --cacheinfo 160000 0123456789012345678901234567890123456789 sub1 &&
+ git update-index --cacheinfo 160000 $(test_oid numeric) sub1 &&
git commit -m "Invalid sub1 commit" &&
git checkout -b valid_sub1 &&
git revert HEAD &&
# the submodule repo if it doesn't exist and configures the most problematic
# settings for diff.ignoreSubmodules.
prolog () {
+ test_oid_init &&
(test -d submodule_update_repo || create_lib_submodule_repo) &&
test_config_global diff.ignoreSubmodules all &&
test_config diff.ignoreSubmodules all
$ ./p0001-rev-list.sh
[...]
- $ GIT_BUILD_DIR=/path/to/other/git ./p0001-rev-list.sh
+ $ ./run /path/to/other/git -- ./p0001-rev-list.sh
[...]
$ ./aggregate.perl . /path/to/other/git ./p0001-rev-list.sh
use lib '../../perl/build/lib';
use strict;
use warnings;
-use JSON;
use Getopt::Long;
use Git;
+use Cwd qw(realpath);
sub get_times {
my $name = shift;
while (scalar @ARGV) {
my $arg = $ARGV[0];
my $dir;
+ my $prefix = '';
last if -f $arg or $arg eq "--";
if (! -d $arg) {
my $rev = Git::command_oneline(qw(rev-parse --verify), $arg);
$dir = "build/".$rev;
+ } elsif ($arg eq '.') {
+ $dir = '.';
} else {
- $arg =~ s{/*$}{};
- $dir = $arg;
- $dirabbrevs{$dir} = $dir;
+ $dir = realpath($arg);
+ $dirnames{$dir} = $dir;
+ $prefix .= 'bindir';
}
push @dirs, $dir;
- $dirnames{$dir} = $arg;
- my $prefix = $dir;
+ $dirnames{$dir} ||= $arg;
+ $prefix .= $dir;
$prefix =~ tr/^a-zA-Z0-9/_/c;
$prefixes{$dir} = $prefix . '.';
shift @ARGV;
$environment = $reponame;
} elsif (exists $ENV{GIT_PERF_REPO_NAME} and $ENV{GIT_PERF_REPO_NAME} ne "") {
$environment = $ENV{GIT_PERF_REPO_NAME};
- } elsif (exists $ENV{GIT_TEST_INSTALLED} and $ENV{GIT_TEST_INSTALLED} ne "") {
- $environment = $ENV{GIT_TEST_INSTALLED};
- $environment =~ s|/bin-wrappers$||;
} else {
$environment = `uname -r`;
chomp $environment;
}
}
- print to_json(\@data, {utf8 => 1, pretty => 1, canonical => 1}), "\n";
+ require JSON;
+ print JSON::to_json(\@data, {utf8 => 1, pretty => 1, canonical => 1}), "\n";
}
binmode STDOUT, ":utf8" or die "PANIC on binmode: $!";
git rev-list --all --objects >/dev/null
'
+test_perf 'rev-list --parents' '
+ git rev-list --parents HEAD >/dev/null
+'
+
+test_expect_success 'create dummy file' '
+ echo unlikely-to-already-be-there >dummy &&
+ git add dummy &&
+ git commit -m dummy
+'
+
+test_perf 'rev-list -- dummy' '
+ git rev-list HEAD -- dummy
+'
+
+test_perf 'rev-list --parents -- dummy' '
+ git rev-list --parents HEAD -- dummy
+'
+
test_expect_success 'create new unreferenced commit' '
commit=$(git commit-tree HEAD^{tree} -p HEAD) &&
test_export commit
export PACK
'
-test_expect_success 'create target repositories' '
- for repo in t1 t2 t3 t4 t5 t6
- do
- git init --bare $repo
- done
-'
-
test_perf 'index-pack 0 threads' '
- GIT_DIR=t1 git index-pack --threads=1 --stdin < $PACK
+ rm -rf repo.git &&
+ git init --bare repo.git &&
+ GIT_DIR=repo.git git index-pack --threads=1 --stdin < $PACK
'
test_perf 'index-pack 1 thread ' '
- GIT_DIR=t2 GIT_FORCE_THREADS=1 git index-pack --threads=1 --stdin < $PACK
+ rm -rf repo.git &&
+ git init --bare repo.git &&
+ GIT_DIR=repo.git GIT_FORCE_THREADS=1 git index-pack --threads=1 --stdin < $PACK
'
test_perf 'index-pack 2 threads' '
- GIT_DIR=t3 git index-pack --threads=2 --stdin < $PACK
+ rm -rf repo.git &&
+ git init --bare repo.git &&
+ GIT_DIR=repo.git git index-pack --threads=2 --stdin < $PACK
'
test_perf 'index-pack 4 threads' '
- GIT_DIR=t4 git index-pack --threads=4 --stdin < $PACK
+ rm -rf repo.git &&
+ git init --bare repo.git &&
+ GIT_DIR=repo.git git index-pack --threads=4 --stdin < $PACK
'
test_perf 'index-pack 8 threads' '
- GIT_DIR=t5 git index-pack --threads=8 --stdin < $PACK
+ rm -rf repo.git &&
+ git init --bare repo.git &&
+ GIT_DIR=repo.git git index-pack --threads=8 --stdin < $PACK
'
test_perf 'index-pack default number of threads' '
- GIT_DIR=t6 git index-pack --stdin < $PACK
+ rm -rf repo.git &&
+ git init --bare repo.git &&
+ GIT_DIR=repo.git git index-pack --stdin < $PACK
'
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='performance tests of prune'
+. ./perf-lib.sh
+
+test_perf_default_repo
+
+test_expect_success 'remove reachable loose objects' '
+ git repack -ad
+'
+
+test_expect_success 'remove unreachable loose objects' '
+ git prune
+'
+
+test_expect_success 'confirm there are no loose objects' '
+ git count-objects | grep ^0
+'
+
+test_perf 'prune with no objects' '
+ git prune
+'
+
+test_expect_success 'repack with bitmaps' '
+ git repack -adb
+'
+
+# We have to create the object in each trial run, since otherwise
+# runs after the first see no object and just skip the traversal entirely!
+test_perf 'prune with bitmaps' '
+ echo "probably not present in repo" | git hash-object -w --stdin &&
+ git prune
+'
+
+test_done
# We intentionally use the deprecated pack.writebitmaps
# config so that we can test against older versions of git.
test_expect_success 'setup bitmap config' '
- git config pack.writebitmaps true &&
- git config pack.writebitmaphashcache true
+ git config pack.writebitmaps true
'
test_perf 'repack to disk' '
test_expect_success 'create bitmapped server repo' '
git config pack.writebitmaps true &&
- git config pack.writebitmaphashcache true &&
git repack -ad
'
--- /dev/null
+#!/bin/sh
+
+test_description='performance of partial clones'
+. ./perf-lib.sh
+
+test_perf_default_repo
+
+test_expect_success 'enable server-side config' '
+ git config uploadpack.allowFilter true &&
+ git config uploadpack.allowAnySHA1InWant true
+'
+
+test_perf 'clone without blobs' '
+ rm -rf bare.git &&
+ git clone --no-local --bare --filter=blob:none . bare.git
+'
+
+test_perf 'checkout of result' '
+ rm -rf worktree &&
+ mkdir -p worktree/.git &&
+ tar -C bare.git -cf - . | tar -C worktree/.git -xf - &&
+ git -C worktree config core.bare false &&
+ git -C worktree checkout -f
+'
+
+test_done
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/ .
-# do the --tee work early; it otherwise confuses our careful
-# GIT_BUILD_DIR mangling
-case "$GIT_TEST_TEE_STARTED, $* " in
-done,*)
- # do not redirect again
- ;;
-*' --tee '*|*' --va'*)
- mkdir -p test-results
- BASE=test-results/$(basename "$0" .sh)
- (GIT_TEST_TEE_STARTED=done ${SHELL-sh} "$0" "$@" 2>&1;
- echo $? > $BASE.exit) | tee $BASE.out
- test "$(cat $BASE.exit)" = 0
- exit
- ;;
-esac
-
+# These variables must be set before the inclusion of test-lib.sh below,
+# because it will change our working directory.
TEST_DIRECTORY=$(pwd)/..
TEST_OUTPUT_DIRECTORY=$(pwd)
-if test -z "$GIT_TEST_INSTALLED"; then
- perf_results_prefix=
-else
- perf_results_prefix=$(printf "%s" "${GIT_TEST_INSTALLED%/bin-wrappers}" | tr -c "[a-zA-Z0-9]" "[_*]")"."
- # make the tested dir absolute
- GIT_TEST_INSTALLED=$(cd "$GIT_TEST_INSTALLED" && pwd)
-fi
TEST_NO_CREATE_REPO=t
TEST_NO_MALLOC_CHECK=t
. ../test-lib.sh
+if test -n "$GIT_TEST_INSTALLED" -a -z "$PERF_SET_GIT_TEST_INSTALLED"
+then
+ error "Do not use GIT_TEST_INSTALLED with the perf tests.
+
+Instead use:
+
+ ./run <path-to-git> -- <tests>
+
+See t/perf/README for details."
+fi
+
# Variables from test-lib that are normally internal to the tests; we
# need to export them for test_perf subshells
export TEST_DIRECTORY TRASH_DIRECTORY GIT_BUILD_DIR GIT_TEST_CMP
base=$(basename "$0" .sh)
echo "$test_count" >>"$perf_results_dir"/$base.subtests
echo "$1" >"$perf_results_dir"/$base.$test_count.descr
- base="$perf_results_dir"/"$perf_results_prefix$(basename "$0" .sh)"."$test_count"
+ base="$perf_results_dir"/"$PERF_RESULTS_PREFIX$(basename "$0" .sh)"."$test_count"
"$test_wrapper_func_" "$@"
fi
) || die "failed to build revision '$mydir'"
}
+set_git_test_installed () {
+ mydir=$1
+
+ mydir_abs=$(cd $mydir && pwd)
+ mydir_abs_wrappers="$mydir_abs/bin-wrappers"
+ if test -d "$mydir_abs_wrappers"
+ then
+ GIT_TEST_INSTALLED=$mydir_abs_wrappers
+ else
+ # Older versions of git lacked bin-wrappers;
+ # fallback to the files in the root.
+ GIT_TEST_INSTALLED=$mydir_abs
+ fi
+ export GIT_TEST_INSTALLED
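+
+ # Record that it was "./run" itself (and not the user) that set
+ # GIT_TEST_INSTALLED, so that the check in perf-lib.sh does not
+ # reject the setting.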
+ PERF_SET_GIT_TEST_INSTALLED=true
+ export PERF_SET_GIT_TEST_INSTALLED
+}
+
run_dirs_helper () {
mydir=${1%/}
shift
if test $# -gt 0 -a "$1" = --; then
shift
fi
- if [ ! -d "$mydir" ]; then
+
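+ # Pick a results-file prefix that records where the tested git came
+ # from: empty for the in-tree build ("."), "bindir<abs-path>." for an
+ # installed directory, and "build_<rev>." for a built revision.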
+ PERF_RESULTS_PREFIX=
+ if test "$mydir" = "."
+ then
+ unset GIT_TEST_INSTALLED
+ elif test -d "$mydir"
+ then
+ PERF_RESULTS_PREFIX=bindir$(cd $mydir && printf "%s" "$(pwd)" | tr -c "[a-zA-Z0-9]" "_").
+ set_git_test_installed "$mydir"
+ else
rev=$(git rev-parse --verify "$mydir" 2>/dev/null) ||
die "'$mydir' is neither a directory nor a valid revision"
if [ ! -d build/$rev ]; then
fi
build_git_rev $rev "$mydir"
mydir=build/$rev
+
+ PERF_RESULTS_PREFIX=build_$rev.
+ set_git_test_installed "$mydir"
fi
- if test "$mydir" = .; then
- unset GIT_TEST_INSTALLED
- else
- GIT_TEST_INSTALLED="$mydir/bin-wrappers"
- # Older versions of git lacked bin-wrappers; fallback to the
- # files in the root.
- test -d "$GIT_TEST_INSTALLED" || GIT_TEST_INSTALLED=$mydir
- export GIT_TEST_INSTALLED
- fi
+ export PERF_RESULTS_PREFIX
+
run_one_dir "$@"
}
EOF
"
+test_expect_success 'test_atexit is run' "
+ test_must_fail run_sub_test_lib_test \
+ atexit-cleanup 'Run atexit commands' -i <<-\\EOF &&
+ test_expect_success 'tests clean up even after a failure' '
+ > ../../clean-atexit &&
+ test_atexit rm ../../clean-atexit &&
+ > ../../also-clean-atexit &&
+ test_atexit rm ../../also-clean-atexit &&
+ > ../../dont-clean-atexit &&
+ (exit 1)
+ '
+ test_done
+ EOF
+ test_path_is_file dont-clean-atexit &&
+ test_path_is_missing clean-atexit &&
+ test_path_is_missing also-clean-atexit
+"
+
test_expect_success 'test_oid setup' '
test_oid_init
'
sed -n \
-e "/^GIT_PREFIX=/d" \
-e "/^GIT_TEXTDOMAINDIR=/d" \
+ -e "/^GIT_TR2_PARENT/d" \
-e "/^GIT_/s/=.*//p" |
sort
EOF
)
'
+test_expect_success MINGW 'core.hidedotfiles = false' '
+ git config --global core.hidedotfiles false &&
+ rm -rf newdir &&
+ mkdir newdir &&
+ (
+ sane_unset GIT_DIR GIT_WORK_TREE GIT_CONFIG &&
+ git -C newdir init
+ ) &&
+ ! is_hidden newdir/.git
+'
+
test_expect_success MINGW 'redirect std handles' '
GIT_REDIRECT_STDOUT=output.txt git rev-parse --git-dir &&
test .git = "$(cat output.txt)" &&
-q, --quiet be quiet
--expect <string> expected output in the variable dump
+Alias
+ -A, --alias-source <string>
+ get a string
+ -Z, --alias-target <string>
+ get a string
+
EOF
test_expect_success 'test help' '
EOF
test_expect_success 'unambiguously abbreviated option' '
+ GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=false \
test-tool parse-options --int 2 --boolean --no-bo >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
'
test_expect_success 'unambiguously abbreviated option with "="' '
+ GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=false \
test-tool parse-options --expect="integer: 2" --int=2
'
test_expect_success 'ambiguously abbreviated option' '
- test_expect_code 129 test-tool parse-options --strin 123
+ test_expect_code 129 env GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=false \
+ test-tool parse-options --strin 123
'
test_expect_success 'non ambiguous option (after two options it abbreviates)' '
+ GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=false \
test-tool parse-options --expect="string: 123" --st 123
'
+test_expect_success 'Alias options do not contribute to abbreviation' '
+ test-tool parse-options --alias-source 123 >output &&
+ grep "^string: 123" output &&
+ test-tool parse-options --alias-target 123 >output &&
+ grep "^string: 123" output &&
+ test_must_fail test-tool parse-options --alias &&
+ GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=false \
+ test-tool parse-options --alias 123 >output &&
+ grep "^string: 123" output
+'
+
cat >typo.err <<\EOF
error: did you mean `--boolean` (with two dashes ?)
EOF
EOF
test_expect_success 'negation of OPT_NONEG flags is not ambiguous' '
+ GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=false \
test-tool parse-options --no-ambig >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
test-tool parse-options --expect="verbose: 0" -v -v -v --no-verbose
'
+test_expect_success 'GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS works' '
+ GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=false \
+ test-tool parse-options --ye &&
+ test_must_fail env GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=true \
+ test-tool parse-options --ye
+'
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='test trace2 facility (normal target)'
+. ./test-lib.sh
+
+# Turn off any inherited trace2 settings for this test.
+sane_unset GIT_TR2 GIT_TR2_PERF GIT_TR2_EVENT
+sane_unset GIT_TR2_BRIEF
+sane_unset GIT_TR2_CONFIG_PARAMS
+
+# Add t/helper directory to PATH so that we can use a relative
+# path to run nested instances of test-tool.exe (see 004child).
+# This helps with HEREDOC comparisons later.
+TTDIR="$GIT_BUILD_DIR/t/helper/" && export TTDIR
+PATH="$TTDIR:$PATH" && export PATH
+
+# Warning: use of 'test_cmp' may run test-tool.exe and/or git.exe
+# Warning: to do the actual diff/comparison, so the HEREDOCs here
+# Warning: only cover our actual calls to test-tool and/or git.
+# Warning: So you may see extra lines in artifact files when
+# Warning: interactively debugging.
+
+V=$(git version | sed -e 's/^git version //') && export V
+
+# There are multiple trace2 targets: normal, perf, and event.
+# Trace2 events will/can be written to each active target (subject
+# to whatever filtering that target decides to do).
+# This script tests the normal target in isolation.
+#
+# Defer setting GIT_TR2 until the actual command line we want to test
+# because hidden git and test-tool commands run by the test harness
+# can contaminate our output.
+
+# Enable "brief" feature which turns off "<clock> <file>:<line> " prefix.
+GIT_TR2_BRIEF=1 && export GIT_TR2_BRIEF
+
+# Basic tests of the trace2 normal stream. Since this stream is used
+# primarily with printf-style debugging/tracing, we do limited testing
+# here.
+#
+# We do confirm the following API features:
+# [] the 'version <v>' event
+# [] the 'start <argv>' event
+# [] the 'cmd_name <name>' event
+# [] the 'exit <time> code:<code>' event
+# [] the 'atexit <time> code:<code>' event
+#
+# Fields of the form _FIELD_ are tokens that have been replaced (such
+# as the elapsed time).
+
+# Verb 001return
+#
+# Implicit return from cmd_<verb> function propagates <code>.
+
+test_expect_success 'normal stream, return code 0' '
+ test_when_finished "rm trace.normal actual expect" &&
+ GIT_TR2="$(pwd)/trace.normal" test-tool trace2 001return 0 &&
+ perl "$TEST_DIRECTORY/t0210/scrub_normal.perl" <trace.normal >actual &&
+ cat >expect <<-EOF &&
+ version $V
+ start _EXE_ trace2 001return 0
+ cmd_name trace2 (trace2)
+ exit elapsed:_TIME_ code:0
+ atexit elapsed:_TIME_ code:0
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'normal stream, return code 1' '
+ test_when_finished "rm trace.normal actual expect" &&
+ test_must_fail env GIT_TR2="$(pwd)/trace.normal" test-tool trace2 001return 1 &&
+ perl "$TEST_DIRECTORY/t0210/scrub_normal.perl" <trace.normal >actual &&
+ cat >expect <<-EOF &&
+ version $V
+ start _EXE_ trace2 001return 1
+ cmd_name trace2 (trace2)
+ exit elapsed:_TIME_ code:1
+ atexit elapsed:_TIME_ code:1
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'automatic filename' '
+ test_when_finished "rm -r traces actual expect" &&
+ mkdir traces &&
+ GIT_TR2="$(pwd)/traces" test-tool trace2 001return 0 &&
+ perl "$TEST_DIRECTORY/t0210/scrub_normal.perl" <"$(ls traces/*)" >actual &&
+ cat >expect <<-EOF &&
+ version $V
+ start _EXE_ trace2 001return 0
+ cmd_name trace2 (trace2)
+ exit elapsed:_TIME_ code:0
+ atexit elapsed:_TIME_ code:0
+ EOF
+ test_cmp expect actual
+'
+
+# Verb 002exit
+#
+# Explicit exit(code) from within cmd_<verb> propagates <code>.
+
+test_expect_success 'normal stream, exit code 0' '
+ test_when_finished "rm trace.normal actual expect" &&
+ GIT_TR2="$(pwd)/trace.normal" test-tool trace2 002exit 0 &&
+ perl "$TEST_DIRECTORY/t0210/scrub_normal.perl" <trace.normal >actual &&
+ cat >expect <<-EOF &&
+ version $V
+ start _EXE_ trace2 002exit 0
+ cmd_name trace2 (trace2)
+ exit elapsed:_TIME_ code:0
+ atexit elapsed:_TIME_ code:0
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'normal stream, exit code 1' '
+ test_when_finished "rm trace.normal actual expect" &&
+ test_must_fail env GIT_TR2="$(pwd)/trace.normal" test-tool trace2 002exit 1 &&
+ perl "$TEST_DIRECTORY/t0210/scrub_normal.perl" <trace.normal >actual &&
+ cat >expect <<-EOF &&
+ version $V
+ start _EXE_ trace2 002exit 1
+ cmd_name trace2 (trace2)
+ exit elapsed:_TIME_ code:1
+ atexit elapsed:_TIME_ code:1
+ EOF
+ test_cmp expect actual
+'
+
+# Verb 003error
+#
+# To the above, add multiple 'error <msg>' events
+
+test_expect_success 'normal stream, error event' '
+ test_when_finished "rm trace.normal actual expect" &&
+ GIT_TR2="$(pwd)/trace.normal" test-tool trace2 003error "hello world" "this is a test" &&
+ perl "$TEST_DIRECTORY/t0210/scrub_normal.perl" <trace.normal >actual &&
+ cat >expect <<-EOF &&
+ version $V
+ start _EXE_ trace2 003error '\''hello world'\'' '\''this is a test'\''
+ cmd_name trace2 (trace2)
+ error hello world
+ error this is a test
+ exit elapsed:_TIME_ code:0
+ atexit elapsed:_TIME_ code:0
+ EOF
+ test_cmp expect actual
+'
+
+sane_unset GIT_TR2_BRIEF
+
+# Now test without environment variables and get all Trace2 settings
+# from the global config.
+
+test_expect_success 'using global config, normal stream, return code 0' '
+ test_when_finished "rm trace.normal actual expect" &&
+ test_config_global trace2.normalBrief 1 &&
+ test_config_global trace2.normalTarget "$(pwd)/trace.normal" &&
+ test-tool trace2 001return 0 &&
+ perl "$TEST_DIRECTORY/t0210/scrub_normal.perl" <trace.normal >actual &&
+ cat >expect <<-EOF &&
+ version $V
+ start _EXE_ trace2 001return 0
+ cmd_name trace2 (trace2)
+ exit elapsed:_TIME_ code:0
+ atexit elapsed:_TIME_ code:0
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'using global config with include' '
+ test_when_finished "rm trace.normal actual expect real.gitconfig" &&
+ test_config_global trace2.normalBrief 1 &&
+ test_config_global trace2.normalTarget "$(pwd)/trace.normal" &&
+ mv "$(pwd)/.gitconfig" "$(pwd)/real.gitconfig" &&
+ test_config_global include.path "$(pwd)/real.gitconfig" &&
+ test-tool trace2 001return 0 &&
+ perl "$TEST_DIRECTORY/t0210/scrub_normal.perl" <trace.normal >actual &&
+ cat >expect <<-EOF &&
+ version $V
+ start _EXE_ trace2 001return 0
+ cmd_name trace2 (trace2)
+ exit elapsed:_TIME_ code:0
+ atexit elapsed:_TIME_ code:0
+ EOF
+ test_cmp expect actual
+'
+
+test_done
--- /dev/null
+#!/usr/bin/perl
+#
+# Scrub the variable fields from the normal trace2 output to
+# make testing easier.
+
+use strict;
+use warnings;
+
+my $float = '[0-9]*\.[0-9]+([eE][-+]?[0-9]+)?';
+
+# This code assumes that the trace2 data was written with bare
+# turned on (which omits the "<clock> <file>:<line>" prefix).
+
+while (<>) {
+ # Various messages include an elapsed time in the middle
+ # of the message. Replace the time with a placeholder to
+ # simplify our HEREDOC in the test script.
+ s/elapsed:$float/elapsed:_TIME_/g;
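+ # e.g. an (illustrative) input line "exit elapsed:0.001324 code:0"
+ # becomes "exit elapsed:_TIME_ code:0", matching the HEREDOCs in t0210.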
+
+ my $line = $_;
+
+ # we expect:
+ # start <argv0> [<argv1> [<argv2> [...]]]
+ #
+ # where argv0 might be a relative or absolute path, with
+ # or without quotes, and platform-dependent. Replace argv0
+ # with a token for HEREDOC matching in the test script.
+
+ if ($line =~ m/^start/) {
+ $line =~ /^start\s+(.*)/;
+ my $argv = $1;
+ $argv =~ m/(\'[^\']*\'|[^ ]+)\s+(.*)/;
+ my $argv_0 = $1;
+ my $argv_rest = $2;
+
+ print "start _EXE_ $argv_rest\n";
+ }
+ elsif ($line =~ m/^cmd_path/) {
+ # Likewise, the 'cmd_path' message breaks out argv[0].
+ #
+ # This line is only emitted when RUNTIME_PREFIX is defined,
+ # so just omit it for testing purposes.
+ # print "cmd_path _EXE_\n";
+ }
+ else {
+ print "$line";
+ }
+}
--- /dev/null
+#!/bin/sh
+
+test_description='test trace2 facility (perf target)'
+. ./test-lib.sh
+
+# Turn off any inherited trace2 settings for this test.
+sane_unset GIT_TR2 GIT_TR2_PERF GIT_TR2_EVENT
+sane_unset GIT_TR2_PERF_BRIEF
+sane_unset GIT_TR2_CONFIG_PARAMS
+
+# Add t/helper directory to PATH so that we can use a relative
+# path to run nested instances of test-tool.exe (see 004child).
+# This helps with HEREDOC comparisons later.
+TTDIR="$GIT_BUILD_DIR/t/helper/" && export TTDIR
+PATH="$TTDIR:$PATH" && export PATH
+
+# Warning: use of 'test_cmp' may run test-tool.exe and/or git.exe
+# Warning: to do the actual diff/comparison, so the HEREDOCs here
+# Warning: only cover our actual calls to test-tool and/or git.
+# Warning: So you may see extra lines in artifact files when
+# Warning: interactively debugging.
+
+V=$(git version | sed -e 's/^git version //') && export V
+
+# There are multiple trace2 targets: normal, perf, and event.
+# Trace2 events will/can be written to each active target (subject
+# to whatever filtering that target decides to do).
+# Test each target independently.
+#
+# Defer setting GIT_TR2_PERF until the actual command we want to
+# test because hidden git and test-tool commands in the test
+# harness can contaminate our output.
+
+# Enable "brief" feature which turns off the prefix:
+# "<clock> <file>:<line> | <nr_parents> | "
+GIT_TR2_PERF_BRIEF=1 && export GIT_TR2_PERF_BRIEF
+
+# Repeat some of the t0210 tests using the perf target stream instead of
+# the normal stream.
+#
+# Tokens here of the form _FIELD_ have been replaced in the observed output.
+
+# Verb 001return
+#
+# Implicit return from cmd_<verb> function propagates <code>.
+
+test_expect_success 'perf stream, return code 0' '
+ test_when_finished "rm trace.perf actual expect" &&
+ GIT_TR2_PERF="$(pwd)/trace.perf" test-tool trace2 001return 0 &&
+ perl "$TEST_DIRECTORY/t0211/scrub_perf.perl" <trace.perf >actual &&
+ cat >expect <<-EOF &&
+ d0|main|version|||||$V
+ d0|main|start||_T_ABS_|||_EXE_ trace2 001return 0
+ d0|main|cmd_name|||||trace2 (trace2)
+ d0|main|exit||_T_ABS_|||code:0
+ d0|main|atexit||_T_ABS_|||code:0
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'perf stream, return code 1' '
+ test_when_finished "rm trace.perf actual expect" &&
+ test_must_fail env GIT_TR2_PERF="$(pwd)/trace.perf" test-tool trace2 001return 1 &&
+ perl "$TEST_DIRECTORY/t0211/scrub_perf.perl" <trace.perf >actual &&
+ cat >expect <<-EOF &&
+ d0|main|version|||||$V
+ d0|main|start||_T_ABS_|||_EXE_ trace2 001return 1
+ d0|main|cmd_name|||||trace2 (trace2)
+ d0|main|exit||_T_ABS_|||code:1
+ d0|main|atexit||_T_ABS_|||code:1
+ EOF
+ test_cmp expect actual
+'
+
+# Verb 003error
+#
+# To the above, add multiple 'error <msg>' events
+
+test_expect_success 'perf stream, error event' '
+ test_when_finished "rm trace.perf actual expect" &&
+ GIT_TR2_PERF="$(pwd)/trace.perf" test-tool trace2 003error "hello world" "this is a test" &&
+ perl "$TEST_DIRECTORY/t0211/scrub_perf.perl" <trace.perf >actual &&
+ cat >expect <<-EOF &&
+ d0|main|version|||||$V
+ d0|main|start||_T_ABS_|||_EXE_ trace2 003error '\''hello world'\'' '\''this is a test'\''
+ d0|main|cmd_name|||||trace2 (trace2)
+ d0|main|error|||||hello world
+ d0|main|error|||||this is a test
+ d0|main|exit||_T_ABS_|||code:0
+ d0|main|atexit||_T_ABS_|||code:0
+ EOF
+ test_cmp expect actual
+'
+
+# Verb 004child
+#
+# Test nested spawning of child processes.
+#
+# Conceptually, this looks like:
+# P1: TT trace2 004child
+# P2: |--- TT trace2 004child
+# P3: |--- TT trace2 001return 0
+#
+# Which should generate events:
+# P1: version
+# P1: start
+# P1: cmd_name
+# P1: child_start
+# P2: version
+# P2: start
+# P2: cmd_name
+# P2: child_start
+# P3: version
+# P3: start
+# P3: cmd_name
+# P3: exit
+# P3: atexit
+# P2: child_exit
+# P2: exit
+# P2: atexit
+# P1: child_exit
+# P1: exit
+# P1: atexit
+
+test_expect_success 'perf stream, child processes' '
+ test_when_finished "rm trace.perf actual expect" &&
+ GIT_TR2_PERF="$(pwd)/trace.perf" test-tool trace2 004child test-tool trace2 004child test-tool trace2 001return 0 &&
+ perl "$TEST_DIRECTORY/t0211/scrub_perf.perl" <trace.perf >actual &&
+ cat >expect <<-EOF &&
+ d0|main|version|||||$V
+ d0|main|start||_T_ABS_|||_EXE_ trace2 004child test-tool trace2 004child test-tool trace2 001return 0
+ d0|main|cmd_name|||||trace2 (trace2)
+ d0|main|child_start||_T_ABS_|||[ch0] class:? argv: test-tool trace2 004child test-tool trace2 001return 0
+ d1|main|version|||||$V
+ d1|main|start||_T_ABS_|||_EXE_ trace2 004child test-tool trace2 001return 0
+ d1|main|cmd_name|||||trace2 (trace2/trace2)
+ d1|main|child_start||_T_ABS_|||[ch0] class:? argv: test-tool trace2 001return 0
+ d2|main|version|||||$V
+ d2|main|start||_T_ABS_|||_EXE_ trace2 001return 0
+ d2|main|cmd_name|||||trace2 (trace2/trace2/trace2)
+ d2|main|exit||_T_ABS_|||code:0
+ d2|main|atexit||_T_ABS_|||code:0
+ d1|main|child_exit||_T_ABS_|_T_REL_||[ch0] pid:_PID_ code:0
+ d1|main|exit||_T_ABS_|||code:0
+ d1|main|atexit||_T_ABS_|||code:0
+ d0|main|child_exit||_T_ABS_|_T_REL_||[ch0] pid:_PID_ code:0
+ d0|main|exit||_T_ABS_|||code:0
+ d0|main|atexit||_T_ABS_|||code:0
+ EOF
+ test_cmp expect actual
+'
+
+sane_unset GIT_TR2_PERF_BRIEF
+
+# Now test without environment variables and get all Trace2 settings
+# from the global config.
+
+test_expect_success 'using global config, perf stream, return code 0' '
+ test_when_finished "rm trace.perf actual expect" &&
+ test_config_global trace2.perfBrief 1 &&
+ test_config_global trace2.perfTarget "$(pwd)/trace.perf" &&
+ test-tool trace2 001return 0 &&
+ perl "$TEST_DIRECTORY/t0211/scrub_perf.perl" <trace.perf >actual &&
+ cat >expect <<-EOF &&
+ d0|main|version|||||$V
+ d0|main|start||_T_ABS_|||_EXE_ trace2 001return 0
+ d0|main|cmd_name|||||trace2 (trace2)
+ d0|main|exit||_T_ABS_|||code:0
+ d0|main|atexit||_T_ABS_|||code:0
+ EOF
+ test_cmp expect actual
+'
+
+test_done
--- /dev/null
+#!/usr/bin/perl
+#
+# Scrub the variable fields from the perf trace2 output to
+# make testing easier.
+
+use strict;
+use warnings;
+
+my $qpath = '\'[^\']*\'|[^ ]*';
+
+my $col_depth=0;
+my $col_thread=1;
+my $col_event=2;
+my $col_repo=3;
+my $col_t_abs=4;
+my $col_t_rel=5;
+my $col_category=6;
+my $col_rest=7;
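+
+# For example, a scrubbed brief-mode line looks like
+#     d0|main|exit||_T_ABS_|||code:0
+# i.e. depth|thread|event|repo|t_abs|t_rel|category|rest.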
+
+# This code assumes that the trace2 data was written with bare
+# turned on (which omits the "<clock> <file>:<line> | <parents>"
+# prefix).
+
+while (<>) {
+ my @tokens = split /\|/;
+
+ foreach my $col (@tokens) { $col =~ s/^\s+|\s+$//g; }
+
+ if ($tokens[$col_event] =~ m/^start/) {
+ # The 'start' message lists the contents of argv in $col_rest.
+ # On some platforms (Windows), argv[0] is *sometimes* a canonical
+ # absolute path to the EXE rather than the value passed in the
+ # shell script. Replace it with a placeholder to simplify our
+ # HEREDOC in the test script.
+ my $argv0;
+ my $argvRest;
+ $tokens[$col_rest] =~ s/^($qpath)\W*(.*)/_EXE_ $2/;
+ }
+ elsif ($tokens[$col_event] =~ m/cmd_path/) {
+ # Likewise, the 'cmd_path' message breaks out argv[0].
+ #
+ # This line is only emitted when RUNTIME_PREFIX is defined,
+ # so just omit it for testing purposes.
+ # $tokens[$col_rest] = "_EXE_";
+ goto SKIP_LINE;
+ }
+ elsif ($tokens[$col_event] =~ m/child_exit/) {
+ $tokens[$col_rest] =~ s/ pid:\d* / pid:_PID_ /;
+ }
+ elsif ($tokens[$col_event] =~ m/data/) {
+ if ($tokens[$col_category] =~ m/process/) {
+ # 'data' and 'data_json' events containing 'process'
+ # category data are assumed to be platform-specific
+ # and highly variable. Just omit them.
+ goto SKIP_LINE;
+ }
+ }
+
+ # t_abs and t_rel are either blank or a float. Replace the float
+ # with a constant for matching the HEREDOC in the test script.
+ if ($tokens[$col_t_abs] =~ m/\d/) {
+ $tokens[$col_t_abs] = "_T_ABS_";
+ }
+ if ($tokens[$col_t_rel] =~ m/\d/) {
+ $tokens[$col_t_rel] = "_T_REL_";
+ }
+
+ my $out;
+
+ $out = join('|', @tokens);
+ print "$out\n";
+
+ SKIP_LINE:
+}
+
+
--- /dev/null
+#!/bin/sh
+
+test_description='test trace2 facility'
+. ./test-lib.sh
+
+# Turn off any inherited trace2 settings for this test.
+sane_unset GIT_TR2 GIT_TR2_PERF GIT_TR2_EVENT
+sane_unset GIT_TR2_BARE
+sane_unset GIT_TR2_CONFIG_PARAMS
+
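+# The helper script t0212/parse_events.perl needs the JSON::PP perl
+# module; skip the JSON_PP-guarded tests when it is not available.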
+perl -MJSON::PP -e 0 >/dev/null 2>&1 && test_set_prereq JSON_PP
+
+# Add t/helper directory to PATH so that we can use a relative
+# path to run nested instances of test-tool.exe (see 004child).
+# This helps with HEREDOC comparisons later.
+TTDIR="$GIT_BUILD_DIR/t/helper/" && export TTDIR
+PATH="$TTDIR:$PATH" && export PATH
+
+# Warning: use of 'test_cmp' may run test-tool.exe and/or git.exe
+# Warning: to do the actual diff/comparison, so the HEREDOCs here
+# Warning: only cover our actual calls to test-tool and/or git.
+# Warning: So you may see extra lines in artifact files when
+# Warning: interactively debugging.
+
+V=$(git version | sed -e 's/^git version //') && export V
+
+# There are multiple trace2 targets: normal, perf, and event.
+# Trace2 events will/can be written to each active target (subject
+# to whatever filtering that target decides to do).
+# Test each target independently.
+#
+# Defer setting GIT_TR2_PERF until the actual command we want to
+# test because hidden git and test-tool commands in the test
+# harness can contaminate our output.
+
+# We don't bother repeating the 001return and 002exit tests, since they
+# have coverage in the normal and perf targets.
+
+# Verb 003error
+#
+# To the above, add multiple 'error <msg>' events
+
+test_expect_success JSON_PP 'event stream, error event' '
+ test_when_finished "rm trace.event actual expect" &&
+ GIT_TR2_EVENT="$(pwd)/trace.event" test-tool trace2 003error "hello world" "this is a test" &&
+ perl "$TEST_DIRECTORY/t0212/parse_events.perl" <trace.event >actual &&
+ sed -e "s/^|//" >expect <<-EOF &&
+ |VAR1 = {
+ | "_SID0_":{
+ | "argv":[
+ | "_EXE_",
+ | "trace2",
+ | "003error",
+ | "hello world",
+ | "this is a test"
+ | ],
+ | "errors":[
+ | "%s",
+ | "%s"
+ | ],
+ | "exit_code":0,
+ | "hierarchy":"trace2",
+ | "name":"trace2",
+ | "version":"$V"
+ | }
+ |};
+ EOF
+ test_cmp expect actual
+'
+
+# Verb 004child
+#
+# Test nested spawning of child processes.
+#
+# Conceptually, this looks like:
+# P1: TT trace2 004child
+# P2: |--- TT trace2 004child
+# P3: |--- TT trace2 001return 0
+
+test_expect_success JSON_PP 'event stream, return code 0' '
+ test_when_finished "rm trace.event actual expect" &&
+ GIT_TR2_EVENT="$(pwd)/trace.event" test-tool trace2 004child test-tool trace2 004child test-tool trace2 001return 0 &&
+ perl "$TEST_DIRECTORY/t0212/parse_events.perl" <trace.event >actual &&
+ sed -e "s/^|//" >expect <<-EOF &&
+ |VAR1 = {
+ | "_SID0_":{
+ | "argv":[
+ | "_EXE_",
+ | "trace2",
+ | "004child",
+ | "test-tool",
+ | "trace2",
+ | "004child",
+ | "test-tool",
+ | "trace2",
+ | "001return",
+ | "0"
+ | ],
+ | "child":{
+ | "0":{
+ | "child_argv":[
+ | "_EXE_",
+ | "trace2",
+ | "004child",
+ | "test-tool",
+ | "trace2",
+ | "001return",
+ | "0"
+ | ],
+ | "child_class":"?",
+ | "child_code":0,
+ | "use_shell":0
+ | }
+ | },
+ | "exit_code":0,
+ | "hierarchy":"trace2",
+ | "name":"trace2",
+ | "version":"$V"
+ | },
+ | "_SID0_/_SID1_":{
+ | "argv":[
+ | "_EXE_",
+ | "trace2",
+ | "004child",
+ | "test-tool",
+ | "trace2",
+ | "001return",
+ | "0"
+ | ],
+ | "child":{
+ | "0":{
+ | "child_argv":[
+ | "_EXE_",
+ | "trace2",
+ | "001return",
+ | "0"
+ | ],
+ | "child_class":"?",
+ | "child_code":0,
+ | "use_shell":0
+ | }
+ | },
+ | "exit_code":0,
+ | "hierarchy":"trace2/trace2",
+ | "name":"trace2",
+ | "version":"$V"
+ | },
+ | "_SID0_/_SID1_/_SID2_":{
+ | "argv":[
+ | "_EXE_",
+ | "trace2",
+ | "001return",
+ | "0"
+ | ],
+ | "exit_code":0,
+ | "hierarchy":"trace2/trace2/trace2",
+ | "name":"trace2",
+ | "version":"$V"
+ | }
+ |};
+ EOF
+ test_cmp expect actual
+'
+
+# Test listing of all "interesting" config settings.
+
+test_expect_success JSON_PP 'event stream, list config' '
+ test_when_finished "rm trace.event actual expect" &&
+ git config --local t0212.abc 1 &&
+ git config --local t0212.def "hello world" &&
+ GIT_TR2_EVENT="$(pwd)/trace.event" GIT_TR2_CONFIG_PARAMS="t0212.*" test-tool trace2 001return 0 &&
+ perl "$TEST_DIRECTORY/t0212/parse_events.perl" <trace.event >actual &&
+ sed -e "s/^|//" >expect <<-EOF &&
+ |VAR1 = {
+ | "_SID0_":{
+ | "argv":[
+ | "_EXE_",
+ | "trace2",
+ | "001return",
+ | "0"
+ | ],
+ | "exit_code":0,
+ | "hierarchy":"trace2",
+ | "name":"trace2",
+ | "params":[
+ | {
+ | "param":"t0212.abc",
+ | "value":"1"
+ | },
+ | {
+ | "param":"t0212.def",
+ | "value":"hello world"
+ | }
+ | ],
+ | "version":"$V"
+ | }
+ |};
+ EOF
+ test_cmp expect actual
+'
+
+test_expect_success JSON_PP 'basic trace2_data' '
+ test_when_finished "rm trace.event actual expect" &&
+ GIT_TR2_EVENT="$(pwd)/trace.event" test-tool trace2 006data test_category k1 v1 test_category k2 v2 &&
+ perl "$TEST_DIRECTORY/t0212/parse_events.perl" <trace.event >actual &&
+ sed -e "s/^|//" >expect <<-EOF &&
+ |VAR1 = {
+ | "_SID0_":{
+ | "argv":[
+ | "_EXE_",
+ | "trace2",
+ | "006data",
+ | "test_category",
+ | "k1",
+ | "v1",
+ | "test_category",
+ | "k2",
+ | "v2"
+ | ],
+ | "data":{
+ | "test_category":{
+ | "k1":"v1",
+ | "k2":"v2"
+ | }
+ | },
+ | "exit_code":0,
+ | "hierarchy":"trace2",
+ | "name":"trace2",
+ | "version":"$V"
+ | }
+ |};
+ EOF
+ test_cmp expect actual
+'
+
+# Now test without environment variables and get all Trace2 settings
+# from the global config.
+
+test_expect_success JSON_PP 'using global config, event stream, error event' '
+ test_when_finished "rm trace.event actual expect" &&
+ test_config_global trace2.eventTarget "$(pwd)/trace.event" &&
+ test-tool trace2 003error "hello world" "this is a test" &&
+ perl "$TEST_DIRECTORY/t0212/parse_events.perl" <trace.event >actual &&
+ sed -e "s/^|//" >expect <<-EOF &&
+ |VAR1 = {
+ | "_SID0_":{
+ | "argv":[
+ | "_EXE_",
+ | "trace2",
+ | "003error",
+ | "hello world",
+ | "this is a test"
+ | ],
+ | "errors":[
+ | "%s",
+ | "%s"
+ | ],
+ | "exit_code":0,
+ | "hierarchy":"trace2",
+ | "name":"trace2",
+ | "version":"$V"
+ | }
+ |};
+ EOF
+ test_cmp expect actual
+'
+
+test_done
--- /dev/null
+#!/usr/bin/perl
+#
+# Parse event stream and convert individual events into a summary
+# record for the process.
+#
+# Git.exe generates one or more "event" records for each API method,
+# such as "start <argv>" and "exit <code>", during the life of the git
+# process. Additionally, the input may contain interleaved events
+# from multiple concurrent git processes and/or multiple threads from
+# within a git process.
+#
+# Accumulate events for each process (based on its unique SID) in a
+# dictionary and emit process summary records.
+#
+# Convert some of the variable fields (such as elapsed time) into
+# placeholders (or omit them) to make HEREDOC comparisons easier in
+# the test scripts.
+#
+# We may also omit fields not (currently) useful for testing purposes.
+
+use strict;
+use warnings;
+use JSON::PP;
+use Data::Dumper;
+use Getopt::Long;
+
+# The version of the trace2 event target format that we understand.
+# This is reported in the 'version' event in the 'evt' field.
+# It comes from the GIT_TR2_EVENT_VERSION macro in trace2/tr2_tgt_event.c
+my $evt_version = '1';
+
+my $show_children = 1;
+my $show_exec = 1;
+my $show_threads = 1;
+
+# A hack to generate test HEREDOC data for pasting into the test script.
+# Usage:
+# cd "t/trash directory.t0212-trace2-event"
+# $TT trace ... >trace.event
+# VV=$(../../git.exe version | sed -e 's/^git version //')
+# perl ../t0212/parse_events.perl --HEREDOC --VERSION=$VV <trace.event >heredoc
+# Then paste heredoc into your new test.
+
+my $gen_heredoc = 0;
+my $gen_version = '';
+
+GetOptions("children!" => \$show_children,
+ "exec!" => \$show_exec,
+ "threads!" => \$show_threads,
+ "HEREDOC!" => \$gen_heredoc,
+ "VERSION=s" => \$gen_version )
+ or die("Error in command line arguments\n");
+
+
+# SIDs contains timestamps and PIDs of the process and its parents.
+# This makes it difficult to match up in a HEREDOC in the test script.
+# Build a map from actual SIDs to predictable constant values and yet
+# keep the parent/child relationships. For example:
+# {..., "sid":"1539706952458276-8652", ...}
+# {..., "sid":"1539706952458276-8652/1539706952649493-15452", ...}
+# becomes:
+# {..., "sid":"_SID1_", ...}
+# {..., "sid":"_SID1_/_SID2_", ...}
+my $sid_map;
+my $sid_count = 0;
+
+my $processes;
+
+while (<>) {
+ my $line = decode_json( $_ );
+
+ my $sid = "";
+ my $sid_sep = "";
+
+ my $raw_sid = $line->{'sid'};
+ my @raw_sid_parts = split /\//, $raw_sid;
+ foreach my $raw_sid_k (@raw_sid_parts) {
+ if (!exists $sid_map->{$raw_sid_k}) {
+ $sid_map->{$raw_sid_k} = '_SID' . $sid_count . '_';
+ $sid_count++;
+ }
+ $sid = $sid . $sid_sep . $sid_map->{$raw_sid_k};
+ $sid_sep = '/';
+ }
+
+ my $event = $line->{'event'};
+
+ if ($event eq 'version') {
+ $processes->{$sid}->{'version'} = $line->{'exe'};
+ if ($gen_heredoc == 1 && $gen_version eq $line->{'exe'}) {
+ # If we are generating data FOR the test script, replace
+ # the reported git.exe version with a reference to an
+ # environment variable. When our output is pasted into
+ # the test script, it will then be expanded in future
+ # test runs to the THEN current version of git.exe.
+ # We assume that the test script uses env var $V.
+ $processes->{$sid}->{'version'} = "\$V";
+ }
+ }
+
+ elsif ($event eq 'start') {
+ $processes->{$sid}->{'argv'} = $line->{'argv'};
+ $processes->{$sid}->{'argv'}[0] = "_EXE_";
+ }
+
+ elsif ($event eq 'exit') {
+ $processes->{$sid}->{'exit_code'} = $line->{'code'};
+ }
+
+ elsif ($event eq 'atexit') {
+ $processes->{$sid}->{'exit_code'} = $line->{'code'};
+ }
+
+ elsif ($event eq 'error') {
+ # For HEREDOC purposes, use the error message format string if
+ # available, rather than the formatted message (which probably
+ # has an absolute pathname).
+ if (exists $line->{'fmt'}) {
+ push( @{$processes->{$sid}->{'errors'}}, $line->{'fmt'} );
+ }
+ elsif (exists $line->{'msg'}) {
+ push( @{$processes->{$sid}->{'errors'}}, $line->{'msg'} );
+ }
+ }
+
+ elsif ($event eq 'cmd_path') {
+ ## $processes->{$sid}->{'path'} = $line->{'path'};
+ #
+ # Like in the 'start' event, we need to replace the value of
+ # argv[0] with a token for HEREDOC purposes. However, the
+ # event is only emitted when RUNTIME_PREFIX is defined, so
+ # just omit it for testing purposes.
+ # $processes->{$sid}->{'path'} = "_EXE_";
+ }
+
+ elsif ($event eq 'cmd_name') {
+ $processes->{$sid}->{'name'} = $line->{'name'};
+ $processes->{$sid}->{'hierarchy'} = $line->{'hierarchy'};
+ }
+
+ elsif ($event eq 'alias') {
+ $processes->{$sid}->{'alias'}->{'key'} = $line->{'alias'};
+ $processes->{$sid}->{'alias'}->{'argv'} = $line->{'argv'};
+ }
+
+ elsif ($event eq 'def_param') {
+ my $kv;
+ $kv->{'param'} = $line->{'param'};
+ $kv->{'value'} = $line->{'value'};
+ push( @{$processes->{$sid}->{'params'}}, $kv );
+ }
+
+ elsif ($event eq 'child_start') {
+ if ($show_children == 1) {
+ $processes->{$sid}->{'child'}->{$line->{'child_id'}}->{'child_class'} = $line->{'child_class'};
+ $processes->{$sid}->{'child'}->{$line->{'child_id'}}->{'child_argv'} = $line->{'argv'};
+ $processes->{$sid}->{'child'}->{$line->{'child_id'}}->{'child_argv'}[0] = "_EXE_";
+ $processes->{$sid}->{'child'}->{$line->{'child_id'}}->{'use_shell'} = $line->{'use_shell'} ? 1 : 0;
+ }
+ }
+
+ elsif ($event eq 'child_exit') {
+ if ($show_children == 1) {
+ $processes->{$sid}->{'child'}->{$line->{'child_id'}}->{'child_code'} = $line->{'code'};
+ }
+ }
+
+ # TODO decide what information we want to test from thread events.
+
+ elsif ($event eq 'thread_start') {
+ if ($show_threads == 1) {
+ }
+ }
+
+ elsif ($event eq 'thread_exit') {
+ if ($show_threads == 1) {
+ }
+ }
+
+ # TODO decide what information we want to test from exec events.
+
+ elsif ($event eq 'exec') {
+ if ($show_exec == 1) {
+ }
+ }
+
+ elsif ($event eq 'exec_result') {
+ if ($show_exec == 1) {
+ }
+ }
+
+ elsif ($event eq 'def_param') {
+ # Accumulate parameter key/value pairs by key rather than in an array
+ # so that we get overwrite (last one wins) effects.
+ $processes->{$sid}->{'params'}->{$line->{'param'}} = $line->{'value'};
+ }
+
+ elsif ($event eq 'def_repo') {
+ # $processes->{$sid}->{'repos'}->{$line->{'repo'}} = $line->{'worktree'};
+ $processes->{$sid}->{'repos'}->{$line->{'repo'}} = "_WORKTREE_";
+ }
+
+ # A series of potentially nested and threaded region and data events
+# is fundamentally incompatible with the type of summary record we
+ # are building in this script. Since they are intended for
+ # perf-trace-like analysis rather than a result summary, we ignore
+ # most of them here.
+
+ # elsif ($event eq 'region_enter') {
+ # }
+ # elsif ($event eq 'region_leave') {
+ # }
+
+ elsif ($event eq 'data') {
+ my $cat = $line->{'category'};
+ if ($cat eq 'test_category') {
+
+ my $key = $line->{'key'};
+ my $value = $line->{'value'};
+ $processes->{$sid}->{'data'}->{$cat}->{$key} = $value;
+ }
+ }
+
+ # This trace2 target does not emit 'printf' events.
+ #
+ # elsif ($event eq 'printf') {
+ # }
+}
+
+# Dump the resulting hash into something that we can compare against
+# in the test script. These options make Dumper output look a little
+# bit like JSON. Also convert variable references of the form "$VAR*"
+# so that the matching HEREDOC doesn't need to escape it.
+
+$Data::Dumper::Sortkeys = 1;
+$Data::Dumper::Indent = 1;
+$Data::Dumper::Purity = 1;
+$Data::Dumper::Pair = ':';
+
+my $out = Dumper($processes);
+$out =~ s/'/"/g;
+$out =~ s/\$VAR/VAR/g;
+
+# Finally, if we're running this script to generate (manually confirmed)
+# data to add to the test script, guard the indentation.
+
+if ($gen_heredoc == 1) {
+ $out =~ s/^/\t\|/gms;
+}
+
+print $out;
}
# don't leave a stale daemon running
-trap 'code=$?; git credential-cache exit; (exit $code); die' EXIT
+test_atexit 'git credential-cache exit'
# test that the daemon works with no special setup
helper_test cache
helper_test_timeout cache --timeout=1
-# we can't rely on our "trap" above working after test_done,
-# as test_done will delete the trash directory containing
-# our socket, leaving us with no way to access the daemon.
-git credential-cache exit
-
test_done
git verify-pack --verbose "$IDX" | grep "$HASH"
'
-stop_httpd
-
test_done
test_i18ngrep "too-short tree object" err
'
-hex2oct() {
- perl -ne 'printf "\\%03o", hex for /../g'
-}
-
test_expect_success 'malformed mode in tree' '
hex_sha1=$(echo foo | git hash-object --stdin -w) &&
bin_sha1=$(echo $hex_sha1 | hex2oct) &&
)
'
+test_expect_success 'internal tree objects are not "missing"' '
+ git init missing-empty &&
+ (
+ cd missing-empty &&
+ empty_tree=$(git hash-object -t tree /dev/null) &&
+ commit=$(echo foo | git commit-tree $empty_tree) &&
+ git rev-list --objects $commit
+ )
+'
+
test_done
)
'
+test_expect_success 'conditional include with /**/' '
+ REPO=foo/bar/repo &&
+ git init $REPO &&
+ cat >>$REPO/.git/config <<-\EOF &&
+ [includeIf "gitdir:**/foo/**/bar/**"]
+ path=bar7
+ EOF
+ echo "[test]seven=7" >$REPO/.git/bar7 &&
+ echo 7 >expect &&
+ git -C $REPO config test.seven >actual &&
+ test_cmp expect actual
+'
+
test_expect_success SYMLINKS 'conditional include, set up symlinked $HOME' '
mkdir real-home &&
ln -s real-home home &&
test_cmp unchanged actual
'
+test_expect_success 'delete fails cleanly if packed-refs.new write fails' '
+ # Setup and expectations are similar to the test above.
+ prefix=refs/failed-packed-refs &&
+ git update-ref $prefix/foo $C &&
+ git pack-refs --all &&
+ git update-ref $prefix/foo $D &&
+ git for-each-ref $prefix >unchanged &&
+ # This should not happen in practice, but it is an easy way to get a
+ # reliable error (we open with create_tempfile(), which uses O_EXCL).
+ : >.git/packed-refs.new &&
+ test_when_finished "rm -f .git/packed-refs.new" &&
+ test_must_fail git update-ref -d $prefix/foo &&
+ git for-each-ref $prefix >actual &&
+ test_cmp unchanged actual
+'
+
test_done
'
test_expect_success 'gc.reflogexpire=never' '
+ test_config gc.reflogexpire never &&
+ test_config gc.reflogexpireunreachable never &&
+
+ git reflog expire --verbose --all >output &&
+ test_line_count = 9 output &&
- git config gc.reflogexpire never &&
- git config gc.reflogexpireunreachable never &&
- git reflog expire --verbose --all &&
git reflog refs/heads/master >output &&
test_line_count = 4 output
'
test_expect_success 'gc.reflogexpire=false' '
+ test_config gc.reflogexpire false &&
+ test_config gc.reflogexpireunreachable false &&
- git config gc.reflogexpire false &&
- git config gc.reflogexpireunreachable false &&
git reflog expire --verbose --all &&
git reflog refs/heads/master >output &&
- test_line_count = 4 output &&
+ test_line_count = 4 output
+
+'
- git config --unset gc.reflogexpire &&
- git config --unset gc.reflogexpireunreachable
+test_expect_success 'git reflog expire unknown reference' '
+ test_config gc.reflogexpire never &&
+ test_config gc.reflogexpireunreachable never &&
+ test_must_fail git reflog expire master@{123} 2>stderr &&
+ test_i18ngrep "points nowhere" stderr &&
+ test_must_fail git reflog expire does-not-exist 2>stderr &&
+ test_i18ngrep "points nowhere" stderr
'
test_expect_success 'checkout should not delete log for packed ref' '
test_cmp expected actual.wt2
'
+test_expect_success 'for-each-ref from main repo' '
+ mkdir fer1 &&
+ git -C fer1 init repo &&
+ test_commit -C fer1/repo initial &&
+ git -C fer1/repo worktree add ../second &&
+ git -C fer1/repo update-ref refs/bisect/main HEAD &&
+ git -C fer1/repo update-ref refs/rewritten/main HEAD &&
+ git -C fer1/repo update-ref refs/worktree/main HEAD &&
+ git -C fer1/repo for-each-ref --format="%(refname)" | grep main >actual &&
+ cat >expected <<-\EOF &&
+ refs/bisect/main
+ refs/rewritten/main
+ refs/worktree/main
+ EOF
+ test_cmp expected actual
+'
+
+test_expect_success 'for-each-ref from linked repo' '
+ mkdir fer2 &&
+ git -C fer2 init repo &&
+ test_commit -C fer2/repo initial &&
+ git -C fer2/repo worktree add ../second &&
+ git -C fer2/second update-ref refs/bisect/second HEAD &&
+ git -C fer2/second update-ref refs/rewritten/second HEAD &&
+ git -C fer2/second update-ref refs/worktree/second HEAD &&
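+	# refs/heads/second is expected too, since "worktree add" created that branch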
+ git -C fer2/second for-each-ref --format="%(refname)" | grep second >actual &&
+ cat >expected <<-\EOF &&
+ refs/bisect/second
+ refs/heads/second
+ refs/rewritten/second
+ refs/worktree/second
+ EOF
+ test_cmp expected actual
+'
+
test_done
test_i18ngrep ! "fatal: empty filename in tree entry" out
'
-hex2oct() {
- perl -ne 'printf "\\%03o", hex for /../g'
-}
-
test_expect_success 'tree entry with type mismatch' '
test_when_finished "remove_object \$blob" &&
test_when_finished "remove_object \$tree" &&
# for each of type, we have one version which is referenced by another object
# (and so while unreachable, not dangling), and another variant which really is
# dangling.
-test_expect_success 'fsck notices dangling objects' '
+test_expect_success 'create dangling-object repository' '
git init dangling &&
(
cd dangling &&
commit=$(git commit-tree $tree) &&
dcommit=$(git commit-tree -p $commit $tree) &&
- cat >expect <<-EOF &&
+ cat >expect <<-EOF
dangling blob $dblob
dangling commit $dcommit
dangling tree $dtree
EOF
+ )
+'
+test_expect_success 'fsck notices dangling objects' '
+ (
+ cd dangling &&
git fsck >actual &&
# the output order is non-deterministic, as it comes from a hash
sort <actual >actual.sorted &&
)
'
+test_expect_success 'fsck --connectivity-only notices dangling objects' '
+ (
+ cd dangling &&
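+		# --connectivity-only should report the same dangling objects as a full fsck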
+ git fsck --connectivity-only >actual &&
+ # the output order is non-deterministic, as it comes from a hash
+ sort <actual >actual.sorted &&
+ test_i18ncmp expect actual.sorted
+ )
+'
+
test_expect_success 'fsck $name notices bogus $name' '
test_must_fail git fsck bogus &&
test_must_fail git fsck $ZERO_OID
test_line_count = 0 cache-tree.out
'
+test_expect_success 'do not refresh null base index' '
+ test_create_repo merge &&
+ (
+ cd merge &&
+ test_commit initial &&
+ git checkout -b side-branch &&
+ test_commit extra &&
+ git checkout master &&
+ git update-index --split-index &&
+ test_commit more &&
+		# must not write a new shared index, or we won't catch the problem
+ git -c splitIndex.maxPercentChange=100 merge --no-edit side-branch 2>err &&
+ # i.e. do not expect warnings like
+ # could not freshen shared index .../shareindex.00000...
+ test_must_be_empty err
+ )
+'
+
test_done
'
test_expect_success 'checkout -b to a new branch, set to HEAD' '
+ test_when_finished "
+ git checkout branch1 &&
+ test_might_fail git branch -D branch2" &&
do_checkout branch2
'
-test_expect_success 'checkout -b to a new branch, set to an explicit ref' '
- git checkout branch1 &&
- git branch -D branch2 &&
+test_expect_success 'checkout -b to a merge base' '
+ test_when_finished "
+ git checkout branch1 &&
+ test_might_fail git branch -D branch2" &&
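+	# "branch1..." resolves to the merge base of branch1 and HEAD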
+ git checkout -b branch2 branch1...
+'
+test_expect_success 'checkout -b to a new branch, set to an explicit ref' '
+ test_when_finished "
+ git checkout branch1 &&
+ test_might_fail git branch -D branch2" &&
do_checkout branch2 $HEAD1
'
test_expect_success 'checkout -b to a new branch with unmergeable changes fails' '
- git checkout branch1 &&
-
- # clean up from previous test
- git branch -D branch2 &&
-
setup_dirty_unmergeable &&
test_must_fail do_checkout branch2 $HEAD1 &&
test_dirty_unmergeable
'
test_expect_success 'checkout -f -b to a new branch with unmergeable changes discards changes' '
+ test_when_finished "
+ git checkout branch1 &&
+ test_might_fail git branch -D branch2" &&
+
# still dirty and on branch1
do_checkout branch2 $HEAD1 "-f -b" &&
test_must_fail test_dirty_unmergeable
'
test_expect_success 'checkout -b to a new branch preserves mergeable changes' '
- git checkout branch1 &&
-
- # clean up from previous test
- git branch -D branch2 &&
+ test_when_finished "
+ git reset --hard &&
+ git checkout branch1 &&
+ test_might_fail git branch -D branch2" &&
setup_dirty_mergeable &&
do_checkout branch2 $HEAD1 &&
'
test_expect_success 'checkout -f -b to a new branch with mergeable changes discards changes' '
- # clean up from previous test
- git reset --hard &&
-
- git checkout branch1 &&
-
- # clean up from previous test
- git branch -D branch2 &&
-
+ test_when_finished git reset --hard HEAD &&
setup_dirty_mergeable &&
do_checkout branch2 $HEAD1 "-f -b" &&
test_must_fail test_dirty_mergeable
'
test_expect_success 'checkout -b to an existing branch fails' '
- git reset --hard HEAD &&
-
+ test_when_finished git reset --hard HEAD &&
test_must_fail do_checkout branch2 $HEAD2
'
test_expect_success 'checkout -b to @{-1} fails with the right branch name' '
- git reset --hard HEAD &&
git checkout branch1 &&
git checkout branch2 &&
echo >expect "fatal: A branch named '\''branch1'\'' already exists." &&
do_checkout branch2 "" -B
'
+test_expect_success 'checkout -B to a merge base' '
+ git checkout branch1 &&
+
+ git checkout -B branch2 branch1...
+'
+
test_expect_success 'checkout -B to an existing branch from detached HEAD resets branch to HEAD' '
git checkout $(git rev-parse --verify HEAD) &&
'
test_expect_success 'checkout -B to an existing branch preserves mergeable changes' '
+ test_when_finished git reset --hard &&
git checkout branch1 &&
setup_dirty_mergeable &&
'
test_expect_success 'checkout -f -B to an existing branch with mergeable changes discards changes' '
- # clean up from previous test
- git reset --hard &&
-
git checkout branch1 &&
setup_dirty_mergeable &&
test_cmp both.txt.conflicted.cleaned both.txt.cleaned
'
+test_expect_success 'force checkout a conflict file creates stage zero entry' '
+ git init co-force &&
+ (
+ cd co-force &&
+ echo a >a &&
+ git add a &&
+ git commit -ama &&
+ A_OBJ=$(git rev-parse :a) &&
+ git branch topic &&
+ echo b >a &&
+ git commit -amb &&
+ B_OBJ=$(git rev-parse :a) &&
+ git checkout topic &&
+ echo c >a &&
+ C_OBJ=$(git hash-object a) &&
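+		# "checkout -m" recreates the conflict, leaving stages 1, 2 and 3 in the index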
+ git checkout -m master &&
+ test_cmp_rev :1:a $A_OBJ &&
+ test_cmp_rev :2:a $B_OBJ &&
+ test_cmp_rev :3:a $C_OBJ &&
+ git checkout -f topic &&
+ test_cmp_rev :0:a $A_OBJ
+ )
+'
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='checkout --no-overlay <tree-ish> -- <pathspec>'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ git commit --allow-empty -m "initial"
+'
+
+test_expect_success 'checkout --no-overlay deletes files not in <tree-ish>' '
+ >file &&
+ mkdir dir &&
+ >dir/file1 &&
+ git add file dir/file1 &&
+ git checkout --no-overlay HEAD -- file &&
+ test_path_is_missing file &&
+ test_path_is_file dir/file1
+'
+
+test_expect_success 'checkout --no-overlay removing last file from directory' '
+ git checkout --no-overlay HEAD -- dir/file1 &&
+ test_path_is_missing dir
+'
+
+test_expect_success 'checkout -p --overlay is disallowed' '
+ test_must_fail git checkout -p --overlay HEAD 2>actual &&
+ test_i18ngrep "fatal: -p and --overlay are mutually exclusive" actual
+'
+
+test_expect_success '--no-overlay --theirs with D/F conflict deletes file' '
+ test_commit file1 file1 &&
+ test_commit file2 file2 &&
+ git rm --cached file1 &&
+ echo 1234 >file1 &&
+ F1=$(git rev-parse HEAD:file1) &&
+ F2=$(git rev-parse HEAD:file2) &&
+ {
+ echo "100644 $F1 1 file1" &&
+ echo "100644 $F2 2 file1"
+ } | git update-index --index-info &&
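+	# file1 has only stage 1 and stage 2 entries; with --no-overlay, the missing
+	# "theirs" (stage 3) version means the path is removed from the working tree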
+ test_path_is_file file1 &&
+ git checkout --theirs --no-overlay -- file1 &&
+ test_path_is_missing file1
+'
+
+test_done
+++ /dev/null
-#!/bin/sh
-
-test_description='test git worktree add'
-
-. ./test-lib.sh
-
-. "$TEST_DIRECTORY"/lib-rebase.sh
-
-test_expect_success 'setup' '
- test_commit init
-'
-
-test_expect_success '"add" an existing worktree' '
- mkdir -p existing/subtree &&
- test_must_fail git worktree add --detach existing master
-'
-
-test_expect_success '"add" an existing empty worktree' '
- mkdir existing_empty &&
- git worktree add --detach existing_empty master
-'
-
-test_expect_success '"add" using shorthand - fails when no previous branch' '
- test_must_fail git worktree add existing_short -
-'
-
-test_expect_success '"add" using - shorthand' '
- git checkout -b newbranch &&
- echo hello >myworld &&
- git add myworld &&
- git commit -m myworld &&
- git checkout master &&
- git worktree add short-hand - &&
- echo refs/heads/newbranch >expect &&
- git -C short-hand rev-parse --symbolic-full-name HEAD >actual &&
- test_cmp expect actual
-'
-
-test_expect_success '"add" refuses to checkout locked branch' '
- test_must_fail git worktree add zere master &&
- ! test -d zere &&
- ! test -d .git/worktrees/zere
-'
-
-test_expect_success 'checking out paths not complaining about linked checkouts' '
- (
- cd existing_empty &&
- echo dirty >>init.t &&
- git checkout master -- init.t
- )
-'
-
-test_expect_success '"add" worktree' '
- git rev-parse HEAD >expect &&
- git worktree add --detach here master &&
- (
- cd here &&
- test_cmp ../init.t init.t &&
- test_must_fail git symbolic-ref HEAD &&
- git rev-parse HEAD >actual &&
- test_cmp ../expect actual &&
- git fsck
- )
-'
-
-test_expect_success '"add" worktree with lock' '
- git rev-parse HEAD >expect &&
- git worktree add --detach --lock here-with-lock master &&
- test -f .git/worktrees/here-with-lock/locked
-'
-
-test_expect_success '"add" worktree from a subdir' '
- (
- mkdir sub &&
- cd sub &&
- git worktree add --detach here master &&
- cd here &&
- test_cmp ../../init.t init.t
- )
-'
-
-test_expect_success '"add" from a linked checkout' '
- (
- cd here &&
- git worktree add --detach nested-here master &&
- cd nested-here &&
- git fsck
- )
-'
-
-test_expect_success '"add" worktree creating new branch' '
- git worktree add -b newmaster there master &&
- (
- cd there &&
- test_cmp ../init.t init.t &&
- git symbolic-ref HEAD >actual &&
- echo refs/heads/newmaster >expect &&
- test_cmp expect actual &&
- git fsck
- )
-'
-
-test_expect_success 'die the same branch is already checked out' '
- (
- cd here &&
- test_must_fail git checkout newmaster
- )
-'
-
-test_expect_success SYMLINKS 'die the same branch is already checked out (symlink)' '
- head=$(git -C there rev-parse --git-path HEAD) &&
- ref=$(git -C there symbolic-ref HEAD) &&
- rm "$head" &&
- ln -s "$ref" "$head" &&
- test_must_fail git -C here checkout newmaster
-'
-
-test_expect_success 'not die the same branch is already checked out' '
- (
- cd here &&
- git worktree add --force anothernewmaster newmaster
- )
-'
-
-test_expect_success 'not die on re-checking out current branch' '
- (
- cd there &&
- git checkout newmaster
- )
-'
-
-test_expect_success '"add" from a bare repo' '
- (
- git clone --bare . bare &&
- cd bare &&
- git worktree add -b bare-master ../there2 master
- )
-'
-
-test_expect_success 'checkout from a bare repo without "add"' '
- (
- cd bare &&
- test_must_fail git checkout master
- )
-'
-
-test_expect_success '"add" default branch of a bare repo' '
- (
- git clone --bare . bare2 &&
- cd bare2 &&
- git worktree add ../there3 master
- )
-'
-
-test_expect_success 'checkout with grafts' '
- test_when_finished rm .git/info/grafts &&
- test_commit abc &&
- SHA1=$(git rev-parse HEAD) &&
- test_commit def &&
- test_commit xyz &&
- echo "$(git rev-parse HEAD) $SHA1" >.git/info/grafts &&
- cat >expected <<-\EOF &&
- xyz
- abc
- EOF
- git log --format=%s -2 >actual &&
- test_cmp expected actual &&
- git worktree add --detach grafted master &&
- git --git-dir=grafted/.git log --format=%s -2 >actual &&
- test_cmp expected actual
-'
-
-test_expect_success '"add" from relative HEAD' '
- test_commit a &&
- test_commit b &&
- test_commit c &&
- git rev-parse HEAD~1 >expected &&
- git worktree add relhead HEAD~1 &&
- git -C relhead rev-parse HEAD >actual &&
- test_cmp expected actual
-'
-
-test_expect_success '"add -b" with <branch> omitted' '
- git worktree add -b burble flornk &&
- test_cmp_rev HEAD burble
-'
-
-test_expect_success '"add --detach" with <branch> omitted' '
- git worktree add --detach fishhook &&
- git rev-parse HEAD >expected &&
- git -C fishhook rev-parse HEAD >actual &&
- test_cmp expected actual &&
- test_must_fail git -C fishhook symbolic-ref HEAD
-'
-
-test_expect_success '"add" with <branch> omitted' '
- git worktree add wiffle/bat &&
- test_cmp_rev HEAD bat
-'
-
-test_expect_success '"add" checks out existing branch of dwimd name' '
- git branch dwim HEAD~1 &&
- git worktree add dwim &&
- test_cmp_rev HEAD~1 dwim &&
- (
- cd dwim &&
- test_cmp_rev HEAD dwim
- )
-'
-
-test_expect_success '"add <path>" dwim fails with checked out branch' '
- git checkout -b test-branch &&
- test_must_fail git worktree add test-branch &&
- test_path_is_missing test-branch
-'
-
-test_expect_success '"add --force" with existing dwimd name doesnt die' '
- git checkout test-branch &&
- git worktree add --force test-branch
-'
-
-test_expect_success '"add" no auto-vivify with --detach and <branch> omitted' '
- git worktree add --detach mish/mash &&
- test_must_fail git rev-parse mash -- &&
- test_must_fail git -C mish/mash symbolic-ref HEAD
-'
-
-test_expect_success '"add" -b/-B mutually exclusive' '
- test_must_fail git worktree add -b poodle -B poodle bamboo master
-'
-
-test_expect_success '"add" -b/--detach mutually exclusive' '
- test_must_fail git worktree add -b poodle --detach bamboo master
-'
-
-test_expect_success '"add" -B/--detach mutually exclusive' '
- test_must_fail git worktree add -B poodle --detach bamboo master
-'
-
-test_expect_success '"add -B" fails if the branch is checked out' '
- git rev-parse newmaster >before &&
- test_must_fail git worktree add -B newmaster bamboo master &&
- git rev-parse newmaster >after &&
- test_cmp before after
-'
-
-test_expect_success 'add -B' '
- git worktree add -B poodle bamboo2 master^ &&
- git -C bamboo2 symbolic-ref HEAD >actual &&
- echo refs/heads/poodle >expected &&
- test_cmp expected actual &&
- test_cmp_rev master^ poodle
-'
-
-test_expect_success 'add --quiet' '
- git worktree add --quiet another-worktree master 2>actual &&
- test_must_be_empty actual
-'
-
-test_expect_success 'local clone from linked checkout' '
- git clone --local here here-clone &&
- ( cd here-clone && git fsck )
-'
-
-test_expect_success 'local clone --shared from linked checkout' '
- git -C bare worktree add --detach ../baretree &&
- git clone --local --shared baretree bare-clone &&
- grep /bare/ bare-clone/.git/objects/info/alternates
-'
-
-test_expect_success '"add" worktree with --no-checkout' '
- git worktree add --no-checkout -b swamp swamp &&
- ! test -e swamp/init.t &&
- git -C swamp reset --hard &&
- test_cmp init.t swamp/init.t
-'
-
-test_expect_success '"add" worktree with --checkout' '
- git worktree add --checkout -b swmap2 swamp2 &&
- test_cmp init.t swamp2/init.t
-'
-
-test_expect_success 'put a worktree under rebase' '
- git worktree add under-rebase &&
- (
- cd under-rebase &&
- set_fake_editor &&
- FAKE_LINES="edit 1" git rebase -i HEAD^ &&
- git worktree list | grep "under-rebase.*detached HEAD"
- )
-'
-
-test_expect_success 'add a worktree, checking out a rebased branch' '
- test_must_fail git worktree add new-rebase under-rebase &&
- ! test -d new-rebase
-'
-
-test_expect_success 'checking out a rebased branch from another worktree' '
- git worktree add new-place &&
- test_must_fail git -C new-place checkout under-rebase
-'
-
-test_expect_success 'not allow to delete a branch under rebase' '
- (
- cd under-rebase &&
- test_must_fail git branch -D under-rebase
- )
-'
-
-test_expect_success 'rename a branch under rebase not allowed' '
- test_must_fail git branch -M under-rebase rebase-with-new-name
-'
-
-test_expect_success 'check out from current worktree branch ok' '
- (
- cd under-rebase &&
- git checkout under-rebase &&
- git checkout - &&
- git rebase --abort
- )
-'
-
-test_expect_success 'checkout a branch under bisect' '
- git worktree add under-bisect &&
- (
- cd under-bisect &&
- git bisect start &&
- git bisect bad &&
- git bisect good HEAD~2 &&
- git worktree list | grep "under-bisect.*detached HEAD" &&
- test_must_fail git worktree add new-bisect under-bisect &&
- ! test -d new-bisect
- )
-'
-
-test_expect_success 'rename a branch under bisect not allowed' '
- test_must_fail git branch -M under-bisect bisect-with-new-name
-'
-# Is branch "refs/heads/$1" set to pull from "$2/$3"?
-test_branch_upstream () {
- printf "%s\n" "$2" "refs/heads/$3" >expect.upstream &&
- {
- git config "branch.$1.remote" &&
- git config "branch.$1.merge"
- } >actual.upstream &&
- test_cmp expect.upstream actual.upstream
-}
-
-test_expect_success '--track sets up tracking' '
- test_when_finished rm -rf track &&
- git worktree add --track -b track track master &&
- test_branch_upstream track . master
-'
-
-# setup remote repository $1 and repository $2 with $1 set up as
-# remote. The remote has two branches, master and foo.
-setup_remote_repo () {
- git init $1 &&
- (
- cd $1 &&
- test_commit $1_master &&
- git checkout -b foo &&
- test_commit upstream_foo
- ) &&
- git init $2 &&
- (
- cd $2 &&
- test_commit $2_master &&
- git remote add $1 ../$1 &&
- git config remote.$1.fetch \
- "refs/heads/*:refs/remotes/$1/*" &&
- git fetch --all
- )
-}
-
-test_expect_success '--no-track avoids setting up tracking' '
- test_when_finished rm -rf repo_upstream repo_local foo &&
- setup_remote_repo repo_upstream repo_local &&
- (
- cd repo_local &&
- git worktree add --no-track -b foo ../foo repo_upstream/foo
- ) &&
- (
- cd foo &&
- test_must_fail git config "branch.foo.remote" &&
- test_must_fail git config "branch.foo.merge" &&
- test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
- )
-'
-
-test_expect_success '"add" <path> <non-existent-branch> fails' '
- test_must_fail git worktree add foo non-existent
-'
-
-test_expect_success '"add" <path> <branch> dwims' '
- test_when_finished rm -rf repo_upstream repo_dwim foo &&
- setup_remote_repo repo_upstream repo_dwim &&
- git init repo_dwim &&
- (
- cd repo_dwim &&
- git worktree add ../foo foo
- ) &&
- (
- cd foo &&
- test_branch_upstream foo repo_upstream foo &&
- test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
- )
-'
-
-test_expect_success '"add" <path> <branch> dwims with checkout.defaultRemote' '
- test_when_finished rm -rf repo_upstream repo_dwim foo &&
- setup_remote_repo repo_upstream repo_dwim &&
- git init repo_dwim &&
- (
- cd repo_dwim &&
- git remote add repo_upstream2 ../repo_upstream &&
- git fetch repo_upstream2 &&
- test_must_fail git worktree add ../foo foo &&
- git -c checkout.defaultRemote=repo_upstream worktree add ../foo foo &&
- git status -uno --porcelain >status.actual &&
- test_must_be_empty status.actual
- ) &&
- (
- cd foo &&
- test_branch_upstream foo repo_upstream foo &&
- test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
- )
-'
-
-test_expect_success 'git worktree add does not match remote' '
- test_when_finished rm -rf repo_a repo_b foo &&
- setup_remote_repo repo_a repo_b &&
- (
- cd repo_b &&
- git worktree add ../foo
- ) &&
- (
- cd foo &&
- test_must_fail git config "branch.foo.remote" &&
- test_must_fail git config "branch.foo.merge" &&
- ! test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
- )
-'
-
-test_expect_success 'git worktree add --guess-remote sets up tracking' '
- test_when_finished rm -rf repo_a repo_b foo &&
- setup_remote_repo repo_a repo_b &&
- (
- cd repo_b &&
- git worktree add --guess-remote ../foo
- ) &&
- (
- cd foo &&
- test_branch_upstream foo repo_a foo &&
- test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
- )
-'
-
-test_expect_success 'git worktree add with worktree.guessRemote sets up tracking' '
- test_when_finished rm -rf repo_a repo_b foo &&
- setup_remote_repo repo_a repo_b &&
- (
- cd repo_b &&
- git config worktree.guessRemote true &&
- git worktree add ../foo
- ) &&
- (
- cd foo &&
- test_branch_upstream foo repo_a foo &&
- test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
- )
-'
-
-test_expect_success 'git worktree --no-guess-remote option overrides config' '
- test_when_finished rm -rf repo_a repo_b foo &&
- setup_remote_repo repo_a repo_b &&
- (
- cd repo_b &&
- git config worktree.guessRemote true &&
- git worktree add --no-guess-remote ../foo
- ) &&
- (
- cd foo &&
- test_must_fail git config "branch.foo.remote" &&
- test_must_fail git config "branch.foo.merge" &&
- ! test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
- )
-'
-
-post_checkout_hook () {
- gitdir=${1:-.git}
- test_when_finished "rm -f $gitdir/hooks/post-checkout" &&
- mkdir -p $gitdir/hooks &&
- write_script $gitdir/hooks/post-checkout <<-\EOF
- {
- echo $*
- git rev-parse --git-dir --show-toplevel
- } >hook.actual
- EOF
-}
-
-test_expect_success '"add" invokes post-checkout hook (branch)' '
- post_checkout_hook &&
- {
- echo $ZERO_OID $(git rev-parse HEAD) 1 &&
- echo $(pwd)/.git/worktrees/gumby &&
- echo $(pwd)/gumby
- } >hook.expect &&
- git worktree add gumby &&
- test_cmp hook.expect gumby/hook.actual
-'
-
-test_expect_success '"add" invokes post-checkout hook (detached)' '
- post_checkout_hook &&
- {
- echo $ZERO_OID $(git rev-parse HEAD) 1 &&
- echo $(pwd)/.git/worktrees/grumpy &&
- echo $(pwd)/grumpy
- } >hook.expect &&
- git worktree add --detach grumpy &&
- test_cmp hook.expect grumpy/hook.actual
-'
-
-test_expect_success '"add --no-checkout" suppresses post-checkout hook' '
- post_checkout_hook &&
- rm -f hook.actual &&
- git worktree add --no-checkout gloopy &&
- test_path_is_missing gloopy/hook.actual
-'
-
-test_expect_success '"add" in other worktree invokes post-checkout hook' '
- post_checkout_hook &&
- {
- echo $ZERO_OID $(git rev-parse HEAD) 1 &&
- echo $(pwd)/.git/worktrees/guppy &&
- echo $(pwd)/guppy
- } >hook.expect &&
- git -C gloopy worktree add --detach ../guppy &&
- test_cmp hook.expect guppy/hook.actual
-'
-
-test_expect_success '"add" in bare repo invokes post-checkout hook' '
- rm -rf bare &&
- git clone --bare . bare &&
- {
- echo $ZERO_OID $(git --git-dir=bare rev-parse HEAD) 1 &&
- echo $(pwd)/bare/worktrees/goozy &&
- echo $(pwd)/goozy
- } >hook.expect &&
- post_checkout_hook bare &&
- git -C bare worktree add --detach ../goozy &&
- test_cmp hook.expect goozy/hook.actual
-'
-
-test_expect_success '"add" an existing but missing worktree' '
- git worktree add --detach pneu &&
- test_must_fail git worktree add --detach pneu &&
- rm -fr pneu &&
- test_must_fail git worktree add --detach pneu &&
- git worktree add --force --detach pneu
-'
-
-test_expect_success '"add" an existing locked but missing worktree' '
- git worktree add --detach gnoo &&
- git worktree lock gnoo &&
- test_when_finished "git worktree unlock gnoo || :" &&
- rm -fr gnoo &&
- test_must_fail git worktree add --detach gnoo &&
- test_must_fail git worktree add --force --detach gnoo &&
- git worktree add --force --force --detach gnoo
-'
-
-test_done
+++ /dev/null
-#!/bin/sh
-
-test_description='prune $GIT_DIR/worktrees'
-
-. ./test-lib.sh
-
-test_expect_success initialize '
- git commit --allow-empty -m init
-'
-
-test_expect_success 'worktree prune on normal repo' '
- git worktree prune &&
- test_must_fail git worktree prune abc
-'
-
-test_expect_success 'prune files inside $GIT_DIR/worktrees' '
- mkdir .git/worktrees &&
- : >.git/worktrees/abc &&
- git worktree prune --verbose >actual &&
- cat >expect <<EOF &&
-Removing worktrees/abc: not a valid directory
-EOF
- test_i18ncmp expect actual &&
- ! test -f .git/worktrees/abc &&
- ! test -d .git/worktrees
-'
-
-test_expect_success 'prune directories without gitdir' '
- mkdir -p .git/worktrees/def/abc &&
- : >.git/worktrees/def/def &&
- cat >expect <<EOF &&
-Removing worktrees/def: gitdir file does not exist
-EOF
- git worktree prune --verbose >actual &&
- test_i18ncmp expect actual &&
- ! test -d .git/worktrees/def &&
- ! test -d .git/worktrees
-'
-
-test_expect_success SANITY 'prune directories with unreadable gitdir' '
- mkdir -p .git/worktrees/def/abc &&
- : >.git/worktrees/def/def &&
- : >.git/worktrees/def/gitdir &&
- chmod u-r .git/worktrees/def/gitdir &&
- git worktree prune --verbose >actual &&
- test_i18ngrep "Removing worktrees/def: unable to read gitdir file" actual &&
- ! test -d .git/worktrees/def &&
- ! test -d .git/worktrees
-'
-
-test_expect_success 'prune directories with invalid gitdir' '
- mkdir -p .git/worktrees/def/abc &&
- : >.git/worktrees/def/def &&
- : >.git/worktrees/def/gitdir &&
- git worktree prune --verbose >actual &&
- test_i18ngrep "Removing worktrees/def: invalid gitdir file" actual &&
- ! test -d .git/worktrees/def &&
- ! test -d .git/worktrees
-'
-
-test_expect_success 'prune directories with gitdir pointing to nowhere' '
- mkdir -p .git/worktrees/def/abc &&
- : >.git/worktrees/def/def &&
- echo "$(pwd)"/nowhere >.git/worktrees/def/gitdir &&
- git worktree prune --verbose >actual &&
- test_i18ngrep "Removing worktrees/def: gitdir file points to non-existent location" actual &&
- ! test -d .git/worktrees/def &&
- ! test -d .git/worktrees
-'
-
-test_expect_success 'not prune locked checkout' '
- test_when_finished rm -r .git/worktrees &&
- mkdir -p .git/worktrees/ghi &&
- : >.git/worktrees/ghi/locked &&
- git worktree prune &&
- test -d .git/worktrees/ghi
-'
-
-test_expect_success 'not prune recent checkouts' '
- test_when_finished rm -r .git/worktrees &&
- git worktree add jlm HEAD &&
- test -d .git/worktrees/jlm &&
- rm -rf jlm &&
- git worktree prune --verbose --expire=2.days.ago &&
- test -d .git/worktrees/jlm
-'
-
-test_expect_success 'not prune proper checkouts' '
- test_when_finished rm -r .git/worktrees &&
- git worktree add --detach "$PWD/nop" master &&
- git worktree prune &&
- test -d .git/worktrees/nop
-'
-
-test_done
+++ /dev/null
-#!/bin/sh
-
-test_description='test git worktree list'
-
-. ./test-lib.sh
-
-test_expect_success 'setup' '
- test_commit init
-'
-
-test_expect_success 'rev-parse --git-common-dir on main worktree' '
- git rev-parse --git-common-dir >actual &&
- echo .git >expected &&
- test_cmp expected actual &&
- mkdir sub &&
- git -C sub rev-parse --git-common-dir >actual2 &&
- echo ../.git >expected2 &&
- test_cmp expected2 actual2
-'
-
-test_expect_success 'rev-parse --git-path objects linked worktree' '
- echo "$(git rev-parse --show-toplevel)/.git/objects" >expect &&
- test_when_finished "rm -rf linked-tree actual expect && git worktree prune" &&
- git worktree add --detach linked-tree master &&
- git -C linked-tree rev-parse --git-path objects >actual &&
- test_cmp expect actual
-'
-
-test_expect_success '"list" all worktrees from main' '
- echo "$(git rev-parse --show-toplevel) $(git rev-parse --short HEAD) [$(git symbolic-ref --short HEAD)]" >expect &&
- test_when_finished "rm -rf here out actual expect && git worktree prune" &&
- git worktree add --detach here master &&
- echo "$(git -C here rev-parse --show-toplevel) $(git rev-parse --short HEAD) (detached HEAD)" >>expect &&
- git worktree list >out &&
- sed "s/ */ /g" <out >actual &&
- test_cmp expect actual
-'
-
-test_expect_success '"list" all worktrees from linked' '
- echo "$(git rev-parse --show-toplevel) $(git rev-parse --short HEAD) [$(git symbolic-ref --short HEAD)]" >expect &&
- test_when_finished "rm -rf here out actual expect && git worktree prune" &&
- git worktree add --detach here master &&
- echo "$(git -C here rev-parse --show-toplevel) $(git rev-parse --short HEAD) (detached HEAD)" >>expect &&
- git -C here worktree list >out &&
- sed "s/ */ /g" <out >actual &&
- test_cmp expect actual
-'
-
-test_expect_success '"list" all worktrees --porcelain' '
- echo "worktree $(git rev-parse --show-toplevel)" >expect &&
- echo "HEAD $(git rev-parse HEAD)" >>expect &&
- echo "branch $(git symbolic-ref HEAD)" >>expect &&
- echo >>expect &&
- test_when_finished "rm -rf here actual expect && git worktree prune" &&
- git worktree add --detach here master &&
- echo "worktree $(git -C here rev-parse --show-toplevel)" >>expect &&
- echo "HEAD $(git rev-parse HEAD)" >>expect &&
- echo "detached" >>expect &&
- echo >>expect &&
- git worktree list --porcelain >actual &&
- test_cmp expect actual
-'
-
-test_expect_success 'bare repo setup' '
- git init --bare bare1 &&
- echo "data" >file1 &&
- git add file1 &&
- git commit -m"File1: add data" &&
- git push bare1 master &&
- git reset --hard HEAD^
-'
-
-test_expect_success '"list" all worktrees from bare main' '
- test_when_finished "rm -rf there out actual expect && git -C bare1 worktree prune" &&
- git -C bare1 worktree add --detach ../there master &&
- echo "$(pwd)/bare1 (bare)" >expect &&
- echo "$(git -C there rev-parse --show-toplevel) $(git -C there rev-parse --short HEAD) (detached HEAD)" >>expect &&
- git -C bare1 worktree list >out &&
- sed "s/ */ /g" <out >actual &&
- test_cmp expect actual
-'
-
-test_expect_success '"list" all worktrees --porcelain from bare main' '
- test_when_finished "rm -rf there actual expect && git -C bare1 worktree prune" &&
- git -C bare1 worktree add --detach ../there master &&
- echo "worktree $(pwd)/bare1" >expect &&
- echo "bare" >>expect &&
- echo >>expect &&
- echo "worktree $(git -C there rev-parse --show-toplevel)" >>expect &&
- echo "HEAD $(git -C there rev-parse HEAD)" >>expect &&
- echo "detached" >>expect &&
- echo >>expect &&
- git -C bare1 worktree list --porcelain >actual &&
- test_cmp expect actual
-'
-
-test_expect_success '"list" all worktrees from linked with a bare main' '
- test_when_finished "rm -rf there out actual expect && git -C bare1 worktree prune" &&
- git -C bare1 worktree add --detach ../there master &&
- echo "$(pwd)/bare1 (bare)" >expect &&
- echo "$(git -C there rev-parse --show-toplevel) $(git -C there rev-parse --short HEAD) (detached HEAD)" >>expect &&
- git -C there worktree list >out &&
- sed "s/ */ /g" <out >actual &&
- test_cmp expect actual
-'
-
-test_expect_success 'bare repo cleanup' '
- rm -rf bare1
-'
-
-test_expect_success 'broken main worktree still at the top' '
- git init broken-main &&
- (
- cd broken-main &&
- test_commit new &&
- git worktree add linked &&
- cat >expected <<-EOF &&
- worktree $(pwd)
- HEAD $ZERO_OID
-
- EOF
- cd linked &&
- echo "worktree $(pwd)" >expected &&
- echo "ref: .broken" >../.git/HEAD &&
- git worktree list --porcelain >out &&
- head -n 3 out >actual &&
- test_cmp ../expected actual &&
- git worktree list >out &&
- head -n 1 out >actual.2 &&
- grep -F "(error)" actual.2
- )
-'
-
-test_expect_success 'linked worktrees are sorted' '
- mkdir sorted &&
- git init sorted/main &&
- (
- cd sorted/main &&
- test_tick &&
- test_commit new &&
- git worktree add ../first &&
- git worktree add ../second &&
- git worktree list --porcelain >out &&
- grep ^worktree out >actual
- ) &&
- cat >expected <<-EOF &&
- worktree $(pwd)/sorted/main
- worktree $(pwd)/sorted/first
- worktree $(pwd)/sorted/second
- EOF
- test_cmp expected sorted/main/actual
-'
-
-test_done
+++ /dev/null
-#!/bin/sh
-
-test_description='test git worktree move, remove, lock and unlock'
-
-. ./test-lib.sh
-
-test_expect_success 'setup' '
- test_commit init &&
- git worktree add source &&
- git worktree list --porcelain >out &&
- grep "^worktree" out >actual &&
- cat <<-EOF >expected &&
- worktree $(pwd)
- worktree $(pwd)/source
- EOF
- test_cmp expected actual
-'
-
-test_expect_success 'lock main worktree' '
- test_must_fail git worktree lock .
-'
-
-test_expect_success 'lock linked worktree' '
- git worktree lock --reason hahaha source &&
- echo hahaha >expected &&
- test_cmp expected .git/worktrees/source/locked
-'
-
-test_expect_success 'lock linked worktree from another worktree' '
- rm .git/worktrees/source/locked &&
- git worktree add elsewhere &&
- git -C elsewhere worktree lock --reason hahaha ../source &&
- echo hahaha >expected &&
- test_cmp expected .git/worktrees/source/locked
-'
-
-test_expect_success 'lock worktree twice' '
- test_must_fail git worktree lock source &&
- echo hahaha >expected &&
- test_cmp expected .git/worktrees/source/locked
-'
-
-test_expect_success 'lock worktree twice (from the locked worktree)' '
- test_must_fail git -C source worktree lock . &&
- echo hahaha >expected &&
- test_cmp expected .git/worktrees/source/locked
-'
-
-test_expect_success 'unlock main worktree' '
- test_must_fail git worktree unlock .
-'
-
-test_expect_success 'unlock linked worktree' '
- git worktree unlock source &&
- test_path_is_missing .git/worktrees/source/locked
-'
-
-test_expect_success 'unlock worktree twice' '
- test_must_fail git worktree unlock source &&
- test_path_is_missing .git/worktrees/source/locked
-'
-
-test_expect_success 'move non-worktree' '
- mkdir abc &&
- test_must_fail git worktree move abc def
-'
-
-test_expect_success 'move locked worktree' '
- git worktree lock source &&
- test_when_finished "git worktree unlock source" &&
- test_must_fail git worktree move source destination
-'
-
-test_expect_success 'move worktree' '
- git worktree move source destination &&
- test_path_is_missing source &&
- git worktree list --porcelain >out &&
- grep "^worktree.*/destination$" out &&
- ! grep "^worktree.*/source$" out &&
- git -C destination log --format=%s >actual2 &&
- echo init >expected2 &&
- test_cmp expected2 actual2
-'
-
-test_expect_success 'move main worktree' '
- test_must_fail git worktree move . def
-'
-
-test_expect_success 'move worktree to another dir' '
- mkdir some-dir &&
- git worktree move destination some-dir &&
- test_when_finished "git worktree move some-dir/destination destination" &&
- test_path_is_missing destination &&
- git worktree list --porcelain >out &&
- grep "^worktree.*/some-dir/destination$" out &&
- git -C some-dir/destination log --format=%s >actual2 &&
- echo init >expected2 &&
- test_cmp expected2 actual2
-'
-
-test_expect_success 'move locked worktree (force)' '
- test_when_finished "
- git worktree unlock flump || :
- git worktree remove flump || :
- git worktree unlock ploof || :
- git worktree remove ploof || :
- " &&
- git worktree add --detach flump &&
- git worktree lock flump &&
-	test_must_fail git worktree move flump ploof &&
-	test_must_fail git worktree move --force flump ploof &&
- git worktree move --force --force flump ploof
-'
-
-test_expect_success 'move a repo with uninitialized submodule' '
- git init withsub &&
- (
- cd withsub &&
- test_commit initial &&
- git submodule add "$PWD"/.git sub &&
- git commit -m withsub &&
- git worktree add second HEAD &&
- git worktree move second third
- )
-'
-
-test_expect_success 'not move a repo with initialized submodule' '
- (
- cd withsub &&
- git -C third submodule update &&
- test_must_fail git worktree move third forth
- )
-'
-
-test_expect_success 'remove main worktree' '
- test_must_fail git worktree remove .
-'
-
-test_expect_success 'remove locked worktree' '
- git worktree lock destination &&
- test_when_finished "git worktree unlock destination" &&
- test_must_fail git worktree remove destination
-'
-
-test_expect_success 'remove worktree with dirty tracked file' '
- echo dirty >>destination/init.t &&
- test_when_finished "git -C destination checkout init.t" &&
- test_must_fail git worktree remove destination
-'
-
-test_expect_success 'remove worktree with untracked file' '
- : >destination/untracked &&
- test_must_fail git worktree remove destination
-'
-
-test_expect_success 'force remove worktree with untracked file' '
- git worktree remove --force destination &&
- test_path_is_missing destination
-'
-
-test_expect_success 'remove missing worktree' '
- git worktree add to-be-gone &&
- test -d .git/worktrees/to-be-gone &&
- mv to-be-gone gone &&
- git worktree remove to-be-gone &&
- test_path_is_missing .git/worktrees/to-be-gone
-'
-
-test_expect_success 'NOT remove missing-but-locked worktree' '
- git worktree add gone-but-locked &&
- git worktree lock gone-but-locked &&
- test -d .git/worktrees/gone-but-locked &&
- mv gone-but-locked really-gone-now &&
- test_must_fail git worktree remove gone-but-locked &&
- test_path_is_dir .git/worktrees/gone-but-locked
-'
-
-test_expect_success 'proper error when worktree not found' '
- for i in noodle noodle/bork
- do
- test_must_fail git worktree lock $i 2>err &&
- test_i18ngrep "not a working tree" err || return 1
- done
-'
-
-test_expect_success 'remove locked worktree (force)' '
- git worktree add --detach gumby &&
- test_when_finished "git worktree remove gumby || :" &&
- git worktree lock gumby &&
- test_when_finished "git worktree unlock gumby || :" &&
- test_must_fail git worktree remove gumby &&
- test_must_fail git worktree remove --force gumby &&
- git worktree remove --force --force gumby
-'
-
-test_expect_success 'remove cleans up .git/worktrees when empty' '
- git init moog &&
- (
- cd moog &&
- test_commit bim &&
- git worktree add --detach goom &&
- test_path_exists .git/worktrees &&
- git worktree remove goom &&
- test_path_is_missing .git/worktrees
- )
-'
-
-test_expect_success 'remove a repo with uninitialized submodule' '
- (
- cd withsub &&
- git worktree add to-remove HEAD &&
- git worktree remove to-remove
- )
-'
-
-test_expect_success 'not remove a repo with initialized submodule' '
- (
- cd withsub &&
- git worktree add to-remove HEAD &&
- git -C to-remove submodule update &&
- test_must_fail git worktree remove to-remove
- )
-'
-
-test_done
+++ /dev/null
-#!/bin/sh
-
-test_description="config file in multi worktree"
-
-. ./test-lib.sh
-
-test_expect_success 'setup' '
- test_commit start
-'
-
-test_expect_success 'config --worktree in single worktree' '
- git config --worktree foo.bar true &&
- test_cmp_config true foo.bar
-'
-
-test_expect_success 'add worktrees' '
- git worktree add wt1 &&
- git worktree add wt2
-'
-
-test_expect_success 'config --worktree without extension' '
- test_must_fail git config --worktree foo.bar false
-'
-
-test_expect_success 'enable worktreeConfig extension' '
- git config extensions.worktreeConfig true &&
- test_cmp_config true extensions.worktreeConfig
-'
-
-test_expect_success 'config is shared as before' '
- git config this.is shared &&
- test_cmp_config shared this.is &&
- test_cmp_config -C wt1 shared this.is &&
- test_cmp_config -C wt2 shared this.is
-'
-
-test_expect_success 'config is shared (set from another worktree)' '
- git -C wt1 config that.is also-shared &&
- test_cmp_config also-shared that.is &&
- test_cmp_config -C wt1 also-shared that.is &&
- test_cmp_config -C wt2 also-shared that.is
-'
-
-test_expect_success 'config private to main worktree' '
- git config --worktree this.is for-main &&
- test_cmp_config for-main this.is &&
- test_cmp_config -C wt1 shared this.is &&
- test_cmp_config -C wt2 shared this.is
-'
-
-test_expect_success 'config private to linked worktree' '
- git -C wt1 config --worktree this.is for-wt1 &&
- test_cmp_config for-main this.is &&
- test_cmp_config -C wt1 for-wt1 this.is &&
- test_cmp_config -C wt2 shared this.is
-'
-
-test_expect_success 'core.bare no longer for main only' '
- test_config core.bare true &&
- test "$(git rev-parse --is-bare-repository)" = true &&
- test "$(git -C wt1 rev-parse --is-bare-repository)" = true &&
- test "$(git -C wt2 rev-parse --is-bare-repository)" = true
-'
-
-test_expect_success 'per-worktree core.bare is picked up' '
- git -C wt1 config --worktree core.bare true &&
- test "$(git rev-parse --is-bare-repository)" = false &&
- test "$(git -C wt1 rev-parse --is-bare-repository)" = true &&
- test "$(git -C wt2 rev-parse --is-bare-repository)" = false
-'
-
-test_expect_success 'config.worktree no longer read without extension' '
- git config --unset extensions.worktreeConfig &&
- test_cmp_config shared this.is &&
- test_cmp_config -C wt1 shared this.is &&
- test_cmp_config -C wt2 shared this.is
-'
-
-test_done
--- /dev/null
+#!/bin/sh
+
+test_description='test git worktree add'
+
+. ./test-lib.sh
+
+. "$TEST_DIRECTORY"/lib-rebase.sh
+
+test_expect_success 'setup' '
+ test_commit init
+'
+
+test_expect_success '"add" an existing worktree' '
+ mkdir -p existing/subtree &&
+ test_must_fail git worktree add --detach existing master
+'
+
+test_expect_success '"add" an existing empty worktree' '
+ mkdir existing_empty &&
+ git worktree add --detach existing_empty master
+'
+
+test_expect_success '"add" using shorthand - fails when no previous branch' '
+ test_must_fail git worktree add existing_short -
+'
+
+test_expect_success '"add" using - shorthand' '
+ git checkout -b newbranch &&
+ echo hello >myworld &&
+ git add myworld &&
+ git commit -m myworld &&
+ git checkout master &&
+ git worktree add short-hand - &&
+ echo refs/heads/newbranch >expect &&
+ git -C short-hand rev-parse --symbolic-full-name HEAD >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '"add" refuses to checkout locked branch' '
+ test_must_fail git worktree add zere master &&
+ ! test -d zere &&
+ ! test -d .git/worktrees/zere
+'
+
+test_expect_success 'checking out paths not complaining about linked checkouts' '
+ (
+ cd existing_empty &&
+ echo dirty >>init.t &&
+ git checkout master -- init.t
+ )
+'
+
+test_expect_success '"add" worktree' '
+ git rev-parse HEAD >expect &&
+ git worktree add --detach here master &&
+ (
+ cd here &&
+ test_cmp ../init.t init.t &&
+ test_must_fail git symbolic-ref HEAD &&
+ git rev-parse HEAD >actual &&
+ test_cmp ../expect actual &&
+ git fsck
+ )
+'
+
+test_expect_success '"add" worktree with lock' '
+ git rev-parse HEAD >expect &&
+ git worktree add --detach --lock here-with-lock master &&
+ test -f .git/worktrees/here-with-lock/locked
+'
+
+test_expect_success '"add" worktree from a subdir' '
+ (
+ mkdir sub &&
+ cd sub &&
+ git worktree add --detach here master &&
+ cd here &&
+ test_cmp ../../init.t init.t
+ )
+'
+
+test_expect_success '"add" from a linked checkout' '
+ (
+ cd here &&
+ git worktree add --detach nested-here master &&
+ cd nested-here &&
+ git fsck
+ )
+'
+
+test_expect_success '"add" worktree creating new branch' '
+ git worktree add -b newmaster there master &&
+ (
+ cd there &&
+ test_cmp ../init.t init.t &&
+ git symbolic-ref HEAD >actual &&
+ echo refs/heads/newmaster >expect &&
+ test_cmp expect actual &&
+ git fsck
+ )
+'
+
+test_expect_success 'die the same branch is already checked out' '
+ (
+ cd here &&
+ test_must_fail git checkout newmaster
+ )
+'
+
+test_expect_success SYMLINKS 'die the same branch is already checked out (symlink)' '
+ head=$(git -C there rev-parse --git-path HEAD) &&
+ ref=$(git -C there symbolic-ref HEAD) &&
+ rm "$head" &&
+ ln -s "$ref" "$head" &&
+ test_must_fail git -C here checkout newmaster
+'
+
+test_expect_success 'not die the same branch is already checked out' '
+ (
+ cd here &&
+ git worktree add --force anothernewmaster newmaster
+ )
+'
+
+test_expect_success 'not die on re-checking out current branch' '
+ (
+ cd there &&
+ git checkout newmaster
+ )
+'
+
+test_expect_success '"add" from a bare repo' '
+ (
+ git clone --bare . bare &&
+ cd bare &&
+ git worktree add -b bare-master ../there2 master
+ )
+'
+
+test_expect_success 'checkout from a bare repo without "add"' '
+ (
+ cd bare &&
+ test_must_fail git checkout master
+ )
+'
+
+test_expect_success '"add" default branch of a bare repo' '
+ (
+ git clone --bare . bare2 &&
+ cd bare2 &&
+ git worktree add ../there3 master
+ )
+'
+
+test_expect_success 'checkout with grafts' '
+ test_when_finished rm .git/info/grafts &&
+ test_commit abc &&
+ SHA1=$(git rev-parse HEAD) &&
+ test_commit def &&
+ test_commit xyz &&
+ echo "$(git rev-parse HEAD) $SHA1" >.git/info/grafts &&
+ cat >expected <<-\EOF &&
+ xyz
+ abc
+ EOF
+ git log --format=%s -2 >actual &&
+ test_cmp expected actual &&
+ git worktree add --detach grafted master &&
+ git --git-dir=grafted/.git log --format=%s -2 >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success '"add" from relative HEAD' '
+ test_commit a &&
+ test_commit b &&
+ test_commit c &&
+ git rev-parse HEAD~1 >expected &&
+ git worktree add relhead HEAD~1 &&
+ git -C relhead rev-parse HEAD >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success '"add -b" with <branch> omitted' '
+ git worktree add -b burble flornk &&
+ test_cmp_rev HEAD burble
+'
+
+test_expect_success '"add --detach" with <branch> omitted' '
+ git worktree add --detach fishhook &&
+ git rev-parse HEAD >expected &&
+ git -C fishhook rev-parse HEAD >actual &&
+ test_cmp expected actual &&
+ test_must_fail git -C fishhook symbolic-ref HEAD
+'
+
+test_expect_success '"add" with <branch> omitted' '
+ git worktree add wiffle/bat &&
+ test_cmp_rev HEAD bat
+'
+
+test_expect_success '"add" checks out existing branch of dwimd name' '
+ git branch dwim HEAD~1 &&
+ git worktree add dwim &&
+ test_cmp_rev HEAD~1 dwim &&
+ (
+ cd dwim &&
+ test_cmp_rev HEAD dwim
+ )
+'
+
+test_expect_success '"add <path>" dwim fails with checked out branch' '
+ git checkout -b test-branch &&
+ test_must_fail git worktree add test-branch &&
+ test_path_is_missing test-branch
+'
+
+test_expect_success '"add --force" with existing dwimd name doesnt die' '
+ git checkout test-branch &&
+ git worktree add --force test-branch
+'
+
+test_expect_success '"add" no auto-vivify with --detach and <branch> omitted' '
+ git worktree add --detach mish/mash &&
+ test_must_fail git rev-parse mash -- &&
+ test_must_fail git -C mish/mash symbolic-ref HEAD
+'
+
+test_expect_success '"add" -b/-B mutually exclusive' '
+ test_must_fail git worktree add -b poodle -B poodle bamboo master
+'
+
+test_expect_success '"add" -b/--detach mutually exclusive' '
+ test_must_fail git worktree add -b poodle --detach bamboo master
+'
+
+test_expect_success '"add" -B/--detach mutually exclusive' '
+ test_must_fail git worktree add -B poodle --detach bamboo master
+'
+
+test_expect_success '"add -B" fails if the branch is checked out' '
+ git rev-parse newmaster >before &&
+ test_must_fail git worktree add -B newmaster bamboo master &&
+ git rev-parse newmaster >after &&
+ test_cmp before after
+'
+
+test_expect_success 'add -B' '
+ git worktree add -B poodle bamboo2 master^ &&
+ git -C bamboo2 symbolic-ref HEAD >actual &&
+ echo refs/heads/poodle >expected &&
+ test_cmp expected actual &&
+ test_cmp_rev master^ poodle
+'
+
+test_expect_success 'add --quiet' '
+ git worktree add --quiet another-worktree master 2>actual &&
+ test_must_be_empty actual
+'
+
+test_expect_success 'local clone from linked checkout' '
+ git clone --local here here-clone &&
+ ( cd here-clone && git fsck )
+'
+
+test_expect_success 'local clone --shared from linked checkout' '
+ git -C bare worktree add --detach ../baretree &&
+ git clone --local --shared baretree bare-clone &&
+ grep /bare/ bare-clone/.git/objects/info/alternates
+'
+
+test_expect_success '"add" worktree with --no-checkout' '
+ git worktree add --no-checkout -b swamp swamp &&
+ ! test -e swamp/init.t &&
+ git -C swamp reset --hard &&
+ test_cmp init.t swamp/init.t
+'
+
+test_expect_success '"add" worktree with --checkout' '
+ git worktree add --checkout -b swmap2 swamp2 &&
+ test_cmp init.t swamp2/init.t
+'
+
+test_expect_success 'put a worktree under rebase' '
+ git worktree add under-rebase &&
+ (
+ cd under-rebase &&
+ set_fake_editor &&
+ FAKE_LINES="edit 1" git rebase -i HEAD^ &&
+ git worktree list | grep "under-rebase.*detached HEAD"
+ )
+'
+
+test_expect_success 'add a worktree, checking out a rebased branch' '
+ test_must_fail git worktree add new-rebase under-rebase &&
+ ! test -d new-rebase
+'
+
+test_expect_success 'checking out a rebased branch from another worktree' '
+ git worktree add new-place &&
+ test_must_fail git -C new-place checkout under-rebase
+'
+
+test_expect_success 'not allow to delete a branch under rebase' '
+ (
+ cd under-rebase &&
+ test_must_fail git branch -D under-rebase
+ )
+'
+
+test_expect_success 'rename a branch under rebase not allowed' '
+ test_must_fail git branch -M under-rebase rebase-with-new-name
+'
+
+test_expect_success 'check out from current worktree branch ok' '
+ (
+ cd under-rebase &&
+ git checkout under-rebase &&
+ git checkout - &&
+ git rebase --abort
+ )
+'
+
+test_expect_success 'checkout a branch under bisect' '
+ git worktree add under-bisect &&
+ (
+ cd under-bisect &&
+ git bisect start &&
+ git bisect bad &&
+ git bisect good HEAD~2 &&
+ git worktree list | grep "under-bisect.*detached HEAD" &&
+ test_must_fail git worktree add new-bisect under-bisect &&
+ ! test -d new-bisect
+ )
+'
+
+test_expect_success 'rename a branch under bisect not allowed' '
+ test_must_fail git branch -M under-bisect bisect-with-new-name
+'
+# Is branch "refs/heads/$1" set to pull from "$2/$3"?
+test_branch_upstream () {
+ printf "%s\n" "$2" "refs/heads/$3" >expect.upstream &&
+ {
+ git config "branch.$1.remote" &&
+ git config "branch.$1.merge"
+ } >actual.upstream &&
+ test_cmp expect.upstream actual.upstream
+}
+
+test_expect_success '--track sets up tracking' '
+ test_when_finished rm -rf track &&
+ git worktree add --track -b track track master &&
+ test_branch_upstream track . master
+'
+
+# setup remote repository $1 and repository $2 with $1 set up as
+# remote. The remote has two branches, master and foo.
+setup_remote_repo () {
+ git init $1 &&
+ (
+ cd $1 &&
+ test_commit $1_master &&
+ git checkout -b foo &&
+ test_commit upstream_foo
+ ) &&
+ git init $2 &&
+ (
+ cd $2 &&
+ test_commit $2_master &&
+ git remote add $1 ../$1 &&
+ git config remote.$1.fetch \
+ "refs/heads/*:refs/remotes/$1/*" &&
+ git fetch --all
+ )
+}
+
+test_expect_success '--no-track avoids setting up tracking' '
+ test_when_finished rm -rf repo_upstream repo_local foo &&
+ setup_remote_repo repo_upstream repo_local &&
+ (
+ cd repo_local &&
+ git worktree add --no-track -b foo ../foo repo_upstream/foo
+ ) &&
+ (
+ cd foo &&
+ test_must_fail git config "branch.foo.remote" &&
+ test_must_fail git config "branch.foo.merge" &&
+ test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
+ )
+'
+
+test_expect_success '"add" <path> <non-existent-branch> fails' '
+ test_must_fail git worktree add foo non-existent
+'
+
+test_expect_success '"add" <path> <branch> dwims' '
+ test_when_finished rm -rf repo_upstream repo_dwim foo &&
+ setup_remote_repo repo_upstream repo_dwim &&
+ git init repo_dwim &&
+ (
+ cd repo_dwim &&
+ git worktree add ../foo foo
+ ) &&
+ (
+ cd foo &&
+ test_branch_upstream foo repo_upstream foo &&
+ test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
+ )
+'
+
+test_expect_success '"add" <path> <branch> dwims with checkout.defaultRemote' '
+ test_when_finished rm -rf repo_upstream repo_dwim foo &&
+ setup_remote_repo repo_upstream repo_dwim &&
+ git init repo_dwim &&
+ (
+ cd repo_dwim &&
+ git remote add repo_upstream2 ../repo_upstream &&
+ git fetch repo_upstream2 &&
+ test_must_fail git worktree add ../foo foo &&
+ git -c checkout.defaultRemote=repo_upstream worktree add ../foo foo &&
+ git status -uno --porcelain >status.actual &&
+ test_must_be_empty status.actual
+ ) &&
+ (
+ cd foo &&
+ test_branch_upstream foo repo_upstream foo &&
+ test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
+ )
+'
+
+test_expect_success 'git worktree add does not match remote' '
+ test_when_finished rm -rf repo_a repo_b foo &&
+ setup_remote_repo repo_a repo_b &&
+ (
+ cd repo_b &&
+ git worktree add ../foo
+ ) &&
+ (
+ cd foo &&
+ test_must_fail git config "branch.foo.remote" &&
+ test_must_fail git config "branch.foo.merge" &&
+ ! test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
+ )
+'
+
+test_expect_success 'git worktree add --guess-remote sets up tracking' '
+ test_when_finished rm -rf repo_a repo_b foo &&
+ setup_remote_repo repo_a repo_b &&
+ (
+ cd repo_b &&
+ git worktree add --guess-remote ../foo
+ ) &&
+ (
+ cd foo &&
+ test_branch_upstream foo repo_a foo &&
+ test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
+ )
+'
+
+test_expect_success 'git worktree add with worktree.guessRemote sets up tracking' '
+ test_when_finished rm -rf repo_a repo_b foo &&
+ setup_remote_repo repo_a repo_b &&
+ (
+ cd repo_b &&
+ git config worktree.guessRemote true &&
+ git worktree add ../foo
+ ) &&
+ (
+ cd foo &&
+ test_branch_upstream foo repo_a foo &&
+ test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
+ )
+'
+
+test_expect_success 'git worktree --no-guess-remote option overrides config' '
+ test_when_finished rm -rf repo_a repo_b foo &&
+ setup_remote_repo repo_a repo_b &&
+ (
+ cd repo_b &&
+ git config worktree.guessRemote true &&
+ git worktree add --no-guess-remote ../foo
+ ) &&
+ (
+ cd foo &&
+ test_must_fail git config "branch.foo.remote" &&
+ test_must_fail git config "branch.foo.merge" &&
+ ! test_cmp_rev refs/remotes/repo_a/foo refs/heads/foo
+ )
+'
+
+post_checkout_hook () {
+ gitdir=${1:-.git}
+ test_when_finished "rm -f $gitdir/hooks/post-checkout" &&
+ mkdir -p $gitdir/hooks &&
+ write_script $gitdir/hooks/post-checkout <<-\EOF
+ {
+ echo $*
+ git rev-parse --git-dir --show-toplevel
+ } >hook.actual
+ EOF
+}
+
+test_expect_success '"add" invokes post-checkout hook (branch)' '
+ post_checkout_hook &&
+ {
+ echo $ZERO_OID $(git rev-parse HEAD) 1 &&
+ echo $(pwd)/.git/worktrees/gumby &&
+ echo $(pwd)/gumby
+ } >hook.expect &&
+ git worktree add gumby &&
+ test_cmp hook.expect gumby/hook.actual
+'
+
+test_expect_success '"add" invokes post-checkout hook (detached)' '
+ post_checkout_hook &&
+ {
+ echo $ZERO_OID $(git rev-parse HEAD) 1 &&
+ echo $(pwd)/.git/worktrees/grumpy &&
+ echo $(pwd)/grumpy
+ } >hook.expect &&
+ git worktree add --detach grumpy &&
+ test_cmp hook.expect grumpy/hook.actual
+'
+
+test_expect_success '"add --no-checkout" suppresses post-checkout hook' '
+ post_checkout_hook &&
+ rm -f hook.actual &&
+ git worktree add --no-checkout gloopy &&
+ test_path_is_missing gloopy/hook.actual
+'
+
+test_expect_success '"add" in other worktree invokes post-checkout hook' '
+ post_checkout_hook &&
+ {
+ echo $ZERO_OID $(git rev-parse HEAD) 1 &&
+ echo $(pwd)/.git/worktrees/guppy &&
+ echo $(pwd)/guppy
+ } >hook.expect &&
+ git -C gloopy worktree add --detach ../guppy &&
+ test_cmp hook.expect guppy/hook.actual
+'
+
+test_expect_success '"add" in bare repo invokes post-checkout hook' '
+ rm -rf bare &&
+ git clone --bare . bare &&
+ {
+ echo $ZERO_OID $(git --git-dir=bare rev-parse HEAD) 1 &&
+ echo $(pwd)/bare/worktrees/goozy &&
+ echo $(pwd)/goozy
+ } >hook.expect &&
+ post_checkout_hook bare &&
+ git -C bare worktree add --detach ../goozy &&
+ test_cmp hook.expect goozy/hook.actual
+'
+
+test_expect_success '"add" an existing but missing worktree' '
+ git worktree add --detach pneu &&
+ test_must_fail git worktree add --detach pneu &&
+ rm -fr pneu &&
+ test_must_fail git worktree add --detach pneu &&
+ git worktree add --force --detach pneu
+'
+
+test_expect_success '"add" an existing locked but missing worktree' '
+ git worktree add --detach gnoo &&
+ git worktree lock gnoo &&
+ test_when_finished "git worktree unlock gnoo || :" &&
+ rm -fr gnoo &&
+ test_must_fail git worktree add --detach gnoo &&
+ test_must_fail git worktree add --force --detach gnoo &&
+ git worktree add --force --force --detach gnoo
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description='prune $GIT_DIR/worktrees'
+
+. ./test-lib.sh
+
+test_expect_success initialize '
+ git commit --allow-empty -m init
+'
+
+test_expect_success 'worktree prune on normal repo' '
+ git worktree prune &&
+ test_must_fail git worktree prune abc
+'
+
+test_expect_success 'prune files inside $GIT_DIR/worktrees' '
+ mkdir .git/worktrees &&
+ : >.git/worktrees/abc &&
+ git worktree prune --verbose >actual &&
+ cat >expect <<EOF &&
+Removing worktrees/abc: not a valid directory
+EOF
+ test_i18ncmp expect actual &&
+ ! test -f .git/worktrees/abc &&
+ ! test -d .git/worktrees
+'
+
+test_expect_success 'prune directories without gitdir' '
+ mkdir -p .git/worktrees/def/abc &&
+ : >.git/worktrees/def/def &&
+ cat >expect <<EOF &&
+Removing worktrees/def: gitdir file does not exist
+EOF
+ git worktree prune --verbose >actual &&
+ test_i18ncmp expect actual &&
+ ! test -d .git/worktrees/def &&
+ ! test -d .git/worktrees
+'
+
+test_expect_success SANITY 'prune directories with unreadable gitdir' '
+ mkdir -p .git/worktrees/def/abc &&
+ : >.git/worktrees/def/def &&
+ : >.git/worktrees/def/gitdir &&
+ chmod u-r .git/worktrees/def/gitdir &&
+ git worktree prune --verbose >actual &&
+ test_i18ngrep "Removing worktrees/def: unable to read gitdir file" actual &&
+ ! test -d .git/worktrees/def &&
+ ! test -d .git/worktrees
+'
+
+test_expect_success 'prune directories with invalid gitdir' '
+ mkdir -p .git/worktrees/def/abc &&
+ : >.git/worktrees/def/def &&
+ : >.git/worktrees/def/gitdir &&
+ git worktree prune --verbose >actual &&
+ test_i18ngrep "Removing worktrees/def: invalid gitdir file" actual &&
+ ! test -d .git/worktrees/def &&
+ ! test -d .git/worktrees
+'
+
+test_expect_success 'prune directories with gitdir pointing to nowhere' '
+ mkdir -p .git/worktrees/def/abc &&
+ : >.git/worktrees/def/def &&
+ echo "$(pwd)"/nowhere >.git/worktrees/def/gitdir &&
+ git worktree prune --verbose >actual &&
+ test_i18ngrep "Removing worktrees/def: gitdir file points to non-existent location" actual &&
+ ! test -d .git/worktrees/def &&
+ ! test -d .git/worktrees
+'
+
+test_expect_success 'not prune locked checkout' '
+ test_when_finished rm -r .git/worktrees &&
+ mkdir -p .git/worktrees/ghi &&
+ : >.git/worktrees/ghi/locked &&
+ git worktree prune &&
+ test -d .git/worktrees/ghi
+'
+
+test_expect_success 'not prune recent checkouts' '
+ test_when_finished rm -r .git/worktrees &&
+ git worktree add jlm HEAD &&
+ test -d .git/worktrees/jlm &&
+ rm -rf jlm &&
+ git worktree prune --verbose --expire=2.days.ago &&
+ test -d .git/worktrees/jlm
+'
+
+test_expect_success 'not prune proper checkouts' '
+ test_when_finished rm -r .git/worktrees &&
+ git worktree add --detach "$PWD/nop" master &&
+ git worktree prune &&
+ test -d .git/worktrees/nop
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description='test git worktree list'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_commit init
+'
+
+test_expect_success 'rev-parse --git-common-dir on main worktree' '
+ git rev-parse --git-common-dir >actual &&
+ echo .git >expected &&
+ test_cmp expected actual &&
+ mkdir sub &&
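+	# from inside a subdirectory the common dir is reported relative to it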
+ git -C sub rev-parse --git-common-dir >actual2 &&
+ echo ../.git >expected2 &&
+ test_cmp expected2 actual2
+'
+
+test_expect_success 'rev-parse --git-path objects linked worktree' '
+ echo "$(git rev-parse --show-toplevel)/.git/objects" >expect &&
+ test_when_finished "rm -rf linked-tree actual expect && git worktree prune" &&
+ git worktree add --detach linked-tree master &&
+ git -C linked-tree rev-parse --git-path objects >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '"list" all worktrees from main' '
+ echo "$(git rev-parse --show-toplevel) $(git rev-parse --short HEAD) [$(git symbolic-ref --short HEAD)]" >expect &&
+ test_when_finished "rm -rf here out actual expect && git worktree prune" &&
+ git worktree add --detach here master &&
+ echo "$(git -C here rev-parse --show-toplevel) $(git rev-parse --short HEAD) (detached HEAD)" >>expect &&
+ git worktree list >out &&
+ sed "s/ */ /g" <out >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '"list" all worktrees from linked' '
+ echo "$(git rev-parse --show-toplevel) $(git rev-parse --short HEAD) [$(git symbolic-ref --short HEAD)]" >expect &&
+ test_when_finished "rm -rf here out actual expect && git worktree prune" &&
+ git worktree add --detach here master &&
+ echo "$(git -C here rev-parse --show-toplevel) $(git rev-parse --short HEAD) (detached HEAD)" >>expect &&
+ git -C here worktree list >out &&
+ sed "s/ */ /g" <out >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '"list" all worktrees --porcelain' '
+ echo "worktree $(git rev-parse --show-toplevel)" >expect &&
+ echo "HEAD $(git rev-parse HEAD)" >>expect &&
+ echo "branch $(git symbolic-ref HEAD)" >>expect &&
+ echo >>expect &&
+ test_when_finished "rm -rf here actual expect && git worktree prune" &&
+ git worktree add --detach here master &&
+ echo "worktree $(git -C here rev-parse --show-toplevel)" >>expect &&
+ echo "HEAD $(git rev-parse HEAD)" >>expect &&
+ echo "detached" >>expect &&
+ echo >>expect &&
+ git worktree list --porcelain >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'bare repo setup' '
+ git init --bare bare1 &&
+ echo "data" >file1 &&
+ git add file1 &&
+ git commit -m"File1: add data" &&
+ git push bare1 master &&
+ git reset --hard HEAD^
+'
+
+test_expect_success '"list" all worktrees from bare main' '
+ test_when_finished "rm -rf there out actual expect && git -C bare1 worktree prune" &&
+ git -C bare1 worktree add --detach ../there master &&
+ echo "$(pwd)/bare1 (bare)" >expect &&
+ echo "$(git -C there rev-parse --show-toplevel) $(git -C there rev-parse --short HEAD) (detached HEAD)" >>expect &&
+ git -C bare1 worktree list >out &&
+ sed "s/ */ /g" <out >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '"list" all worktrees --porcelain from bare main' '
+ test_when_finished "rm -rf there actual expect && git -C bare1 worktree prune" &&
+ git -C bare1 worktree add --detach ../there master &&
+ echo "worktree $(pwd)/bare1" >expect &&
+ echo "bare" >>expect &&
+ echo >>expect &&
+ echo "worktree $(git -C there rev-parse --show-toplevel)" >>expect &&
+ echo "HEAD $(git -C there rev-parse HEAD)" >>expect &&
+ echo "detached" >>expect &&
+ echo >>expect &&
+ git -C bare1 worktree list --porcelain >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '"list" all worktrees from linked with a bare main' '
+ test_when_finished "rm -rf there out actual expect && git -C bare1 worktree prune" &&
+ git -C bare1 worktree add --detach ../there master &&
+ echo "$(pwd)/bare1 (bare)" >expect &&
+ echo "$(git -C there rev-parse --show-toplevel) $(git -C there rev-parse --short HEAD) (detached HEAD)" >>expect &&
+ git -C there worktree list >out &&
+ sed "s/ */ /g" <out >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'bare repo cleanup' '
+ rm -rf bare1
+'
+
+test_expect_success 'broken main worktree still at the top' '
+ git init broken-main &&
+ (
+ cd broken-main &&
+ test_commit new &&
+ git worktree add linked &&
+ cat >expected <<-EOF &&
+ worktree $(pwd)
+ HEAD $ZERO_OID
+
+ EOF
+ cd linked &&
+ echo "worktree $(pwd)" >expected &&
+ echo "ref: .broken" >../.git/HEAD &&
+ git worktree list --porcelain >out &&
+ head -n 3 out >actual &&
+ test_cmp ../expected actual &&
+ git worktree list >out &&
+ head -n 1 out >actual.2 &&
+ grep -F "(error)" actual.2
+ )
+'
+
+test_expect_success 'linked worktrees are sorted' '
+ mkdir sorted &&
+ git init sorted/main &&
+ (
+ cd sorted/main &&
+ test_tick &&
+ test_commit new &&
+ git worktree add ../first &&
+ git worktree add ../second &&
+ git worktree list --porcelain >out &&
+ grep ^worktree out >actual
+ ) &&
+ cat >expected <<-EOF &&
+ worktree $(pwd)/sorted/main
+ worktree $(pwd)/sorted/first
+ worktree $(pwd)/sorted/second
+ EOF
+ test_cmp expected sorted/main/actual
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description='test git worktree move, remove, lock and unlock'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_commit init &&
+ git worktree add source &&
+ git worktree list --porcelain >out &&
+ grep "^worktree" out >actual &&
+ cat <<-EOF >expected &&
+ worktree $(pwd)
+ worktree $(pwd)/source
+ EOF
+ test_cmp expected actual
+'
+
+test_expect_success 'lock main worktree' '
+ test_must_fail git worktree lock .
+'
+
+test_expect_success 'lock linked worktree' '
+ git worktree lock --reason hahaha source &&
+ echo hahaha >expected &&
+ test_cmp expected .git/worktrees/source/locked
+'
+
+test_expect_success 'lock linked worktree from another worktree' '
+ rm .git/worktrees/source/locked &&
+ git worktree add elsewhere &&
+ git -C elsewhere worktree lock --reason hahaha ../source &&
+ echo hahaha >expected &&
+ test_cmp expected .git/worktrees/source/locked
+'
+
+test_expect_success 'lock worktree twice' '
+ test_must_fail git worktree lock source &&
+ echo hahaha >expected &&
+ test_cmp expected .git/worktrees/source/locked
+'
+
+test_expect_success 'lock worktree twice (from the locked worktree)' '
+ test_must_fail git -C source worktree lock . &&
+ echo hahaha >expected &&
+ test_cmp expected .git/worktrees/source/locked
+'
+
+test_expect_success 'unlock main worktree' '
+ test_must_fail git worktree unlock .
+'
+
+test_expect_success 'unlock linked worktree' '
+ git worktree unlock source &&
+ test_path_is_missing .git/worktrees/source/locked
+'
+
+test_expect_success 'unlock worktree twice' '
+ test_must_fail git worktree unlock source &&
+ test_path_is_missing .git/worktrees/source/locked
+'
+
+test_expect_success 'move non-worktree' '
+ mkdir abc &&
+ test_must_fail git worktree move abc def
+'
+
+test_expect_success 'move locked worktree' '
+ git worktree lock source &&
+ test_when_finished "git worktree unlock source" &&
+ test_must_fail git worktree move source destination
+'
+
+test_expect_success 'move worktree' '
+ git worktree move source destination &&
+ test_path_is_missing source &&
+ git worktree list --porcelain >out &&
+ grep "^worktree.*/destination$" out &&
+ ! grep "^worktree.*/source$" out &&
+ git -C destination log --format=%s >actual2 &&
+ echo init >expected2 &&
+ test_cmp expected2 actual2
+'
+
+test_expect_success 'move main worktree' '
+ test_must_fail git worktree move . def
+'
+
+test_expect_success 'move worktree to another dir' '
+ mkdir some-dir &&
+ git worktree move destination some-dir &&
+ test_when_finished "git worktree move some-dir/destination destination" &&
+ test_path_is_missing destination &&
+ git worktree list --porcelain >out &&
+ grep "^worktree.*/some-dir/destination$" out &&
+ git -C some-dir/destination log --format=%s >actual2 &&
+ echo init >expected2 &&
+ test_cmp expected2 actual2
+'
+
+test_expect_success 'move locked worktree (force)' '
+ test_when_finished "
+ git worktree unlock flump || :
+ git worktree remove flump || :
+ git worktree unlock ploof || :
+ git worktree remove ploof || :
+ " &&
+ git worktree add --detach flump &&
+ git worktree lock flump &&
+ test_must_fail git worktree move flump ploof" &&
+ test_must_fail git worktree move --force flump ploof" &&
+ git worktree move --force --force flump ploof
+'
+
+test_expect_success 'move a repo with uninitialized submodule' '
+ git init withsub &&
+ (
+ cd withsub &&
+ test_commit initial &&
+ git submodule add "$PWD"/.git sub &&
+ git commit -m withsub &&
+ git worktree add second HEAD &&
+ git worktree move second third
+ )
+'
+
+test_expect_success 'not move a repo with initialized submodule' '
+ (
+ cd withsub &&
+ git -C third submodule update &&
+ test_must_fail git worktree move third forth
+ )
+'
+
+test_expect_success 'remove main worktree' '
+ test_must_fail git worktree remove .
+'
+
+test_expect_success 'remove locked worktree' '
+ git worktree lock destination &&
+ test_when_finished "git worktree unlock destination" &&
+ test_must_fail git worktree remove destination
+'
+
+test_expect_success 'remove worktree with dirty tracked file' '
+ echo dirty >>destination/init.t &&
+ test_when_finished "git -C destination checkout init.t" &&
+ test_must_fail git worktree remove destination
+'
+
+test_expect_success 'remove worktree with untracked file' '
+ : >destination/untracked &&
+ test_must_fail git worktree remove destination
+'
+
+test_expect_success 'force remove worktree with untracked file' '
+ git worktree remove --force destination &&
+ test_path_is_missing destination
+'
+
+test_expect_success 'remove missing worktree' '
+ git worktree add to-be-gone &&
+ test -d .git/worktrees/to-be-gone &&
+ mv to-be-gone gone &&
+ git worktree remove to-be-gone &&
+ test_path_is_missing .git/worktrees/to-be-gone
+'
+
+test_expect_success 'NOT remove missing-but-locked worktree' '
+ git worktree add gone-but-locked &&
+ git worktree lock gone-but-locked &&
+ test -d .git/worktrees/gone-but-locked &&
+ mv gone-but-locked really-gone-now &&
+ test_must_fail git worktree remove gone-but-locked &&
+ test_path_is_dir .git/worktrees/gone-but-locked
+'
+
+test_expect_success 'proper error when worktree not found' '
+ for i in noodle noodle/bork
+ do
+ test_must_fail git worktree lock $i 2>err &&
+ test_i18ngrep "not a working tree" err || return 1
+ done
+'
+
+test_expect_success 'remove locked worktree (force)' '
+ git worktree add --detach gumby &&
+ test_when_finished "git worktree remove gumby || :" &&
+ git worktree lock gumby &&
+ test_when_finished "git worktree unlock gumby || :" &&
+ test_must_fail git worktree remove gumby &&
+ test_must_fail git worktree remove --force gumby &&
+ git worktree remove --force --force gumby
+'
+
+test_expect_success 'remove cleans up .git/worktrees when empty' '
+ git init moog &&
+ (
+ cd moog &&
+ test_commit bim &&
+ git worktree add --detach goom &&
+ test_path_exists .git/worktrees &&
+ git worktree remove goom &&
+ test_path_is_missing .git/worktrees
+ )
+'
+
+test_expect_success 'remove a repo with uninitialized submodule' '
+ (
+ cd withsub &&
+ git worktree add to-remove HEAD &&
+ git worktree remove to-remove
+ )
+'
+
+test_expect_success 'not remove a repo with initialized submodule' '
+ (
+ cd withsub &&
+ git worktree add to-remove HEAD &&
+ git -C to-remove submodule update &&
+ test_must_fail git worktree remove to-remove
+ )
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description="config file in multi worktree"
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_commit start
+'
+
+test_expect_success 'config --worktree in single worktree' '
+ git config --worktree foo.bar true &&
+ test_cmp_config true foo.bar
+'
+
+test_expect_success 'add worktrees' '
+ git worktree add wt1 &&
+ git worktree add wt2
+'
+
+test_expect_success 'config --worktree without extension' '
+ test_must_fail git config --worktree foo.bar false
+'
+
+test_expect_success 'enable worktreeConfig extension' '
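+	# once the extension is set, --worktree values are written to a
+	# separate config.worktree file in each worktree-specific git dir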
+ git config extensions.worktreeConfig true &&
+ test_cmp_config true extensions.worktreeConfig
+'
+
+test_expect_success 'config is shared as before' '
+ git config this.is shared &&
+ test_cmp_config shared this.is &&
+ test_cmp_config -C wt1 shared this.is &&
+ test_cmp_config -C wt2 shared this.is
+'
+
+test_expect_success 'config is shared (set from another worktree)' '
+ git -C wt1 config that.is also-shared &&
+ test_cmp_config also-shared that.is &&
+ test_cmp_config -C wt1 also-shared that.is &&
+ test_cmp_config -C wt2 also-shared that.is
+'
+
+test_expect_success 'config private to main worktree' '
+ git config --worktree this.is for-main &&
+ test_cmp_config for-main this.is &&
+ test_cmp_config -C wt1 shared this.is &&
+ test_cmp_config -C wt2 shared this.is
+'
+
+test_expect_success 'config private to linked worktree' '
+ git -C wt1 config --worktree this.is for-wt1 &&
+ test_cmp_config for-main this.is &&
+ test_cmp_config -C wt1 for-wt1 this.is &&
+ test_cmp_config -C wt2 shared this.is
+'
+
+test_expect_success 'core.bare no longer for main only' '
+ test_config core.bare true &&
+ test "$(git rev-parse --is-bare-repository)" = true &&
+ test "$(git -C wt1 rev-parse --is-bare-repository)" = true &&
+ test "$(git -C wt2 rev-parse --is-bare-repository)" = true
+'
+
+test_expect_success 'per-worktree core.bare is picked up' '
+ git -C wt1 config --worktree core.bare true &&
+ test "$(git rev-parse --is-bare-repository)" = false &&
+ test "$(git -C wt1 rev-parse --is-bare-repository)" = true &&
+ test "$(git -C wt2 rev-parse --is-bare-repository)" = false
+'
+
+test_expect_success 'config.worktree no longer read without extension' '
+ git config --unset extensions.worktreeConfig &&
+ test_cmp_config shared this.is &&
+ test_cmp_config -C wt1 shared this.is &&
+ test_cmp_config -C wt2 shared this.is
+'
+
+test_done
# Copyright (c) 2005 Junio C Hamano
#
-test_description='git ls-files test (--others should pick up symlinks).
+test_description='basic tests for ls-files --others
This test runs git ls-files --others with the following on the
filesystem.
--- /dev/null
+#!/bin/sh
+
+test_description='test git ls-files --others with non-submodule repositories
+
+This test runs git ls-files --others with the following working tree:
+
+ nonrepo-no-files/
+ plain directory with no files
+ nonrepo-untracked-file/
+ plain directory with an untracked file
+ repo-no-commit-no-files/
+ git repository without a commit or a file
+ repo-no-commit-untracked-file/
+ git repository without a commit but with an untracked file
+ repo-with-commit-no-files/
+ git repository with a commit and no untracked files
+ repo-with-commit-untracked-file/
+ git repository with a commit and an untracked file
+'
+
+. ./test-lib.sh
+
+test_expect_success 'setup: directories' '
+ mkdir nonrepo-no-files/ &&
+ mkdir nonrepo-untracked-file &&
+ : >nonrepo-untracked-file/untracked &&
+ git init repo-no-commit-no-files &&
+ git init repo-no-commit-untracked-file &&
+ : >repo-no-commit-untracked-file/untracked &&
+ git init repo-with-commit-no-files &&
+ git -C repo-with-commit-no-files commit --allow-empty -mmsg &&
+ git init repo-with-commit-untracked-file &&
+ test_commit -C repo-with-commit-untracked-file msg &&
+ : >repo-with-commit-untracked-file/untracked
+'
+
+test_expect_success 'ls-files --others handles untracked git repositories' '
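+	# the redirection below creates "output" before ls-files scans the
+	# tree, so it is itself listed among the untracked files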
+ git ls-files -o >output &&
+ cat >expect <<-EOF &&
+ nonrepo-untracked-file/untracked
+ output
+ repo-no-commit-no-files/
+ repo-no-commit-untracked-file/
+ repo-with-commit-no-files/
+ repo-with-commit-untracked-file/
+ EOF
+ test_cmp expect output
+'
+
+test_done
git branch a/b/c && test_path_is_file .git/refs/heads/a/b/c
'
+test_expect_success 'git branch mb master... should create a branch' '
+ git branch mb master... && test_path_is_file .git/refs/heads/mb
+'
+
test_expect_success 'git branch HEAD should fail' '
test_must_fail git branch HEAD
'
test_must_fail git rev-parse refs/heads/t
'
+test_expect_success 'deleting checked-out branch from repo that is a submodule' '
+ test_when_finished "rm -rf repo1 repo2" &&
+
+ git init repo1 &&
+ git init repo1/sub &&
+ test_commit -C repo1/sub x &&
+ git -C repo1 submodule add ./sub &&
+ git -C repo1 commit -m "adding sub" &&
+
+ git clone --recurse-submodules repo1 repo2 &&
+ git -C repo2/sub checkout -b work &&
+ test_must_fail git -C repo2/sub branch -D work
+'
+
+test_expect_success 'bare main worktree has HEAD at branch deleted by secondary worktree' '
+ test_when_finished "rm -rf nonbare base secondary" &&
+
+ git init nonbare &&
+ test_commit -C nonbare x &&
+ git clone --bare nonbare bare &&
+ git -C bare worktree add --detach ../secondary master &&
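+	# the bare repo HEAD still points at master, but nothing has it
+	# checked out, so deleting it from the linked worktree must succeed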
+ git -C secondary branch -D master
+'
+
test_expect_success 'git branch --list -v with --abbrev' '
test_when_finished "git branch -D t" &&
git branch t &&
test_expect_success 'git branch --column' '
COLUMNS=81 git branch --column=column >actual &&
cat >expected <<\EOF &&
- a/b/c bam foo l * master n o/p r
- abc bar j/k m/m master2 o/o q
+ a/b/c bam foo l * master mb o/o q
+ abc bar j/k m/m master2 n o/p r
EOF
test_cmp expected actual
'
m/m
* master
master2
+ mb
n
o/o
o/p
git config --unset column.branch &&
git config --unset column.ui &&
cat >expected <<\EOF &&
- a/b/c bam foo l * master n o/p r
- abc bar j/k m/m master2 o/o q
+ a/b/c bam foo l * master mb o/o q
+ abc bar j/k m/m master2 n o/p r
EOF
test_cmp expected actual
'
m/m
* master
master2
+ mb
n
o/o
o/p
test_must_fail git branch -v branch*
'
+test_expect_success 'git branch `--show-current` shows current branch' '
+ cat >expect <<-\EOF &&
+ branch-two
+ EOF
+ git checkout branch-two &&
+ git branch --show-current >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'git branch `--show-current` is silent when detached HEAD' '
+ git checkout HEAD^0 &&
+ git branch --show-current >actual &&
+ test_must_be_empty actual
+'
+
+test_expect_success 'git branch `--show-current` works properly when tag exists' '
+ cat >expect <<-\EOF &&
+ branch-and-tag-name
+ EOF
+ test_when_finished "
+ git checkout branch-one
+ git branch -D branch-and-tag-name
+ " &&
+ git checkout -b branch-and-tag-name &&
+ test_when_finished "git tag -d branch-and-tag-name" &&
+ git tag branch-and-tag-name &&
+ git branch --show-current >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'git branch `--show-current` works properly with worktrees' '
+ cat >expect <<-\EOF &&
+ branch-one
+ branch-two
+ EOF
+ git checkout branch-one &&
+ git worktree add worktree branch-two &&
+ {
+ git branch --show-current &&
+ git -C worktree branch --show-current
+ } >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'git branch shows detached HEAD properly' '
cat >expect <<EOF &&
* (HEAD detached at $(git rev-parse --short HEAD^0))
test_config notes.rewriteMode overwrite &&
test_config notes.rewriteRef refs/notes/other &&
echo $(git rev-parse HEAD^) $(git rev-parse HEAD) |
- GIT_NOTES_REWRITE_REF= git notes copy --for-rewrite=foo &&
+ GIT_NOTES_REWRITE_REF=refs/notes/commits \
+ git notes copy --for-rewrite=foo &&
git log -1 >actual &&
- test_cmp expect actual
+ grep "replacement note 3" actual
'
test_expect_success 'git notes copy diagnoses too many or too few parameters' '
git rebase master
'
+test_expect_success 'rebase sets ORIG_HEAD to pre-rebase state' '
+ git checkout -b orig-head topic &&
+ pre="$(git rev-parse --verify HEAD)" &&
+ git rebase master &&
+ test_cmp_rev "$pre" ORIG_HEAD &&
+ ! test_cmp_rev "$pre" HEAD
+'
+
test_expect_success 'rebase, with <onto> and <upstream> specified as :/quuxery' '
test_when_finished "git branch -D torebase" &&
git checkout -b torebase my-topic-branch^ &&
)
'
+test_expect_success 'rebase -c rebase.useBuiltin=false warning' '
+ expected="rebase.useBuiltin support has been removed" &&
+
+ # Only warn when the legacy rebase is requested...
+ test_must_fail git -c rebase.useBuiltin=false rebase 2>err &&
+ test_i18ngrep "$expected" err &&
+ test_must_fail env GIT_TEST_REBASE_USE_BUILTIN=false git rebase 2>err &&
+ test_i18ngrep "$expected" err &&
+
+ # ...not when we would have used the built-in anyway
+ test_must_fail git -c rebase.useBuiltin=true rebase 2>err &&
+ test_must_be_empty err &&
+ test_must_fail env GIT_TEST_REBASE_USE_BUILTIN=true git rebase 2>err &&
+ test_must_be_empty err
+'
+
test_done
git checkout B^0 &&
set_fake_editor &&
- FAKE_LINES="1" git rebase --interactive A &&
+ FAKE_LINES="1" git -c merge.directoryRenames=true rebase --interactive A &&
git ls-files -s >out &&
test_line_count = 5 out &&
git checkout B^0 &&
- git rebase A &&
+ git -c merge.directoryRenames=true rebase A &&
git ls-files -s >out &&
test_line_count = 5 out &&
git checkout B^0 &&
- git rebase --merge A &&
+ git -c merge.directoryRenames=true rebase --merge A &&
git ls-files -s >out &&
test_line_count = 5 out &&
git format-patch -1 B &&
- git am --3way 0001*.patch &&
+ git -c merge.directoryRenames=true am --3way 0001*.patch &&
git ls-files -s >out &&
test_line_count = 5 out &&
test_expect_success 'rebase -x with empty command fails' '
test_when_finished "git rebase --abort ||:" &&
- test_must_fail env GIT_TEST_REBASE_USE_BUILTIN=true \
- git rebase -x "" @ 2>actual &&
+ test_must_fail env git rebase -x "" @ 2>actual &&
test_write_lines "error: empty exec command" >expected &&
test_i18ncmp expected actual &&
- test_must_fail env GIT_TEST_REBASE_USE_BUILTIN=true \
- git rebase -x " " @ 2>actual &&
+ test_must_fail env git rebase -x " " @ 2>actual &&
test_i18ncmp expected actual
'
'
test_expect_success 'rebase -x with newline in command fails' '
test_when_finished "git rebase --abort ||:" &&
- test_must_fail env GIT_TEST_REBASE_USE_BUILTIN=true \
- git rebase -x "a${LF}b" @ 2>actual &&
+ test_must_fail env git rebase -x "a${LF}b" @ 2>actual &&
test_write_lines "error: exec commands cannot contain newlines" \
>expected &&
test_i18ncmp expected actual
(
set_cat_todo_editor &&
test_must_fail git -c rebase.instructionFormat= \
- rebase --autosquash --force -i HEAD^ >actual &&
+ rebase --autosquash --force-rebase -i HEAD^ >actual &&
git log -1 --format="pick %h %s" >expect &&
test_cmp expect actual
)
test -e F
'
+test_expect_success SHA1 'loose object cache vs re-reading todo list' '
+ GIT_REBASE_TODO=.git/rebase-merge/git-rebase-todo &&
+ export GIT_REBASE_TODO &&
+ write_script append-todo.sh <<-\EOS &&
+ # For values 5 and 6, this yields SHA-1s with the same first two digits
+ echo "pick $(git rev-parse --short \
+ $(printf "%s\\n" \
+ "tree $EMPTY_TREE" \
+ "author A U Thor <author@example.org> $1 +0000" \
+ "committer A U Thor <author@example.org> $1 +0000" \
+ "" \
+ "$1" |
+ git hash-object -t commit -w --stdin))" >>$GIT_REBASE_TODO
+
+ shift
+ test -z "$*" ||
+ echo "exec $0 $*" >>$GIT_REBASE_TODO
+ EOS
+
+ git rebase HEAD -x "./append-todo.sh 5 6"
+'
+
test_done
EOF
test_config sequence.editor \""$PWD"/replace-editor.sh\" &&
test_tick &&
- git rebase -i --force --root -r &&
+ git rebase -i --force-rebase --root -r &&
test "Parsnip" = "$(git show -s --format=%an HEAD^)" &&
test $(git rev-parse second-root^0) != $(git rev-parse HEAD^) &&
test $(git rev-parse second-root:second-root.t) = \
test_cmp_rev HEAD $before &&
test_tick &&
- git rebase -i --force -r HEAD^^ &&
+ git rebase -i --force-rebase -r HEAD^^ &&
test "Hank" = "$(git show -s --format=%an HEAD)" &&
test "$before" != $(git rev-parse HEAD) &&
test_cmp_graph HEAD^^.. <<-\EOF
test_commit base foo b &&
test_commit picked foo c &&
test_commit --signoff picked-signed foo d &&
+ git checkout -b topic initial &&
+ test_commit redundant-pick foo c redundant &&
+ git commit --allow-empty --allow-empty-message &&
+ git tag empty &&
+ git checkout master &&
git config advice.detachedhead false
'
test_expect_success 'cherry-pick w/dirty tree does not set CHERRY_PICK_HEAD' '
pristine_detach initial &&
- echo foo > foo &&
+ echo foo >foo &&
test_must_fail git cherry-pick base &&
test_must_fail git rev-parse --verify CHERRY_PICK_HEAD
'
test_expect_success \
'cherry-pick --strategy=resolve w/dirty tree does not set CHERRY_PICK_HEAD' '
pristine_detach initial &&
- echo foo > foo &&
+ echo foo >foo &&
test_must_fail git cherry-pick --strategy=resolve base &&
test_must_fail git rev-parse --verify CHERRY_PICK_HEAD
'
test_must_fail git rev-parse --verify CHERRY_PICK_HEAD
'
+test_expect_success 'successful final commit clears cherry-pick state' '
+ pristine_detach initial &&
+
+ test_must_fail git cherry-pick base picked-signed &&
+ echo resolved >foo &&
+ test_path_is_file .git/sequencer/todo &&
+ git commit -a &&
+	test_path_is_missing .git/sequencer
+'
+
+test_expect_success 'reset after final pick clears cherry-pick state' '
+ pristine_detach initial &&
+
+ test_must_fail git cherry-pick base picked-signed &&
+ echo resolved >foo &&
+ test_path_is_file .git/sequencer/todo &&
+ git reset &&
+	test_path_is_missing .git/sequencer
+'
test_expect_success 'failed cherry-pick produces dirty index' '
pristine_detach initial &&
git ls-files --stage foo &&
git checkout picked -- foo &&
git ls-files --stage foo
- } > stages &&
+ } >stages &&
sed "
1 s/ 0 / 1 /
2 s/ 0 / 2 /
3 s/ 0 / 3 /
- " < stages > expected &&
+ " stages >expected &&
git read-tree -u --reset HEAD &&
test_must_fail git cherry-pick picked &&
- git ls-files --stage --unmerged > actual &&
+ git ls-files --stage --unmerged >actual &&
test_cmp expected actual
'
+test_expect_success \
+ 'cherry-pick conflict, ensure commit.cleanup = scissors places scissors line properly' '
+ pristine_detach initial &&
+ git config commit.cleanup scissors &&
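+	# the scissors line must sit above the Conflicts comments so that
+	# cleanup strips them from the final message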
+ cat <<-EOF >expected &&
+ picked
+
+ # ------------------------ >8 ------------------------
+ # Do not modify or remove the line above.
+ # Everything below it will be ignored.
+ #
+ # Conflicts:
+ # foo
+ EOF
+
+ test_must_fail git cherry-pick picked &&
+
+ test_i18ncmp expected .git/MERGE_MSG
+'
+
+test_expect_success \
+ 'cherry-pick conflict, ensure cleanup=scissors places scissors line properly' '
+ pristine_detach initial &&
+ git config --unset commit.cleanup &&
+ cat <<-EOF >expected &&
+ picked
+
+ # ------------------------ >8 ------------------------
+ # Do not modify or remove the line above.
+ # Everything below it will be ignored.
+ #
+ # Conflicts:
+ # foo
+ EOF
+
+ test_must_fail git cherry-pick --cleanup=scissors picked &&
+
+ test_i18ncmp expected .git/MERGE_MSG
+'
+
test_expect_success 'failed cherry-pick describes conflict in work tree' '
pristine_detach initial &&
- cat <<-EOF > expected &&
+ cat <<-EOF >expected &&
<<<<<<< HEAD
a
=======
test_must_fail git cherry-pick picked &&
- sed "s/[a-f0-9]*\.\.\./objid/" foo > actual &&
+ sed "s/[a-f0-9]*\.\.\./objid/" foo >actual &&
test_cmp expected actual
'
test_expect_success 'diff3 -m style' '
pristine_detach initial &&
git config merge.conflictstyle diff3 &&
- cat <<-EOF > expected &&
+ cat <<-EOF >expected &&
<<<<<<< HEAD
a
||||||| parent of objid picked
test_must_fail git cherry-pick picked &&
- sed "s/[a-f0-9]*\.\.\./objid/" foo > actual &&
+ sed "s/[a-f0-9]*\.\.\./objid/" foo >actual &&
test_cmp expected actual
'
test_expect_success 'revert also handles conflicts sanely' '
git config --unset merge.conflictstyle &&
pristine_detach initial &&
- cat <<-EOF > expected &&
+ cat <<-EOF >expected &&
<<<<<<< HEAD
a
=======
git ls-files --stage foo &&
git checkout base -- foo &&
git ls-files --stage foo
- } > stages &&
+ } >stages &&
sed "
1 s/ 0 / 1 /
2 s/ 0 / 2 /
3 s/ 0 / 3 /
- " < stages > expected-stages &&
+ " stages >expected-stages &&
git read-tree -u --reset HEAD &&
head=$(git rev-parse HEAD) &&
test_must_fail git revert picked &&
newhead=$(git rev-parse HEAD) &&
- git ls-files --stage --unmerged > actual-stages &&
+ git ls-files --stage --unmerged >actual-stages &&
test "$head" = "$newhead" &&
test_must_fail git update-index --refresh -q &&
test_must_fail git diff-index --exit-code HEAD &&
test_cmp expected-stages actual-stages &&
- sed "s/[a-f0-9]*\.\.\./objid/" foo > actual &&
+ sed "s/[a-f0-9]*\.\.\./objid/" foo >actual &&
test_cmp expected actual
'
test_expect_success 'revert w/dirty tree does not set REVERT_HEAD' '
pristine_detach base &&
- echo foo > foo &&
+ echo foo >foo &&
test_must_fail git revert base &&
test_must_fail git rev-parse --verify CHERRY_PICK_HEAD &&
test_must_fail git rev-parse --verify REVERT_HEAD
test_cmp_rev picked REVERT_HEAD
'
+test_expect_success 'successful final commit clears revert state' '
+ pristine_detach picked-signed &&
+
+ test_must_fail git revert picked-signed base &&
+ echo resolved >foo &&
+ test_path_is_file .git/sequencer/todo &&
+ git commit -a &&
+	test_path_is_missing .git/sequencer
+'
+
+test_expect_success 'reset after final pick clears revert state' '
+ pristine_detach picked-signed &&
+
+ test_must_fail git revert picked-signed base &&
+ echo resolved >foo &&
+ test_path_is_file .git/sequencer/todo &&
+ git reset &&
+	test_path_is_missing .git/sequencer
+'
+
test_expect_success 'revert conflict, diff3 -m style' '
pristine_detach initial &&
git config merge.conflictstyle diff3 &&
- cat <<-EOF > expected &&
+ cat <<-EOF >expected &&
<<<<<<< HEAD
a
||||||| objid picked
test_must_fail git revert picked &&
- sed "s/[a-f0-9]*\.\.\./objid/" foo > actual &&
+ sed "s/[a-f0-9]*\.\.\./objid/" foo >actual &&
test_cmp expected actual
'
+test_expect_success \
+ 'revert conflict, ensure commit.cleanup = scissors places scissors line properly' '
+ pristine_detach initial &&
+ git config commit.cleanup scissors &&
+ cat >expected <<-EOF &&
+ Revert "picked"
+
+ This reverts commit OBJID.
+
+ # ------------------------ >8 ------------------------
+ # Do not modify or remove the line above.
+ # Everything below it will be ignored.
+ #
+ # Conflicts:
+ # foo
+ EOF
+
+ test_must_fail git revert picked &&
+
+ sed "s/$OID_REGEX/OBJID/" .git/MERGE_MSG >actual &&
+ test_i18ncmp expected actual
+'
+
+test_expect_success \
+ 'revert conflict, ensure cleanup=scissors places scissors line properly' '
+ pristine_detach initial &&
+ git config --unset commit.cleanup &&
+ cat >expected <<-EOF &&
+ Revert "picked"
+
+ This reverts commit OBJID.
+
+ # ------------------------ >8 ------------------------
+ # Do not modify or remove the line above.
+ # Everything below it will be ignored.
+ #
+ # Conflicts:
+ # foo
+ EOF
+
+ test_must_fail git revert --cleanup=scissors picked &&
+
+ sed "s/$OID_REGEX/OBJID/" .git/MERGE_MSG >actual &&
+ test_i18ncmp expected actual
+'
+
test_expect_success 'failed cherry-pick does not forget -s' '
pristine_detach initial &&
test_must_fail git cherry-pick -s picked &&
pristine_detach initial &&
test_must_fail git cherry-pick -s picked-signed &&
git commit -a -s &&
- test $(git show -s |grep -c "Signed-off-by") = 1
+ test $(git show -s >tmp && grep -c "Signed-off-by" tmp && rm tmp) = 1
'
test_expect_success 'commit after failed cherry-pick adds -s at the right place' '
Signed-off-by: C O Mitter <committer@example.com>
# Conflicts:
EOF
- grep -e "^# Conflicts:" -e '^Signed-off-by' <.git/COMMIT_EDITMSG >actual &&
+ grep -e "^# Conflicts:" -e '^Signed-off-by' .git/COMMIT_EDITMSG >actual &&
test_cmp expect actual &&
cat <<-\EOF >expected &&
# emulate old-style conflicts block
mv .git/MERGE_MSG .git/MERGE_MSG+ &&
- sed -e "/^# Conflicts:/,\$s/^# *//" <.git/MERGE_MSG+ >.git/MERGE_MSG &&
+ sed -e "/^# Conflicts:/,\$s/^# *//" .git/MERGE_MSG+ >.git/MERGE_MSG &&
git commit -a &&
git commit --amend -s &&
Signed-off-by: C O Mitter <committer@example.com>
Conflicts:
EOF
- grep -e "^Conflicts:" -e '^Signed-off-by' <.git/COMMIT_EDITMSG >actual &&
+ grep -e "^Conflicts:" -e '^Signed-off-by' .git/COMMIT_EDITMSG >actual &&
test_cmp expect actual
'
test_i18ngrep ! "Changes not staged for commit:" actual
'
+test_expect_success 'cherry-pick --continue remembers --keep-redundant-commits' '
+ test_when_finished "git cherry-pick --abort || :" &&
+ pristine_detach initial &&
+ test_must_fail git cherry-pick --keep-redundant-commits picked redundant &&
+ echo c >foo &&
+ git add foo &&
+ git cherry-pick --continue
+'
+
+test_expect_success 'cherry-pick --continue remembers --allow-empty and --allow-empty-message' '
+ test_when_finished "git cherry-pick --abort || :" &&
+ pristine_detach initial &&
+ test_must_fail git cherry-pick --allow-empty --allow-empty-message \
+ picked empty &&
+ echo c >foo &&
+ git add foo &&
+ git cherry-pick --continue
+'
+
test_done
test_cmp expect actual
'
+test_expect_success 'cherry-pick -x cleans commit message' '
+ pristine_detach initial &&
+ git cherry-pick -x mesg-unclean &&
+ git log -1 --pretty=format:%B >actual &&
+ printf "%s\n(cherry picked from commit %s)\n" \
+ "$mesg_unclean" $(git rev-parse mesg-unclean) |
+ git stripspace >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'cherry-pick -x respects commit.cleanup' '
+ pristine_detach initial &&
+ git -c commit.cleanup=strip cherry-pick -x mesg-unclean &&
+ git log -1 --pretty=format:%B >actual &&
+ printf "%s\n(cherry picked from commit %s)\n" \
+ "$mesg_unclean" $(git rev-parse mesg-unclean) |
+ git stripspace -s >expect &&
+ test_cmp expect actual
+'
+
test_done
. ./test-lib.sh
# Setup some files to be removed, some with funny characters
-test_expect_success \
- 'Initialize test directory' \
- "touch -- foo bar baz 'space embedded' -q &&
- git add -- foo bar baz 'space embedded' -q &&
- git commit -m 'add normal files'"
+test_expect_success 'Initialize test directory' '
+ touch -- foo bar baz "space embedded" -q &&
+ git add -- foo bar baz "space embedded" -q &&
+ git commit -m "add normal files"
+'
-if test_have_prereq !FUNNYNAMES; then
+if test_have_prereq !FUNNYNAMES
+then
say 'Your filesystem does not allow tabs in filenames.'
fi
-test_expect_success FUNNYNAMES 'add files with funny names' "
- touch -- 'tab embedded' 'newline
-embedded' &&
- git add -- 'tab embedded' 'newline
-embedded' &&
- git commit -m 'add files with tabs and newlines'
-"
-
-test_expect_success \
- 'Pre-check that foo exists and is in index before git rm foo' \
- '[ -f foo ] && git ls-files --error-unmatch foo'
-
-test_expect_success \
- 'Test that git rm foo succeeds' \
- 'git rm --cached foo'
-
-test_expect_success \
- 'Test that git rm --cached foo succeeds if the index matches the file' \
- 'echo content >foo &&
- git add foo &&
- git rm --cached foo'
-
-test_expect_success \
- 'Test that git rm --cached foo succeeds if the index matches the file' \
- 'echo content >foo &&
- git add foo &&
- git commit -m foo &&
- echo "other content" >foo &&
- git rm --cached foo'
-
-test_expect_success \
- 'Test that git rm --cached foo fails if the index matches neither the file nor HEAD' '
- echo content >foo &&
- git add foo &&
- git commit -m foo --allow-empty &&
- echo "other content" >foo &&
- git add foo &&
- echo "yet another content" >foo &&
- test_must_fail git rm --cached foo
-'
-
-test_expect_success \
- 'Test that git rm --cached -f foo works in case where --cached only did not' \
- 'echo content >foo &&
- git add foo &&
- git commit -m foo --allow-empty &&
- echo "other content" >foo &&
- git add foo &&
- echo "yet another content" >foo &&
- git rm --cached -f foo'
-
-test_expect_success \
- 'Post-check that foo exists but is not in index after git rm foo' \
- '[ -f foo ] && test_must_fail git ls-files --error-unmatch foo'
-
-test_expect_success \
- 'Pre-check that bar exists and is in index before "git rm bar"' \
- '[ -f bar ] && git ls-files --error-unmatch bar'
-
-test_expect_success \
- 'Test that "git rm bar" succeeds' \
- 'git rm bar'
-
-test_expect_success \
- 'Post-check that bar does not exist and is not in index after "git rm -f bar"' \
- '! [ -f bar ] && test_must_fail git ls-files --error-unmatch bar'
-
-test_expect_success \
- 'Test that "git rm -- -q" succeeds (remove a file that looks like an option)' \
- 'git rm -- -q'
-
-test_expect_success FUNNYNAMES \
- "Test that \"git rm -f\" succeeds with embedded space, tab, or newline characters." \
- "git rm -f 'space embedded' 'tab embedded' 'newline
-embedded'"
+test_expect_success FUNNYNAMES 'add files with funny names' '
+ touch -- "tab embedded" "newline${LF}embedded" &&
+ git add -- "tab embedded" "newline${LF}embedded" &&
+ git commit -m "add files with tabs and newlines"
+'
+
+test_expect_success 'Pre-check that foo exists and is in index before git rm foo' '
+ test_path_is_file foo &&
+ git ls-files --error-unmatch foo
+'
+
+test_expect_success 'Test that git rm foo succeeds' '
+ git rm --cached foo
+'
+
+test_expect_success 'Test that git rm --cached foo succeeds if the index matches the file' '
+ echo content >foo &&
+ git add foo &&
+ git rm --cached foo
+'
+
+test_expect_success 'Test that git rm --cached foo succeeds if the index matches the file' '
+ echo content >foo &&
+ git add foo &&
+ git commit -m foo &&
+ echo "other content" >foo &&
+ git rm --cached foo
+'
+
+test_expect_success 'Test that git rm --cached foo fails if the index matches neither the file nor HEAD' '
+ echo content >foo &&
+ git add foo &&
+ git commit -m foo --allow-empty &&
+ echo "other content" >foo &&
+ git add foo &&
+ echo "yet another content" >foo &&
+ test_must_fail git rm --cached foo
+'
+
+test_expect_success 'Test that git rm --cached -f foo works in case where --cached only did not' '
+ echo content >foo &&
+ git add foo &&
+ git commit -m foo --allow-empty &&
+ echo "other content" >foo &&
+ git add foo &&
+ echo "yet another content" >foo &&
+ git rm --cached -f foo
+'
+
+test_expect_success 'Post-check that foo exists but is not in index after git rm foo' '
+ test_path_is_file foo &&
+ test_must_fail git ls-files --error-unmatch foo
+'
+
+test_expect_success 'Pre-check that bar exists and is in index before "git rm bar"' '
+ test_path_is_file bar &&
+ git ls-files --error-unmatch bar
+'
+
+test_expect_success 'Test that "git rm bar" succeeds' '
+ git rm bar
+'
+
+test_expect_success 'Post-check that bar does not exist and is not in index after "git rm -f bar"' '
+ test_path_is_missing bar &&
+ test_must_fail git ls-files --error-unmatch bar
+'
+
+test_expect_success 'Test that "git rm -- -q" succeeds (remove a file that looks like an option)' '
+ git rm -- -q
+'
+
+test_expect_success FUNNYNAMES 'Test that "git rm -f" succeeds with embedded space, tab, or newline characters.' '
+ git rm -f "space embedded" "tab embedded" "newline${LF}embedded"
+'
test_expect_success SANITY 'Test that "git rm -f" fails if its rm fails' '
test_when_finished "chmod 775 ." &&
test_must_fail git rm -f baz
'
-test_expect_success \
- 'When the rm in "git rm -f" fails, it should not remove the file from the index' \
- 'git ls-files --error-unmatch baz'
+test_expect_success 'When the rm in "git rm -f" fails, it should not remove the file from the index' '
+ git ls-files --error-unmatch baz
+'
test_expect_success 'Remove nonexistent file with --ignore-unmatch' '
git rm --ignore-unmatch nonexistent
test_expect_success 'Modify foo -- rm should refuse' '
echo >>foo &&
test_must_fail git rm foo baz &&
- test -f foo &&
- test -f baz &&
+ test_path_is_file foo &&
+ test_path_is_file baz &&
git ls-files --error-unmatch foo baz
'
test_expect_success 'Modified foo -- rm -f should work' '
git rm -f foo baz &&
- test ! -f foo &&
- test ! -f baz &&
+ test_path_is_missing foo &&
+ test_path_is_missing baz &&
test_must_fail git ls-files --error-unmatch foo &&
test_must_fail git ls-files --error-unmatch bar
'
test_expect_success 'foo is different in index from HEAD -- rm should refuse' '
test_must_fail git rm foo baz &&
- test -f foo &&
- test -f baz &&
+ test_path_is_file foo &&
+ test_path_is_file baz &&
git ls-files --error-unmatch foo baz
'
test_expect_success 'but with -f it should work.' '
git rm -f foo baz &&
- test ! -f foo &&
- test ! -f baz &&
+ test_path_is_missing foo &&
+ test_path_is_missing baz &&
test_must_fail git ls-files --error-unmatch foo &&
test_must_fail git ls-files --error-unmatch baz
'
test_expect_success 'Recursive without -r fails' '
test_must_fail git rm frotz &&
- test -d frotz &&
- test -f frotz/nitfol
+ test_path_is_dir frotz &&
+ test_path_is_file frotz/nitfol
'
test_expect_success 'Recursive with -r but dirty' '
echo qfwfq >>frotz/nitfol &&
test_must_fail git rm -r frotz &&
- test -d frotz &&
- test -f frotz/nitfol
+ test_path_is_dir frotz &&
+ test_path_is_file frotz/nitfol
'
test_expect_success 'Recursive with -r -f' '
git rm -f -r frotz &&
- ! test -f frotz/nitfol &&
- ! test -d frotz
+ test_path_is_missing frotz/nitfol &&
+ test_path_is_missing frotz
'
test_expect_success 'Remove nonexistent file returns nonzero exit status' '
test_expect_success 'Call "rm" from outside the work tree' '
mkdir repo &&
- (cd repo &&
- git init &&
- echo something >somefile &&
- git add somefile &&
- git commit -m "add a file" &&
- (cd .. &&
- git --git-dir=repo/.git --work-tree=repo rm somefile) &&
- test_must_fail git ls-files --error-unmatch somefile)
+ (
+ cd repo &&
+ git init &&
+ echo something >somefile &&
+ git add somefile &&
+ git commit -m "add a file" &&
+ (
+ cd .. &&
+ git --git-dir=repo/.git --work-tree=repo rm somefile
+ ) &&
+ test_must_fail git ls-files --error-unmatch somefile
+ )
'
test_expect_success 'refresh index before checking if it is up-to-date' '
-
git reset --hard &&
test-tool chmtime -86400 frotz/nitfol &&
git rm frotz/nitfol &&
- test ! -f frotz/nitfol
-
+ test_path_is_missing frotz/nitfol
'
test_expect_success 'choking "git rm" should not let it die with cruft' '
i=0 &&
while test $i -lt 12000
do
- echo "100644 1234567890123456789012345678901234567890 0 some-file-$i"
- i=$(( $i + 1 ))
+ echo "100644 1234567890123456789012345678901234567890 0 some-file-$i"
+ i=$(( $i + 1 ))
done | git update-index --index-info &&
git rm -n "some-file-*" | : &&
test_path_is_missing .git/index.lock
echo content >dir/subdir/subsubdir/file &&
git add dir/subdir/subsubdir/file &&
git rm -f dir/subdir/subsubdir/file &&
- ! test -d dir
+ test_path_is_missing dir
'
cat >expect <<EOF
git add .gitmodules &&
git commit -m "add submodule" &&
git rm submod &&
- test ! -e submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual &&
test_must_fail git config -f .gitmodules submodule.sub.url &&
git reset --hard &&
git submodule update &&
git rm submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual &&
test_must_fail git config -f .gitmodules submodule.sub.url &&
git reset --hard &&
git submodule update &&
git rm submod/ &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git submodule update &&
git -C submod checkout HEAD^ &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.modified actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual &&
test_must_fail git config -f .gitmodules submodule.sub.url &&
git reset --hard &&
git submodule update &&
git rm --cached submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno >actual &&
test_cmp expect.cached actual &&
git config -f .gitmodules submodule.sub.url &&
git reset --hard &&
git submodule update &&
git rm -n submod &&
- test -f submod/.git &&
+ test_path_is_file submod/.git &&
git diff-index --exit-code HEAD
'
git rm .gitmodules &&
git rm submod >actual 2>actual.err &&
test_must_be_empty actual.err &&
- ! test -d submod &&
- ! test -f submod/.git &&
+ test_path_is_missing submod &&
+ test_path_is_missing submod/.git &&
git status -s -uno >actual &&
test_cmp expect.both_deleted actual
'
git submodule update &&
git config -f .gitmodules foo.bar true &&
test_must_fail git rm submod >actual 2>actual.err &&
- test -s actual.err &&
- test -d submod &&
- test -f submod/.git &&
+ test_file_not_empty actual.err &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git diff-files --quiet -- submod &&
git add .gitmodules &&
git rm submod >actual 2>actual.err &&
test_must_be_empty actual.err &&
- ! test -d submod &&
- ! test -f submod/.git &&
+ test_path_is_missing submod &&
+ test_path_is_missing submod/.git &&
git status -s -uno >actual &&
test_cmp expect actual
'
echo "warning: Could not find section in .gitmodules where path=submod" >expect.err &&
git rm submod >actual 2>actual.err &&
test_i18ncmp expect.err actual.err &&
- ! test -d submod &&
- ! test -f submod/.git &&
+ test_path_is_missing submod &&
+ test_path_is_missing submod/.git &&
git status -s -uno >actual &&
test_cmp expect actual
'
git submodule update &&
echo X >submod/empty &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.modified_inside actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git submodule update &&
echo X >submod/untracked &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.modified_untracked actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git submodule update &&
test_must_fail git merge conflict2 &&
git rm submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git -C submod checkout HEAD^ &&
test_must_fail git merge conflict2 &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.conflict actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual &&
test_must_fail git config -f .gitmodules submodule.sub.url &&
echo X >submod/empty &&
test_must_fail git merge conflict2 &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.conflict actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual &&
test_must_fail git config -f .gitmodules submodule.sub.url &&
echo X >submod/untracked &&
test_must_fail git merge conflict2 &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.conflict actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git checkout conflict1 &&
git reset --hard &&
git submodule update &&
- (cd submod &&
+ (
+ cd submod &&
rm .git &&
cp -R ../.git/modules/sub .git &&
GIT_WORK_TREE=. git config --unset core.worktree
) &&
test_must_fail git merge conflict2 &&
test_must_fail git rm submod &&
- test -d submod &&
- test -d submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_dir submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.conflict actual &&
test_must_fail git rm -f submod &&
- test -d submod &&
- test -d submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_dir submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.conflict actual &&
git merge --abort &&
git reset --hard &&
test_must_fail git merge conflict2 &&
git rm submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git checkout -f master &&
git reset --hard &&
git submodule update &&
- (cd submod &&
+ (
+ cd submod &&
rm .git &&
cp -R ../.git/modules/sub .git &&
GIT_WORK_TREE=. git config --unset core.worktree &&
rm -r ../.git/modules/sub
) &&
git rm submod 2>output.err &&
- ! test -d submod &&
- ! test -d submod/.git &&
+ test_path_is_missing submod &&
+ test_path_is_missing submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
- test -s actual &&
+ test_file_not_empty actual &&
test_i18ngrep Migrating output.err
'
test_expect_success 'setup subsubmodule' '
git reset --hard &&
git submodule update &&
- (cd submod &&
+ (
+ cd submod &&
git update-index --add --cacheinfo 160000 $(git rev-parse HEAD) subsubmod &&
git config -f .gitmodules submodule.sub.url ../. &&
git config -f .gitmodules submodule.sub.path subsubmod &&
test_expect_success 'rm recursively removes work tree of unmodified submodules' '
git rm submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git submodule update --recursive &&
git -C submod/subsubmod checkout HEAD^ &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.modified_inside actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git submodule update --recursive &&
echo X >submod/subsubmod/empty &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.modified_inside actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
git submodule update --recursive &&
echo X >submod/subsubmod/untracked &&
test_must_fail git rm submod &&
- test -d submod &&
- test -f submod/.git &&
+ test_path_is_dir submod &&
+ test_path_is_file submod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect.modified_untracked actual &&
git rm -f submod &&
- test ! -d submod &&
+ test_path_is_missing submod &&
git status -s -uno --ignore-submodules=none >actual &&
test_cmp expect actual
'
test_expect_success "rm absorbs submodule's nested .git directory" '
git reset --hard &&
git submodule update --recursive &&
- (cd submod/subsubmod &&
+ (
+ cd submod/subsubmod &&
rm .git &&
mv ../../.git/modules/sub/modules/sub .git &&
GIT_WORK_TREE=. git config --unset core.worktree
) &&
git rm submod 2>output.err &&
- ! test -d submod &&
- ! test -d submod/subsubmod/.git &&
+ test_path_is_missing submod &&
+ test_path_is_missing submod/subsubmod/.git &&
git status -s -uno --ignore-submodules=none >actual &&
- test -s actual &&
+ test_file_not_empty actual &&
test_i18ngrep Migrating output.err
'
)
'
+test_expect_success 'error on a repository with no commits' '
+ rm -fr empty &&
+ git init empty &&
+ test_must_fail git add empty >actual 2>&1 &&
+ cat >expect <<-EOF &&
+ error: '"'empty/'"' does not have a commit checked out
+ fatal: adding files failed
+ EOF
+ test_i18ncmp expect actual
+'
+
test_expect_success 'git add --dry-run of existing changed file' "
echo new >>track-this &&
git add --dry-run track-this >actual 2>&1 &&
'
test_expect_success 'all statuses changed in folder if . is given' '
+ rm -fr empty &&
git add --chmod=+x . &&
test $(git ls-files --stage | grep ^100644 | wc -l) -eq 0 &&
git add --chmod=-x . &&
. ./test-lib.sh
test_expect_success 'stash some dirty working directory' '
- echo 1 > file &&
+ echo 1 >file &&
git add file &&
echo unrelated >other-file &&
git add other-file &&
test_tick &&
git commit -m initial &&
- echo 2 > file &&
+ echo 2 >file &&
git add file &&
- echo 3 > file &&
+ echo 3 >file &&
test_tick &&
git stash &&
git diff-files --quiet &&
git diff-index --cached --quiet HEAD
'
-cat > expect << EOF
+cat >expect <<EOF
diff --git a/file b/file
index 0cfbf08..00750ed 100644
--- a/file
test_expect_success 'parents of stash' '
test $(git rev-parse stash^) = $(git rev-parse HEAD) &&
- git diff stash^2..stash > output &&
+ git diff stash^2..stash >output &&
test_cmp expect output
'
test_expect_success 'apply stashed changes (including index)' '
git reset --hard HEAD^ &&
- echo 6 > other-file &&
+ echo 6 >other-file &&
git add other-file &&
test_tick &&
git commit -m other-file &&
test_expect_success 'drop top stash' '
git reset --hard &&
- git stash list > stashlist1 &&
- echo 7 > file &&
+ git stash list >expected &&
+ echo 7 >file &&
git stash &&
git stash drop &&
- git stash list > stashlist2 &&
- test_cmp stashlist1 stashlist2 &&
+ git stash list >actual &&
+ test_cmp expected actual &&
git stash apply &&
test 3 = $(cat file) &&
test 1 = $(git show :file) &&
test_expect_success 'drop middle stash' '
git reset --hard &&
- echo 8 > file &&
+ echo 8 >file &&
git stash &&
- echo 9 > file &&
+ echo 9 >file &&
git stash &&
git stash drop stash@{1} &&
test 2 = $(git stash list | wc -l) &&
test 0 = $(git stash list | wc -l)
'
-cat > expect << EOF
+cat >expect <<EOF
diff --git a/file2 b/file2
new file mode 100644
index 0000000..1fe912c
+bar2
EOF
-cat > expect1 << EOF
+cat >expect1 <<EOF
diff --git a/file b/file
index 257cc56..5716ca5 100644
--- a/file
+bar
EOF
-cat > expect2 << EOF
+cat >expect2 <<EOF
diff --git a/file b/file
index 7601807..5716ca5 100644
--- a/file
EOF
test_expect_success 'stash branch' '
- echo foo > file &&
+ echo foo >file &&
git commit file -m first &&
- echo bar > file &&
- echo bar2 > file2 &&
+ echo bar >file &&
+ echo bar2 >file2 &&
git add file2 &&
git stash &&
- echo baz > file &&
+ echo baz >file &&
git commit file -m second &&
git stash branch stashbranch &&
test refs/heads/stashbranch = $(git symbolic-ref HEAD) &&
test $(git rev-parse HEAD) = $(git rev-parse master^) &&
- git diff --cached > output &&
+ git diff --cached >output &&
test_cmp expect output &&
- git diff > output &&
+ git diff >output &&
test_cmp expect1 output &&
git add file &&
git commit -m alternate\ second &&
- git diff master..stashbranch > output &&
+ git diff master..stashbranch >output &&
test_cmp output expect2 &&
test 0 = $(git stash list | wc -l)
'
test_expect_success 'apply -q is quiet' '
- echo foo > file &&
+ echo foo >file &&
git stash &&
- git stash apply -q > output.out 2>&1 &&
+ git stash apply -q >output.out 2>&1 &&
test_must_be_empty output.out
'
test_expect_success 'save -q is quiet' '
- git stash save --quiet > output.out 2>&1 &&
+ git stash save --quiet >output.out 2>&1 &&
test_must_be_empty output.out
'
test_expect_success 'pop -q is quiet' '
- git stash pop -q > output.out 2>&1 &&
+ git stash pop -q >output.out 2>&1 &&
test_must_be_empty output.out
'
test_expect_success 'pop -q --index works and is quiet' '
- echo foo > file &&
+ echo foo >file &&
git add file &&
git stash save --quiet &&
- git stash pop -q --index > output.out 2>&1 &&
+ git stash pop -q --index >output.out 2>&1 &&
test foo = "$(git show :file)" &&
test_must_be_empty output.out
'
test_expect_success 'drop -q is quiet' '
git stash &&
- git stash drop -q > output.out 2>&1 &&
+ git stash drop -q >output.out 2>&1 &&
test_must_be_empty output.out
'
test_expect_success 'stash -k' '
- echo bar3 > file &&
- echo bar4 > file2 &&
+ echo bar3 >file &&
+ echo bar4 >file2 &&
git add file2 &&
git stash -k &&
test bar,bar4 = $(cat file),$(cat file2)
'
test_expect_success 'stash --no-keep-index' '
- echo bar33 > file &&
- echo bar44 > file2 &&
+ echo bar33 >file &&
+ echo bar44 >file2 &&
git add file2 &&
git stash --no-keep-index &&
test bar,bar2 = $(cat file),$(cat file2)
'
test_expect_success 'stash --invalid-option' '
- echo bar5 > file &&
- echo bar6 > file2 &&
+ echo bar5 >file &&
+ echo bar6 >file2 &&
git add file2 &&
test_must_fail git stash --invalid-option &&
test_must_fail git stash save --invalid-option &&
test new = "$(cat file3)"
'
+test_expect_success 'stash --intent-to-add file' '
+ git reset --hard &&
+ echo new >file4 &&
+ git add --intent-to-add file4 &&
+ test_when_finished "git rm -f file4" &&
+ test_must_fail git stash
+'
+
test_expect_success 'stash rm then recreate' '
git reset --hard &&
git rm file &&
test foo = "$(cat file/file)"
'
+test_expect_success 'giving too many ref arguments does not modify files' '
+ git stash clear &&
+ test_when_finished "git reset --hard HEAD" &&
+ echo foo >file2 &&
+ git stash &&
+ echo bar >file2 &&
+ git stash &&
+ test-tool chmtime =123456789 file2 &&
+ for type in apply pop "branch stash-branch"
+ do
+ test_must_fail git stash $type stash@{0} stash@{1} 2>err &&
+ test_i18ngrep "Too many revisions" err &&
+ test 123456789 = $(test-tool chmtime -g file2) || return 1
+ done
+'
+
+test_expect_success 'drop: too many arguments errors out (does nothing)' '
+ git stash list >expect &&
+ test_must_fail git stash drop stash@{0} stash@{1} 2>err &&
+ test_i18ngrep "Too many revisions" err &&
+ git stash list >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'show: too many arguments errors out (does nothing)' '
+ test_must_fail git stash show stash@{0} stash@{1} 2>err 1>out &&
+ test_i18ngrep "Too many revisions" err &&
+ test_must_be_empty out
+'
+
test_expect_success 'stash create - no changes' '
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git reset --hard &&
- echo foo >> file &&
+ echo foo >>file &&
STASH_ID=$(git stash create) &&
git reset --hard &&
git stash branch stash-branch ${STASH_ID} &&
- test_when_finished "git reset --hard HEAD && git checkout master && git branch -D stash-branch" &&
+ test_when_finished "git reset --hard HEAD && git checkout master &&
+ git branch -D stash-branch" &&
test $(git ls-files --modified | wc -l) -eq 1
'
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git reset --hard &&
- echo foo >> file &&
+ echo foo >>file &&
git stash &&
test_when_finished "git stash drop" &&
- echo bar >> file &&
+ echo bar >>file &&
STASH_ID=$(git stash create) &&
git reset --hard &&
git stash branch stash-branch ${STASH_ID} &&
- test_when_finished "git reset --hard HEAD && git checkout master && git branch -D stash-branch" &&
+ test_when_finished "git reset --hard HEAD && git checkout master &&
+ git branch -D stash-branch" &&
test $(git ls-files --modified | wc -l) -eq 1
'
+test_expect_success 'stash branch complains with no arguments' '
+ test_must_fail git stash branch 2>err &&
+ test_i18ngrep "No branch name specified" err
+'
+
test_expect_success 'stash show format defaults to --stat' '
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git reset --hard &&
- echo foo >> file &&
+ echo foo >>file &&
git stash &&
test_when_finished "git stash drop" &&
- echo bar >> file &&
+ echo bar >>file &&
STASH_ID=$(git stash create) &&
git reset --hard &&
cat >expected <<-EOF &&
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git reset --hard &&
- echo foo >> file &&
+ echo foo >>file &&
git stash &&
test_when_finished "git stash drop" &&
- echo bar >> file &&
+ echo bar >>file &&
STASH_ID=$(git stash create) &&
git reset --hard &&
echo "1 0 file" >expected &&
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git reset --hard &&
- echo foo >> file &&
+ echo foo >>file &&
git stash &&
test_when_finished "git stash drop" &&
- echo bar >> file &&
+ echo bar >>file &&
STASH_ID=$(git stash create) &&
git reset --hard &&
cat >expected <<-EOF &&
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git reset --hard &&
- echo foo >> file &&
+ echo foo >>file &&
STASH_ID=$(git stash create) &&
git reset --hard &&
echo "1 0 file" >expected &&
git stash clear &&
test_when_finished "git reset --hard HEAD" &&
git reset --hard &&
- echo foo >> file &&
+ echo foo >>file &&
STASH_ID=$(git stash create) &&
git reset --hard &&
cat >expected <<-EOF &&
test_cmp expected actual
'
-test_expect_success 'stash drop - fail early if specified stash is not a stash reference' '
+test_expect_success 'stash show --patience shows diff' '
+ git reset --hard &&
+ echo foo >>file &&
+ STASH_ID=$(git stash create) &&
+ git reset --hard &&
+ cat >expected <<-EOF &&
+ diff --git a/file b/file
+ index 7601807..71b52c4 100644
+ --- a/file
+ +++ b/file
+ @@ -1 +1,2 @@
+ baz
+ +foo
+ EOF
+ git stash show --patience ${STASH_ID} >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'drop: fail early if specified stash is not a stash ref' '
git stash clear &&
test_when_finished "git reset --hard HEAD && git stash clear" &&
git reset --hard &&
- echo foo > file &&
+ echo foo >file &&
git stash &&
- echo bar > file &&
+ echo bar >file &&
git stash &&
test_must_fail git stash drop $(git rev-parse stash@{0}) &&
git stash pop &&
git reset --hard HEAD
'
-test_expect_success 'stash pop - fail early if specified stash is not a stash reference' '
+test_expect_success 'pop: fail early if specified stash is not a stash ref' '
git stash clear &&
test_when_finished "git reset --hard HEAD && git stash clear" &&
git reset --hard &&
- echo foo > file &&
+ echo foo >file &&
git stash &&
- echo bar > file &&
+ echo bar >file &&
git stash &&
test_must_fail git stash pop $(git rev-parse stash@{0}) &&
git stash pop &&
test_expect_success 'ref with non-existent reflog' '
git stash clear &&
- echo bar5 > file &&
- echo bar6 > file2 &&
+ echo bar5 >file &&
+ echo bar6 >file2 &&
git add file2 &&
git stash &&
test_must_fail git rev-parse --quiet --verify does-not-exist &&
test_expect_success 'invalid ref of the form stash@{n}, n >= N' '
git stash clear &&
test_must_fail git stash drop stash@{0} &&
- echo bar5 > file &&
- echo bar6 > file2 &&
+ echo bar5 >file &&
+ echo bar6 >file2 &&
git add file2 &&
git stash &&
test_must_fail git stash drop stash@{1} &&
git stash drop
'
-test_expect_success 'stash branch should not drop the stash if the branch exists' '
+test_expect_success 'branch: do not drop the stash if the branch exists' '
git stash clear &&
echo foo >file &&
git add file &&
git rev-parse stash@{0} --
'
-test_expect_success 'stash branch should not drop the stash if the apply fails' '
+test_expect_success 'branch: should not drop the stash if the apply fails' '
git stash clear &&
git reset HEAD~1 --hard &&
echo foo >file &&
git rev-parse stash@{0} --
'
-test_expect_success 'stash apply shows status same as git status (relative to current directory)' '
+test_expect_success 'apply: show same status as git status (relative to ./)' '
git stash clear &&
echo 1 >subdir/subfile1 &&
echo 2 >subdir/subfile2 &&
test_i18ncmp expect actual
'
-cat > expect << EOF
+cat >expect <<EOF
diff --git a/HEAD b/HEAD
new file mode 100644
index 0000000..fe0cbee
test_expect_success 'stash where working directory contains "HEAD" file' '
git stash clear &&
git reset --hard &&
- echo file-not-a-ref > HEAD &&
+ echo file-not-a-ref >HEAD &&
git add HEAD &&
test_tick &&
git stash &&
git diff-files --quiet &&
git diff-index --cached --quiet HEAD &&
test "$(git rev-parse stash^)" = "$(git rev-parse HEAD)" &&
- git diff stash^..stash > output &&
+ git diff stash^..stash >output &&
test_cmp expect output
'
test_i18ncmp expect actual
'
-test_expect_success 'stash push with pathspec shows no changes when there are none' '
+test_expect_success 'push <pathspec>: show no changes when there are none' '
>foo &&
git add foo &&
git commit -m "tmp" &&
test_i18ncmp expect actual
'
-test_expect_success 'stash push with pathspec not in the repository errors out' '
+test_expect_success 'push: <pathspec> not in the repository errors out' '
>untracked &&
test_must_fail git stash push untracked &&
test_path_is_file untracked
'
+test_expect_success 'push: -q is quiet with changes' '
+ >foo &&
+ git add foo &&
+ git stash push -q >output 2>&1 &&
+ test_must_be_empty output
+'
+
+test_expect_success 'push: -q is quiet with no changes' '
+ git stash push -q >output 2>&1 &&
+ test_must_be_empty output
+'
+
+test_expect_success 'push: -q is quiet even if there is no initial commit' '
+ git init foo_dir &&
+ test_when_finished rm -rf foo_dir &&
+ (
+ cd foo_dir &&
+ >bar &&
+ test_must_fail git stash push -q >output 2>&1 &&
+ test_must_be_empty output
+ )
+'
+
test_expect_success 'untracked files are left in place when -u is not given' '
>file &&
git add file &&
test_path_is_file subdir/untracked
'
+test_expect_success 'stash with user.name and user.email set works' '
+ test_config user.name "A U Thor" &&
+ test_config user.email "a.u@thor" &&
+ git stash
+'
+
test_expect_success 'stash works when user.name and user.email are not set' '
git reset &&
>1 &&
test_i18ncmp expect actual
'
+test_expect_success 'stash -u with globs' '
+ >untracked.txt &&
+ git stash -u -- ":(glob)**/*.txt" &&
+ test_path_is_missing untracked.txt
+'
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='Test git stash show configuration.'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_commit file
+'
+
+# takes three parameters:
+# 1. the stash.showStat value (or "<unset>")
+# 2. the stash.showPatch value (or "<unset>")
+# 3. the diff options of the expected output (or nothing for no output)
+test_stat_and_patch () {
+ if test "<unset>" = "$1"
+ then
+ test_unconfig stash.showStat
+ else
+ test_config stash.showStat "$1"
+ fi &&
+
+ if test "<unset>" = "$2"
+ then
+ test_unconfig stash.showPatch
+ else
+ test_config stash.showPatch "$2"
+ fi &&
+
+ shift 2 &&
+ echo 2 >file.t &&
+ if test $# != 0
+ then
+ git diff "$@" >expect
+ fi &&
+ git stash &&
+ git stash show >actual &&
+
+ if test $# = 0
+ then
+ test_must_be_empty actual
+ else
+ test_cmp expect actual
+ fi
+}
+
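+# For example, a call like "test_stat_and_patch false true -p" sets
+# stash.showStat=false and stash.showPatch=true, then expects "git stash show"
+# to produce the same output that "git diff -p" gave for the stashed change.
+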
+test_expect_success 'showStat unset showPatch unset' '
+ test_stat_and_patch "<unset>" "<unset>" --stat
+'
+
+test_expect_success 'showStat unset showPatch false' '
+ test_stat_and_patch "<unset>" false --stat
+'
+
+test_expect_success 'showStat unset showPatch true' '
+ test_stat_and_patch "<unset>" true --stat -p
+'
+
+test_expect_success 'showStat false showPatch unset' '
+ test_stat_and_patch false "<unset>"
+'
+
+test_expect_success 'showStat false showPatch false' '
+ test_stat_and_patch false false
+'
+
+test_expect_success 'showStat false showPatch true' '
+ test_stat_and_patch false true -p
+'
+
+test_expect_success 'showStat true showPatch unset' '
+ test_stat_and_patch true "<unset>" --stat
+'
+
+test_expect_success 'showStat true showPatch false' '
+ test_stat_and_patch true false --stat
+'
+
+test_expect_success 'showStat true showPatch true' '
+ test_stat_and_patch true true --stat -p
+'
+
+test_done
ls patches/0004-This-is-an-excessively-long-subject-line-for-a-messa.patch
'
+test_expect_success 'failure to write cover-letter aborts gracefully' '
+ test_when_finished "rmdir 0000-cover-letter.patch" &&
+ mkdir 0000-cover-letter.patch &&
+ test_must_fail git format-patch --no-renames --cover-letter -1
+'
+
test_expect_success 'cover-letter inherits diff options' '
git mv file foo &&
git commit -m foo &&
test_cmp expect actual
'
+test_expect_success 'setup for --combined-all-paths' '
+ git branch side1c &&
+ git branch side2c &&
+ git checkout side1c &&
+ test_seq 1 10 >filename-side1c &&
+ git add filename-side1c &&
+ git commit -m with &&
+ git checkout side2c &&
+ test_seq 1 9 >filename-side2c &&
+ echo ten >>filename-side2c &&
+ git add filename-side2c &&
+ git commit -m iam &&
+ git checkout -b mergery side1c &&
+ git merge --no-commit side2c &&
+ git rm filename-side1c &&
+ echo eleven >>filename-side2c &&
+ git mv filename-side2c filename-merged &&
+ git add filename-merged &&
+ git commit
+'
+
+test_expect_success '--combined-all-paths and --raw' '
+ cat <<-\EOF >expect &&
+ ::100644 100644 100644 f00c965d8307308469e537302baa73048488f162 088bd5d92c2a8e0203ca8e7e4c2a5c692f6ae3f7 333b9c62519f285e1854830ade0fe1ef1d40ee1b RR filename-side1c filename-side2c filename-merged
+ EOF
+ git diff-tree -c -M --raw --combined-all-paths HEAD >actual.tmp &&
+ sed 1d <actual.tmp >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '--combined-all-paths and --cc' '
+ cat <<-\EOF >expect &&
+ --- a/filename-side1c
+ --- a/filename-side2c
+ +++ b/filename-merged
+ EOF
+ git diff-tree --cc -M --combined-all-paths HEAD >actual.tmp &&
+ grep ^[-+][-+][-+] <actual.tmp >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success FUNNYNAMES 'setup for --combined-all-paths with funny names' '
+ git branch side1d &&
+ git branch side2d &&
+ git checkout side1d &&
+ test_seq 1 10 >"$(printf "file\twith\ttabs")" &&
+ git add file* &&
+ git commit -m with &&
+ git checkout side2d &&
+ test_seq 1 9 >"$(printf "i\tam\ttabbed")" &&
+ echo ten >>"$(printf "i\tam\ttabbed")" &&
+ git add *tabbed &&
+ git commit -m iam &&
+ git checkout -b funny-names-mergery side1d &&
+ git merge --no-commit side2d &&
+ git rm *tabs &&
+ echo eleven >>"$(printf "i\tam\ttabbed")" &&
+ git mv "$(printf "i\tam\ttabbed")" "$(printf "fickle\tnaming")" &&
+ git add fickle* &&
+ git commit
+'
+
+test_expect_success FUNNYNAMES '--combined-all-paths and --raw and funny names' '
+ cat <<-\EOF >expect &&
+ ::100644 100644 100644 f00c965d8307308469e537302baa73048488f162 088bd5d92c2a8e0203ca8e7e4c2a5c692f6ae3f7 333b9c62519f285e1854830ade0fe1ef1d40ee1b RR "file\twith\ttabs" "i\tam\ttabbed" "fickle\tnaming"
+ EOF
+ git diff-tree -c -M --raw --combined-all-paths HEAD >actual.tmp &&
+ sed 1d <actual.tmp >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success FUNNYNAMES '--combined-all-paths and --raw -and -z and funny names' '
+ printf "aaf8087c3cbd4db8e185a2d074cf27c53cfb75d7\0::100644 100644 100644 f00c965d8307308469e537302baa73048488f162 088bd5d92c2a8e0203ca8e7e4c2a5c692f6ae3f7 333b9c62519f285e1854830ade0fe1ef1d40ee1b RR\0file\twith\ttabs\0i\tam\ttabbed\0fickle\tnaming\0" >expect &&
+ git diff-tree -c -M --raw --combined-all-paths -z HEAD >actual &&
+ test_cmp -a expect actual
+'
+
+test_expect_success FUNNYNAMES '--combined-all-paths and --cc and funny names' '
+ cat <<-\EOF >expect &&
+ --- "a/file\twith\ttabs"
+ --- "a/i\tam\ttabbed"
+ +++ "b/fickle\tnaming"
+ EOF
+ git diff-tree --cc -M --combined-all-paths HEAD >actual.tmp &&
+ grep ^[-+][-+][-+] <actual.tmp >actual &&
+ test_cmp expect actual
+'
+
test_done
export GIT_CEILING_DIRECTORIES &&
cd non/git &&
test_must_fail git diff --no-index a 2>actual.err &&
- echo "usage: git diff --no-index <path> <path>" >expect.err &&
- test_cmp expect.err actual.err
+ test_i18ngrep "usage: git diff --no-index" actual.err
)
'
test_cmp expect actual
'
+test_expect_success 'diff --no-index allows external diff' '
+ test_expect_code 1 \
+ env GIT_EXTERNAL_DIFF="echo external ;:" \
+ git diff --no-index non/git/a non/git/b >actual &&
+ echo external >expect &&
+ test_cmp expect actual
+'
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='behavior of diff when reading objects in a partial clone'
+
+. ./test-lib.sh
+
+test_expect_success 'git show batches blobs' '
+ test_when_finished "rm -rf server client trace" &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ echo b >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+ git clone --bare --filter=blob:limit=0 "file://$(pwd)/server" client &&
+
+ # Ensure that there is exactly 1 negotiation by checking that there is
+ # only 1 "done" line sent. ("done" marks the end of negotiation.)
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client show HEAD &&
+ grep "git> done" trace >done_lines &&
+ test_line_count = 1 done_lines
+'
+
+test_expect_success 'diff batches blobs' '
+ test_when_finished "rm -rf server client trace" &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ echo b >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+ echo c >server/c &&
+ echo d >server/d &&
+ git -C server add c d &&
+ git -C server commit -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+ git clone --bare --filter=blob:limit=0 "file://$(pwd)/server" client &&
+
+ # Ensure that there is exactly 1 negotiation by checking that there is
+ # only 1 "done" line sent. ("done" marks the end of negotiation.)
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client diff HEAD^ HEAD &&
+ grep "git> done" trace >done_lines &&
+ test_line_count = 1 done_lines
+'
+
+test_expect_success 'diff skips same-OID blobs' '
+ test_when_finished "rm -rf server client trace" &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ echo b >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+ echo another-a >server/a &&
+ git -C server add a &&
+ git -C server commit -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+ git clone --bare --filter=blob:limit=0 "file://$(pwd)/server" client &&
+
+ echo a | git hash-object --stdin >hash-old-a &&
+ echo another-a | git hash-object --stdin >hash-new-a &&
+ echo b | git hash-object --stdin >hash-b &&
+
+ # Ensure that only a and another-a are fetched.
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client diff HEAD^ HEAD &&
+ grep "want $(cat hash-old-a)" trace &&
+ grep "want $(cat hash-new-a)" trace &&
+ ! grep "want $(cat hash-b)" trace
+'
+
+test_expect_success 'diff with rename detection batches blobs' '
+ test_when_finished "rm -rf server client trace" &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ printf "b\nb\nb\nb\nb\n" >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+ rm server/b &&
+ printf "b\nb\nb\nb\nbX\n" >server/c &&
+ git -C server add c &&
+ git -C server commit -a -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+ git clone --bare --filter=blob:limit=0 "file://$(pwd)/server" client &&
+
+ # Ensure that there is exactly 1 negotiation by checking that there is
+ # only 1 "done" line sent. ("done" marks the end of negotiation.)
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client diff -M HEAD^ HEAD >out &&
+ grep "similarity index" out &&
+ grep "git> done" trace >done_lines &&
+ test_line_count = 1 done_lines
+'
+
+test_done
printf "Subject: " >subject-prefix &&
- cat - subject-prefix msg-without-scissors-line >msg-with-scissors-line <<-\EOF &&
+ cat - subject-prefix msg-without-scissors-line >msg-with-scissors-line <<-\EOF
This line should not be included in the commit message with --scissors enabled.
- - >8 - - remove everything above this line - - >8 - -
EOF
-
- signoff="Signed-off-by: $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL>"
'
test_expect_success setup '
test_cmp expect actual
'
+test_expect_success '%(trailers:only=yes) shows only "key: value" trailers' '
+ git log --no-walk --pretty=format:"%(trailers:only=yes)" >actual &&
+ grep -v patch.description <trailers >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success '%(trailers:only=no) shows all trailers' '
+ git log --no-walk --pretty=format:"%(trailers:only=no)" >actual &&
+ cat trailers >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success '%(trailers:only=no,only=true) shows only "key: value" trailers' '
+ git log --no-walk --pretty=format:"%(trailers:only=yes)" >actual &&
+ grep -v patch.description <trailers >expect &&
+ test_cmp expect actual
+'
+
test_expect_success '%(trailers:unfold) unfolds trailers' '
git log --no-walk --pretty="%(trailers:unfold)" >actual &&
{
test_cmp expect actual
'
+test_expect_success 'pretty format %(trailers:key=foo) shows that trailer' '
+ git log --no-walk --pretty="format:%(trailers:key=Acked-by)" >actual &&
+ echo "Acked-by: A U Thor <author@example.com>" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'pretty format %(trailers:key=foo) is case insensitive' '
+ git log --no-walk --pretty="format:%(trailers:key=AcKed-bY)" >actual &&
+ echo "Acked-by: A U Thor <author@example.com>" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'pretty format %(trailers:key=foo:) trailing colon also works' '
+ git log --no-walk --pretty="format:%(trailers:key=Acked-by:)" >actual &&
+ echo "Acked-by: A U Thor <author@example.com>" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'pretty format %(trailers:key=foo) multiple keys' '
+ git log --no-walk --pretty="format:%(trailers:key=Acked-by:,key=Signed-off-By)" >actual &&
+ grep -v patch.description <trailers >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success '%(trailers:key=nonexistent) becomes empty' '
+ git log --no-walk --pretty="x%(trailers:key=Nacked-by)x" >actual &&
+ echo "xx" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success '%(trailers:key=foo) handles multiple lines even if folded' '
+ git log --no-walk --pretty="format:%(trailers:key=Signed-Off-by)" >actual &&
+ grep -v patch.description <trailers | grep -v Acked-by >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success '%(trailers:key=foo,unfold) properly unfolds' '
+ git log --no-walk --pretty="format:%(trailers:key=Signed-Off-by,unfold)" >actual &&
+ unfold <trailers | grep Signed-off-by >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'pretty format %(trailers:key=foo,only=no) also includes nontrailer lines' '
+ git log --no-walk --pretty="format:%(trailers:key=Acked-by,only=no)" >actual &&
+ {
+ echo "Acked-by: A U Thor <author@example.com>" &&
+ grep patch.description <trailers
+ } >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success '%(trailers:key) without value is error' '
+ git log --no-walk --pretty="tformat:%(trailers:key)" >actual &&
+ echo "%(trailers:key)" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success '%(trailers:key=foo,valueonly) shows only value' '
+ git log --no-walk --pretty="format:%(trailers:key=Acked-by,valueonly)" >actual &&
+ echo "A U Thor <author@example.com>" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'pretty format %(trailers:separator) changes separator' '
+ git log --no-walk --pretty=format:"X%(trailers:separator=%x00,unfold)X" >actual &&
+ printf "XSigned-off-by: A U Thor <author@example.com>\0Acked-by: A U Thor <author@example.com>\0[ v2 updated patch description ]\0Signed-off-by: A U Thor <author@example.com>X" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'pretty format %(trailers) combining separator/key/valueonly' '
+ git commit --allow-empty -F - <<-\EOF &&
+ Important fix
+
+ The fix is explained here
+
+ Closes: #1234
+ EOF
+
+ git commit --allow-empty -F - <<-\EOF &&
+ Another fix
+
+ The fix is explained here
+
+ Closes: #567
+ Closes: #890
+ EOF
+
+ git commit --allow-empty -F - <<-\EOF &&
+ Does not close any tickets
+ EOF
+
+ git log --pretty="%s% (trailers:separator=%x2c%x20,key=Closes,valueonly)" HEAD~3.. >actual &&
+ test_write_lines \
+ "Does not close any tickets" \
+ "Another fix #567, #890" \
+ "Important fix #1234" >expect &&
+ test_cmp expect actual
+'
+
test_expect_success 'trailer parsing not fooled by --- line' '
git commit --allow-empty -F - <<-\EOF &&
this is the subject
git log $(for x in $(test_seq 200); do echo -L $((2*x)),+1:c.c; done)
'
+test_expect_success '-s shows only line-log commits' '
+ git log --format="commit %s" -L1,24:b.c >expect.raw &&
+ grep ^commit expect.raw >expect &&
+ git log --format="commit %s" -L1,24:b.c -s >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '-p shows the default patch output' '
+ git log -L1,24:b.c >expect &&
+ git log -L1,24:b.c -p >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '--raw is forbidden' '
+ test_must_fail git log -L1,24:b.c --raw
+'
+
test_done
test_expect_success 'am with dos files with --keep-cr' '
git checkout -b dosfiles-keep-cr initial &&
- git format-patch -k --stdout initial..master | git am --keep-cr -k -3 &&
+ git format-patch -k --stdout initial..master >output &&
+ git am --keep-cr -k -3 output &&
git diff --exit-code master
'
test_expect_success 'am with dos files config am.keepcr' '
git config am.keepcr 1 &&
git checkout -b dosfiles-conf-keepcr initial &&
- git format-patch -k --stdout initial..master | git am -k -3 &&
+ git format-patch -k --stdout initial..master >output &&
+ git am -k -3 output &&
git diff --exit-code master
'
test_expect_success 'prune: prune former HEAD after checking out branch' '
- head_sha1=$(git rev-parse HEAD) &&
+ head_oid=$(git rev-parse HEAD) &&
git checkout --quiet master &&
git prune -v >prune_actual &&
- grep "$head_sha1" prune_actual
+ grep "$head_oid" prune_actual
'
'
test_expect_success 'prune .git/shallow' '
- SHA1=$(echo hi|git commit-tree HEAD^{tree}) &&
- echo $SHA1 >.git/shallow &&
+ oid=$(echo hi|git commit-tree HEAD^{tree}) &&
+ echo $oid >.git/shallow &&
git prune --dry-run >out &&
- grep $SHA1 .git/shallow &&
- grep $SHA1 out &&
+ grep $oid .git/shallow &&
+ grep $oid out &&
git prune &&
test_path_is_missing .git/shallow
'
+test_expect_success 'prune .git/shallow when there are no loose objects' '
+ oid=$(echo hi|git commit-tree HEAD^{tree}) &&
+ echo $oid >.git/shallow &&
+ git update-ref refs/heads/shallow-tip $oid &&
+ git repack -ad &&
+ # verify assumption that all loose objects are gone
+ git count-objects | grep ^0 &&
+ git prune &&
+ echo $oid >expect &&
+ test_cmp expect .git/shallow
+'
+
test_expect_success 'prune: handle alternate object database' '
test_create_repo A &&
git -C A commit --allow-empty -m "initial commit" &&
git reset --hard HEAD^
) &&
git prune --expire=now &&
- SHA1=`git hash-object expected` &&
- git -C third-worktree show "$SHA1" >actual &&
+ oid=$(git hash-object expected) &&
+ git -C third-worktree show "$oid" >actual &&
test_cmp expected actual
'
git prune --no-expire
'
+test_expect_success 'trivial prune with bitmaps enabled' '
+ git repack -adb &&
+ blob=$(echo bitmap-unreachable-blob | git hash-object -w --stdin) &&
+ git prune --expire=now &&
+ git cat-file -e HEAD &&
+ test_must_fail git cat-file -e $blob
+'
+
test_done
bitmaptip=$(git rev-parse master) &&
blob=$(echo tagged-blob | git hash-object -w --stdin) &&
git tag tagged-blob $blob &&
- git config repack.writebitmaps true &&
- git config pack.writebitmaphashcache true
+ git config repack.writebitmaps true
'
test_expect_success 'full repack creates bitmaps' '
git clone --bare . compat-jgit.git &&
(
cd compat-jgit.git &&
- rm -f .git/objects/pack/*.bitmap &&
+ rm -f objects/pack/*.bitmap &&
jgit gc &&
git rev-list --test-bitmap HEAD
)
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r1 pack-objects --rev --stdout >all.pack <<-EOF &&
+ git -C r1 pack-objects --revs --stdout >all.pack <<-EOF &&
HEAD
EOF
git -C r1 index-pack ../all.pack &&
'
test_expect_success 'verify blob:none packfile has no blobs' '
- git -C r1 pack-objects --rev --stdout --filter=blob:none >filter.pack <<-EOF &&
+ git -C r1 pack-objects --revs --stdout --filter=blob:none >filter.pack <<-EOF &&
HEAD
EOF
git -C r1 index-pack ../filter.pack &&
git -C r5 commit -m "foo" &&
del=$(git -C r5 rev-parse HEAD^{tree} | sed "s|..|&/|") &&
rm r5/.git/objects/$del &&
- test_must_fail git -C r5 pack-objects --rev --stdout 2>bad_tree <<-EOF &&
+ test_must_fail git -C r5 pack-objects --revs --stdout 2>bad_tree <<-EOF &&
HEAD
EOF
grep "bad tree object" bad_tree
'
test_expect_success 'verify tree:0 packfile has no blobs or trees' '
- git -C r1 pack-objects --rev --stdout --filter=tree:0 >commitsonly.pack <<-EOF &&
+ git -C r1 pack-objects --revs --stdout --filter=tree:0 >commitsonly.pack <<-EOF &&
HEAD
EOF
git -C r1 index-pack ../commitsonly.pack &&
test_expect_success 'grab tree directly when using tree:0' '
# We should get the tree specified directly but not its blobs or subtrees.
- git -C r1 pack-objects --rev --stdout --filter=tree:0 >commitsonly.pack <<-EOF &&
+ git -C r1 pack-objects --revs --stdout --filter=tree:0 >commitsonly.pack <<-EOF &&
HEAD:
EOF
git -C r1 index-pack ../commitsonly.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r2 pack-objects --rev --stdout >all.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout >all.pack <<-EOF &&
HEAD
EOF
git -C r2 index-pack ../all.pack &&
'
test_expect_success 'verify blob:limit=500 omits all blobs' '
- git -C r2 pack-objects --rev --stdout --filter=blob:limit=500 >filter.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout --filter=blob:limit=500 >filter.pack <<-EOF &&
HEAD
EOF
git -C r2 index-pack ../filter.pack &&
'
test_expect_success 'verify blob:limit=1000' '
- git -C r2 pack-objects --rev --stdout --filter=blob:limit=1000 >filter.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout --filter=blob:limit=1000 >filter.pack <<-EOF &&
HEAD
EOF
git -C r2 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r2 pack-objects --rev --stdout --filter=blob:limit=1001 >filter.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout --filter=blob:limit=1001 >filter.pack <<-EOF &&
HEAD
EOF
git -C r2 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r2 pack-objects --rev --stdout --filter=blob:limit=10001 >filter.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout --filter=blob:limit=10001 >filter.pack <<-EOF &&
HEAD
EOF
git -C r2 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r2 pack-objects --rev --stdout --filter=blob:limit=1k >filter.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout --filter=blob:limit=1k >filter.pack <<-EOF &&
HEAD
EOF
git -C r2 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r2 pack-objects --rev --stdout --filter=blob:limit=1k >filter.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout --filter=blob:limit=1k >filter.pack <<-EOF &&
HEAD
$(git -C r2 rev-parse HEAD:large.10000)
EOF
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r2 pack-objects --rev --stdout --filter=blob:limit=1m >filter.pack <<-EOF &&
+ git -C r2 pack-objects --revs --stdout --filter=blob:limit=1m >filter.pack <<-EOF &&
HEAD
EOF
git -C r2 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r3 pack-objects --rev --stdout >all.pack <<-EOF &&
+ git -C r3 pack-objects --revs --stdout >all.pack <<-EOF &&
HEAD
EOF
git -C r3 index-pack ../all.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r3 pack-objects --rev --stdout --filter=sparse:path=../pattern1 >filter.pack <<-EOF &&
+ git -C r3 pack-objects --revs --stdout --filter=sparse:path=../pattern1 >filter.pack <<-EOF &&
HEAD
EOF
git -C r3 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r3 pack-objects --rev --stdout --filter=sparse:path=../pattern2 >filter.pack <<-EOF &&
+ git -C r3 pack-objects --revs --stdout --filter=sparse:path=../pattern2 >filter.pack <<-EOF &&
HEAD
EOF
git -C r3 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r4 pack-objects --rev --stdout >all.pack <<-EOF &&
+ git -C r4 pack-objects --revs --stdout >all.pack <<-EOF &&
HEAD
EOF
git -C r4 index-pack ../all.pack &&
sort >expected &&
oid=$(git -C r4 ls-files -s pattern | awk -f print_2.awk) &&
- git -C r4 pack-objects --rev --stdout --filter=sparse:oid=$oid >filter.pack <<-EOF &&
+ git -C r4 pack-objects --revs --stdout --filter=sparse:oid=$oid >filter.pack <<-EOF &&
HEAD
EOF
git -C r4 index-pack ../filter.pack &&
awk -f print_2.awk ls_files_result |
sort >expected &&
- git -C r4 pack-objects --rev --stdout --filter=sparse:oid=master:pattern >filter.pack <<-EOF &&
+ git -C r4 pack-objects --revs --stdout --filter=sparse:oid=master:pattern >filter.pack <<-EOF &&
HEAD
EOF
git -C r4 index-pack ../filter.pack &&
'
test_expect_success 'verify pack-objects fails w/ missing objects' '
- test_must_fail git -C r1 pack-objects --rev --stdout >miss.pack <<-EOF
+ test_must_fail git -C r1 pack-objects --revs --stdout >miss.pack <<-EOF
HEAD
EOF
'
test_expect_success 'verify pack-objects fails w/ --missing=error' '
- test_must_fail git -C r1 pack-objects --rev --stdout --missing=error >miss.pack <<-EOF
+ test_must_fail git -C r1 pack-objects --revs --stdout --missing=error >miss.pack <<-EOF
HEAD
EOF
'
test_expect_success 'verify pack-objects w/ --missing=allow-any' '
- git -C r1 pack-objects --rev --stdout --missing=allow-any >miss.pack <<-EOF
+ git -C r1 pack-objects --revs --stdout --missing=allow-any >miss.pack <<-EOF
HEAD
EOF
'
test_expect_success 'write graph' '
cd "$TRASH_DIRECTORY/full" &&
- graph1=$(git commit-graph write) &&
+ git commit-graph write &&
test_path_is_file $objdir/info/commit-graph &&
graph_read_expect "3"
'
GRAPH_BYTE_OCTOPUS=$(($GRAPH_OCTOPUS_DATA_OFFSET + 4))
GRAPH_BYTE_FOOTER=$(($GRAPH_OCTOPUS_DATA_OFFSET + 4 * $NUM_OCTOPUS_EDGES))
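+# Save a backup of the commit-graph file and arrange for it to be restored
+# when the current test finishes, so each corruption test starts from a
+# pristine graph.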
+corrupt_graph_setup() {
+ cd "$TRASH_DIRECTORY/full" &&
+ test_when_finished mv commit-graph-backup $objdir/info/commit-graph &&
+ cp $objdir/info/commit-graph commit-graph-backup
+}
+
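+# corrupt_graph_verify <message> [no-copy]
+# Expect "git commit-graph verify" to fail with output matching <message>.
+# Unless "no-copy" is given, keep a copy of the corrupted file as
+# commit-graph-pre-write-test for later tests, then confirm that rewriting
+# the graph produces one that verifies cleanly again.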
+corrupt_graph_verify() {
+ grepstr=$1
+ test_must_fail git commit-graph verify 2>test_err &&
+ grep -v "^+" test_err >err &&
+ test_i18ngrep "$grepstr" err &&
+ if test "$2" != "no-copy"
+ then
+ cp $objdir/info/commit-graph commit-graph-pre-write-test
+ fi &&
+ git status --short &&
+ GIT_TEST_COMMIT_GRAPH_DIE_ON_LOAD=true git commit-graph write &&
+ git commit-graph verify
+}
+
# usage: corrupt_graph_and_verify <position> <data> <string> [<zero_pos>]
# Manipulates the commit-graph file at the position
# by inserting the data, optionally zeroing the file
pos=$1
data="${2:-\0}"
grepstr=$3
- cd "$TRASH_DIRECTORY/full" &&
+ corrupt_graph_setup &&
orig_size=$(wc -c < $objdir/info/commit-graph) &&
zero_pos=${4:-${orig_size}} &&
- test_when_finished mv commit-graph-backup $objdir/info/commit-graph &&
- cp $objdir/info/commit-graph commit-graph-backup &&
printf "$data" | dd of="$objdir/info/commit-graph" bs=1 seek="$pos" conv=notrunc &&
dd of="$objdir/info/commit-graph" bs=1 seek="$zero_pos" if=/dev/null &&
generate_zero_bytes $(($orig_size - $zero_pos)) >>"$objdir/info/commit-graph" &&
- test_must_fail git commit-graph verify 2>test_err &&
- grep -v "^+" test_err >err &&
- test_i18ngrep "$grepstr" err
+ corrupt_graph_verify "$grepstr"
}
+test_expect_success POSIXPERM,SANITY 'detect permission problem' '
+ corrupt_graph_setup &&
+ chmod 000 $objdir/info/commit-graph &&
+ corrupt_graph_verify "Could not open" "no-copy"
+'
+
+test_expect_success 'detect too small' '
+ corrupt_graph_setup &&
+ echo "a small graph" >$objdir/info/commit-graph &&
+ corrupt_graph_verify "too small"
+'
+
test_expect_success 'detect bad signature' '
corrupt_graph_and_verify 0 "\0" \
"graph signature"
git fsck &&
corrupt_graph_and_verify $GRAPH_BYTE_FOOTER "\00" \
"incorrect checksum" &&
+ cp commit-graph-pre-write-test $objdir/info/commit-graph &&
test_must_fail git fsck
'
'
midx_git_two_modes () {
+ git -c core.multiPackIndex=false $1 >expect &&
+ git -c core.multiPackIndex=true $1 >actual &&
if [ "$2" = "sorted" ]
then
- git -c core.multiPackIndex=false $1 | sort >expect &&
- git -c core.multiPackIndex=true $1 | sort >actual
- else
- git -c core.multiPackIndex=false $1 >expect &&
- git -c core.multiPackIndex=true $1 >actual
+ sort <expect >expect.sorted &&
+ mv expect.sorted expect &&
+ sort <actual >actual.sorted &&
+ mv actual.sorted actual
fi &&
test_cmp expect actual
}
midx_git_two_modes "rev-list --objects --all" &&
midx_git_two_modes "log --raw" &&
midx_git_two_modes "count-objects --verbose" &&
- midx_git_two_modes "cat-file --batch-all-objects --buffer --batch-check" &&
- midx_git_two_modes "cat-file --batch-all-objects --buffer --batch-check --unsorted" sorted
+ midx_git_two_modes "cat-file --batch-all-objects --batch-check" &&
+ midx_git_two_modes "cat-file --batch-all-objects --batch-check --unordered" sorted
'
}
compare_results_with_midx "one v2 pack"
+test_expect_success 'corrupt idx not opened' '
+ idx=$(test-tool read-midx $objdir | grep "\.idx\$") &&
+ mv $objdir/pack/$idx backup-$idx &&
+ test_when_finished "mv backup-\$idx \$objdir/pack/\$idx" &&
+
+ # This is the minimum size for a sha-1 based .idx; this lets
+ # us pass perfunctory tests, but anything that actually opens and reads
+ # the idx file will complain.
+ test_copy_bytes 1064 <backup-$idx >$objdir/pack/$idx &&
+
+ git -c core.multiPackIndex=true rev-list --objects --all 2>err &&
+ test_must_be_empty err
+'
+
test_expect_success 'add more objects' '
for i in $(test_seq 6 10)
do
--- /dev/null
+#!/bin/sh
+#
+# Copyright (c) 2018 Jiang Xin
+#
+
+test_description='Test git pack-redundant
+
+In order to test git-pack-redundant, we will create a number of objects and
+packs in the repository `master.git`. The relationship between packs (P1-P8)
+and objects (T, A-R) is shown in the following chart. Objects of a pack are
+marked with the letter x, objects of redundant packs with an exclamation
+point, and the redundant pack itself with an asterisk.
+
+ | T A B C D E F G H I J K L M N O P Q R
+ ----+--------------------------------------
+ P1 | x x x x x x x x
+ P2* | ! ! ! ! ! ! !
+ P3 | x x x x x x
+ P4* | ! ! ! ! !
+ P5 | x x x x
+ P6* | ! ! !
+ P7 | x x
+ P8* | !
+ ----+--------------------------------------
+ ALL | x x x x x x x x x x x x x x x x x x x
+
+Another repository `shared.git` has unique objects (X-Z), while other objects
+(marked with the letter s) are shared through alt-odb (of `master.git`). The
+relationship between packs and objects is as follows:
+
+ | T A B C D E F G H I J K L M N O P Q R X Y Z
+ ----+----------------------------------------------
+ Px1 | s s s x x x
+ Px2 | s s s x x x
+'
+
+. ./test-lib.sh
+
+master_repo=master.git
+shared_repo=shared.git
+
+# Create commits in <repo> and assign each commit's oid to shell variables
+# given in the arguments (A, B, and C). E.g.:
+#
+# create_commits_in <repo> A B C
+#
+# NOTE: Avoid calling this function from a subshell since variable
+# assignments will disappear when the subshell exits.
+create_commits_in () {
+ repo="$1" &&
+ if ! parent=$(git -C "$repo" rev-parse HEAD^{} 2>/dev/null)
+ then
+ parent=
+ fi &&
+ T=$(git -C "$repo" write-tree) &&
+ shift &&
+ while test $# -gt 0
+ do
+ name=$1 &&
+ test_tick &&
+ if test -z "$parent"
+ then
+ oid=$(echo $name | git -C "$repo" commit-tree $T)
+ else
+ oid=$(echo $name | git -C "$repo" commit-tree -p $parent $T)
+ fi &&
+ eval $name=$oid &&
+ parent=$oid &&
+ shift ||
+ return 1
+ done &&
+ git -C "$repo" update-ref refs/heads/master $oid
+}
+
+# Create pack in <repo> and assign pack id to variable given in the 2nd argument
+# (<name>). Commits in the pack will be read from stdin. E.g.:
+#
+# create_pack_in <repo> <name> <<-EOF
+# ...
+# EOF
+#
+# NOTE: commits from stdin should be given using a heredoc, not a pipe, and
+# avoid calling this function from a subshell since variable assignments will
+# disappear when the subshell exits.
+create_pack_in () {
+ repo="$1" &&
+ name="$2" &&
+ pack=$(git -C "$repo/objects/pack" pack-objects -q pack) &&
+ eval $name=$pack &&
+ eval P$pack=$name:$pack
+}
+
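+# Read pack file paths (.pack or .idx) from stdin, reduce each to its bare
+# pack hash, drop duplicates, and print the "P<n>:<hash>" label recorded by
+# create_pack_in for that pack (or the bare hash if none is known), sorted.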
+format_packfiles () {
+ sed \
+ -e "s#.*/pack-\(.*\)\.idx#\1#" \
+ -e "s#.*/pack-\(.*\)\.pack#\1#" |
+ sort -u |
+ while read p
+ do
+ if test -z "$(eval echo \${P$p})"
+ then
+ echo $p
+ else
+ eval echo "\${P$p}"
+ fi
+ done |
+ sort
+}
+
+test_expect_success 'setup master repo' '
+ git init --bare "$master_repo" &&
+ create_commits_in "$master_repo" A B C D E F G H I J K L M N O P Q R
+'
+
+#############################################################################
+# Chart of packs and objects for this test case
+#
+# | T A B C D E F G H I J K L M N O P Q R
+# ----+--------------------------------------
+# P1 | x x x x x x x x
+# P2 | x x x x x x x
+# P3 | x x x x x x
+# ----+--------------------------------------
+# ALL | x x x x x x x x x x x x x x x
+#
+#############################################################################
+test_expect_success 'master: no redundant for pack 1, 2, 3' '
+ create_pack_in "$master_repo" P1 <<-EOF &&
+ $T
+ $A
+ $B
+ $C
+ $D
+ $E
+ $F
+ $R
+ EOF
+ create_pack_in "$master_repo" P2 <<-EOF &&
+ $B
+ $C
+ $D
+ $E
+ $G
+ $H
+ $I
+ EOF
+ create_pack_in "$master_repo" P3 <<-EOF &&
+ $F
+ $I
+ $J
+ $K
+ $L
+ $M
+ EOF
+ (
+ cd "$master_repo" &&
+ git pack-redundant --all >out &&
+ test_must_be_empty out
+ )
+'
+
+#############################################################################
+# Chart of packs and objects for this test case
+#
+# | T A B C D E F G H I J K L M N O P Q R
+# ----+--------------------------------------
+# P1 | x x x x x x x x
+# P2 | x x x x x x x
+# P3* | ! ! ! ! ! !
+# P4 | x x x x x
+# P5 | x x x x
+# ----+--------------------------------------
+# ALL | x x x x x x x x x x x x x x x x x x
+#
+#############################################################################
+test_expect_success 'master: one of pack-2/pack-3 is redundant' '
+ create_pack_in "$master_repo" P4 <<-EOF &&
+ $J
+ $K
+ $L
+ $M
+ $P
+ EOF
+ create_pack_in "$master_repo" P5 <<-EOF &&
+ $G
+ $H
+ $N
+ $O
+ EOF
+ (
+ cd "$master_repo" &&
+ cat >expect <<-EOF &&
+ P3:$P3
+ EOF
+ git pack-redundant --all >out &&
+ format_packfiles <out >actual &&
+ test_cmp expect actual
+ )
+'
+
+#############################################################################
+# Chart of packs and objects for this test case
+#
+# | T A B C D E F G H I J K L M N O P Q R
+# ----+--------------------------------------
+# P1 | x x x x x x x x
+# P2* | ! ! ! ! ! ! !
+# P3 | x x x x x x
+# P4* | ! ! ! ! !
+# P5 | x x x x
+# P6* | ! ! !
+# P7 | x x
+# ----+--------------------------------------
+# ALL | x x x x x x x x x x x x x x x x x x x
+#
+#############################################################################
+test_expect_success 'master: pack 2, 4, and 6 are redundant' '
+ create_pack_in "$master_repo" P6 <<-EOF &&
+ $N
+ $O
+ $Q
+ EOF
+ create_pack_in "$master_repo" P7 <<-EOF &&
+ $P
+ $Q
+ EOF
+ (
+ cd "$master_repo" &&
+ cat >expect <<-EOF &&
+ P2:$P2
+ P4:$P4
+ P6:$P6
+ EOF
+ git pack-redundant --all >out &&
+ format_packfiles <out >actual &&
+ test_cmp expect actual
+ )
+'
+
+#############################################################################
+# Chart of packs and objects for this test case
+#
+# | T A B C D E F G H I J K L M N O P Q R
+# ----+--------------------------------------
+# P1 | x x x x x x x x
+# P2* | ! ! ! ! ! ! !
+# P3 | x x x x x x
+# P4* | ! ! ! ! !
+# P5 | x x x x
+# P6* | ! ! !
+# P7 | x x
+# P8* | !
+# ----+--------------------------------------
+# ALL | x x x x x x x x x x x x x x x x x x x
+#
+#############################################################################
+test_expect_success 'master: pack-8 (subset of pack-1) is also redundant' '
+ create_pack_in "$master_repo" P8 <<-EOF &&
+ $A
+ EOF
+ (
+ cd "$master_repo" &&
+ cat >expect <<-EOF &&
+ P2:$P2
+ P4:$P4
+ P6:$P6
+ P8:$P8
+ EOF
+ git pack-redundant --all >out &&
+ format_packfiles <out >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'master: clean loose objects' '
+ (
+ cd "$master_repo" &&
+ git prune-packed &&
+ find objects -type f | sed -e "/objects\/pack\//d" >out &&
+ test_must_be_empty out
+ )
+'
+
+test_expect_success 'master: remove redundant packs and pass fsck' '
+ (
+ cd "$master_repo" &&
+ git pack-redundant --all | xargs rm &&
+ git fsck &&
+ git pack-redundant --all >out &&
+ test_must_be_empty out
+ )
+'
+
+# The following test cases will execute inside `shared.git` instead of
+# `master.git`.
+test_expect_success 'setup shared.git' '
+ git clone --mirror "$master_repo" "$shared_repo" &&
+ (
+ cd "$shared_repo" &&
+ printf "../../$master_repo/objects\n" >objects/info/alternates
+ )
+'
+
+test_expect_success 'shared: all packs are redundant, but no output without --alt-odb' '
+ (
+ cd "$shared_repo" &&
+ git pack-redundant --all >out &&
+ test_must_be_empty out
+ )
+'
+
+#############################################################################
+# Chart of packs and objects for this test case
+#
+# ================ master.git ===============
+# | T A B C D E F G H I J K L M N O P Q R <----------+
+# ----+-------------------------------------- |
+# P1 | x x x x x x x x |
+# P3 | x x x x x x |
+# P5 | x x x x |
+# P7 | x x |
+# ----+-------------------------------------- |
+# ALL | x x x x x x x x x x x x x x x x x x x |
+# |
+# |
+# ================ shared.git =============== |
+# | T A B C D E F G H I J K L M N O P Q R <objects/info/alternates>
+# ----+--------------------------------------
+# P1* | s s s s s s s s
+# P3* | s s s s s s
+# P5* | s s s s
+# P7* | s s
+# ----+--------------------------------------
+# ALL | x x x x x x x x x x x x x x x x x x x
+#
+#############################################################################
+test_expect_success 'shared: show redundant packs in stderr for verbose mode' '
+ (
+ cd "$shared_repo" &&
+ cat >expect <<-EOF &&
+ P1:$P1
+ P3:$P3
+ P5:$P5
+ P7:$P7
+ EOF
+ git pack-redundant --all --verbose >out 2>out.err &&
+ test_must_be_empty out &&
+ grep "pack$" out.err | format_packfiles >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'shared: remove redundant packs, no packs left' '
+ (
+ cd "$shared_repo" &&
+ cat >expect <<-EOF &&
+ fatal: Zero packs found!
+ EOF
+ git pack-redundant --all --alt-odb | xargs rm &&
+ git fsck &&
+ test_must_fail git pack-redundant --all --alt-odb >actual 2>&1 &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'shared: create new objects and packs' '
+ create_commits_in "$shared_repo" X Y Z &&
+ create_pack_in "$shared_repo" Px1 <<-EOF &&
+ $X
+ $Y
+ $Z
+ $A
+ $B
+ $C
+ EOF
+ create_pack_in "$shared_repo" Px2 <<-EOF
+ $X
+ $Y
+ $Z
+ $D
+ $E
+ $F
+ EOF
+'
+
+test_expect_success 'shared: no redundant packs without --alt-odb' '
+ (
+ cd "$shared_repo" &&
+ git pack-redundant --all >out &&
+ test_must_be_empty out
+ )
+'
+
+#############################################################################
+# Chart of packs and objects for this test case
+#
+# ================ master.git ===============
+# | T A B C D E F G H I J K L M N O P Q R <----------------+
+# ----+-------------------------------------- |
+# P1 | x x x x x x x x |
+# P3 | x x x x x x |
+# P5 | x x x x |
+# P7 | x x |
+# ----+-------------------------------------- |
+# ALL | x x x x x x x x x x x x x x x x x x x |
+# |
+# |
+# ================ shared.git ======================= |
+# | T A B C D E F G H I J K L M N O P Q R X Y Z <objects/info/alternates>
+# ----+----------------------------------------------
+# Px1 | s s s x x x
+# Px2*| s s s ! ! !
+# ----+----------------------------------------------
+# ALL | s s s s s s s s s s s s s s s s s s s x x x
+#
+#############################################################################
+test_expect_success 'shared: one pack is redundant with --alt-odb' '
+ (
+ cd "$shared_repo" &&
+ git pack-redundant --all --alt-odb >out &&
+ format_packfiles <out >actual &&
+ test_line_count = 1 actual
+ )
+'
+
+#############################################################################
+# Chart of packs and objects for this test case
+#
+# ================ master.git ===============
+# | T A B C D E F G H I J K L M N O P Q R <----------------+
+# ----+-------------------------------------- |
+# P1 | x x x x x x x x |
+# P3 | x x x x x x |
+# P5 | x x x x |
+# P7 | x x |
+# ----+-------------------------------------- |
+# ALL | x x x x x x x x x x x x x x x x x x x |
+# |
+# |
+# ================ shared.git ======================= |
+# | T A B C D E F G H I J K L M N O P Q R X Y Z <objects/info/alternates>
+# ----+----------------------------------------------
+# Px1*| s s s i i i
+# Px2*| s s s i i i
+# ----+----------------------------------------------
+# ALL | s s s s s s s s s s s s s s s s s s s i i i
+# (ignored objects, marked with i)
+#
+#############################################################################
+test_expect_success 'shared: ignore unique objects and both packs are redundant' '
+ (
+ cd "$shared_repo" &&
+ cat >expect <<-EOF &&
+ Px1:$Px1
+ Px2:$Px2
+ EOF
+ git pack-redundant --all --alt-odb >out <<-EOF &&
+ $X
+ $Y
+ $Z
+ EOF
+ format_packfiles <out >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_done
$shared .have
EOF
- GIT_TRACE_PACKET=$(pwd)/trace \
+ GIT_TRACE_PACKET=$(pwd)/trace GIT_TEST_PROTOCOL_VERSION= \
git push \
--receive-pack="unset GIT_TRACE_PACKET; git-receive-pack" \
fork HEAD:foo &&
test_expect_success 'git rebase with implicit use of interactive backend' '
git reset --hard D &&
clear_hook_input &&
- test_must_fail git rebase --keep --onto A B &&
+ test_must_fail git rebase --keep-empty --onto A B &&
echo C > foo &&
git add foo &&
git rebase --continue &&
test_expect_success 'git rebase --skip with implicit use of interactive backend' '
git reset --hard D &&
clear_hook_input &&
- test_must_fail git rebase --keep --onto A B &&
+ test_must_fail git rebase --keep-empty --onto A B &&
test_must_fail git rebase --skip &&
echo D > foo &&
git add foo &&
test_commit -C server 6 &&
git init client &&
- test_must_fail git -C client fetch-pack ../server \
+ # Some protocol versions (e.g. 2) support fetching
+ # unadvertised objects, so restrict this test to v0.
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= git -C client fetch-pack ../server \
$(git -C server rev-parse refs/heads/master^) 2>err &&
test_i18ngrep "Server does not allow request for unadvertised object" err
'
fetch_filter_blob_limit_zero "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server"
'
-stop_httpd
-
-
test_done
test -s "$1" &&
perl -alne '
next unless $F[1] eq "upload-pack<";
- last if $F[2] eq "0000";
+ next unless $F[2] eq "want";
print $F[2], " ", $F[3];
' "$1"
}
check_negotiation_tip
'
-stop_httpd
-
test_done
$(git rev-parse refs/tags/mark1.10) refs/tags/mark1.10
$(git rev-parse refs/tags/mark1.2) refs/tags/mark1.2
EOF
- git ls-remote --symref >actual &&
+ # Protocol v2 supports sending symrefs for refs other than HEAD, so use
+ # protocol v0 here.
+ GIT_TEST_PROTOCOL_VERSION= git ls-remote --symref >actual &&
test_cmp expect actual
'
ref: refs/heads/master HEAD
1bd44cb9d13204b0fe1958db0082f5028a16eb3a HEAD
EOF
- git ls-remote --symref . HEAD >actual &&
+ # Protocol v2 supports sending symrefs for refs other than HEAD, so use
+ # protocol v0 here.
+ GIT_TEST_PROTOCOL_VERSION= git ls-remote --symref . HEAD >actual &&
test_cmp expect actual
'
1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/heads/foo
1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/heads/master
EOF
- git ls-remote --symref --heads . >actual &&
+ # Protocol v2 supports sending symrefs for refs other than HEAD, so use
+ # protocol v0 here.
+ GIT_TEST_PROTOCOL_VERSION= git ls-remote --symref --heads . >actual &&
test_cmp expect actual
'
1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/heads/foo
1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/heads/master
EOF
- git ls-remote --symref --heads . >actual &&
+ # Protocol v2 supports sending symrefs for refs other than HEAD, so use
+ # protocol v0 here.
+ GIT_TEST_PROTOCOL_VERSION= git ls-remote --symref --heads . >actual &&
test_cmp expect actual &&
- git ls-remote --symref . "refs/heads/*" >actual &&
+ GIT_TEST_PROTOCOL_VERSION= git ls-remote --symref . "refs/heads/*" >actual &&
test_cmp expect actual
'
test_description='Merge logic in fetch'
+# NEEDSWORK: If the overspecification of the expected result is reduced, we
+# might be able to run this test in all protocol versions.
+GIT_TEST_PROTOCOL_VERSION=
+
. ./test-lib.sh
LF='
git prune &&
test_must_fail git cat-file -t $the_commit &&
+ # Some protocol versions (e.g. 2) support fetching
+ # unadvertised objects, so restrict this test to v0.
+
# fetching the hidden object should fail by default
- test_must_fail git fetch -v ../testrepo $the_commit:refs/heads/copy 2>err &&
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
+ git fetch -v ../testrepo $the_commit:refs/heads/copy 2>err &&
test_i18ngrep "Server does not allow request for unadvertised object" err &&
test_must_fail git rev-parse --verify refs/heads/copy &&
mk_empty shallow &&
(
cd shallow &&
- test_must_fail git fetch --depth=1 ../testrepo/.git $SHA1 &&
+ # Some protocol versions (e.g. 2) support fetching
+ # unadvertised objects, so restrict this test to v0.
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
+ git fetch --depth=1 ../testrepo/.git $SHA1 &&
git --git-dir=../testrepo/.git config uploadpack.allowreachablesha1inwant true &&
git fetch --depth=1 ../testrepo/.git $SHA1 &&
git cat-file commit $SHA1
mk_empty shallow &&
(
cd shallow &&
- test_must_fail ok=sigpipe git fetch ../testrepo/.git $SHA1_3 &&
- test_must_fail ok=sigpipe git fetch ../testrepo/.git $SHA1_1 &&
+ # Some protocol versions (e.g. 2) support fetching
+ # unadvertised objects, so restrict this test to v0.
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
+ git fetch ../testrepo/.git $SHA1_3 &&
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
+ git fetch ../testrepo/.git $SHA1_1 &&
git --git-dir=../testrepo/.git config uploadpack.allowreachablesha1inwant true &&
git fetch ../testrepo/.git $SHA1_1 &&
git cat-file commit $SHA1_1 &&
test_must_fail git cat-file commit $SHA1_2 &&
git fetch ../testrepo/.git $SHA1_2 &&
git cat-file commit $SHA1_2 &&
- test_must_fail ok=sigpipe git fetch ../testrepo/.git $SHA1_3
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
+ git fetch ../testrepo/.git $SHA1_3 2>err &&
+ test_i18ngrep "remote error:.*not our ref.*$SHA1_3\$" err
)
'
done
test_cmp expect actual
'
+test_expect_success 'peeled advertisements are not considered ref tips' '
+ mk_empty testrepo &&
+ git -C testrepo commit --allow-empty -m one &&
+ git -C testrepo commit --allow-empty -m two &&
+ git -C testrepo tag -m foo mytag HEAD^ &&
+ oid=$(git -C testrepo rev-parse mytag^{commit}) &&
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
+ git fetch testrepo $oid 2>err &&
+ test_i18ngrep "Server does not allow request for unadvertised object" err
+'
+
test_expect_success 'pushing a specific ref applies remote.$name.push as refmap' '
mk_test testrepo heads/master &&
rm -fr src dst &&
test_cmp expect actual
'
-test_expect_success 'push --follow-tag only pushes relevant tags' '
+test_expect_success 'push --follow-tags only pushes relevant tags' '
mk_test testrepo heads/master &&
rm -fr src dst &&
git init src &&
git tag -m "future" future &&
git checkout master &&
git for-each-ref refs/heads/master refs/tags/tag >../expect &&
- git push --follow-tag ../dst master
+ git push --follow-tags ../dst master
) &&
(
cd dst &&
test_must_be_empty out &&
test -s err)
'
+test_expect_success 'git pull --cleanup errors early on invalid argument' '
+ mkdir clonedcleanup &&
+ (cd clonedcleanup && git init &&
+ test_must_fail git pull --cleanup invalid "../parent" >out 2>err &&
+ test_must_be_empty out &&
+ test -s err)
+'
+
test_expect_success 'git pull --force' '
mkdir clonedoldstyle &&
grep "bad tree object" output.err
'
-test_expect_success 'upload-pack error message when bad ref requested' '
+test_expect_success 'upload-pack fails due to bad want (no object)' '
printf "0045want %s multi_ack_detailed\n00000009done\n0000" \
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef" >input &&
test_must_fail git upload-pack . <input >output 2>output.err &&
- grep -q "not our ref" output.err &&
- ! grep -q multi_ack_detailed output.err
+ grep "not our ref" output.err &&
+ grep "ERR" output &&
+ ! grep multi_ack_detailed output.err
+'
+
+test_expect_success 'upload-pack fails due to bad want (not tip)' '
+
+ oid=$(echo an object we have | git hash-object -w --stdin) &&
+ printf "0045want %s multi_ack_detailed\n00000009done\n0000" \
+ "$oid" >input &&
+ test_must_fail git upload-pack . <input >output 2>output.err &&
+ grep "not our ref" output.err &&
+ grep "ERR" output &&
+ ! grep multi_ack_detailed output.err
'
test_expect_success 'upload-pack fails due to error in pack-objects enumeration' '
) &&
git add b &&
git commit -m "added submodule" &&
- git push --recurse-submodule=check origin master
+ git push --recurse-submodules=check origin master
)
'
git -C client fsck
'
-stop_httpd
-
test_done
cd clone &&
git checkout --orphan newnew &&
test_commit new-too &&
- GIT_TRACE_PACKET="$TRASH_DIRECTORY/trace" git fetch --depth=2 &&
+ # NEEDSWORK: If the overspecification of the expected result is reduced, we
+ # might be able to run this test in all protocol versions.
+ GIT_TRACE_PACKET="$TRASH_DIRECTORY/trace" GIT_TEST_PROTOCOL_VERSION= \
+ git fetch --depth=2 &&
grep "fetch-pack< ACK .* ready" ../trace &&
! grep "fetch-pack> done" ../trace
)
)
'
-stop_httpd
test_done
test_cmp expect actual
'
-stop_httpd
-
test_done
cd "$ROOT_PATH" &&
git clone $HTTPD_URL/smart/test_repo.git/ test_repo_clone &&
- check_access_log exp
+ # NEEDSWORK: If the overspecification of the expected result is reduced, we
+ # might be able to run this test in all protocol versions.
+ if test -z "$GIT_TEST_PROTOCOL_VERSION"
+ then
+ check_access_log exp
+ fi
'
test_expect_success 'clone remote repository' '
POST /smart/test_repo.git/git-receive-pack HTTP/1.1 200
EOF
test_expect_success 'used receive-pack service' '
- check_access_log exp
+ # NEEDSWORK: If the overspecification of the expected result is reduced, we
+ # might be able to run this test in all protocol versions.
+ if test -z "$GIT_TEST_PROTOCOL_VERSION"
+ then
+ check_access_log exp
+ fi
'
test_http_push_nonff "$HTTPD_DOCUMENT_ROOT_PATH"/test_repo.git \
test_i18ngrep ! "^hint: " decoded
'
-stop_httpd
test_done
)
'
-stop_httpd
test_done
test_cmp expect "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git/hooks/pre-receive.push_options
'
-stop_httpd
-
test_done
test_i18ngrep "unable to access.*/redir-to/502" stderr
'
-stop_httpd
+test_expect_success 'fetching via http alternates works' '
+ parent=$HTTPD_DOCUMENT_ROOT_PATH/alt-parent.git &&
+ git init --bare "$parent" &&
+ git -C "$parent" --work-tree=. commit --allow-empty -m foo &&
+ git -C "$parent" update-server-info &&
+ commit=$(git -C "$parent" rev-parse HEAD) &&
+
+ child=$HTTPD_DOCUMENT_ROOT_PATH/alt-child.git &&
+ git init --bare "$child" &&
+ echo "../../alt-parent.git/objects" >"$child/objects/info/alternates" &&
+ git -C "$child" update-ref HEAD $commit &&
+ git -C "$child" update-server-info &&
+
+ git -c http.followredirects=true clone "$HTTPD_URL/dumb/alt-child.git"
+'
+
test_done
< Cache-Control: no-cache, max-age=0, must-revalidate
< Content-Type: application/x-git-upload-pack-result
EOF
- GIT_TRACE_CURL=true git clone --quiet $HTTPD_URL/smart/repo.git clone 2>err &&
+ GIT_TRACE_CURL=true GIT_TEST_PROTOCOL_VERSION= \
+ git clone --quiet $HTTPD_URL/smart/repo.git clone 2>err &&
test_cmp file clone/file &&
tr '\''\015'\'' Q <err |
sed -e "
/^< Content-Length: /d
/^< Transfer-Encoding: /d
" >actual &&
- sed -e "s/^> Accept-Encoding: .*/> Accept-Encoding: ENCODINGS/" \
- actual >actual.smudged &&
- test_cmp exp actual.smudged &&
- grep "Accept-Encoding:.*gzip" actual >actual.gzip &&
- test_line_count = 2 actual.gzip
+ # NEEDSWORK: If the overspecification of the expected result is reduced, we
+ # might be able to run this test in all protocol versions.
+ if test -z "$GIT_TEST_PROTOCOL_VERSION"
+ then
+ sed -e "s/^> Accept-Encoding: .*/> Accept-Encoding: ENCODINGS/" \
+ actual >actual.smudged &&
+ test_cmp exp actual.smudged &&
+
+ grep "Accept-Encoding:.*gzip" actual >actual.gzip &&
+ test_line_count = 2 actual.gzip
+ fi
'
test_expect_success 'fetch changes via http' '
GET /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
POST /smart/repo.git/git-upload-pack HTTP/1.1 200
EOF
- check_access_log exp
+
+ # NEEDSWORK: If the overspecification of the expected result is reduced, we
+ # might be able to run this test in all protocol versions.
+ if test -z "$GIT_TEST_PROTOCOL_VERSION"
+ then
+ check_access_log exp
+ fi
'
test_expect_success 'follow redirects (301)' '
test_expect_success 'no-op half-auth fetch does not require a password' '
set_askpass wrong &&
- git --git-dir=half-auth fetch &&
+
+ # NEEDSWORK: When using HTTP(S), protocol v0 supports a "half-auth"
+ # configuration with authentication required only when downloading
+ # objects and not refs, by having the HTTP server only require
+ # authentication for the "git-upload-pack" path and not "info/refs".
+ # This is not possible with protocol v2, since both objects and refs
+ # are obtained from the "git-upload-pack" path. A solution to this is
+ # to teach the server and client to be able to inline ls-refs requests
+ # as an Extra Parameter (see pack-protocol.txt), so that "info/refs"
+ # can serve refs, just like it does in protocol v0.
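+	# (An "Extra Parameter" is one of the key=value items a client may
+	# append to its initial request -- over HTTP they are carried in the
+	# Git-Protocol header, e.g. "version=2" -- so an inlined ls-refs
+	# request would presumably travel the same way.)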
+ GIT_TEST_PROTOCOL_VERSION=0 git --git-dir=half-auth fetch &&
expect_askpass none
'
git config http.cookiefile cookies.txt &&
git config http.savecookies true &&
git ls-remote $HTTPD_URL/smart_cookies/repo.git master &&
- tail -3 cookies.txt | sort >cookies_tail.txt &&
- test_cmp expect_cookies.txt cookies_tail.txt
+
+ # NEEDSWORK: If the overspecification of the expected result is reduced, we
+ # might be able to run this test in all protocol versions.
+ if test -z "$GIT_TEST_PROTOCOL_VERSION"
+ then
+ tail -3 cookies.txt | sort >cookies_tail.txt &&
+ test_cmp expect_cookies.txt cookies_tail.txt
+ fi
'
test_expect_success 'transfer.hiderefs works over smart-http' '
git init --bare test_reachable.git &&
git -C test_reachable.git remote add origin "$HTTPD_URL/smart/repo.git" &&
- test_must_fail git -C test_reachable.git fetch origin "$(git rev-parse HEAD)"
+ # Some protocol versions (e.g. 2) support fetching
+ # unadvertised objects, so restrict this test to v0.
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
+ git -C test_reachable.git fetch origin "$(git rev-parse HEAD)"
'
test_expect_success 'test allowanysha1inwant with unreachable' '
git init --bare test_reachable.git &&
git -C test_reachable.git remote add origin "$HTTPD_URL/smart/repo.git" &&
- test_must_fail git -C test_reachable.git fetch origin "$(git rev-parse HEAD)" &&
+ # Some protocol versions (e.g. 2) support fetching
+ # unadvertised objects, so restrict this test to v0.
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
+ git -C test_reachable.git fetch origin "$(git rev-parse HEAD)" &&
git -C "$server" config uploadpack.allowanysha1inwant 1 &&
git -C test_reachable.git fetch origin "$(git rev-parse HEAD)"
grep "server-side error" actual
'
-stop_httpd
test_done
# not need to send any ancestors of "c3", but we still need to send "c3"
# itself.
test_config -C client fetch.negotiationalgorithm skipping &&
- trace_fetch client origin to_fetch &&
+
+ # The ref advertisement itself is filtered when protocol v2 is used, so
+ # use v0.
+ GIT_TEST_PROTOCOL_VERSION= trace_fetch client origin to_fetch &&
have_sent c5 c4^ c2side &&
have_not_sent c4 c4^^ c4^^^
'
check_access_log exp
'
-stop_httpd
test_done
test_expect_success 'fetch notices corrupt idx' '
cp -R "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_pack.git "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
(cd "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
+ rm -f objects/pack/multi-pack-index &&
p=$(ls objects/pack/pack-*.idx) &&
chmod u+w $p &&
printf %0256d 0 | dd of=$p bs=256 count=1 seek=1 conv=notrunc
test_cmp expect actual
'
-stop_git_daemon
test_done
grep "< HTTP/1.1 500 Intentional Breakage" curl_log
'
-stop_httpd
-
test_done
}
test_expect_success 'clone myhost:src uses ssh' '
- git clone myhost:src ssh-clone &&
+ GIT_TEST_PROTOCOL_VERSION=0 git clone myhost:src ssh-clone &&
expect_ssh myhost src
'
'
test_expect_success 'bracketed hostnames are still ssh' '
- git clone "[myhost:123]:src" ssh-bracket-clone &&
+ GIT_TEST_PROTOCOL_VERSION=0 git clone "[myhost:123]:src" ssh-bracket-clone &&
expect_ssh "-p 123" myhost src
'
test_expect_success 'OpenSSH variant passes -4' '
- git clone -4 "[myhost:123]:src" ssh-ipv4-clone &&
+ GIT_TEST_PROTOCOL_VERSION=0 git clone -4 "[myhost:123]:src" ssh-ipv4-clone &&
expect_ssh "-4 -p 123" myhost src
'
test_when_finished "rm -f \"\$TRASH_DIRECTORY/uplink\"" &&
GIT_SSH="$TRASH_DIRECTORY/uplink" &&
test_when_finished "GIT_SSH=\"\$TRASH_DIRECTORY/ssh\$X\"" &&
- git clone "[myhost:123]:src" ssh-bracket-clone-sshlike-uplink &&
+ GIT_TEST_PROTOCOL_VERSION=0 git clone "[myhost:123]:src" ssh-bracket-clone-sshlike-uplink &&
expect_ssh "-p 123" myhost src
'
test_expect_success 'GIT_SSH_VARIANT overrides plink detection' '
copy_ssh_wrapper_as "$TRASH_DIRECTORY/plink" &&
- GIT_SSH_VARIANT=ssh \
- git clone "[myhost:123]:src" ssh-bracket-clone-variant-1 &&
+ GIT_TEST_PROTOCOL_VERSION=0 GIT_SSH_VARIANT=ssh \
+ git clone "[myhost:123]:src" ssh-bracket-clone-variant-1 &&
expect_ssh "-p 123" myhost src
'
test_expect_success 'ssh.variant overrides plink detection' '
copy_ssh_wrapper_as "$TRASH_DIRECTORY/plink" &&
- git -c ssh.variant=ssh \
+ GIT_TEST_PROTOCOL_VERSION=0 git -c ssh.variant=ssh \
clone "[myhost:123]:src" ssh-bracket-clone-variant-2 &&
expect_ssh "-p 123" myhost src
'
# $3 path
test_clone_url () {
counter=$(($counter + 1))
- test_might_fail git clone "$1" tmp$counter &&
+ test_might_fail env GIT_TEST_PROTOCOL_VERSION=0 git clone "$1" tmp$counter &&
shift &&
expect_ssh "$@"
}
git -C replay.git index-pack -v --stdin <tmp.pack
'
-hex2oct () {
- perl -ne 'printf "\\%03o", hex for /../g'
-}
-
test_expect_success 'clone on case-insensitive fs' '
git init icasefs &&
(
partial_clone "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server"
'
-stop_httpd
-
test_done
! test -e "$HTTPD_ROOT_PATH/one-time-sed"
'
-stop_httpd
-
test_done
TEST_NO_CREATE_REPO=1
+# This is a protocol-specific test.
+GIT_TEST_PROTOCOL_VERSION=
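+# Assigning the empty string here neutralizes any GIT_TEST_PROTOCOL_VERSION
+# exported by the caller, so each command below sees only the protocol
+# version it requests explicitly.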
+
. ./test-lib.sh
# Test protocol v1 with 'git://' transport
grep "git< version 1" log
'
-stop_httpd
-
test_done
#!/bin/sh
-test_description='test git-serve and server commands'
+test_description='test protocol v2 server commands'
. ./test-lib.sh
0000
EOF
- GIT_TEST_SIDEBAND_ALL=0 git serve --advertise-capabilities >out &&
+ GIT_TEST_SIDEBAND_ALL=0 test-tool serve-v2 \
+ --advertise-capabilities >out &&
test-tool pkt-line unpack <out >actual &&
test_cmp expect actual
'
test-tool pkt-line pack >in <<-EOF &&
0000
EOF
- git serve --stateless-rpc >out <in &&
+ test-tool serve-v2 --stateless-rpc >out <in &&
test_must_be_empty out &&
# EOF
- git serve --stateless-rpc >out &&
+ test-tool serve-v2 --stateless-rpc >out &&
test_must_be_empty out
'
foobar
0000
EOF
- test_must_fail git serve --stateless-rpc 2>err <in &&
+ test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in &&
test_i18ngrep "unknown capability" err
'
agent=git/test
0000
EOF
- test_must_fail git serve --stateless-rpc 2>err <in &&
+ test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in &&
test_i18ngrep "no command requested" err
'
agent=git/test
0000
EOF
- test_must_fail git serve --stateless-rpc 2>err <in &&
+ test_must_fail test-tool serve-v2 --stateless-rpc 2>err <in &&
test_i18ngrep "invalid command" err
'
0000
EOF
- git serve --stateless-rpc <in >out &&
+ test-tool serve-v2 --stateless-rpc <in >out &&
test-tool pkt-line unpack <out >actual &&
test_cmp expect actual
'
0000
EOF
- git serve --stateless-rpc <in >out &&
+ test-tool serve-v2 --stateless-rpc <in >out &&
test-tool pkt-line unpack <out >actual &&
test_cmp expect actual
'
0000
EOF
- git serve --stateless-rpc <in >out &&
+ test-tool serve-v2 --stateless-rpc <in >out &&
test-tool pkt-line unpack <out >actual &&
test_cmp expect actual
'
0000
EOF
- git serve --stateless-rpc <in >out &&
+ test-tool serve-v2 --stateless-rpc <in >out &&
test-tool pkt-line unpack <out >actual &&
test_cmp expect actual
'
0000
EOF
- git serve --stateless-rpc <in >out &&
+ test-tool serve-v2 --stateless-rpc <in >out &&
test-tool pkt-line unpack <out >actual &&
test_cmp expect actual
'
0000
EOF
- git serve --stateless-rpc <in >out &&
+ test-tool serve-v2 --stateless-rpc <in >out &&
test-tool pkt-line unpack <out >actual &&
test_cmp expect actual
'
0000
EOF
- test_must_fail git -C server serve --stateless-rpc <in >/dev/null 2>err &&
+ (
+ cd server &&
+ test_must_fail test-tool serve-v2 --stateless-rpc
+ ) <in >/dev/null 2>err &&
grep "unexpected line: .this-is-not-a-command." err
'
grep "server-option=world" log
'
+test_expect_success 'warn if using server-option with ls-remote with legacy protocol' '
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION=0 git -c protocol.version=0 \
+ ls-remote -o hello -o world "file://$(pwd)/file_parent" master 2>err &&
+
+ test_i18ngrep "see protocol.version in" err &&
+ test_i18ngrep "server options require protocol version 2 or later" err
+'
test_expect_success 'clone with file:// using protocol v2' '
test_when_finished "rm -f log" &&
grep "server-option=world" log
'
+test_expect_success 'warn if using server-option with fetch with legacy protocol' '
+ test_when_finished "rm -rf temp_child" &&
+
+ git init temp_child &&
+
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION=0 git -C temp_child -c protocol.version=0 \
+ fetch -o hello -o world "file://$(pwd)/file_parent" master 2>err &&
+
+ test_i18ngrep "see protocol.version in" err &&
+ test_i18ngrep "server options require protocol version 2 or later" err
+'
+
+test_expect_success 'server-options are sent when cloning' '
+ test_when_finished "rm -rf log myclone" &&
+
+ GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+ clone --server-option=hello --server-option=world \
+ "file://$(pwd)/file_parent" myclone &&
+
+ grep "server-option=hello" log &&
+ grep "server-option=world" log
+'
+
+test_expect_success 'warn if using server-option with clone with legacy protocol' '
+ test_when_finished "rm -rf myclone" &&
+
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION=0 git -c protocol.version=0 \
+ clone --server-option=hello --server-option=world \
+ "file://$(pwd)/file_parent" myclone 2>err &&
+
+ test_i18ngrep "see protocol.version in" err &&
+ test_i18ngrep "server options require protocol version 2 or later" err
+'
+
test_expect_success 'upload-pack respects config using protocol v2' '
git init server &&
write_script server/.git/hook <<-\EOF &&
0000
EOF
- test_must_fail git -C server serve --stateless-rpc <in >/dev/null 2>err &&
+ test_must_fail test-tool -C server serve-v2 --stateless-rpc \
+ <in >/dev/null 2>err &&
grep "unexpected line: .filter blob:none." err &&
# Exercise to ensure that if advertised, filter works
git -C server config uploadpack.allowfilter 1 &&
- git -C server serve --stateless-rpc <in >/dev/null
+ test-tool -C server serve-v2 --stateless-rpc <in >/dev/null
'
test_expect_success 'default refspec is used to filter ref when fetching' '
# Client requested to use protocol v2
grep "Git-Protocol: version=2" log &&
# Server responded using protocol v2
- grep "git< version 2" log
+ grep "git< version 2" log &&
+ # Verify that the chunked encoding sending codepath is NOT exercised
+ ! grep "Send header: Transfer-Encoding: chunked" log
+'
+
+test_expect_success 'clone big repository with http:// using protocol v2' '
+ test_when_finished "rm -f log" &&
+
+ git init "$HTTPD_DOCUMENT_ROOT_PATH/big" &&
+ # Ensure that the list of wants is greater than http.postbuffer below
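+	# (POST bodies larger than http.postbuffer are sent with
+	# "Transfer-Encoding: chunked" by the smart HTTP transport)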
+ for i in $(test_seq 1 1500)
+ do
+ # do not use here-doc, because it requires a process
+ # per loop iteration
+ echo "commit refs/heads/too-many-refs-$i" &&
+ echo "committer git <git@example.com> $i +0000" &&
+ echo "data 0" &&
+ echo "M 644 inline bla.txt" &&
+ echo "data 4" &&
+ echo "bla"
+ done | git -C "$HTTPD_DOCUMENT_ROOT_PATH/big" fast-import &&
+
+ GIT_TRACE_PACKET="$(pwd)/log" GIT_TRACE_CURL="$(pwd)/log" git \
+ -c protocol.version=2 -c http.postbuffer=65536 \
+ clone "$HTTPD_URL/smart/big" big_child &&
+
+ # Client requested to use protocol v2
+ grep "Git-Protocol: version=2" log &&
+ # Server responded using protocol v2
+ grep "git< version 2" log &&
+ # Verify that the chunked encoding sending codepath is exercised
+ grep "Send header: Transfer-Encoding: chunked" log
'
test_expect_success 'fetch with http:// using protocol v2' '
test_i18ngrep "expected no other sections to be sent after no .ready." err
'
-stop_httpd
-
test_done
'
test_expect_success 'config controls ref-in-want advertisement' '
- git serve --advertise-capabilities >out &&
+ test-tool serve-v2 --advertise-capabilities >out &&
! grep -a ref-in-want out &&
git config uploadpack.allowRefInWant false &&
- git serve --advertise-capabilities >out &&
+ test-tool serve-v2 --advertise-capabilities >out &&
! grep -a ref-in-want out &&
git config uploadpack.allowRefInWant true &&
- git serve --advertise-capabilities >out &&
+ test-tool serve-v2 --advertise-capabilities >out &&
grep -a ref-in-want out
'
0000
EOF
- test_must_fail git serve --stateless-rpc 2>out <in &&
+ test_must_fail test-tool serve-v2 --stateless-rpc 2>out <in &&
grep "unknown ref" out
'
0000
EOF
- git serve --stateless-rpc >out <in &&
+ test-tool serve-v2 --stateless-rpc >out <in &&
check_output
'
0000
EOF
- git serve --stateless-rpc >out <in &&
+ test-tool serve-v2 --stateless-rpc >out <in &&
check_output
'
0000
EOF
- git serve --stateless-rpc >out <in &&
+ test-tool serve-v2 --stateless-rpc >out <in &&
check_output
'
0000
EOF
- git serve --stateless-rpc >out <in &&
+ test-tool serve-v2 --stateless-rpc >out <in &&
check_output
'
test_i18ngrep "fatal: remote error: unknown ref refs/heads/raster" err
'
-stop_httpd
-
REPO="$(pwd)/repo"
LOCAL_PRISTINE="$(pwd)/local_pristine"
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-gpg.sh
+PATH="$TEST_DIRECTORY/t5801:$PATH"
+
compare_refs() {
git --git-dir="$1/.git" rev-parse --verify $2 >expect &&
git --git-dir="$3/.git" rev-parse --verify $4 >actual &&
--- /dev/null
+#!/bin/sh
+# Copyright (c) 2012 Felipe Contreras
+
+# The first argument can be a URL when fetch/push was invoked with a URL
+# instead of a configured remote. In that case, use a generic alias.
+if test "$1" = "testgit::$2"; then
+ alias=_
+else
+ alias=$1
+fi
+url=$2
+
+dir="$GIT_DIR/testgit/$alias"
+prefix="refs/testgit/$alias"
+
+default_refspec="refs/heads/*:${prefix}/heads/*"
+
+refspec="${GIT_REMOTE_TESTGIT_REFSPEC-$default_refspec}"
+
+test -z "$refspec" && prefix="refs"
+
+GIT_DIR="$url/.git"
+export GIT_DIR
+
+force=
+
+mkdir -p "$dir"
+
+if test -z "$GIT_REMOTE_TESTGIT_NO_MARKS"
+then
+ gitmarks="$dir/git.marks"
+ testgitmarks="$dir/testgit.marks"
+ test -e "$gitmarks" || >"$gitmarks"
+ test -e "$testgitmarks" || >"$testgitmarks"
+fi
+
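+# Main remote-helper loop: git feeds commands ("capabilities", "list",
+# "import", "export", "option ...") on stdin, one per line, and we reply on
+# stdout as described in the gitremote-helpers documentation; an empty line
+# means we are done.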
+while read line
+do
+ case $line in
+ capabilities)
+ echo 'import'
+ echo 'export'
+ test -n "$refspec" && echo "refspec $refspec"
+ if test -n "$gitmarks"
+ then
+ echo "*import-marks $gitmarks"
+ echo "*export-marks $gitmarks"
+ fi
+ test -n "$GIT_REMOTE_TESTGIT_SIGNED_TAGS" && echo "signed-tags"
+ test -n "$GIT_REMOTE_TESTGIT_NO_PRIVATE_UPDATE" && echo "no-private-update"
+ echo 'option'
+ echo
+ ;;
+ list)
+ git for-each-ref --format='? %(refname)' 'refs/heads/'
+ head=$(git symbolic-ref HEAD)
+ echo "@$head HEAD"
+ echo
+ ;;
+ import*)
+ # read all import lines
+ while true
+ do
+ ref="${line#* }"
+ refs="$refs $ref"
+ read line
+ test "${line%% *}" != "import" && break
+ done
+
+ if test -n "$gitmarks"
+ then
+ echo "feature import-marks=$gitmarks"
+ echo "feature export-marks=$gitmarks"
+ fi
+
+ if test -n "$GIT_REMOTE_TESTGIT_FAILURE"
+ then
+ echo "feature done"
+ exit 1
+ fi
+
+ echo "feature done"
+ git fast-export \
+ ${testgitmarks:+"--import-marks=$testgitmarks"} \
+ ${testgitmarks:+"--export-marks=$testgitmarks"} \
+ $refs |
+ sed -e "s#refs/heads/#${prefix}/heads/#g"
+ echo "done"
+ ;;
+ export)
+ if test -n "$GIT_REMOTE_TESTGIT_FAILURE"
+ then
+ # consume input so fast-export doesn't get SIGPIPE;
+ # git would also notice that case, but we want
+ # to make sure we are exercising the later
+ # error checks
+ while read line; do
+ test "done" = "$line" && break
+ done
+ exit 1
+ fi
+
+ before=$(git for-each-ref --format=' %(refname) %(objectname) ')
+
+ git fast-import \
+ ${force:+--force} \
+ ${testgitmarks:+"--import-marks=$testgitmarks"} \
+ ${testgitmarks:+"--export-marks=$testgitmarks"} \
+ --quiet
+
+ # figure out which refs were updated
+ git for-each-ref --format='%(refname) %(objectname)' |
+ while read ref a
+ do
+ case "$before" in
+ *" $ref $a "*)
+ continue ;; # unchanged
+ esac
+ if test -z "$GIT_REMOTE_TESTGIT_PUSH_ERROR"
+ then
+ echo "ok $ref"
+ else
+ echo "error $ref $GIT_REMOTE_TESTGIT_PUSH_ERROR"
+ fi
+ done
+
+ echo
+ ;;
+ option\ *)
+ read cmd opt val <<-EOF
+ $line
+ EOF
+ case $opt in
+ force)
+ test $val = "true" && force="true" || force=
+ echo "ok"
+ ;;
+ *)
+ echo "unsupported"
+ ;;
+ esac
+ ;;
+ '')
+ exit
+ ;;
+ esac
+done
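+# Rough sketch of how this helper is exercised (the path below is only
+# illustrative): with the t5801 directory on $PATH, any remote whose URL
+# uses the "testgit::" scheme is handled by this script, e.g.
+#
+#   git clone "testgit::$PWD/server" local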
clone "$HTTPD_URL/smart-redir-perm/repo.git" redir.git
'
-stop_httpd
test_done
check_same BROKEN_HASH6 BISECT_HEAD &&
git bisect bad BISECT_HEAD &&
check_same BROKEN_HASH5 BISECT_HEAD &&
- git bisect good BISECT_HEAD &&
+ test_must_fail git bisect good BISECT_HEAD &&
check_same BROKEN_HASH6 bisect/bad &&
git bisect reset
'
check_same BROKEN_HASH6 BISECT_HEAD &&
git bisect good BISECT_HEAD &&
check_same BROKEN_HASH8 BISECT_HEAD &&
- git bisect good BISECT_HEAD &&
+ test_must_fail git bisect good BISECT_HEAD &&
check_same BROKEN_HASH9 bisect/bad &&
git bisect reset
'
git bisect reset &&
git checkout broken &&
git bisect start broken master --no-checkout &&
- git bisect run \"\$SHELL_PATH\" -c '
+ test_must_fail git bisect run \"\$SHELL_PATH\" -c '
GOOD=\$(git for-each-ref \"--format=%(objectname)\" refs/bisect/good-*) &&
git rev-list --objects BISECT_HEAD --not \$GOOD >tmp.\$\$ &&
git pack-objects --stdout >/dev/null < tmp.\$\$
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
git ls-files -s >out &&
test_line_count = 4 out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 4 out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
git ls-files -s >out &&
test_line_count = 3 out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep "CONFLICT (rename/rename)" out &&
git ls-files -s >out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 3 out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 6 out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep "CONFLICT.*directory rename split" out &&
git ls-files -s >out &&
git checkout A^0 &&
- git merge -s recursive B^0 >out &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
git ls-files -s >out &&
test_line_count = 3 out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 3 out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep CONFLICT.*rename/rename.*z/d.*x/d.*w/d out &&
test_i18ngrep ! CONFLICT.*rename/rename.*y/d out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 5 out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep "CONFLICT.*implicit dir rename" out &&
git ls-files -s >out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep "CONFLICT (add/add).* y/d" out &&
git ls-files -s >out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep "CONFLICT (rename/rename).*x/d.*w/d.*z/d" out &&
test_i18ngrep "CONFLICT (add/add).* y/d" out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep "CONFLICT (file/directory).*y/d" out &&
git ls-files -s >out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep "CONFLICT (rename/delete).*z/c.*y/c" out &&
git ls-files -s >out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 3 out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 3 out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 3 out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 4 out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep "CONFLICT (rename/rename).*z/b.*y/b.*w/b" out &&
test_i18ngrep "CONFLICT (rename/rename).*z/c.*y/c.*x/c" out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep "CONFLICT (rename/rename)" out &&
git ls-files -s >out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep "CONFLICT (rename/rename).*x/d.*w/d.*y/d" out &&
git ls-files -s >out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep "CONFLICT (rename/delete).*x/d.*y/d" out &&
git ls-files -s >out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep "CONFLICT (rename/delete).*x/d.*y/d" out &&
git ls-files -s >out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 6 out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 6 out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep "CONFLICT (modify/delete).* z/d" out &&
git ls-files -s >out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 3 out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out 2>err &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
test_i18ngrep CONFLICT.*rename/rename.*z/c.*y/c.*w/c out &&
test_i18ngrep CONFLICT.*rename/rename.*z/b.*y/b.*w/b out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 7 out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 3 out &&
git checkout A^0 &&
- git merge -s recursive B^0 >out &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep "WARNING: Avoiding applying x -> z rename to x/f" out &&
git ls-files -s >out &&
git checkout A^0 &&
- git merge -s recursive B^0 >out &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
test_i18ngrep "WARNING: Avoiding applying z -> y rename to z/t" out &&
test_i18ngrep "WARNING: Avoiding applying y -> x rename to y/a" out &&
test_i18ngrep "WARNING: Avoiding applying x -> w rename to x/b" out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 >out &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out &&
grep "CONFLICT (implicit dir rename): Cannot map more than one path to combined/yo" out >error_line &&
grep -q dir1/yo error_line &&
grep -q dir2/yo error_line &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 4 out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 4 out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 3 out &&
echo very >z/c &&
echo important >z/d &&
- test_must_fail git merge -s recursive B^0 >out 2>err &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
test_i18ngrep "The following untracked working tree files would be overwritten by merge" err &&
git ls-files -s >out &&
echo important >y/d &&
echo contents >y/e &&
- test_must_fail git merge -s recursive B^0 >out 2>err &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
test_i18ngrep "CONFLICT (rename/delete).*Version B\^0 of y/d left in tree at y/d~B\^0" out &&
test_i18ngrep "Error: Refusing to lose untracked file at y/e; writing to y/e~B\^0 instead" out &&
git checkout A^0 &&
echo important >y/c &&
- test_must_fail git merge -s recursive B^0 >out 2>err &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
test_i18ngrep "CONFLICT (rename/rename)" out &&
test_i18ngrep "Refusing to lose untracked file at y/c; adding as y/c~B\^0 instead" out &&
mkdir y &&
echo important >y/c &&
- test_must_fail git merge -s recursive A^0 >out 2>err &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive A^0 >out 2>err &&
test_i18ngrep "CONFLICT (rename/rename)" out &&
test_i18ngrep "Refusing to lose untracked file at y/c; adding as y/c~HEAD instead" out &&
git checkout A^0 &&
echo important >y/wham &&
- test_must_fail git merge -s recursive B^0 >out 2>err &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
test_i18ngrep "CONFLICT (rename/rename)" out &&
test_i18ngrep "Refusing to lose untracked file at y/wham" out &&
mkdir z &&
echo random >z/c &&
- git merge -s recursive B^0 >out 2>err &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
test_i18ngrep ! "following untracked working tree files would be overwritten by merge" err &&
git ls-files -s >out &&
git checkout A^0 &&
echo stuff >>z/c &&
- test_must_fail git merge -s recursive B^0 >out 2>err &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
test_i18ngrep "Refusing to lose dirty file at z/c" out &&
test_seq 1 10 >expected &&
git checkout A^0 &&
echo stuff >>z/c &&
- git merge -s recursive B^0 >out 2>err &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
test_i18ngrep "Refusing to lose dirty file at z/c" out &&
grep -q stuff z/c &&
git checkout A^0 &&
echo stuff >>y/c &&
- test_must_fail git merge -s recursive B^0 >out 2>err &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
test_i18ngrep "following files would be overwritten by merge" err &&
grep -q stuff y/c &&
git checkout A^0 &&
echo stuff >>z/c &&
- test_must_fail git merge -s recursive B^0 >out 2>err &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
test_i18ngrep "Refusing to lose dirty file at z/c" out &&
grep -q stuff z/c &&
git checkout A^0 &&
echo mods >>y/c &&
- test_must_fail git merge -s recursive B^0 >out 2>err &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
test_i18ngrep "CONFLICT (rename/rename)" out &&
test_i18ngrep "Refusing to lose dirty file at y/c" out &&
git checkout A^0 &&
echo important >>y/wham &&
- test_must_fail git merge -s recursive B^0 >out 2>err &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
test_i18ngrep "CONFLICT (rename/rename)" out &&
test_i18ngrep "Refusing to lose dirty file at y/wham" out &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 6 out &&
# To which, I can do no more than shrug my shoulders and say that
# even simple rules give weird results when given weird inputs.
-test_expect_success '12b-setup: Moving one directory hierarchy into another' '
+test_expect_success '12b-setup: Moving two directory hierarchies into each other' '
test_create_repo 12b &&
(
cd 12b &&
)
'
-test_expect_success '12b-check: Moving one directory hierarchy into another' '
+test_expect_success '12b-check: Moving two directory hierarchies into each other' '
(
cd 12b &&
git checkout A^0 &&
- git merge -s recursive B^0 &&
+ git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -s >out &&
test_line_count = 4 out &&
git checkout A^0 &&
- test_must_fail git merge -s recursive B^0 &&
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 &&
git ls-files -u >out &&
test_line_count = 12 out &&
)
'
+###########################################################################
+# SECTION 13: Checking informational and conflict messages
+#
+# A year after directory rename detection became the default, it was
+# instead decided to report conflicts on the pathname on the basis that
+# some users may expect the new files added or moved into a directory to
+# be unrelated to all the other files in that directory, and thus that
+# directory rename detection is unexpected. Test that the messages printed
+# match our expectation.
+###########################################################################
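+# (Which behavior is in effect is governed by the merge.directoryRenames
+# setting -- "false", "true", or the default "conflict"; the *-check(conflict)
+# tests below rely on the default, while the *-check(info) tests pass
+# -c merge.directoryRenames=true explicitly.)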
+
+# Testcase 13a, Basic directory rename with newly added files
+# Commit O: z/{b,c}
+# Commit A: y/{b,c}
+# Commit B: z/{b,c,d,e/f}
+# Expected: y/{b,c,d,e/f}, with notices/conflicts for both y/d and y/e/f
+
+test_expect_success '13a-setup: messages for newly added files' '
+ test_create_repo 13a &&
+ (
+ cd 13a &&
+
+ mkdir z &&
+ echo b >z/b &&
+ echo c >z/c &&
+ git add z &&
+ test_tick &&
+ git commit -m "O" &&
+
+ git branch O &&
+ git branch A &&
+ git branch B &&
+
+ git checkout A &&
+ git mv z y &&
+ test_tick &&
+ git commit -m "A" &&
+
+ git checkout B &&
+ echo d >z/d &&
+ mkdir z/e &&
+ echo f >z/e/f &&
+ git add z/d z/e/f &&
+ test_tick &&
+ git commit -m "B"
+ )
+'
+
+test_expect_success '13a-check(conflict): messages for newly added files' '
+ (
+ cd 13a &&
+
+ git checkout A^0 &&
+
+ test_must_fail git merge -s recursive B^0 >out 2>err &&
+
+ test_i18ngrep CONFLICT..file.location.*z/e/f.added.in.B^0.*y/e/f out &&
+ test_i18ngrep CONFLICT..file.location.*z/d.added.in.B^0.*y/d out &&
+
+ git ls-files >paths &&
+ ! grep z/ paths &&
+ grep "y/[de]" paths &&
+
+ test_path_is_missing z/d &&
+ test_path_is_file y/d &&
+ test_path_is_missing z/e/f &&
+ test_path_is_file y/e/f
+ )
+'
+
+test_expect_success '13a-check(info): messages for newly added files' '
+ (
+ cd 13a &&
+
+ git reset --hard &&
+ git checkout A^0 &&
+
+ git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
+
+ test_i18ngrep Path.updated:.*z/e/f.added.in.B^0.*y/e/f out &&
+ test_i18ngrep Path.updated:.*z/d.added.in.B^0.*y/d out &&
+
+ git ls-files >paths &&
+ ! grep z/ paths &&
+ grep "y/[de]" paths &&
+
+ test_path_is_missing z/d &&
+ test_path_is_file y/d &&
+ test_path_is_missing z/e/f &&
+ test_path_is_file y/e/f
+ )
+'
+
+# Testcase 13b, Transitive rename with conflicted content merge and default
+# "conflict" setting
+# (Related to testcase 1c, 9b)
+# Commit O: z/{b,c}, x/d_1
+# Commit A: y/{b,c}, x/d_2
+# Commit B: z/{b,c,d_3}
+# Expected: y/{b,c,d_merged}, with two conflict messages for y/d,
+# one about content, and one about file location
+
+test_expect_success '13b-setup: messages for transitive rename with conflicted content' '
+ test_create_repo 13b &&
+ (
+ cd 13b &&
+
+ mkdir x &&
+ mkdir z &&
+ test_seq 1 10 >x/d &&
+ echo b >z/b &&
+ echo c >z/c &&
+ git add x z &&
+ test_tick &&
+ git commit -m "O" &&
+
+ git branch O &&
+ git branch A &&
+ git branch B &&
+
+ git checkout A &&
+ git mv z y &&
+ echo 11 >>x/d &&
+ git add x/d &&
+ test_tick &&
+ git commit -m "A" &&
+
+ git checkout B &&
+ echo eleven >>x/d &&
+ git mv x/d z/d &&
+ git add z/d &&
+ test_tick &&
+ git commit -m "B"
+ )
+'
+
+test_expect_success '13b-check(conflict): messages for transitive rename with conflicted content' '
+ (
+ cd 13b &&
+
+ git checkout A^0 &&
+
+ test_must_fail git merge -s recursive B^0 >out 2>err &&
+
+ test_i18ngrep CONFLICT.*content.*Merge.conflict.in.y/d out &&
+ test_i18ngrep CONFLICT..file.location.*x/d.renamed.to.z/d.*moved.to.y/d out &&
+
+ git ls-files >paths &&
+ ! grep z/ paths &&
+ grep "y/d" paths &&
+
+ test_path_is_missing z/d &&
+ test_path_is_file y/d
+ )
+'
+
+test_expect_success '13b-check(info): messages for transitive rename with conflicted content' '
+ (
+ cd 13b &&
+
+ git reset --hard &&
+ git checkout A^0 &&
+
+ test_must_fail git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
+
+ test_i18ngrep CONFLICT.*content.*Merge.conflict.in.y/d out &&
+ test_i18ngrep Path.updated:.*x/d.renamed.to.z/d.in.B^0.*moving.it.to.y/d out &&
+
+ git ls-files >paths &&
+ ! grep z/ paths &&
+ grep "y/d" paths &&
+
+ test_path_is_missing z/d &&
+ test_path_is_file y/d
+ )
+'
+
+# Testcase 13c, Rename/rename(1to1) due to directory rename
+# Commit O: z/{b,c}, x/{d,e}
+# Commit A: y/{b,c,d}, x/e
+# Commit B: z/{b,c,d}, x/e
+# Expected: y/{b,c,d}, with info or conflict messages for d (
+#           B: renamed x/d -> z/d; A: renamed z/ -> y/ AND renamed x/d to y/d
+#           One could argue B had partial knowledge of what was done with
+#           d and A had full knowledge, but that's a slippery slope as
+# shown in testcase 13d.
+
+test_expect_success '13c-setup: messages for rename/rename(1to1) via transitive rename' '
+ test_create_repo 13c &&
+ (
+ cd 13c &&
+
+ mkdir x &&
+ mkdir z &&
+ test_seq 1 10 >x/d &&
+ echo e >x/e &&
+ echo b >z/b &&
+ echo c >z/c &&
+ git add x z &&
+ test_tick &&
+ git commit -m "O" &&
+
+ git branch O &&
+ git branch A &&
+ git branch B &&
+
+ git checkout A &&
+ git mv z y &&
+ git mv x/d y/ &&
+ test_tick &&
+ git commit -m "A" &&
+
+ git checkout B &&
+ git mv x/d z/d &&
+ git add z/d &&
+ test_tick &&
+ git commit -m "B"
+ )
+'
+
+test_expect_success '13c-check(conflict): messages for rename/rename(1to1) via transitive rename' '
+ (
+ cd 13c &&
+
+ git checkout A^0 &&
+
+ test_must_fail git merge -s recursive B^0 >out 2>err &&
+
+ test_i18ngrep CONFLICT..file.location.*x/d.renamed.to.z/d.*moved.to.y/d out &&
+
+ git ls-files >paths &&
+ ! grep z/ paths &&
+ grep "y/d" paths &&
+
+ test_path_is_missing z/d &&
+ test_path_is_file y/d
+ )
+'
+
+test_expect_success '13c-check(info): messages for rename/rename(1to1) via transitive rename' '
+ (
+ cd 13c &&
+
+ git reset --hard &&
+ git checkout A^0 &&
+
+ git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
+
+ test_i18ngrep Path.updated:.*x/d.renamed.to.z/d.in.B^0.*moving.it.to.y/d out &&
+
+ git ls-files >paths &&
+ ! grep z/ paths &&
+ grep "y/d" paths &&
+
+ test_path_is_missing z/d &&
+ test_path_is_file y/d
+ )
+'
+
+# Testcase 13d, Rename/rename(1to1) due to directory rename on both sides
+# Commit O: a/{z,y}, b/x, c/w
+# Commit A: a/z, b/{y,x}, d/w
+# Commit B: a/z, d/x, c/{y,w}
+# Expected: a/z, d/{y,x,w} with no file location conflict for x
+# Easy cases:
+# * z is always in a; so it stays in a.
+# * x starts in b, only modified on one side to move into d/
+# * w starts in c, only modified on one side to move into d/
+# Hard case:
+# * A renames a/y to b/y, and B renames b/->d/ => a/y -> d/y
+# * B renames a/y to c/y, and A renames c/->d/ => a/y -> d/y
+# No conflict in where a/y ends up, so put it in d/y.
+
+test_expect_success '13d-setup: messages for rename/rename(1to1) via dual transitive rename' '
+ test_create_repo 13d &&
+ (
+ cd 13d &&
+
+ mkdir a &&
+ mkdir b &&
+ mkdir c &&
+ echo z >a/z &&
+ echo y >a/y &&
+ echo x >b/x &&
+ echo w >c/w &&
+ git add a b c &&
+ test_tick &&
+ git commit -m "O" &&
+
+ git branch O &&
+ git branch A &&
+ git branch B &&
+
+ git checkout A &&
+ git mv a/y b/ &&
+ git mv c/ d/ &&
+ test_tick &&
+ git commit -m "A" &&
+
+ git checkout B &&
+ git mv a/y c/ &&
+ git mv b/ d/ &&
+ test_tick &&
+ git commit -m "B"
+ )
+'
+
+test_expect_success '13d-check(conflict): messages for rename/rename(1to1) via dual transitive rename' '
+ (
+ cd 13d &&
+
+ git checkout A^0 &&
+
+ test_must_fail git merge -s recursive B^0 >out 2>err &&
+
+ test_i18ngrep CONFLICT..file.location.*a/y.renamed.to.b/y.*moved.to.d/y out &&
+ test_i18ngrep CONFLICT..file.location.*a/y.renamed.to.c/y.*moved.to.d/y out &&
+
+ git ls-files >paths &&
+ ! grep b/ paths &&
+ ! grep c/ paths &&
+ grep "d/y" paths &&
+
+ test_path_is_missing b/y &&
+ test_path_is_missing c/y &&
+ test_path_is_file d/y
+ )
+'
+
+test_expect_success '13d-check(info): messages for rename/rename(1to1) via dual transitive rename' '
+ (
+ cd 13d &&
+
+ git reset --hard &&
+ git checkout A^0 &&
+
+ git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
+
+ test_i18ngrep Path.updated.*a/y.renamed.to.b/y.*moving.it.to.d/y out &&
+ test_i18ngrep Path.updated.*a/y.renamed.to.c/y.*moving.it.to.d/y out &&
+
+ git ls-files >paths &&
+ ! grep b/ paths &&
+ ! grep c/ paths &&
+ grep "d/y" paths &&
+
+ test_path_is_missing b/y &&
+ test_path_is_missing c/y &&
+ test_path_is_file d/y
+ )
+'
+
test_done
git checkout A^0 &&
- GIT_MERGE_VERBOSITY=3 git merge -s recursive B^0 >out 2>err &&
+ GIT_MERGE_VERBOSITY=3 git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
test_i18ngrep ! "Skipped bar/bq" out &&
test_must_be_empty err &&
git checkout B^0 &&
- GIT_MERGE_VERBOSITY=3 git merge -s recursive A^0 >out 2>err &&
+ GIT_MERGE_VERBOSITY=3 git -c merge.directoryRenames=true merge -s recursive A^0 >out 2>err &&
test_i18ngrep ! "Skipped bar/bq" out &&
test_must_be_empty err &&
git checkout A^0 &&
- GIT_MERGE_VERBOSITY=3 git merge -s recursive B^0 >out 2>err &&
+ GIT_MERGE_VERBOSITY=3 git -c merge.directoryRenames=true merge -s recursive B^0 >out 2>err &&
test_i18ngrep ! "Skipped bar/bq" out &&
test_must_be_empty err &&
git checkout B^0 &&
- GIT_MERGE_VERBOSITY=3 git merge -s recursive A^0 >out 2>err &&
+ GIT_MERGE_VERBOSITY=3 git -c merge.directoryRenames=true merge -s recursive A^0 >out 2>err &&
test_i18ngrep ! "Skipped bar/bq" out &&
test_must_be_empty err &&
test "$_found" = "$_parent" || return 1
_parent_number=$(( $_parent_number + 1 ))
done &&
- test_must_fail git rev-parse --verify $_commit^$_parent_number
+ test_must_fail git rev-parse --verify $_commit^$_parent_number 2>err &&
+ test_i18ngrep "Needed a single revision" err
}
commit_has_parents ()
'
test_expect_success '--graft with and without already replaced object' '
- test $(git log --oneline | wc -l) = 7 &&
+ git log --oneline >log &&
+ test_line_count = 7 log &&
git replace --graft $HASH5 &&
- test $(git log --oneline | wc -l) = 3 &&
+ git log --oneline >log &&
+ test_line_count = 3 log &&
commit_has_parents $HASH5 &&
test_must_fail git replace --graft $HASH5 $HASH4 $HASH3 &&
git replace --force -g $HASH5 $HASH4 $HASH3 &&
git replace -d $HASH5
'
+test_expect_success '--graft using a tag as the new parent' '
+ git tag new_parent $HASH5 &&
+ git replace --graft $HASH7 new_parent &&
+ commit_has_parents $HASH7 $HASH5 &&
+ git replace -d $HASH7 &&
+ git tag -a -m "annotated new parent tag" annotated_new_parent $HASH5 &&
+ git replace --graft $HASH7 annotated_new_parent &&
+ commit_has_parents $HASH7 $HASH5 &&
+ git replace -d $HASH7
+'
+
+test_expect_success '--graft using a tag as the replaced object' '
+ git tag replaced_object $HASH7 &&
+ git replace --graft replaced_object $HASH5 &&
+ commit_has_parents $HASH7 $HASH5 &&
+ git replace -d $HASH7 &&
+ git tag -a -m "annotated replaced object tag" annotated_replaced_object $HASH7 &&
+ git replace --graft annotated_replaced_object $HASH5 &&
+ commit_has_parents $HASH7 $HASH5 &&
+ git replace -d $HASH7
+'
+
test_expect_success GPG 'set up a signed commit' '
echo "line 17" >>hello &&
echo "line 18" >>hello &&
--- /dev/null
+#!/bin/sh
+
+test_description='git rev-list should handle unexpected object types'
+
+. ./test-lib.sh
+
+test_expect_success 'setup well-formed objects' '
+ blob="$(printf "foo" | git hash-object -w --stdin)" &&
+ tree="$(printf "100644 blob $blob\tfoo" | git mktree)" &&
+ commit="$(git commit-tree $tree -m "first commit")" &&
+ git cat-file commit $commit >good-commit
+'
+
+test_expect_success 'setup unexpected non-blob entry' '
+ printf "100644 foo\0$(echo $tree | hex2oct)" >broken-tree &&
+ broken_tree="$(git hash-object -w --literally -t tree broken-tree)"
+'
+
+test_expect_failure 'traverse unexpected non-blob entry (lone)' '
+ test_must_fail git rev-list --objects $broken_tree
+'
+
+test_expect_success 'traverse unexpected non-blob entry (seen)' '
+ test_must_fail git rev-list --objects $tree $broken_tree >output 2>&1 &&
+ test_i18ngrep "is not a blob" output
+'
+
+test_expect_success 'setup unexpected non-tree entry' '
+ printf "40000 foo\0$(echo $blob | hex2oct)" >broken-tree &&
+ broken_tree="$(git hash-object -w --literally -t tree broken-tree)"
+'
+
+test_expect_success 'traverse unexpected non-tree entry (lone)' '
+ test_must_fail git rev-list --objects $broken_tree
+'
+
+test_expect_success 'traverse unexpected non-tree entry (seen)' '
+ test_must_fail git rev-list --objects $blob $broken_tree >output 2>&1 &&
+ test_i18ngrep "is not a tree" output
+'
+
+test_expect_success 'setup unexpected non-commit parent' '
+ sed "/^author/ { h; s/.*/parent $blob/; G; }" <good-commit \
+ >broken-commit &&
+ broken_commit="$(git hash-object -w --literally -t commit \
+ broken-commit)"
+'
+
+test_expect_success 'traverse unexpected non-commit parent (lone)' '
+ test_must_fail git rev-list --objects $broken_commit >output 2>&1 &&
+ test_i18ngrep "not a commit" output
+'
+
+test_expect_success 'traverse unexpected non-commit parent (seen)' '
+ test_must_fail git rev-list --objects $commit $broken_commit \
+ >output 2>&1 &&
+ test_i18ngrep "not a commit" output
+'
+
+test_expect_success 'setup unexpected non-tree root' '
+ sed -e "s/$tree/$blob/" <good-commit >broken-commit &&
+ broken_commit="$(git hash-object -w --literally -t commit \
+ broken-commit)"
+'
+
+test_expect_success 'traverse unexpected non-tree root (lone)' '
+ test_must_fail git rev-list --objects $broken_commit
+'
+
+test_expect_success 'traverse unexpected non-tree root (seen)' '
+ test_must_fail git rev-list --objects $blob $broken_commit \
+ >output 2>&1 &&
+ test_i18ngrep "not a tree" output
+'
+
+test_expect_success 'setup unexpected non-commit tag' '
+ git tag -a -m "tagged commit" tag $commit &&
+ git cat-file tag tag >good-tag &&
+ test_when_finished "git tag -d tag" &&
+ sed -e "s/$commit/$blob/" <good-tag >broken-tag &&
+ tag=$(git hash-object -w --literally -t tag broken-tag)
+'
+
+test_expect_success 'traverse unexpected non-commit tag (lone)' '
+ test_must_fail git rev-list --objects $tag
+'
+
+test_expect_success 'traverse unexpected non-commit tag (seen)' '
+ test_must_fail git rev-list --objects $blob $tag >output 2>&1 &&
+ test_i18ngrep "not a commit" output
+'
+
+test_expect_success 'setup unexpected non-tree tag' '
+ git tag -a -m "tagged tree" tag $tree &&
+ git cat-file tag tag >good-tag &&
+ test_when_finished "git tag -d tag" &&
+ sed -e "s/$tree/$blob/" <good-tag >broken-tag &&
+ tag=$(git hash-object -w --literally -t tag broken-tag)
+'
+
+test_expect_success 'traverse unexpected non-tree tag (lone)' '
+ test_must_fail git rev-list --objects $tag
+'
+
+test_expect_success 'traverse unexpected non-tree tag (seen)' '
+ test_must_fail git rev-list --objects $blob $tag >output 2>&1 &&
+ test_i18ngrep "not a tree" output
+'
+
+test_expect_success 'setup unexpected non-blob tag' '
+ git tag -a -m "tagged blob" tag $blob &&
+ git cat-file tag tag >good-tag &&
+ test_when_finished "git tag -d tag" &&
+ sed -e "s/$blob/$commit/" <good-tag >broken-tag &&
+ tag=$(git hash-object -w --literally -t tag broken-tag)
+'
+
+test_expect_failure 'traverse unexpected non-blob tag (lone)' '
+ test_must_fail git rev-list --objects $tag
+'
+
+test_expect_success 'traverse unexpected non-blob tag (seen)' '
+ test_must_fail git rev-list --objects $commit $tag >output 2>&1 &&
+ test_i18ngrep "not a blob" output
+'
+
+test_done
test_atom head upstream:trackshort '>'
test_atom head upstream:track,nobracket 'ahead 1'
test_atom head upstream:nobracket,track 'ahead 1'
-test_atom head push:track '[ahead 1]'
-test_atom head push:trackshort '>'
+
+test_expect_success 'setup for push:track[short]' '
+ test_commit third &&
+ git update-ref refs/remotes/myfork/master master &&
+ git reset master~1
+'
+
+test_atom head push:track '[behind 1]'
+test_atom head push:trackshort '<'
test_expect_success 'Check that :track[short] cannot be used with other atoms' '
test_must_fail git for-each-ref --format="%(refname:track)" 2>/dev/null &&
test_expect_success 'set up color tests' '
cat >expected.color <<-EOF &&
$(git rev-parse --short refs/heads/master) <GREEN>master<RESET>
+ $(git rev-parse --short refs/remotes/myfork/master) <GREEN>myfork/master<RESET>
$(git rev-parse --short refs/remotes/origin/master) <GREEN>origin/master<RESET>
$(git rev-parse --short refs/tags/testtag) <GREEN>testtag<RESET>
+ $(git rev-parse --short refs/tags/third) <GREEN>third<RESET>
$(git rev-parse --short refs/tags/two) <GREEN>two<RESET>
EOF
sed "s/<[^>]*>//g" <expected.color >expected.bare &&
test_must_be_empty stderr
'
+test_expect_success 'gc.reflogExpire{Unreachable,}=never skips "expire" via "gc"' '
+ test_config gc.reflogExpire never &&
+ test_config gc.reflogExpireUnreachable never &&
+
+ GIT_TRACE=$(pwd)/trace.out git gc &&
+
+ # Check that git-pack-refs is run as a sanity check (done via
+	# gc_before_repack()) but that "git reflog expire" is not.
+ grep -E "^trace: (built-in|exec|run_command): git pack-refs --" trace.out &&
+ ! grep -E "^trace: (built-in|exec|run_command): git reflog expire --" trace.out
+'
+
+test_expect_success 'one of gc.reflogExpire{Unreachable,}=never does not skip "expire" via "gc"' '
+ >trace.out &&
+ test_config gc.reflogExpire never &&
+ GIT_TRACE=$(pwd)/trace.out git gc &&
+ grep -E "^trace: (built-in|exec|run_command): git reflog expire --" trace.out
+'
+
run_and_wait_for_auto_gc () {
# We read stdout from gc for the side effect of waiting until the
# background gc process exits, closing its fd 9. Furthermore, the
# now fake a concurrent gc that holds the lock; we can use our
# shell pid so that it looks valid.
hostname=$(hostname || echo unknown) &&
- printf "$$ %s" "$hostname" >.git/gc.pid &&
+ shell_pid=$$ &&
+ if test_have_prereq MINGW && test -f /proc/$shell_pid/winpid
+ then
+ # In Git for Windows, Bash (actually, the MSYS2 runtime) has a
+ # different idea of PIDs than git.exe (actually Windows). Use
+ # the Windows PID in this case.
+ shell_pid=$(cat /proc/$shell_pid/winpid)
+ fi &&
+ printf "%d %s" "$shell_pid" "$hostname" >.git/gc.pid &&
# our gc should exit zero without doing anything
run_and_wait_for_auto_gc &&
test_cmp expect actual
'
+test_expect_success 'recursive tagging should give advice' '
+ sed -e "s/|$//" <<-EOF >expect &&
+ hint: You have created a nested tag. The object referred to by your new tag is
+ hint: already a tag. If you meant to tag the object that it points to, use:
+ hint: |
+ hint: git tag -f nested annotated-v4.0^{}
+ EOF
+ git tag -m nested nested annotated-v4.0 2>actual &&
+ test_i18ncmp expect actual
+'
+
test_expect_success 'multiple --points-at are OR-ed together' '
cat >expect <<-\EOF &&
v2.0
--- /dev/null
+#!/bin/sh
+
+test_description='post index change hook'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ mkdir -p dir1 &&
+ touch dir1/file1.txt &&
+ echo testing >dir1/file2.txt &&
+ git add . &&
+ git commit -m "initial"
+'
+
+test_expect_success 'test status, add, commit, others trigger hook without flags set' '
+ mkdir -p .git/hooks &&
+ write_script .git/hooks/post-index-change <<-\EOF &&
+ if test "$1" -eq 1; then
+ echo "Invalid combination of flags passed to hook; updated_workdir is set." >testfailure
+ exit 1
+ fi
+ if test "$2" -eq 1; then
+ echo "Invalid combination of flags passed to hook; updated_skipworktree is set." >testfailure
+ exit 1
+ fi
+ if test -f ".git/index.lock"; then
+ echo ".git/index.lock exists" >testfailure
+ exit 3
+ fi
+ if ! test -f ".git/index"; then
+ echo ".git/index does not exist" >testfailure
+ exit 3
+ fi
+ echo "success" >testsuccess
+ EOF
+ mkdir -p dir2 &&
+ touch dir2/file1.txt &&
+ touch dir2/file2.txt &&
+ : force index to be dirty &&
+ test-tool chmtime +60 dir1/file1.txt &&
+ git status &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ git add . &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ git commit -m "second" &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ git checkout -- dir1/file1.txt &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ git update-index &&
+ test_path_is_missing testsuccess &&
+ test_path_is_missing testfailure &&
+ git reset --soft &&
+ test_path_is_missing testsuccess &&
+ test_path_is_missing testfailure
+'
+
+test_expect_success 'test checkout and reset trigger the hook' '
+ write_script .git/hooks/post-index-change <<-\EOF &&
+ if test "$1" -eq 1 && test "$2" -eq 1; then
+ echo "Invalid combination of flags passed to hook; updated_workdir and updated_skipworktree are both set." >testfailure
+ exit 1
+ fi
+ if test "$1" -eq 0 && test "$2" -eq 0; then
+		echo "Invalid combination of flags passed to hook; neither updated_workdir nor updated_skipworktree is set." >testfailure
+ exit 2
+ fi
+ if test "$1" -eq 1; then
+ if test -f ".git/index.lock"; then
+ echo "updated_workdir set but .git/index.lock exists" >testfailure
+ exit 3
+ fi
+ if ! test -f ".git/index"; then
+ echo "updated_workdir set but .git/index does not exist" >testfailure
+ exit 3
+ fi
+ else
+		echo "updated_workdir should be set for checkout" >testfailure
+ exit 4
+ fi
+ echo "success" >testsuccess
+ EOF
+ : force index to be dirty &&
+ test-tool chmtime +60 dir1/file1.txt &&
+ git checkout master &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ test-tool chmtime +60 dir1/file1.txt &&
+ git checkout HEAD &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ test-tool chmtime +60 dir1/file1.txt &&
+ git reset --hard &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ git checkout -B test &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure
+'
+
+test_expect_success 'test reset --mixed and update-index triggers the hook' '
+ write_script .git/hooks/post-index-change <<-\EOF &&
+ if test "$1" -eq 1 && test "$2" -eq 1; then
+ echo "Invalid combination of flags passed to hook; updated_workdir and updated_skipworktree are both set." >testfailure
+ exit 1
+ fi
+ if test "$1" -eq 0 && test "$2" -eq 0; then
+		echo "Invalid combination of flags passed to hook; neither updated_workdir nor updated_skipworktree is set." >testfailure
+ exit 2
+ fi
+ if test "$2" -eq 1; then
+ if test -f ".git/index.lock"; then
+ echo "updated_skipworktree set but .git/index.lock exists" >testfailure
+ exit 3
+ fi
+ if ! test -f ".git/index"; then
+ echo "updated_skipworktree set but .git/index does not exist" >testfailure
+ exit 3
+ fi
+ else
+ echo "updated_skipworktree should be set for reset --mixed and update-index" >testfailure
+ exit 4
+ fi
+ echo "success" >testsuccess
+ EOF
+ : force index to be dirty &&
+ test-tool chmtime +60 dir1/file1.txt &&
+ git reset --mixed --quiet HEAD~1 &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ git hash-object -w --stdin <dir1/file2.txt >expect &&
+ git update-index --cacheinfo 100644 "$(cat expect)" dir1/file1.txt &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure &&
+ git update-index --skip-worktree dir1/file2.txt &&
+ git update-index --remove dir1/file2.txt &&
+ test_path_is_file testsuccess && rm -f testsuccess &&
+ test_path_is_missing testfailure
+'
+
+test_done
test_must_fail git checkout simple 2>errs &&
test_i18ngrep overwritten errs &&
- git checkout --merge simple 2>errs &&
- test_i18ngrep ! overwritten errs &&
- git ls-files -u &&
- test_must_fail git cat-file -t :0:two &&
- test "$(git cat-file -t :1:two)" = blob &&
- test "$(git cat-file -t :2:two)" = blob &&
- test_must_fail git cat-file -t :3:two
+ test_must_fail git read-tree --quiet -m -u HEAD simple 2>errs &&
+ test_must_be_empty errs
'
test_expect_success 'checkout to detach HEAD (with advice declined)' '
test_must_fail git submodule init
'
+test_expect_success 'add aborts on repository with no commits' '
+ cat >expect <<-\EOF &&
+ '"'repo-no-commits'"' does not have a commit checked out
+ EOF
+ git init repo-no-commits &&
+ test_must_fail git submodule add ../a ./repo-no-commits 2>actual &&
+ test_i18ncmp expect actual
+'
+
test_expect_success 'setup - repository in init subdirectory' '
mkdir init &&
(
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
mkdir -p a/b/c &&
- (cd a/b/c && git init) &&
+ (cd a/b/c && git init && test_commit msg) &&
git config remote.origin.url ../foo/bar.git &&
git submodule add ../bar/a/b/c ./a/b/c &&
git submodule init &&
cd super3 &&
sed -e "s#url = ../#url = file://$pwd/#" <.gitmodules >.gitmodules.tmp &&
mv -f .gitmodules.tmp .gitmodules &&
- test_must_fail git submodule update --init --depth=1 2>actual &&
+ # Some protocol versions (e.g. 2) support fetching
+ # unadvertised objects, so restrict this test to v0.
+ test_must_fail env GIT_TEST_PROTOCOL_VERSION= \
+ git submodule update --init --depth=1 2>actual &&
test_i18ngrep "Direct fetching of that commit failed." actual &&
git -C ../submodule config uploadpack.allowReachableSHA1InWant true &&
git submodule update --init --depth=1 >actual &&
test_cmp expected actual
'
+test_expect_success 'option-like arguments passed to foreach commands are not lost' '
+ (
+ cd super &&
+ git submodule foreach "echo be --quiet" > ../expected &&
+ git submodule foreach echo be --quiet > ../actual
+ ) &&
+ grep -sq -e "--quiet" expected &&
+ test_cmp expected actual
+'
+
test_done
)
'
+test_expect_success 'unsetting submodules config from the working tree with "submodule--helper config --unset"' '
+ (cd super &&
+ git submodule--helper config --unset submodule.submodule.url &&
+ git submodule--helper config submodule.submodule.url >actual &&
+ test_must_be_empty actual
+ )
+'
+
+
test_expect_success 'writing submodules config with "submodule--helper config"' '
(cd super &&
echo "new_url" >expect &&
--- /dev/null
+#!/bin/sh
+#
+# Copyright (c) 2019 Denton Liu
+#
+
+test_description='Test submodules set-branch subcommand
+
+This test verifies that the set-branch subcommand of git-submodule is working
+as expected.
+'
+
+TEST_NO_CREATE_REPO=1
+. ./test-lib.sh
+
+test_expect_success 'submodule config cache setup' '
+ mkdir submodule &&
+ (cd submodule &&
+ git init &&
+ echo a >a &&
+ git add . &&
+ git commit -ma &&
+ git checkout -b topic &&
+ echo b >a &&
+ git add . &&
+ git commit -mb
+ ) &&
+ mkdir super &&
+ (cd super &&
+ git init &&
+ git submodule add ../submodule &&
+ git commit -m "add submodule"
+ )
+'
+
+test_expect_success 'ensure submodule branch is unset' '
+ (cd super &&
+ test_must_fail grep branch .gitmodules
+ )
+'
+
+test_expect_success 'test submodule set-branch --branch' '
+ (cd super &&
+ git submodule set-branch --branch topic submodule &&
+ grep "branch = topic" .gitmodules &&
+ git submodule update --remote &&
+ cat <<-\EOF >expect &&
+ b
+ EOF
+ git -C submodule show -s --pretty=%s >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'test submodule set-branch --default' '
+ (cd super &&
+ git submodule set-branch --default submodule &&
+ test_must_fail grep branch .gitmodules &&
+ git submodule update --remote &&
+ cat <<-\EOF >expect &&
+ a
+ EOF
+ git -C submodule show -s --pretty=%s >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'test submodule set-branch -b' '
+ (cd super &&
+ git submodule set-branch -b topic submodule &&
+ grep "branch = topic" .gitmodules &&
+ git submodule update --remote &&
+ cat <<-\EOF >expect &&
+ b
+ EOF
+ git -C submodule show -s --pretty=%s >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'test submodule set-branch -d' '
+ (cd super &&
+ git submodule set-branch -d submodule &&
+ test_must_fail grep branch .gitmodules &&
+ git submodule update --remote &&
+ cat <<-\EOF >expect &&
+ a
+ EOF
+ git -C submodule show -s --pretty=%s >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_done
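(For reference, a sketch of the invocations the new set-branch tests above exercise; the path "submodule" matches the setup in this test script.)

	# track branch "topic" for the submodule, then stop tracking a branch
	git submodule set-branch --branch topic submodule
	git submodule update --remote
	git submodule set-branch --default submodule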
 # Arguments: [<prefix>] [<commit message>] [<commit options>]
check_summary_oneline() {
test_tick &&
- git commit ${3+"$3"} -m "$2" | head -1 > act &&
+ git commit ${3+"$3"} -m "$2" >raw &&
+ head -n 1 raw >act &&
# branch name
SUMMARY_PREFIX="$(git name-rev --name-only HEAD)" &&
git checkout recursive-a &&
test_must_fail git merge recursive-b &&
# resolve the conflict
- echo commit-a > file1 &&
+ echo commit-a >file1 &&
git add file1 &&
check_summary_oneline "" "Merge"
'
>positive &&
git add positive &&
git commit -s -m "thank you" &&
- actual=$(git cat-file commit HEAD | sed -ne "s/Signed-off-by: //p") &&
- expected=$(git var GIT_COMMITTER_IDENT | sed -e "s/>.*/>/") &&
- test "z$actual" = "z$expected"
+ git cat-file commit HEAD >commit.msg &&
+ sed -ne "s/Signed-off-by: //p" commit.msg >actual &&
+ git var GIT_COMMITTER_IDENT >ident &&
+ sed -e "s/>.*/>/" ident >expected &&
+ test_cmp expected actual
'
>negative &&
git add negative &&
git commit -m "one" -m "two" -m "three" &&
- actual=$(git cat-file commit HEAD | sed -e "1,/^\$/d") &&
- expected=$(echo one; echo; echo two; echo; echo three) &&
+ actual=$(git cat-file commit HEAD >tmp && sed -e "1,/^\$/d" tmp && rm tmp) &&
+ expected=$(test_write_lines "one" "" "two" "" "three") &&
test "z$actual" = "z$expected"
'
echo minus >negative &&
git add negative &&
- git status -v | sed -ne "/^diff --git /p" >actual &&
+ git status -v >raw &&
+ sed -ne "/^diff --git /p" raw >actual &&
echo "diff --git a/negative b/negative" >expect &&
test_cmp expect actual
echo >>negative &&
git commit --cleanup=verbatim --no-status -t expect -a &&
- git cat-file -p HEAD |sed -e "1,/^\$/d" >actual &&
+ git cat-file -p HEAD >raw &&
+ sed -e "1,/^\$/d" raw >actual &&
test_cmp expect actual
'
echo >>negative &&
git commit --cleanup=verbatim -F expect -a &&
- git cat-file -p HEAD |sed -e "1,/^\$/d">actual &&
+ git cat-file -p HEAD >raw &&
+ sed -e "1,/^\$/d" raw >actual &&
test_cmp expect actual
'
echo >>negative &&
git commit --cleanup=verbatim -m "$mesg_with_comment_and_newlines" -a &&
- git cat-file -p HEAD |sed -e "1,/^\$/d">actual &&
+ git cat-file -p HEAD >raw &&
+ sed -e "1,/^\$/d" raw >actual &&
test_cmp expect actual
'
test_expect_success 'cleanup commit messages (whitespace option,-F)' '
echo >>negative &&
- { echo;echo "# text";echo; } >text &&
+ test_write_lines "" "# text" "" >text &&
echo "# text" >expect &&
git commit --cleanup=whitespace -F text -a &&
- git cat-file -p HEAD |sed -e "1,/^\$/d">actual &&
+ git cat-file -p HEAD >raw &&
+ sed -e "1,/^\$/d" raw >actual &&
test_cmp expect actual
'
test_expect_success 'cleanup commit messages (scissors option,-F,-e)' '
echo >>negative &&
- cat >text <<EOF &&
+ cat >text <<-\EOF &&
-# to be kept
+ # to be kept
- # ------------------------ >8 ------------------------
-# to be kept, too
-# ------------------------ >8 ------------------------
-to be removed
-# ------------------------ >8 ------------------------
-to be removed, too
-EOF
+ # ------------------------ >8 ------------------------
+ # to be kept, too
+ # ------------------------ >8 ------------------------
+ to be removed
+ # ------------------------ >8 ------------------------
+ to be removed, too
+ EOF
- cat >expect <<EOF &&
-# to be kept
+ cat >expect <<-\EOF &&
+ # to be kept
- # ------------------------ >8 ------------------------
-# to be kept, too
-EOF
+ # ------------------------ >8 ------------------------
+ # to be kept, too
+ EOF
git commit --cleanup=scissors -e -F text -a &&
- git cat-file -p HEAD |sed -e "1,/^\$/d">actual &&
+ git cat-file -p HEAD >raw &&
+ sed -e "1,/^\$/d" raw >actual &&
test_cmp expect actual
'
test_expect_success 'cleanup commit messages (scissors option,-F,-e, scissors on first line)' '
echo >>negative &&
- cat >text <<EOF &&
-# ------------------------ >8 ------------------------
-to be removed
-EOF
+ cat >text <<-\EOF &&
+ # ------------------------ >8 ------------------------
+ to be removed
+ EOF
git commit --cleanup=scissors -e -F text -a --allow-empty-message &&
- git cat-file -p HEAD |sed -e "1,/^\$/d">actual &&
+ git cat-file -p HEAD >raw &&
+ sed -e "1,/^\$/d" raw >actual &&
test_must_be_empty actual
'
test_expect_success 'cleanup commit messages (strip option,-F)' '
echo >>negative &&
- { echo;echo "# text";echo sample;echo; } >text &&
+ test_write_lines "" "# text" "sample" "" >text &&
echo sample >expect &&
git commit --cleanup=strip -F text -a &&
- git cat-file -p HEAD |sed -e "1,/^\$/d">actual &&
+ git cat-file -p HEAD >raw &&
+ sed -e "1,/^\$/d" raw >actual &&
test_cmp expect actual
'
test_expect_success 'cleanup commit messages (strip option,-F,-e)' '
echo >>negative &&
- { echo;echo sample;echo; } >text &&
+ test_write_lines "" "sample" "" >text &&
git commit -e -F text -a &&
head -n 4 .git/COMMIT_EDITMSG >actual
'
'
write_script .git/FAKE_EDITOR <<EOF
-echo editor started > "$(pwd)/.git/result"
+echo editor started >"$(pwd)/.git/result"
exit 0
EOF
test_expect_success EXECKEEPSPID 'a SIGTERM should break locks' '
echo >>negative &&
! "$SHELL_PATH" -c '\''
- echo kill -TERM $$ >> .git/FAKE_EDITOR
+ echo kill -TERM $$ >>.git/FAKE_EDITOR
GIT_EDITOR=.git/FAKE_EDITOR
export GIT_EDITOR
exec git commit -a'\'' &&
test_must_fail git merge second master &&
git checkout master g &&
EDITOR=: git commit -a &&
- git cat-file commit HEAD | sed -n -e "s/^parent //p" -e "/^$/q" >actual &&
+ git cat-file commit HEAD >raw &&
+ sed -n -e "s/^parent //p" -e "/^$/q" raw >actual &&
test_cmp expect actual
'
git reset --hard &&
git commit -s -m "hello: kitty" --allow-empty &&
- git cat-file commit HEAD | sed -e "1,/^$/d" >actual &&
+ git cat-file commit HEAD >raw &&
+ sed -e "1,/^$/d" raw >actual &&
test_line_count = 3 actual
'
test_i18ncmp expected actual
'
+test_expect_success 'status when cherry-picking after committing conflict resolution' '
+ git reset --hard cherry_branch &&
+ test_when_finished "git cherry-pick --abort" &&
+ test_must_fail git cherry-pick cherry_branch_second one_cherry &&
+ echo end >main.txt &&
+ git commit -a &&
+ cat >expected <<EOF &&
+On branch cherry_branch
+Cherry-pick currently in progress.
+ (run "git cherry-pick --continue" to continue)
+ (use "git cherry-pick --abort" to cancel the cherry-pick operation)
+
+nothing to commit (use -u to show untracked files)
+EOF
+ git status --untracked-files=no >actual &&
+ test_i18ncmp expected actual
+'
+
test_expect_success 'status showing detached at and from a tag' '
test_commit atag tagging &&
git checkout atag &&
test_i18ncmp expected actual
'
+test_expect_success 'status while reverting after committing conflict resolution' '
+ test_when_finished "git revert --abort" &&
+ git reset --hard new &&
+ test_must_fail git revert old new &&
+ echo reverted >to-revert.txt &&
+ git commit -a &&
+ cat >expected <<EOF &&
+On branch master
+Revert currently in progress.
+ (run "git revert --continue" to continue)
+ (use "git revert --abort" to cancel the revert operation)
+
+nothing to commit (use -u to show untracked files)
+EOF
+ git status --untracked-files=no >actual &&
+ test_i18ncmp expected actual
+'
+
test_expect_success 'prepare for different number of commits rebased' '
git reset --hard master &&
git checkout -b several_commits &&
test_must_fail git rebase -p master
'
+test_expect_success 'author.name overrides user.name' '
+ test_config user.name user &&
+ test_config user.email user@example.com &&
+ test_config author.name author &&
+ test_commit author-name-override-user &&
+ echo author user@example.com > expected-author &&
+ echo user user@example.com > expected-committer &&
+ git log --format="%an %ae" -1 > actual-author &&
+ git log --format="%cn %ce" -1 > actual-committer &&
+ test_cmp expected-author actual-author &&
+ test_cmp expected-committer actual-committer
+'
+
+test_expect_success 'author.email overrides user.email' '
+ test_config user.name user &&
+ test_config user.email user@example.com &&
+ test_config author.email author@example.com &&
+ test_commit author-email-override-user &&
+ echo user author@example.com > expected-author &&
+ echo user user@example.com > expected-committer &&
+ git log --format="%an %ae" -1 > actual-author &&
+ git log --format="%cn %ce" -1 > actual-committer &&
+ test_cmp expected-author actual-author &&
+ test_cmp expected-committer actual-committer
+'
+
+test_expect_success 'committer.name overrides user.name' '
+ test_config user.name user &&
+ test_config user.email user@example.com &&
+ test_config committer.name committer &&
+ test_commit committer-name-override-user &&
+ echo user user@example.com > expected-author &&
+ echo committer user@example.com > expected-committer &&
+ git log --format="%an %ae" -1 > actual-author &&
+ git log --format="%cn %ce" -1 > actual-committer &&
+ test_cmp expected-author actual-author &&
+ test_cmp expected-committer actual-committer
+'
+
+test_expect_success 'committer.email overrides user.email' '
+ test_config user.name user &&
+ test_config user.email user@example.com &&
+ test_config committer.email committer@example.com &&
+ test_commit committer-email-override-user &&
+ echo user user@example.com > expected-author &&
+ echo user committer@example.com > expected-committer &&
+ git log --format="%an %ae" -1 > actual-author &&
+ git log --format="%cn %ce" -1 > actual-committer &&
+ test_cmp expected-author actual-author &&
+ test_cmp expected-committer actual-committer
+'
+
+test_expect_success 'author and committer environment variables override config settings' '
+ test_config user.name user &&
+ test_config user.email user@example.com &&
+ test_config author.name author &&
+ test_config author.email author@example.com &&
+ test_config committer.name committer &&
+ test_config committer.email committer@example.com &&
+ GIT_AUTHOR_NAME=env_author && export GIT_AUTHOR_NAME &&
+ GIT_AUTHOR_EMAIL=env_author@example.com && export GIT_AUTHOR_EMAIL &&
+ GIT_COMMITTER_NAME=env_commit && export GIT_COMMITTER_NAME &&
+ GIT_COMMITTER_EMAIL=env_commit@example.com && export GIT_COMMITTER_EMAIL &&
+ test_commit env-override-conf &&
+ echo env_author env_author@example.com > expected-author &&
+ echo env_commit env_commit@example.com > expected-committer &&
+ git log --format="%an %ae" -1 > actual-author &&
+ git log --format="%cn %ce" -1 > actual-committer &&
+ sane_unset GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL &&
+ sane_unset GIT_COMMITTER_NAME GIT_COMMITTER_EMAIL &&
+ test_cmp expected-author actual-author &&
+ test_cmp expected-committer actual-committer
+'
+
test_done
test_i18ngrep "deleted:" actual &&
test_i18ngrep "new file:" actual &&
- git status --find-rename=100% >actual &&
+ git status --find-renames=100% >actual &&
test_i18ngrep "deleted:" actual &&
test_i18ngrep "new file:" actual
'
git status -M=01% >actual &&
test_i18ngrep "renamed:" actual &&
- git status --find-rename=01% >actual &&
+ git status --find-renames=01% >actual &&
test_i18ngrep "renamed:" actual
'
-test_expect_success 'copies not overridden by find-rename' '
+test_expect_success 'copies not overridden by find-renames' '
cp renamed copy &&
git add copy &&
test_i18ngrep "copied:" actual &&
test_i18ngrep "renamed:" actual &&
- git -c status.renames=copies status --find-rename=01% >actual &&
+ git -c status.renames=copies status --find-renames=01% >actual &&
test_i18ngrep "copied:" actual &&
test_i18ngrep "renamed:" actual
'
cat result.9z >file &&
git commit --no-edit -a &&
- {
- cat <<-EOF
- Squashed commit of the following:
+ cat >expect <<-EOF &&
+ Squashed commit of the following:
- $(git show -s c7)
+ $(git show -s c7)
- # Conflicts:
- # file
- EOF
- } >expect &&
- git cat-file commit HEAD | sed -e '1,/^$/d' >actual &&
+ # Conflicts:
+ # file
+ EOF
+ git cat-file commit HEAD >raw &&
+ sed -e '1,/^$/d' raw >actual &&
test_cmp expect actual
'
+test_expect_success 'merge c3 with c7 with commit.cleanup = scissors' '
+ git config commit.cleanup scissors &&
+ git reset --hard c3 &&
+ test_must_fail git merge c7 &&
+ cat result.9z >file &&
+ git commit --no-edit -a &&
+
+ cat >expect <<-\EOF &&
+ Merge tag '"'"'c7'"'"'
+
+ # ------------------------ >8 ------------------------
+ # Do not modify or remove the line above.
+ # Everything below it will be ignored.
+ #
+ # Conflicts:
+ # file
+ EOF
+ git cat-file commit HEAD >raw &&
+ sed -e '1,/^$/d' raw >actual &&
+ test_i18ncmp expect actual
+'
+
+test_expect_success 'merge c3 with c7 with --squash commit.cleanup = scissors' '
+ git config commit.cleanup scissors &&
+ git reset --hard c3 &&
+ test_must_fail git merge --squash c7 &&
+ cat result.9z >file &&
+ git commit --no-edit -a &&
+
+ cat >expect <<-EOF &&
+ Squashed commit of the following:
+
+ $(git show -s c7)
+
+ # ------------------------ >8 ------------------------
+ # Do not modify or remove the line above.
+ # Everything below it will be ignored.
+ #
+ # Conflicts:
+ # file
+ EOF
+ git cat-file commit HEAD >raw &&
+ sed -e '1,/^$/d' raw >actual &&
+ test_i18ncmp expect actual
+'
+
test_debug 'git log --graph --decorate --oneline --all'
test_expect_success 'merge c1 with c2 and c3' '
(
echo "Merge work done on the side branch c1"
echo
- cat <"$1"
+ cat "$1"
) >"$1.tmp" && mv "$1.tmp" "$1"
# strip comments and blank lines from end of message
-sed -e '/^#/d' < "$1" | sed -e :a -e '/^\n*$/{$d;N;ba' -e '}' > expected
+sed -e '/^#/d' "$1" | sed -e :a -e '/^\n*$/{$d;N;ba' -e '}' >expected
EOF
chmod 755 editor
git commit -m base &&
# one side changes the first line of each to "master"
- sed s/-1/-master/ <file >tmp &&
+ sed s/-1/-master/ file >tmp &&
mv tmp file &&
git commit -am master &&
# and the other to "side"; merging the two will
# yield 256 separate conflicts
git checkout -b side HEAD^ &&
- sed s/-1/-side/ <file >tmp &&
+ sed s/-1/-side/ file >tmp &&
mv tmp file &&
git commit -am side
'
test_expect_success EXECKEEPSPID 'killed merge can be completed with --continue' '
git reset --hard c0 &&
! "$SHELL_PATH" -c '\''
- echo kill -TERM $$ >> .git/FAKE_EDITOR
+ echo kill -TERM $$ >>.git/FAKE_EDITOR
GIT_EDITOR=.git/FAKE_EDITOR
export GIT_EDITOR
exec git merge --no-ff --edit c1'\'' &&
}
test_expect_success 'setup' '
- echo c0 > c0.c &&
+ echo c0 >c0.c &&
git add c0.c &&
git commit -m c0 &&
git tag c0 &&
- echo c1 > c1.c &&
+ echo c1 >c1.c &&
git add c1.c &&
git commit -m c1 &&
git tag c1 &&
git reset --hard c0 &&
- echo c2 > c2.c &&
+ echo c2 >c2.c &&
git add c2.c &&
git commit -m c2 &&
git tag c2 &&
test_expect_success 'merge c2 with a custom message' '
git reset --hard c1 &&
git merge -m "$(cat exp.subject)" c2 &&
- git cat-file commit HEAD | sed -e "1,/^$/d" >actual &&
+ git cat-file commit HEAD >raw &&
+ sed -e "1,/^$/d" raw >actual &&
test_cmp exp.subject actual
'
test_expect_success 'merge --log appends to custom message' '
git reset --hard c1 &&
git merge --log -m "$(cat exp.subject)" c2 &&
- git cat-file commit HEAD | sed -e "1,/^$/d" >actual &&
+ git cat-file commit HEAD >raw &&
+ sed -e "1,/^$/d" raw >actual &&
test_cmp exp.log actual
'
+mesg_with_comment_and_newlines='
+# text
+
+'
+
+test_expect_success 'prepare file with comment line and trailing newlines' '
+ printf "%s" "$mesg_with_comment_and_newlines" >expect
+'
+
+test_expect_success 'cleanup commit messages (verbatim option)' '
+ git reset --hard c1 &&
+ git merge --cleanup=verbatim -F expect c2 &&
+ git cat-file commit HEAD >raw &&
+ sed -e "1,/^$/d" raw >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'cleanup commit messages (whitespace option)' '
+ git reset --hard c1 &&
+ test_write_lines "" "# text" "" >text &&
+ echo "# text" >expect &&
+ git merge --cleanup=whitespace -F text c2 &&
+ git cat-file commit HEAD >raw &&
+ sed -e "1,/^$/d" raw >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'cleanup merge messages (scissors option)' '
+ git reset --hard c1 &&
+ cat >text <<-\EOF &&
+
+ # to be kept
+
+ # ------------------------ >8 ------------------------
+ # to be kept, too
+ # ------------------------ >8 ------------------------
+ to be removed
+ # ------------------------ >8 ------------------------
+ to be removed, too
+ EOF
+
+ cat >expect <<-\EOF &&
+ # to be kept
+
+ # ------------------------ >8 ------------------------
+ # to be kept, too
+ EOF
+ git merge --cleanup=scissors -e -F text c2 &&
+ git cat-file commit HEAD >raw &&
+ sed -e "1,/^$/d" raw >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'cleanup commit messages (strip option)' '
+ git reset --hard c1 &&
+ test_write_lines "" "# text" "sample" "" >text &&
+ echo sample >expect &&
+ git merge --cleanup=strip -F text c2 &&
+ git cat-file commit HEAD >raw &&
+ sed -e "1,/^$/d" raw >actual &&
+ test_cmp expect actual
+'
+
test_done
test_when_finished "git reset --hard" &&
git checkout -b test$test_count branch1 &&
git submodule update -N &&
- test_must_fail git merge master >/dev/null 2>&1 &&
- ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
+ test_must_fail git merge master &&
+ ( yes "" | git mergetool both ) &&
( yes "" | git mergetool file1 file1 ) &&
- ( yes "" | git mergetool file2 "spaced name" >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool subdir/file3 >/dev/null 2>&1 ) &&
- ( yes "d" | git mergetool file11 >/dev/null 2>&1 ) &&
- ( yes "d" | git mergetool file12 >/dev/null 2>&1 ) &&
- ( yes "l" | git mergetool submod >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool file2 "spaced name" ) &&
+ ( yes "" | git mergetool subdir/file3 ) &&
+ ( yes "d" | git mergetool file11 ) &&
+ ( yes "d" | git mergetool file12 ) &&
+ ( yes "l" | git mergetool submod ) &&
+ test "$(cat file1)" = "master updated" &&
+ test "$(cat file2)" = "master new" &&
+ test "$(cat subdir/file3)" = "master new sub" &&
+ test "$(cat submod/bar)" = "branch1 submodule" &&
+ git commit -m "branch1 resolved with mergetool"
+'
+
+test_expect_success 'gui mergetool' '
+ test_config merge.guitool myguitool &&
+ test_config mergetool.myguitool.cmd "(printf \"gui \" && cat \"\$REMOTE\") >\"\$MERGED\"" &&
+ test_config mergetool.myguitool.trustExitCode true &&
+ test_when_finished "git reset --hard" &&
+ git checkout -b test$test_count branch1 &&
+ git submodule update -N &&
+ test_must_fail git merge master &&
+ ( yes "" | git mergetool --gui both ) &&
+ ( yes "" | git mergetool -g file1 file1 ) &&
+ ( yes "" | git mergetool --gui file2 "spaced name" ) &&
+ ( yes "" | git mergetool --gui subdir/file3 ) &&
+ ( yes "d" | git mergetool --gui file11 ) &&
+ ( yes "d" | git mergetool --gui file12 ) &&
+ ( yes "l" | git mergetool --gui submod ) &&
+ test "$(cat file1)" = "gui master updated" &&
+ test "$(cat file2)" = "gui master new" &&
+ test "$(cat subdir/file3)" = "gui master new sub" &&
+ test "$(cat submod/bar)" = "branch1 submodule" &&
+ git commit -m "branch1 resolved with mergetool"
+'
+
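(Sketch of an equivalent manual setup for the custom GUI tool that the test above configures; the tool name "mygui" and its command line are illustrative.)

	git config merge.guitool mygui
	git config mergetool.mygui.cmd 'mygui "$LOCAL" "$BASE" "$REMOTE" -o "$MERGED"'
	git mergetool --gui path/to/conflicted-file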
+test_expect_success 'gui mergetool without merge.guitool set falls back to merge.tool' '
+ test_when_finished "git reset --hard" &&
+ git checkout -b test$test_count branch1 &&
+ git submodule update -N &&
+ test_must_fail git merge master &&
+ ( yes "" | git mergetool --gui both ) &&
+ ( yes "" | git mergetool -g file1 file1 ) &&
+ ( yes "" | git mergetool --gui file2 "spaced name" ) &&
+ ( yes "" | git mergetool --gui subdir/file3 ) &&
+ ( yes "d" | git mergetool --gui file11 ) &&
+ ( yes "d" | git mergetool --gui file12 ) &&
+ ( yes "l" | git mergetool --gui submod ) &&
test "$(cat file1)" = "master updated" &&
test "$(cat file2)" = "master new" &&
test "$(cat subdir/file3)" = "master new sub" &&
# test_when_finished is LIFO.)
test_config core.autocrlf true &&
git checkout -b test$test_count branch1 &&
- test_must_fail git merge master >/dev/null 2>&1 &&
- ( yes "" | git mergetool file1 >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool file2 >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool "spaced name" >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool subdir/file3 >/dev/null 2>&1 ) &&
- ( yes "d" | git mergetool file11 >/dev/null 2>&1 ) &&
- ( yes "d" | git mergetool file12 >/dev/null 2>&1 ) &&
- ( yes "r" | git mergetool submod >/dev/null 2>&1 ) &&
+ test_must_fail git merge master &&
+ ( yes "" | git mergetool file1 ) &&
+ ( yes "" | git mergetool file2 ) &&
+ ( yes "" | git mergetool "spaced name" ) &&
+ ( yes "" | git mergetool both ) &&
+ ( yes "" | git mergetool subdir/file3 ) &&
+ ( yes "d" | git mergetool file11 ) &&
+ ( yes "d" | git mergetool file12 ) &&
+ ( yes "r" | git mergetool submod ) &&
test "$(printf x | cat file1 -)" = "$(printf "master updated\r\nx")" &&
test "$(printf x | cat file2 -)" = "$(printf "master new\r\nx")" &&
test "$(printf x | cat subdir/file3 -)" = "$(printf "master new sub\r\nx")" &&
git submodule update -N &&
(
cd subdir &&
- test_must_fail git merge master >/dev/null 2>&1 &&
- ( yes "" | git mergetool file3 >/dev/null 2>&1 ) &&
+ test_must_fail git merge master &&
+ ( yes "" | git mergetool file3 ) &&
test "$(cat file3)" = "master new sub"
)
'
git submodule update -N &&
(
cd subdir &&
- test_must_fail git merge master >/dev/null 2>&1 &&
- ( yes "" | git mergetool file3 >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool ../file1 >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool ../file2 ../spaced\ name >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool ../both >/dev/null 2>&1 ) &&
- ( yes "d" | git mergetool ../file11 >/dev/null 2>&1 ) &&
- ( yes "d" | git mergetool ../file12 >/dev/null 2>&1 ) &&
- ( yes "l" | git mergetool ../submod >/dev/null 2>&1 ) &&
+ test_must_fail git merge master &&
+ ( yes "" | git mergetool file3 ) &&
+ ( yes "" | git mergetool ../file1 ) &&
+ ( yes "" | git mergetool ../file2 ../spaced\ name ) &&
+ ( yes "" | git mergetool ../both ) &&
+ ( yes "d" | git mergetool ../file11 ) &&
+ ( yes "d" | git mergetool ../file12 ) &&
+ ( yes "l" | git mergetool ../submod ) &&
test "$(cat ../file1)" = "master updated" &&
test "$(cat ../file2)" = "master new" &&
test "$(cat ../submod/bar)" = "branch1 submodule" &&
git submodule update -N &&
test_must_fail git merge master &&
test -n "$(git ls-files -u)" &&
- ( yes "d" | git mergetool file11 >/dev/null 2>&1 ) &&
- ( yes "d" | git mergetool file12 >/dev/null 2>&1 ) &&
- ( yes "l" | git mergetool submod >/dev/null 2>&1 ) &&
+ ( yes "d" | git mergetool file11 ) &&
+ ( yes "d" | git mergetool file12 ) &&
+ ( yes "l" | git mergetool submod ) &&
output="$(git mergetool --no-prompt)" &&
test "$output" = "No files need merging"
'
rm -rf .git/rr-cache &&
git checkout -b test$test_count branch1 &&
git submodule update -N &&
- test_must_fail git merge master >/dev/null 2>&1 &&
- ( yes "l" | git mergetool --no-prompt submod >/dev/null 2>&1 ) &&
- ( yes "d" "d" | git mergetool --no-prompt >/dev/null 2>&1 ) &&
+ test_must_fail git merge master &&
+ ( yes "l" | git mergetool --no-prompt submod ) &&
+ ( yes "d" "d" | git mergetool --no-prompt ) &&
git submodule update -N &&
output="$(yes "n" | git mergetool --no-prompt)" &&
test "$output" = "No files need merging"
git checkout -b test$test_count.a test$test_count &&
test_must_fail git merge master &&
test -n "$(git ls-files -u)" &&
- ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
- ( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 ) &&
+ ( yes "" | git mergetool both ) &&
+ ( yes "d" | git mergetool file11 file12 ) &&
( yes "r" | git mergetool submod ) &&
rmdir submod && mv submod-movedaside submod &&
test "$(cat submod/bar)" = "branch1 submodule" &&
git submodule update -N &&
test_must_fail git merge master &&
test -n "$(git ls-files -u)" &&
- ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
- ( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 ) &&
+ ( yes "" | git mergetool both ) &&
+ ( yes "d" | git mergetool file11 file12 ) &&
( yes "l" | git mergetool submod ) &&
test ! -e submod &&
output="$(git mergetool --no-prompt)" &&
git submodule update -N &&
test_must_fail git merge test$test_count &&
test -n "$(git ls-files -u)" &&
- ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
- ( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 ) &&
+ ( yes "" | git mergetool both ) &&
+ ( yes "d" | git mergetool file11 file12 ) &&
( yes "r" | git mergetool submod ) &&
test ! -e submod &&
test -d submod.orig &&
git submodule update -N &&
test_must_fail git merge test$test_count &&
test -n "$(git ls-files -u)" &&
- ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
- ( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 ) &&
+ ( yes "" | git mergetool both ) &&
+ ( yes "d" | git mergetool file11 file12 ) &&
( yes "l" | git mergetool submod ) &&
test "$(cat submod/bar)" = "master submodule" &&
git submodule update -N &&
git checkout -b test$test_count.a branch1 &&
test_must_fail git merge master &&
test -n "$(git ls-files -u)" &&
- ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
- ( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 ) &&
+ ( yes "" | git mergetool both ) &&
+ ( yes "d" | git mergetool file11 file12 ) &&
( yes "r" | git mergetool submod ) &&
rmdir submod && mv submod-movedaside submod &&
test "$(cat submod/bar)" = "branch1 submodule" &&
git checkout -b test$test_count.b test$test_count &&
test_must_fail git merge master &&
test -n "$(git ls-files -u)" &&
- ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
- ( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 ) &&
+ ( yes "" | git mergetool both ) &&
+ ( yes "d" | git mergetool file11 file12 ) &&
( yes "l" | git mergetool submod ) &&
git submodule update -N &&
test "$(cat submod)" = "not a submodule" &&
git submodule update -N &&
test_must_fail git merge test$test_count &&
test -n "$(git ls-files -u)" &&
- ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
- ( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 ) &&
+ ( yes "" | git mergetool both ) &&
+ ( yes "d" | git mergetool file11 file12 ) &&
( yes "r" | git mergetool submod ) &&
test -d submod.orig &&
git submodule update -N &&
git submodule update -N &&
test_must_fail git merge test$test_count &&
test -n "$(git ls-files -u)" &&
- ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
- ( yes "" | git mergetool both>/dev/null 2>&1 ) &&
- ( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 ) &&
+ ( yes "" | git mergetool both ) &&
+ ( yes "d" | git mergetool file11 file12 ) &&
( yes "l" | git mergetool submod ) &&
test "$(cat submod/bar)" = "master submodule" &&
git submodule update -N &&
git add subdir/subdir_module &&
git commit -m "change submodule in subdirectory on test$test_count.b" &&
- test_must_fail git merge test$test_count.a >/dev/null 2>&1 &&
+ test_must_fail git merge test$test_count.a &&
(
cd subdir &&
( yes "l" | git mergetool subdir_module )
git reset --hard &&
git submodule update -N &&
- test_must_fail git merge test$test_count.a >/dev/null 2>&1 &&
+ test_must_fail git merge test$test_count.a &&
( yes "r" | git mergetool subdir/subdir_module ) &&
test "$(cat subdir/subdir_module/file15)" = "test$test_count.b" &&
git submodule update -N &&
test_config mergetool.myecho.trustExitCode true &&
test_must_fail git merge master &&
git mergetool --no-prompt --tool myecho -- both >actual &&
- grep ^\./both_LOCAL_ actual >/dev/null
+ grep ^\./both_LOCAL_ actual
'
test_lazy_prereq MKTEMP '
test_config mergetool.myecho.trustExitCode true &&
test_must_fail git merge master &&
git mergetool --no-prompt --tool myecho -- both >actual &&
- ! grep ^\./both_LOCAL_ actual >/dev/null &&
- grep /both_LOCAL_ actual >/dev/null
+ ! grep ^\./both_LOCAL_ actual &&
+ grep /both_LOCAL_ actual
'
test_expect_success 'diff.orderFile configuration is honored' '
)
'
-test_done
+test_expect_success 'bitmaps are created by default in bare repos' '
+ git clone --bare .git bare.git &&
+ git -C bare.git repack -ad &&
+ bitmap=$(ls bare.git/objects/pack/*.bitmap) &&
+ test_path_is_file "$bitmap"
+'
+
+test_expect_success 'incremental repack does not complain' '
+ git -C bare.git repack -q 2>repack.err &&
+ test_must_be_empty repack.err
+'
+test_expect_success 'bitmaps can be disabled on bare repos' '
+ git -c repack.writeBitmaps=false -C bare.git repack -ad &&
+ bitmap=$(ls bare.git/objects/pack/*.bitmap 2>/dev/null || :) &&
+ test -z "$bitmap"
+'
+
+test_done
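(Sketch of opting out of the new bare-repository default, as the last test above does with a one-shot -c override, here as persistent configuration.)

	git config repack.writeBitmaps false
	git repack -ad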
echo branch >expect &&
git difftool --no-prompt branch >actual &&
test_cmp expect actual &&
+ git difftool --gui --no-prompt branch >actual &&
+ test_cmp expect actual &&
# set merge.tool to something bogus, diff.tool to test-tool
test_config merge.tool bogus-tool &&
test_config diff.tool test-tool &&
git difftool --no-prompt branch >actual &&
+ test_cmp expect actual &&
+ git difftool --gui --no-prompt branch >actual &&
+ test_cmp expect actual &&
+
+ # set merge.tool, diff.tool to something bogus, merge.guitool to test-tool
+ test_config diff.tool bogus-tool &&
+ test_config merge.guitool test-tool &&
+ git difftool --gui --no-prompt branch >actual &&
+ test_cmp expect actual &&
+
+ # set merge.tool, diff.tool, merge.guitool to something bogus, diff.guitool to test-tool
+ test_config merge.guitool bogus-tool &&
+ test_config diff.guitool test-tool &&
+ git difftool --gui --no-prompt branch >actual &&
test_cmp expect actual
'
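(A sketch of the fallback order the test above exercises for "git difftool --gui"; tool names are illustrative.)

	git config diff.guitool mygui      # consulted first by --gui
	git config merge.guitool mygui     # used when diff.guitool is unset
	git config diff.tool mytool        # non-GUI fallback
	git difftool --gui --no-prompt branch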
done >actual
EOF
-test_expect_success SYMLINKS 'difftool --dir-diff --symlink without unstaged changes' '
+test_expect_success SYMLINKS 'difftool --dir-diff --symlinks without unstaged changes' '
cat >expect <<-EOF &&
file
$PWD/file
sub/sub
$PWD/sub/sub
EOF
- git difftool --dir-diff --symlink \
+ git difftool --dir-diff --symlinks \
--extcmd "./.git/CHECK_SYMLINKS" branch HEAD &&
test_cmp expect actual
'
test_cmp expect actual
'
+test_expect_success 'outside worktree' '
+ echo 1 >1 &&
+ echo 2 >2 &&
+ test_expect_code 1 nongit git \
+ -c diff.tool=echo -c difftool.echo.cmd="echo \$LOCAL \$REMOTE" \
+ difftool --no-prompt --no-index ../1 ../2 >actual &&
+ echo "../1 ../2" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'difftool --gui, --tool and --extcmd are mutually exclusive' '
+ difftool_test_setup &&
+ test_must_fail git difftool --gui --tool=test-tool &&
+ test_must_fail git difftool --gui --extcmd=cat &&
+ test_must_fail git difftool --tool=test-tool --extcmd=cat &&
+ test_must_fail git difftool --gui --tool=test-tool --extcmd=cat
+'
+
test_done
test_cmp expected actual
'
- test_expect_success "grep -w $L (with --column, --invert)" '
+ test_expect_success "grep -w $L (with --column, --invert-match)" '
{
echo ${HC}file:1:foo mmap bar
echo ${HC}file:1:foo_mmap bar
echo ${HC}file:1:foo_mmap bar mmap
echo ${HC}file:1:foo mmap bar_mmap
} >expected &&
- git grep --column --invert -w -e baz $H -- file >actual &&
+ git grep --column --invert-match -w -e baz $H -- file >actual &&
test_cmp expected actual
'
- test_expect_success "grep $L (with --column, --invert, extended OR)" '
+ test_expect_success "grep $L (with --column, --invert-match, extended OR)" '
{
echo ${HC}hello_world:6:HeLLo_world
} >expected &&
- git grep --column --invert -e ll --or --not -e _ $H -- hello_world \
+ git grep --column --invert-match -e ll --or --not -e _ $H -- hello_world \
>actual &&
test_cmp expected actual
'
- test_expect_success "grep $L (with --column, --invert, extended AND)" '
+ test_expect_success "grep $L (with --column, --invert-match, extended AND)" '
{
echo ${HC}hello_world:3:Hello world
echo ${HC}hello_world:3:Hello_world
echo ${HC}hello_world:6:HeLLo_world
} >expected &&
- git grep --column --invert --not -e _ --and --not -e ll $H -- hello_world \
+ git grep --column --invert-match --not -e _ --and --not -e ll $H -- hello_world \
>actual &&
test_cmp expected actual
'
echo ".gitignore:.*o*" &&
cat ../expect.full
} >../expect.with.ignored &&
- git grep --no-index --no-exclude o >../actual.full &&
+ git grep --no-index --no-exclude-standard o >../actual.full &&
test_cmp ../expect.with.ignored ../actual.full
)
'
echo ".gitignore:.*o*" &&
cat ../expect.full
} >../expect.with.ignored &&
- git -c grep.fallbackToNoIndex grep --no-exclude o >../actual.full &&
+ git -c grep.fallbackToNoIndex grep --no-exclude-standard o >../actual.full &&
test_cmp ../expect.with.ignored ../actual.full
)
'
fi
'
-# Recursing down into nested submodules which do not have .gitmodules in their
-# working tree does not work yet. This is because config_from_gitmodules()
-# uses get_oid() and the latter is still not able to get objects from an
-# arbitrary repository (the nested submodule, in this case).
-test_expect_failure 'grep --recurse-submodules with submodules without .gitmodules in the working tree' '
+test_expect_success 'grep --recurse-submodules with submodules without .gitmodules in the working tree' '
test_when_finished "git -C submodule checkout .gitmodules" &&
rm submodule/.gitmodules &&
git grep --recurse-submodules -e "(.|.)[\d]" >actual &&
grep "Content-Transfer-Encoding: quoted-printable" msgtxt1
'
+test_expect_success $PREREQ 'carriage returns with auto encoding are quoted-printable' '
+ clean_fake_sendmail &&
+ cp $patches cr.patch &&
+ printf "this is a line\r\n" >>cr.patch &&
+ git send-email \
+ --from="Example <nobody@example.com>" \
+ --to=nobody@example.com \
+ --smtp-server="$(pwd)/fake.sendmail" \
+ --transfer-encoding=auto \
+ --no-validate \
+ cr.patch &&
+ grep "Content-Transfer-Encoding: quoted-printable" msgtxt1
+'
+
for enc in auto quoted-printable base64
do
test_expect_success $PREREQ "--validate passes with encoding $enc" '
git svn dcommit
'
-stop_httpd
-
test_done
)
'
-stop_httpd
-
test_done
)
'
-stop_httpd
-
test_done
( cd g && git rev-parse --symbolic --verify HEAD )
'
-stop_httpd
-
test_done
background_import_still_running
'
+###
+### series W (get-mark and empty orphan commits)
+###
+
+cat >>W-input <<-W_INPUT_END
+ commit refs/heads/W-branch
+ mark :1
+ author Full Name <user@company.tld> 1000000000 +0100
+ committer Full Name <user@company.tld> 1000000000 +0100
+ data 27
+ Intentionally empty commit
+ LFsget-mark :1
+ W_INPUT_END
+
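(Editorial illustration: the "LFs" placeholder in W-input is expanded into zero, one, or two newlines before the stream reaches fast-import, e.g.)

	sed -e s/LFs/L/ W-input | tr L "\n"
	# leaves exactly one LF between the 27-byte data payload
	# ("Intentionally empty commit" plus its newline) and "get-mark :1"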
+test_expect_success !MINGW 'W: get-mark & empty orphan commit with no newlines' '
+ sed -e s/LFs// W-input | tr L "\n" | git fast-import
+'
+
+test_expect_success !MINGW 'W: get-mark & empty orphan commit with one newline' '
+ sed -e s/LFs/L/ W-input | tr L "\n" | git fast-import
+'
+
+test_expect_success !MINGW 'W: get-mark & empty orphan commit with ugly second newline' '
+ # Technically, this should fail as it has too many linefeeds
+ # according to the grammar in fast-import.txt. But, for whatever
+ # reason, it works. Since using the correct number of newlines
+ # does not work with older (pre-2.22) versions of git, allow apps
+ # that used this second-newline workaround to keep working by
+ # checking it with this test...
+ sed -e s/LFs/LL/ W-input | tr L "\n" | git fast-import
+'
+
+test_expect_success !MINGW 'W: get-mark & empty orphan commit with erroneous third newline' '
+ # ...but do NOT allow more empty lines than that (see previous test).
+ sed -e s/LFs/LLL/ W-input | tr L "\n" | test_must_fail git fast-import
+'
+
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
'
test_expect_success 'restart p4d' '
- kill_p4d &&
+ stop_and_cleanup_p4d &&
start_p4d
'
'
test_expect_success 'restart p4d' '
- kill_p4d &&
+ stop_and_cleanup_p4d &&
start_p4d
'
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
test_path_is_file "$git"/cli_file2.t
'
-
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
git_verify "cdir 1/file11" "cdir 1/file12"
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
(
cd "$cli" &&
p4 sync ... &&
- !(p4 labels | grep GIT_TAG_ON_A_BRANCH)
+ ! p4 labels | grep GIT_TAG_ON_A_BRANCH
)
'
)
'
-
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
test_line_count \> 10 log
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
test_must_fail git p4 clone //depot/uc/...
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
UTF8_ESCAPED="a-\303\244_o-\303\266_u-\303\274.txt"
ISO8859_ESCAPED="a-\344_o-\366_u-\374.txt"
+ISO8859="$(printf "$ISO8859_ESCAPED")" &&
+echo content123 >"$ISO8859" &&
+rm "$ISO8859" || {
+ skip_all="fs does not accept ISO-8859-1 filenames"
+ test_done
+}
+
test_expect_success 'start p4d' '
start_p4d
'
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
git p4 clone --dest="$git" //depot
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
test_done
test_path_is_file file_to_shelve
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
test_done
)
'
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
-
test_done
--guess Z
--no-guess Z
--no-... Z
+ --overlay Z
EOF
'
test_completion "git --help core" "core-tutorial "
'
+test_expect_success 'completion.commands removes multiple commands' '
+ test_config completion.commands "-cherry -mergetool" &&
+ git --list-cmds=list-mainporcelain,list-complete,config >out &&
+ ! grep -E "^(cherry|mergetool)$" out
+'
+
test_expect_success 'setup for integration tests' '
echo content >file1 &&
echo more >file2 &&
fi
}
+# Check if the file exists and has a size greater than zero
+test_file_not_empty () {
+ if ! test -s "$1"
+ then
+		echo "'$1' is missing or empty."
+ false
+ fi
+}
+
test_path_is_missing () {
if test -e "$1"
then
} && (exit \"\$eval_ret\"); eval_ret=\$?; $test_cleanup"
}
+# This function can be used to schedule some commands to be run
+# unconditionally at the end of the test script, e.g. to stop a daemon:
+#
+# test_expect_success 'test git daemon' '
+# git daemon &
+# daemon_pid=$! &&
+# test_atexit 'kill $daemon_pid' &&
+# hello world
+# '
+#
+# The commands will be executed before the trash directory is removed,
+# i.e. the atexit commands will still be able to access any pidfiles or
+# socket files.
+#
+# Note that these commands will be run even when a test script run
+# with '--immediate' fails. Be careful with your atexit commands to
+# minimize any changes to the failed state.
+
+test_atexit () {
+	# We cannot detect when we are in a subshell in general, but
+	# detecting it on Bash is better than nothing (the test will
+ # silently pass on other shells).
+ test "${BASH_SUBSHELL-0}" = 0 ||
+ error "bug in test script: test_atexit does nothing in a subshell"
+ test_atexit_cleanup="{ $*
+ } && (exit \"\$eval_ret\"); eval_ret=\$?; $test_atexit_cleanup"
+}
+
# Most tests can use the created repository, but some may need to create more.
# Usage: test_create_repo <directory>
test_create_repo () {
'
}
+# Converts base-16 data into base-8. The output is given as a sequence of
+# escaped octals, suitable for consumption by 'printf'.
+hex2oct () {
+ perl -ne 'printf "\\%03o", hex for /../g'
+}
+
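(A quick illustration of hex2oct; the input bytes are arbitrary: "61" and "62" are the hex codes of "a" and "b".)

	echo 6162 | hex2oct                  # prints \141\142
	printf "$(echo 6162 | hex2oct)"      # prints ab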
# Set the hash algorithm in use to $1. Only useful when testing the testsuite.
test_set_hash () {
test_hash_algo="$1"
. "$GIT_BUILD_DIR"/GIT-BUILD-OPTIONS
export PERL_PATH SHELL_PATH
+# Disallow the use of abbreviated options in the test suite by default
+if test -z "${GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS}"
+then
+ GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=true
+ export GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS
+fi
+
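(Sketch: when abbreviated options are needed while debugging a single test locally, the new default can be overridden from the environment; the test script name is illustrative.)

	GIT_TEST_DISALLOW_ABBREVIATED_OPTIONS=false ./t0001-init.sh -v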
################################################################
# It appears that people try to run tests without building...
"${GIT_TEST_INSTALLED:-$GIT_BUILD_DIR}/git$X" >/dev/null
--stress)
stress=t ;;
--stress=*)
+ echo "error: --stress does not accept an argument: '$opt'" >&2
+ echo "did you mean --stress-jobs=${opt#*=} or --stress-limit=${opt#*=}?" >&2
+ exit 1
+ ;;
+ --stress-jobs=*)
+ stress=t;
stress=${opt#--*=}
case "$stress" in
*[!0-9]*|0*|"")
- echo "error: --stress=<N> requires the number of jobs to run" >&2
+ echo "error: --stress-jobs=<N> requires the number of jobs to run" >&2
exit 1
;;
*) # Good.
esac
;;
--stress-limit=*)
+ stress=t;
stress_limit=${opt#--*=}
case "$stress_limit" in
*[!0-9]*|0*|"")
my @env = keys %ENV;
my $ok = join("|", qw(
TRACE
+ TR2_
DEBUG
TEST
.*_TEST
die () {
code=$?
+ # This is responsible for running the atexit commands even when a
+ # test script run with '--immediate' fails, or when the user hits
+ # ctrl-C, i.e. when 'test_done' is not invoked at all.
+ test_atexit_handler || code=$?
if test -n "$GIT_EXIT_OK"
then
exit $code
GIT_EXIT_OK=
trap 'die' EXIT
-trap 'exit $?' INT TERM HUP
+# Disable '-x' tracing, because with some shells, notably dash, it
+# prevents running the cleanup commands when a test script run with
+# '--verbose-log -x' is interrupted.
+trap '{ code=$?; set +x; } 2>/dev/null; exit $code' INT TERM HUP
# The user-facing functions are loaded from a separate file so that
# test_perf subshells can have them too
junit_have_testcase=t
}
+test_atexit_cleanup=:
+test_atexit_handler () {
+ # In a succeeding test script 'test_atexit_handler' is invoked
+ # twice: first from 'test_done', then from 'die' in the trap on
+ # EXIT.
+	# This condition and the resetting of 'test_atexit_cleanup' below
+	# make sure that the registered cleanup commands are run only once.
+ test : != "$test_atexit_cleanup" || return 0
+
+ setup_malloc_check
+ test_eval_ "$test_atexit_cleanup"
+ test_atexit_cleanup=:
+ teardown_malloc_check
+}
+
test_done () {
GIT_EXIT_OK=t
+ # Run the atexit commands _before_ the trash directory is
+ # removed, so the commands can access pidfiles and socket files.
+ test_atexit_handler
+
if test -n "$write_junit_xml" && test -n "$junit_xml_path"
then
test -n "$junit_have_testcase" || {
fi
fi
-# Provide an implementation of the 'yes' utility
+# Provide an implementation of the 'yes' utility; the upper bound
+# limit is there to help Windows that cannot stop this loop from
+# wasting cycles when the downstream stops reading, so do not be
+# tempted to turn it into an infinite loop. cf. 6129c930 ("test-lib:
+# limit the output of the yes utility", 2016-02-02)
yes () {
if test $# = 0
then
if test -n "$GIT_TEST_GETTEXT_POISON_ORIG"
then
GIT_TEST_GETTEXT_POISON=$GIT_TEST_GETTEXT_POISON_ORIG
+ export GIT_TEST_GETTEXT_POISON
unset GIT_TEST_GETTEXT_POISON_ORIG
fi
char *tag;
timestamp_t date;
};
-extern struct tag *lookup_tag(struct repository *r, const struct object_id *oid);
-extern int parse_tag_buffer(struct repository *r, struct tag *item, const void *data, unsigned long size);
-extern int parse_tag(struct tag *item);
-extern void release_tag_memory(struct tag *t);
-extern struct object *deref_tag(struct repository *r, struct object *, const char *, int);
-extern struct object *deref_tag_noverify(struct object *);
-extern int gpg_verify_tag(const struct object_id *oid,
- const char *name_to_report, unsigned flags);
+struct tag *lookup_tag(struct repository *r, const struct object_id *oid);
+int parse_tag_buffer(struct repository *r, struct tag *item, const void *data, unsigned long size);
+int parse_tag(struct tag *item);
+void release_tag_memory(struct tag *t);
+struct object *deref_tag(struct repository *r, struct object *, const char *, int);
+struct object *deref_tag_noverify(struct object *);
+int gpg_verify_tag(const struct object_id *oid,
+ const char *name_to_report, unsigned flags);
#endif /* TAG_H */
* a tempfile (whose "fd" member can be used for writing to it), or
* NULL on error. It is an error if a file already exists at that path.
*/
-extern struct tempfile *create_tempfile(const char *path);
+struct tempfile *create_tempfile(const char *path);
/*
* Register an existing file as a tempfile, meaning that it will be
* but it can be worked with like any other closed tempfile (for
* example, it can be opened using reopen_tempfile()).
*/
-extern struct tempfile *register_tempfile(const char *path);
+struct tempfile *register_tempfile(const char *path);
/*
*/
/* See "mks_tempfile functions" above. */
-extern struct tempfile *mks_tempfile_sm(const char *filename_template,
- int suffixlen, int mode);
+struct tempfile *mks_tempfile_sm(const char *filename_template,
+ int suffixlen, int mode);
/* See "mks_tempfile functions" above. */
static inline struct tempfile *mks_tempfile_s(const char *filename_template,
}
/* See "mks_tempfile functions" above. */
-extern struct tempfile *mks_tempfile_tsm(const char *filename_template,
- int suffixlen, int mode);
+struct tempfile *mks_tempfile_tsm(const char *filename_template,
+ int suffixlen, int mode);
/* See "mks_tempfile functions" above. */
static inline struct tempfile *mks_tempfile_ts(const char *filename_template,
}
/* See "mks_tempfile functions" above. */
-extern struct tempfile *xmks_tempfile_m(const char *filename_template, int mode);
+struct tempfile *xmks_tempfile_m(const char *filename_template, int mode);
/* See "mks_tempfile functions" above. */
static inline struct tempfile *xmks_tempfile(const char *filename_template)
* stream is closed automatically when `close_tempfile_gently()` is called or
* when the file is deleted or renamed.
*/
-extern FILE *fdopen_tempfile(struct tempfile *tempfile, const char *mode);
+FILE *fdopen_tempfile(struct tempfile *tempfile, const char *mode);
static inline int is_tempfile_active(struct tempfile *tempfile)
{
* Return the path of the lockfile. The return value is a pointer to a
* field within the lock_file object and should not be freed.
*/
-extern const char *get_tempfile_path(struct tempfile *tempfile);
+const char *get_tempfile_path(struct tempfile *tempfile);
-extern int get_tempfile_fd(struct tempfile *tempfile);
-extern FILE *get_tempfile_fp(struct tempfile *tempfile);
+int get_tempfile_fd(struct tempfile *tempfile);
+FILE *get_tempfile_fp(struct tempfile *tempfile);
/*
* If the temporary file is still open, close it (and the file pointer
* should eventually be called regardless of whether `close_tempfile_gently()`
* succeeds.
*/
-extern int close_tempfile_gently(struct tempfile *tempfile);
+int close_tempfile_gently(struct tempfile *tempfile);
/*
* Re-open a temporary file that has been closed using
*
* * `rename_tempfile()` to move the file to its permanent location.
*/
-extern int reopen_tempfile(struct tempfile *tempfile);
+int reopen_tempfile(struct tempfile *tempfile);
/*
* Close the file descriptor and/or file pointer and remove the
* `delete_tempfile()` for a `tempfile` object that has already been
* deleted or renamed.
*/
-extern void delete_tempfile(struct tempfile **tempfile_p);
+void delete_tempfile(struct tempfile **tempfile_p);
/*
* Close the file descriptor and/or file pointer if they are still
* `rename(2)`. It is a bug to call `rename_tempfile()` for a
* `tempfile` object that is not currently active.
*/
-extern int rename_tempfile(struct tempfile **tempfile_p, const char *path);
+int rename_tempfile(struct tempfile **tempfile_p, const char *path);
#endif /* TEMPFILE_H */
extern struct trace_key trace_perf_key;
extern struct trace_key trace_setup_key;
-extern void trace_repo_setup(const char *prefix);
-extern int trace_want(struct trace_key *key);
-extern void trace_disable(struct trace_key *key);
-extern uint64_t getnanotime(void);
-extern void trace_command_performance(const char **argv);
-extern void trace_verbatim(struct trace_key *key, const void *buf, unsigned len);
+void trace_repo_setup(const char *prefix);
+int trace_want(struct trace_key *key);
+void trace_disable(struct trace_key *key);
+uint64_t getnanotime(void);
+void trace_command_performance(const char **argv);
+void trace_verbatim(struct trace_key *key, const void *buf, unsigned len);
uint64_t trace_performance_enter(void);
#ifndef HAVE_VARIADIC_MACROS
__attribute__((format (printf, 1, 2)))
-extern void trace_printf(const char *format, ...);
+void trace_printf(const char *format, ...);
__attribute__((format (printf, 2, 3)))
-extern void trace_printf_key(struct trace_key *key, const char *format, ...);
+void trace_printf_key(struct trace_key *key, const char *format, ...);
__attribute__((format (printf, 2, 3)))
-extern void trace_argv_printf(const char **argv, const char *format, ...);
+void trace_argv_printf(const char **argv, const char *format, ...);
-extern void trace_strbuf(struct trace_key *key, const struct strbuf *data);
+void trace_strbuf(struct trace_key *key, const struct strbuf *data);
/* Prints elapsed time (in nanoseconds) if GIT_TRACE_PERFORMANCE is enabled. */
__attribute__((format (printf, 2, 3)))
-extern void trace_performance(uint64_t nanos, const char *format, ...);
+void trace_performance(uint64_t nanos, const char *format, ...);
/* Prints elapsed time since 'start' if GIT_TRACE_PERFORMANCE is enabled. */
__attribute__((format (printf, 2, 3)))
-extern void trace_performance_since(uint64_t start, const char *format, ...);
+void trace_performance_since(uint64_t start, const char *format, ...);
__attribute__((format (printf, 1, 2)))
void trace_performance_leave(const char *format, ...);
/* backend functions, use non-*fl macros instead */
__attribute__((format (printf, 4, 5)))
-extern void trace_printf_key_fl(const char *file, int line, struct trace_key *key,
- const char *format, ...);
+void trace_printf_key_fl(const char *file, int line, struct trace_key *key,
+ const char *format, ...);
__attribute__((format (printf, 4, 5)))
-extern void trace_argv_printf_fl(const char *file, int line, const char **argv,
- const char *format, ...);
-extern void trace_strbuf_fl(const char *file, int line, struct trace_key *key,
- const struct strbuf *data);
+void trace_argv_printf_fl(const char *file, int line, const char **argv,
+ const char *format, ...);
+void trace_strbuf_fl(const char *file, int line, struct trace_key *key,
+ const struct strbuf *data);
__attribute__((format (printf, 4, 5)))
-extern void trace_performance_fl(const char *file, int line,
- uint64_t nanos, const char *fmt, ...);
+void trace_performance_fl(const char *file, int line,
+ uint64_t nanos, const char *fmt, ...);
__attribute__((format (printf, 4, 5)))
-extern void trace_performance_leave_fl(const char *file, int line,
- uint64_t nanos, const char *fmt, ...);
+void trace_performance_leave_fl(const char *file, int line,
+ uint64_t nanos, const char *fmt, ...);
static inline int trace_pass_fl(struct trace_key *key)
{
return key->fd || !key->initialized;
--- /dev/null
+#include "cache.h"
+#include "config.h"
+#include "json-writer.h"
+#include "quote.h"
+#include "run-command.h"
+#include "sigchain.h"
+#include "thread-utils.h"
+#include "version.h"
+#include "trace2/tr2_cfg.h"
+#include "trace2/tr2_cmd_name.h"
+#include "trace2/tr2_dst.h"
+#include "trace2/tr2_sid.h"
+#include "trace2/tr2_sysenv.h"
+#include "trace2/tr2_tgt.h"
+#include "trace2/tr2_tls.h"
+
+static int trace2_enabled;
+
+static int tr2_next_child_id; /* modify under lock */
+static int tr2_next_exec_id; /* modify under lock */
+static int tr2_next_repo_id = 1; /* modify under lock. zero is reserved */
+
+/*
+ * A table of the builtin TRACE2 targets. Each of these may be independently
+ * enabled or disabled. Each TRACE2 API method will try to write an event to
+ * *each* of the enabled targets.
+ */
+/* clang-format off */
+static struct tr2_tgt *tr2_tgt_builtins[] =
+{
+ &tr2_tgt_normal,
+ &tr2_tgt_perf,
+ &tr2_tgt_event,
+ NULL
+};
+/* clang-format on */
+
+/* clang-format off */
+#define for_each_builtin(j, tgt_j) \
+ for (j = 0, tgt_j = tr2_tgt_builtins[j]; \
+ tgt_j; \
+ j++, tgt_j = tr2_tgt_builtins[j])
+/* clang-format on */
+
+/* clang-format off */
+#define for_each_wanted_builtin(j, tgt_j) \
+ for_each_builtin(j, tgt_j) \
+ if (tr2_dst_trace_want(tgt_j->pdst))
+/* clang-format on */
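+
+/*
+ * Illustrative use of the two iteration macros (this mirrors the real
+ * calls further below): visit only the targets whose destination is
+ * enabled and invoke an optional vtable entry on each, e.g.:
+ *
+ *     for_each_wanted_builtin (j, tgt_j)
+ *             if (tgt_j->pfn_version_fl)
+ *                     tgt_j->pfn_version_fl(file, line);
+ */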
+
+/*
+ * Force (rather than lazily) initialize any of the requested
+ * builtin TRACE2 targets at startup (and before we've seen an
+ * actual TRACE2 event call) so we can see if we need to set up
+ * the TR2 and TLS machinery.
+ *
+ * Return the number of builtin targets enabled.
+ */
+static int tr2_tgt_want_builtins(void)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ int sum = 0;
+
+ for_each_builtin (j, tgt_j)
+ if (tgt_j->pfn_init())
+ sum++;
+
+ return sum;
+}
+
+/*
+ * Properly terminate each builtin target. Give each target
+ * a chance to write a summary event and/or flush if necessary
+ * and then close the fd.
+ */
+static void tr2_tgt_disable_builtins(void)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ for_each_builtin (j, tgt_j)
+ tgt_j->pfn_term();
+}
+
+static int tr2main_exit_code;
+
+/*
+ * Our atexit routine should run after everything has finished.
+ *
+ * Note that events generated here might not actually appear if
+ * we are writing to fd 1 or 2 and our atexit routine runs after
+ * the pager's atexit routine (since it closes them to shut down
+ * the pipes).
+ */
+static void tr2main_atexit_handler(void)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ /*
+ * Clear any unbalanced regions so that our atexit message
+ * does not appear nested. This improves the appearance of
+ * the trace output if someone calls die(), for example.
+ */
+ tr2tls_pop_unwind_self();
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_atexit)
+ tgt_j->pfn_atexit(us_elapsed_absolute,
+ tr2main_exit_code);
+
+ tr2_tgt_disable_builtins();
+
+ tr2tls_release();
+ tr2_sid_release();
+ tr2_cmd_name_release();
+ tr2_cfg_free_patterns();
+ tr2_sysenv_release();
+
+ trace2_enabled = 0;
+}
+
+static void tr2main_signal_handler(int signo)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_signal)
+ tgt_j->pfn_signal(us_elapsed_absolute, signo);
+
+ sigchain_pop(signo);
+ raise(signo);
+}
+
+void trace2_initialize_clock(void)
+{
+ tr2tls_start_process_clock();
+}
+
+void trace2_initialize_fl(const char *file, int line)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ if (trace2_enabled)
+ return;
+
+ tr2_sysenv_load();
+
+ if (!tr2_tgt_want_builtins())
+ return;
+ trace2_enabled = 1;
+
+ tr2_sid_get();
+
+ atexit(tr2main_atexit_handler);
+ sigchain_push(SIGPIPE, tr2main_signal_handler);
+ tr2tls_init();
+
+ /*
+ * Emit 'version' message on each active builtin target.
+ */
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_version_fl)
+ tgt_j->pfn_version_fl(file, line);
+}
+
+int trace2_is_enabled(void)
+{
+ return trace2_enabled;
+}
+
+void trace2_cmd_start_fl(const char *file, int line, const char **argv)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_start_fl)
+ tgt_j->pfn_start_fl(file, line, us_elapsed_absolute,
+ argv);
+}
+
+int trace2_cmd_exit_fl(const char *file, int line, int code)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ code &= 0xff;
+
+ if (!trace2_enabled)
+ return code;
+
+ trace2_collect_process_info(TRACE2_PROCESS_INFO_EXIT);
+
+ tr2main_exit_code = code;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_exit_fl)
+ tgt_j->pfn_exit_fl(file, line, us_elapsed_absolute,
+ code);
+
+ return code;
+}
+
+void trace2_cmd_error_va_fl(const char *file, int line, const char *fmt,
+ va_list ap)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ if (!trace2_enabled)
+ return;
+
+ /*
+ * We expect each target function to treat 'ap' as constant
+ * and use va_copy (because an 'ap' can only be walked once).
+ */
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_error_va_fl)
+ tgt_j->pfn_error_va_fl(file, line, fmt, ap);
+}
+
+void trace2_cmd_path_fl(const char *file, int line, const char *pathname)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ if (!trace2_enabled)
+ return;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_command_path_fl)
+ tgt_j->pfn_command_path_fl(file, line, pathname);
+}
+
+void trace2_cmd_name_fl(const char *file, int line, const char *name)
+{
+ struct tr2_tgt *tgt_j;
+ const char *hierarchy;
+ int j;
+
+ if (!trace2_enabled)
+ return;
+
+ tr2_cmd_name_append_hierarchy(name);
+ hierarchy = tr2_cmd_name_get_hierarchy();
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_command_name_fl)
+ tgt_j->pfn_command_name_fl(file, line, name, hierarchy);
+}
+
+void trace2_cmd_mode_fl(const char *file, int line, const char *mode)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ if (!trace2_enabled)
+ return;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_command_mode_fl)
+ tgt_j->pfn_command_mode_fl(file, line, mode);
+}
+
+void trace2_cmd_alias_fl(const char *file, int line, const char *alias,
+ const char **argv)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ if (!trace2_enabled)
+ return;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_alias_fl)
+ tgt_j->pfn_alias_fl(file, line, alias, argv);
+}
+
+void trace2_cmd_list_config_fl(const char *file, int line)
+{
+ if (!trace2_enabled)
+ return;
+
+ tr2_cfg_list_config_fl(file, line);
+}
+
+void trace2_cmd_set_config_fl(const char *file, int line, const char *key,
+ const char *value)
+{
+ if (!trace2_enabled)
+ return;
+
+ tr2_cfg_set_fl(file, line, key, value);
+}
+
+void trace2_child_start_fl(const char *file, int line,
+ struct child_process *cmd)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ cmd->trace2_child_id = tr2tls_locked_increment(&tr2_next_child_id);
+ cmd->trace2_child_us_start = us_now;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_child_start_fl)
+ tgt_j->pfn_child_start_fl(file, line,
+ us_elapsed_absolute, cmd);
+}
+
+void trace2_child_exit_fl(const char *file, int line, struct child_process *cmd,
+ int child_exit_code)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+ uint64_t us_elapsed_child;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ if (cmd->trace2_child_us_start)
+ us_elapsed_child = us_now - cmd->trace2_child_us_start;
+ else
+ us_elapsed_child = 0;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_child_exit_fl)
+ tgt_j->pfn_child_exit_fl(file, line,
+ us_elapsed_absolute,
+ cmd->trace2_child_id, cmd->pid,
+ child_exit_code,
+ us_elapsed_child);
+}
+
+int trace2_exec_fl(const char *file, int line, const char *exe,
+ const char **argv)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ int exec_id;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ if (!trace2_enabled)
+ return -1;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ exec_id = tr2tls_locked_increment(&tr2_next_exec_id);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_exec_fl)
+ tgt_j->pfn_exec_fl(file, line, us_elapsed_absolute,
+ exec_id, exe, argv);
+
+ return exec_id;
+}
+
+void trace2_exec_result_fl(const char *file, int line, int exec_id, int code)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_exec_result_fl)
+ tgt_j->pfn_exec_result_fl(
+ file, line, us_elapsed_absolute, exec_id, code);
+}
+
+void trace2_thread_start_fl(const char *file, int line, const char *thread_name)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ if (!trace2_enabled)
+ return;
+
+ if (tr2tls_is_main_thread()) {
+ /*
+ * We should only be called from the new thread's thread-proc,
+ * so this is technically a bug. But in those cases where the
+ * main thread also runs the thread-proc function (or when we
+ * are built with threading disabled), we need to allow it.
+ *
+ * Convert this call to a region-enter so the nesting looks
+ * correct.
+ */
+ trace2_region_enter_printf_fl(file, line, NULL, NULL, NULL,
+ "thread-proc on main: %s",
+ thread_name);
+ return;
+ }
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ tr2tls_create_self(thread_name, us_now);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_thread_start_fl)
+ tgt_j->pfn_thread_start_fl(file, line,
+ us_elapsed_absolute);
+}
+
+void trace2_thread_exit_fl(const char *file, int line)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+ uint64_t us_elapsed_thread;
+
+ if (!trace2_enabled)
+ return;
+
+ if (tr2tls_is_main_thread()) {
+ /*
+ * We should only be called from the exiting thread's
+ * thread-proc, so this is technically a bug. But in
+ * those cases where the main thread also runs the
+ * thread-proc function (or when we are built with
+ * threading disabled), we need to allow it.
+ *
+ * Convert this call to a region-leave so the nesting
+ * looks correct.
+ */
+ trace2_region_leave_printf_fl(file, line, NULL, NULL, NULL,
+ "thread-proc on main");
+ return;
+ }
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ /*
+ * Clear any unbalanced regions and then get the relative time
+ * for the outer-most region (which we pushed when the thread
+ * started). This gives us the run time of the thread.
+ */
+ tr2tls_pop_unwind_self();
+ us_elapsed_thread = tr2tls_region_elasped_self(us_now);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_thread_exit_fl)
+ tgt_j->pfn_thread_exit_fl(file, line,
+ us_elapsed_absolute,
+ us_elapsed_thread);
+
+ tr2tls_unset_self();
+}
+
+void trace2_def_param_fl(const char *file, int line, const char *param,
+ const char *value)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ if (!trace2_enabled)
+ return;
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_param_fl)
+ tgt_j->pfn_param_fl(file, line, param, value);
+}
+
+void trace2_def_repo_fl(const char *file, int line, struct repository *repo)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+
+ if (!trace2_enabled)
+ return;
+
+ if (repo->trace2_repo_id)
+ return;
+
+ repo->trace2_repo_id = tr2tls_locked_increment(&tr2_next_repo_id);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_repo_fl)
+ tgt_j->pfn_repo_fl(file, line, repo);
+}
+
+void trace2_region_enter_printf_va_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, va_list ap)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ /*
+ * Print the region-enter message at the current nesting
+ * (indentation) level and then push a new level.
+ *
+ * We expect each target function to treat 'ap' as constant
+ * and use va_copy.
+ */
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_region_enter_printf_va_fl)
+ tgt_j->pfn_region_enter_printf_va_fl(
+ file, line, us_elapsed_absolute, category,
+ label, repo, fmt, ap);
+
+ tr2tls_push_self(us_now);
+}
+
+void trace2_region_enter_fl(const char *file, int line, const char *category,
+ const char *label, const struct repository *repo, ...)
+{
+ va_list ap;
+ va_start(ap, repo);
+ trace2_region_enter_printf_va_fl(file, line, category, label, repo,
+ NULL, ap);
+ va_end(ap);
+}
+
+void trace2_region_enter_printf_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ trace2_region_enter_printf_va_fl(file, line, category, label, repo, fmt,
+ ap);
+ va_end(ap);
+}
+
+#ifndef HAVE_VARIADIC_MACROS
+void trace2_region_enter_printf(const char *category, const char *label,
+ const struct repository *repo, const char *fmt,
+ ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ trace2_region_enter_printf_va_fl(NULL, 0, category, label, repo, fmt,
+ ap);
+ va_end(ap);
+}
+#endif
+
+void trace2_region_leave_printf_va_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, va_list ap)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+ uint64_t us_elapsed_region;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ /*
+ * Get the elapsed time in the current region before we
+ * pop it off the stack. Pop the stack. And then print
+ * the perf message at the new (shallower) level so that
+ * it lines up with the corresponding push/enter.
+ */
+ us_elapsed_region = tr2tls_region_elasped_self(us_now);
+
+ tr2tls_pop_self();
+
+ /*
+ * We expect each target function to treat 'ap' as constant
+ * and use va_copy.
+ */
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_region_leave_printf_va_fl)
+ tgt_j->pfn_region_leave_printf_va_fl(
+ file, line, us_elapsed_absolute,
+ us_elapsed_region, category, label, repo, fmt,
+ ap);
+}
+
+void trace2_region_leave_fl(const char *file, int line, const char *category,
+ const char *label, const struct repository *repo, ...)
+{
+ va_list ap;
+ va_start(ap, repo);
+ trace2_region_leave_printf_va_fl(file, line, category, label, repo,
+ NULL, ap);
+ va_end(ap);
+}
+
+void trace2_region_leave_printf_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ trace2_region_leave_printf_va_fl(file, line, category, label, repo, fmt,
+ ap);
+ va_end(ap);
+}
+
+#ifndef HAVE_VARIADIC_MACROS
+void trace2_region_leave_printf(const char *category, const char *label,
+ const struct repository *repo, const char *fmt,
+ ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ trace2_region_leave_printf_va_fl(NULL, 0, category, label, repo, fmt,
+ ap);
+ va_end(ap);
+}
+#endif
+
+void trace2_data_string_fl(const char *file, int line, const char *category,
+ const struct repository *repo, const char *key,
+ const char *value)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+ uint64_t us_elapsed_region;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+ us_elapsed_region = tr2tls_region_elasped_self(us_now);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_data_fl)
+ tgt_j->pfn_data_fl(file, line, us_elapsed_absolute,
+ us_elapsed_region, category, repo,
+ key, value);
+}
+
+void trace2_data_intmax_fl(const char *file, int line, const char *category,
+ const struct repository *repo, const char *key,
+ intmax_t value)
+{
+ struct strbuf buf_string = STRBUF_INIT;
+
+ if (!trace2_enabled)
+ return;
+
+ strbuf_addf(&buf_string, "%" PRIdMAX, value);
+ trace2_data_string_fl(file, line, category, repo, key, buf_string.buf);
+ strbuf_release(&buf_string);
+}
+
+void trace2_data_json_fl(const char *file, int line, const char *category,
+ const struct repository *repo, const char *key,
+ const struct json_writer *value)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+ uint64_t us_elapsed_region;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+ us_elapsed_region = tr2tls_region_elasped_self(us_now);
+
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_data_json_fl)
+ tgt_j->pfn_data_json_fl(file, line, us_elapsed_absolute,
+ us_elapsed_region, category,
+ repo, key, value);
+}
+
+void trace2_printf_va_fl(const char *file, int line, const char *fmt,
+ va_list ap)
+{
+ struct tr2_tgt *tgt_j;
+ int j;
+ uint64_t us_now;
+ uint64_t us_elapsed_absolute;
+
+ if (!trace2_enabled)
+ return;
+
+ us_now = getnanotime() / 1000;
+ us_elapsed_absolute = tr2tls_absolute_elapsed(us_now);
+
+ /*
+ * We expect each target function to treat 'ap' as constant
+ * and use va_copy.
+ */
+ for_each_wanted_builtin (j, tgt_j)
+ if (tgt_j->pfn_printf_va_fl)
+ tgt_j->pfn_printf_va_fl(file, line, us_elapsed_absolute,
+ fmt, ap);
+}
+
+void trace2_printf_fl(const char *file, int line, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ trace2_printf_va_fl(file, line, fmt, ap);
+ va_end(ap);
+}
+
+#ifndef HAVE_VARIADIC_MACROS
+void trace2_printf(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ trace2_printf_va_fl(NULL, 0, fmt, ap);
+ va_end(ap);
+}
+#endif
--- /dev/null
+#ifndef TRACE2_H
+#define TRACE2_H
+
+struct child_process;
+struct repository;
+struct json_writer;
+
+/*
+ * The public TRACE2 routines fall into the following groups:
+ *
+ * [] trace2_initialize -- initialization.
+ * [] trace2_cmd_* -- emit command/control messages.
+ * [] trace2_child* -- emit child start/stop messages.
+ * [] trace2_exec* -- emit exec start/stop messages.
+ * [] trace2_thread* -- emit thread start/stop messages.
+ * [] trace2_def* -- emit definition/parameter messages.
+ * [] trace2_region* -- emit region nesting messages.
+ * [] trace2_data* -- emit region/thread/repo data messages.
+ * [] trace2_printf* -- legacy trace[1] messages.
+ */
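+
+/*
+ * Illustrative lifetime of the API within a single process (the command
+ * name "foo" and the region category/label are hypothetical; every call
+ * shown here is declared later in this header):
+ *
+ *     trace2_initialize_clock();      // as early as possible in main()
+ *     trace2_initialize();
+ *     trace2_cmd_start(argv);
+ *     trace2_cmd_name("foo");
+ *     ...
+ *     trace2_region_enter("category", "label", repo);
+ *     ...
+ *     trace2_region_leave("category", "label", repo);
+ *     ...
+ *     return trace2_cmd_exit(err);
+ */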
+
+/*
+ * Initialize the TRACE2 clock and do nothing else; in particular,
+ * no mallocs, no system inspection, and no environment inspection.
+ *
+ * This should be called at the very top of main() to capture the
+ * process start time. This is intended to reduce chicken-n-egg
+ * bootstrap pressure.
+ *
+ * It is safe to call this more than once. This allows capturing
+ * absolute startup costs on Windows, which uses a little trickery
+ * to do setup work before common-main.c:main() is called.
+ *
+ * The main trace2_initialize_fl() may be called a little later
+ * after more infrastructure is established.
+ */
+void trace2_initialize_clock(void);
+
+/*
+ * Initialize TRACE2 tracing facility if any of the builtin TRACE2
+ * targets are enabled in the system config or the environment.
+ * Emits a 'version' event.
+ *
+ * Cleanup/Termination is handled automatically by a registered
+ * atexit() routine.
+ */
+void trace2_initialize_fl(const char *file, int line);
+
+#define trace2_initialize() trace2_initialize_fl(__FILE__, __LINE__)
+
+/*
+ * Return true if trace2 is enabled.
+ */
+int trace2_is_enabled(void);
+
+/*
+ * Emit a 'start' event with the original (unmodified) argv.
+ */
+void trace2_cmd_start_fl(const char *file, int line, const char **argv);
+
+#define trace2_cmd_start(argv) trace2_cmd_start_fl(__FILE__, __LINE__, (argv))
+
+/*
+ * Emit an 'exit' event.
+ *
+ * Write the exit-code that will be passed to exit() or returned
+ * from main().
+ *
+ * Use this prior to actually calling exit().
+ * See "#define exit()" in git-compat-util.h
+ */
+int trace2_cmd_exit_fl(const char *file, int line, int code);
+
+#define trace2_cmd_exit(code) (trace2_cmd_exit_fl(__FILE__, __LINE__, (code)))
+
+/*
+ * Emit an 'error' event.
+ *
+ * Write an error message to the TRACE2 targets.
+ */
+void trace2_cmd_error_va_fl(const char *file, int line, const char *fmt,
+ va_list ap);
+
+#define trace2_cmd_error_va(fmt, ap) \
+ trace2_cmd_error_va_fl(__FILE__, __LINE__, (fmt), (ap))
+
+/*
+ * Emit a 'pathname' event with the canonical pathname of the current process.
+ * This gives post-processors a simple field to identify the command without
+ * having to parse the argv. For example, to distinguish invocations from
+ * installed versus debug executables.
+ */
+void trace2_cmd_path_fl(const char *file, int line, const char *pathname);
+
+#define trace2_cmd_path(p) trace2_cmd_path_fl(__FILE__, __LINE__, (p))
+
+/*
+ * Emit a 'cmd_name' event with the canonical name of the command.
+ * This gives post-processors a simple field to identify the command
+ * without having to parse the argv.
+ */
+void trace2_cmd_name_fl(const char *file, int line, const char *name);
+
+#define trace2_cmd_name(v) trace2_cmd_name_fl(__FILE__, __LINE__, (v))
+
+/*
+ * Emit a 'cmd_mode' event to further describe the command being run.
+ * For example, "checkout" can check out a single file or can check out a
+ * different branch. This gives post-processors a simple field to compare
+ * equivalent commands without having to parse the argv.
+ */
+void trace2_cmd_mode_fl(const char *file, int line, const char *mode);
+
+#define trace2_cmd_mode(sv) trace2_cmd_mode_fl(__FILE__, __LINE__, (sv))
+
+/*
+ * Emit an 'alias' expansion event.
+ */
+void trace2_cmd_alias_fl(const char *file, int line, const char *alias,
+ const char **argv);
+
+#define trace2_cmd_alias(alias, argv) \
+ trace2_cmd_alias_fl(__FILE__, __LINE__, (alias), (argv))
+
+/*
+ * Emit one or more 'def_param' events for "interesting" configuration
+ * settings.
+ *
+ * Use the TR2_SYSENV_CFG_PARAM setting to register a comma-separated
+ * list of patterns considered important. For example:
+ * git config --system trace2.configParams 'core.*,remote.*.url'
+ * or:
+ *     GIT_TR2_CONFIG_PARAMS="core.*,remote.*.url"
+ *
+ * Note: this routine does a read-only iteration on the config data
+ * (using read_early_config()), so it must not be called until enough
+ * of the process environment has been established. This includes the
+ * location of the git and worktree directories, expansion of any "-c"
+ * and "-C" command line options, and etc.
+ */
+void trace2_cmd_list_config_fl(const char *file, int line);
+
+#define trace2_cmd_list_config() trace2_cmd_list_config_fl(__FILE__, __LINE__)
+
+/*
+ * Emit a "def_param" event for the given config key/value pair IF
+ * we consider the key to be "interesting".
+ *
+ * Use this for new/updated config settings created/updated after
+ * trace2_cmd_list_config() is called.
+ */
+void trace2_cmd_set_config_fl(const char *file, int line, const char *key,
+ const char *value);
+
+#define trace2_cmd_set_config(k, v) \
+ trace2_cmd_set_config_fl(__FILE__, __LINE__, (k), (v))
+
+/*
+ * Emit a 'child_start' event prior to spawning a child process.
+ *
+ * Before calling, optionally set "cmd->trace2_child_class" to a string
+ * describing the type of the child process. For example, "editor" or
+ * "pager".
+ */
+void trace2_child_start_fl(const char *file, int line,
+ struct child_process *cmd);
+
+#define trace2_child_start(cmd) trace2_child_start_fl(__FILE__, __LINE__, (cmd))
+
+/*
+ * Emit a 'child_exit' event after the child process completes.
+ */
+void trace2_child_exit_fl(const char *file, int line, struct child_process *cmd,
+ int child_exit_code);
+
+#define trace2_child_exit(cmd, code) \
+ trace2_child_exit_fl(__FILE__, __LINE__, (cmd), (code))
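+
+/*
+ * Hypothetical sketch of how the start/exit pair brackets a spawned
+ * child (the "editor" class string is only an example of the optional
+ * classification mentioned above; these calls may live inside the
+ * process-spawning layer itself rather than in each caller):
+ *
+ *     cmd->trace2_child_class = "editor";
+ *     trace2_child_start(cmd);
+ *     ... spawn the child and wait for it ...
+ *     trace2_child_exit(cmd, child_exit_code);
+ */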
+
+/*
+ * Emit an 'exec' event prior to calling one of exec(), execv(),
+ * execvp(), and so on. On Unix-derived systems, this will be the
+ * last event emitted for the current process, unless the exec
+ * fails. On Windows, exec() behaves like 'child_start' and a
+ * waitpid(), so additional events may be emitted.
+ *
+ * Returns the "exec_id".
+ */
+int trace2_exec_fl(const char *file, int line, const char *exe,
+ const char **argv);
+
+#define trace2_exec(exe, argv) trace2_exec_fl(__FILE__, __LINE__, (exe), (argv))
+
+/*
+ * Emit an 'exec_result' when possible. On Unix-derived systems,
+ * this should be called after exec() returns (which only happens
+ * when there is an error starting the new process). On Windows,
+ * this should be called after the waitpid().
+ *
+ * The "exec_id" should be the value returned from trace2_exec().
+ */
+void trace2_exec_result_fl(const char *file, int line, int exec_id, int code);
+
+#define trace2_exec_result(id, code) \
+ trace2_exec_result_fl(__FILE__, __LINE__, (id), (code))
+
+/*
+ * Emit a 'thread_start' event. This must be called from inside the
+ * thread-proc to set up the trace2 TLS data for the thread.
+ *
+ * Thread names should be descriptive, like "preload_index".
+ * Thread names will be decorated with an instance number automatically.
+ */
+void trace2_thread_start_fl(const char *file, int line,
+ const char *thread_name);
+
+#define trace2_thread_start(thread_name) \
+ trace2_thread_start_fl(__FILE__, __LINE__, (thread_name))
+
+/*
+ * Emit a 'thread_exit' event. This must be called from inside the
+ * thread-proc to report thread-specific data and cleanup TLS data
+ * for the thread.
+ */
+void trace2_thread_exit_fl(const char *file, int line);
+
+#define trace2_thread_exit() trace2_thread_exit_fl(__FILE__, __LINE__)
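+
+/*
+ * Illustrative thread-proc skeleton using the pair above (the thread
+ * name "preload_index" is just the example mentioned earlier):
+ *
+ *     static void *my_thread_proc(void *data)
+ *     {
+ *             trace2_thread_start("preload_index");
+ *             ... per-thread work, regions, data events ...
+ *             trace2_thread_exit();
+ *             return NULL;
+ *     }
+ */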
+
+/*
+ * Emit a 'param' event.
+ *
+ * Write a "<param> = <value>" pair describing some aspect of the
+ * run such as an important configuration setting or command line
+ * option that significantly changes command behavior.
+ */
+void trace2_def_param_fl(const char *file, int line, const char *param,
+ const char *value);
+
+#define trace2_def_param(param, value) \
+ trace2_def_param_fl(__FILE__, __LINE__, (param), (value))
+
+/*
+ * Tell trace2 about a newly instantiated repo object and assign
+ * a trace2-repo-id to be used in subsequent activity events.
+ *
+ * Emits a 'worktree' event for this repo instance.
+ */
+void trace2_def_repo_fl(const char *file, int line, struct repository *repo);
+
+#define trace2_def_repo(repo) trace2_def_repo_fl(__FILE__, __LINE__, repo)
+
+/*
+ * Emit a 'region_enter' event for <category>.<label> with optional
+ * repo-id and printf message.
+ *
+ * Enter a new nesting level on the current thread and remember the
+ * current time. This controls the indenting of all subsequent events
+ * on this thread.
+ */
+void trace2_region_enter_fl(const char *file, int line, const char *category,
+ const char *label, const struct repository *repo, ...);
+
+#define trace2_region_enter(category, label, repo) \
+ trace2_region_enter_fl(__FILE__, __LINE__, (category), (label), (repo))
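+
+/*
+ * For example (hypothetical category/label), a region bracketing one
+ * unit of work so that nested events are indented and the elapsed time
+ * is reported by the matching leave:
+ *
+ *     trace2_region_enter("index", "do_read_index", repo);
+ *     ... do the work ...
+ *     trace2_region_leave("index", "do_read_index", repo);
+ */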
+
+void trace2_region_enter_printf_va_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, va_list ap);
+
+#define trace2_region_enter_printf_va(category, label, repo, fmt, ap) \
+ trace2_region_enter_printf_va_fl(__FILE__, __LINE__, (category), \
+ (label), (repo), (fmt), (ap))
+
+void trace2_region_enter_printf_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, ...);
+
+#ifdef HAVE_VARIADIC_MACROS
+#define trace2_region_enter_printf(category, label, repo, ...) \
+ trace2_region_enter_printf_fl(__FILE__, __LINE__, (category), (label), \
+ (repo), __VA_ARGS__)
+#else
+/* clang-format off */
+__attribute__((format (printf, 4, 5)))
+void trace2_region_enter_printf(const char *category, const char *label,
+ const struct repository *repo, const char *fmt,
+ ...);
+/* clang-format on */
+#endif
+
+/*
+ * Emit a 'region_leave' event for <category>.<label> with optional
+ * repo-id and printf message.
+ *
+ * Leave current nesting level and report the elapsed time spent
+ * in this nesting level.
+ */
+void trace2_region_leave_fl(const char *file, int line, const char *category,
+ const char *label, const struct repository *repo, ...);
+
+#define trace2_region_leave(category, label, repo) \
+ trace2_region_leave_fl(__FILE__, __LINE__, (category), (label), (repo))
+
+void trace2_region_leave_printf_va_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, va_list ap);
+
+#define trace2_region_leave_printf_va(category, label, repo, fmt, ap) \
+ trace2_region_leave_printf_va_fl(__FILE__, __LINE__, (category), \
+ (label), (repo), (fmt), (ap))
+
+void trace2_region_leave_printf_fl(const char *file, int line,
+ const char *category, const char *label,
+ const struct repository *repo,
+ const char *fmt, ...);
+
+#ifdef HAVE_VARIADIC_MACROS
+#define trace2_region_leave_printf(category, label, repo, ...) \
+ trace2_region_leave_printf_fl(__FILE__, __LINE__, (category), (label), \
+ (repo), __VA_ARGS__)
+#else
+/* clang-format off */
+__attribute__((format (printf, 4, 5)))
+void trace2_region_leave_printf(const char *category, const char *label,
+ const struct repository *repo, const char *fmt,
+ ...);
+/* clang-format on */
+#endif
+
+/*
+ * Emit a key-value pair 'data' event of the form <category>.<key> = <value>.
+ * This event implicitly contains information about thread, nesting region,
+ * and optional repo-id.
+ *
+ * On event-based TRACE2 targets, this generates a 'data' event suitable
+ * for post-processing. On printf-based TRACE2 targets, this is converted
+ * into a fixed-format printf message.
+ */
+void trace2_data_string_fl(const char *file, int line, const char *category,
+ const struct repository *repo, const char *key,
+ const char *value);
+
+#define trace2_data_string(category, repo, key, value) \
+ trace2_data_string_fl(__FILE__, __LINE__, (category), (repo), (key), \
+ (value))
+
+void trace2_data_intmax_fl(const char *file, int line, const char *category,
+ const struct repository *repo, const char *key,
+ intmax_t value);
+
+#define trace2_data_intmax(category, repo, key, value) \
+ trace2_data_intmax_fl(__FILE__, __LINE__, (category), (repo), (key), \
+ (value))
+
+void trace2_data_json_fl(const char *file, int line, const char *category,
+ const struct repository *repo, const char *key,
+ const struct json_writer *jw);
+
+#define trace2_data_json(category, repo, key, value) \
+ trace2_data_json_fl(__FILE__, __LINE__, (category), (repo), (key), \
+ (value))
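+
+/*
+ * Hypothetical examples of data events (the category and key names are
+ * made up purely for illustration):
+ *
+ *     trace2_data_intmax("index", repo, "cache_nr", nr);
+ *     trace2_data_string("remote", repo, "url", url);
+ */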
+
+/*
+ * Emit a 'printf' event.
+ *
+ * Write an arbitrary formatted message to the TRACE2 targets. These
+ * text messages should be considered as human-readable strings without
+ * any formatting guidelines. Post-processors may choose to ignore
+ * them.
+ */
+void trace2_printf_va_fl(const char *file, int line, const char *fmt,
+ va_list ap);
+
+#define trace2_printf_va(fmt, ap) \
+ trace2_printf_va_fl(__FILE__, __LINE__, (fmt), (ap))
+
+void trace2_printf_fl(const char *file, int line, const char *fmt, ...);
+
+#ifdef HAVE_VARIADIC_MACROS
+#define trace2_printf(...) trace2_printf_fl(__FILE__, __LINE__, __VA_ARGS__)
+#else
+/* clang-format off */
+__attribute__((format (printf, 1, 2)))
+void trace2_printf(const char *fmt, ...);
+/* clang-format on */
+#endif
+
+/*
+ * Optional platform-specific code to dump information about the
+ * current and any parent process(es). This is intended to allow
+ * post-processors to know who spawned this git instance and anything
+ * else that the platform may be able to tell us about the current process.
+ */
+
+enum trace2_process_info_reason {
+ TRACE2_PROCESS_INFO_STARTUP,
+ TRACE2_PROCESS_INFO_EXIT,
+};
+
+#if defined(GIT_WINDOWS_NATIVE)
+void trace2_collect_process_info(enum trace2_process_info_reason reason);
+#else
+#define trace2_collect_process_info(reason) \
+ do { \
+ } while (0)
+#endif
+
+#endif /* TRACE2_H */
--- /dev/null
+#include "cache.h"
+#include "config.h"
+#include "trace2/tr2_cfg.h"
+#include "trace2/tr2_sysenv.h"
+
+static struct strbuf **tr2_cfg_patterns;
+static int tr2_cfg_count_patterns;
+static int tr2_cfg_loaded;
+
+/*
+ * Parse a string containing a comma-delimited list of config keys
+ * or wildcard patterns into a list of strbufs.
+ */
+static int tr2_cfg_load_patterns(void)
+{
+ struct strbuf **s;
+ const char *envvar;
+
+ if (tr2_cfg_loaded)
+ return tr2_cfg_count_patterns;
+ tr2_cfg_loaded = 1;
+
+ envvar = tr2_sysenv_get(TR2_SYSENV_CFG_PARAM);
+ if (!envvar || !*envvar)
+ return tr2_cfg_count_patterns;
+
+ tr2_cfg_patterns = strbuf_split_buf(envvar, strlen(envvar), ',', -1);
+ for (s = tr2_cfg_patterns; *s; s++) {
+ struct strbuf *buf = *s;
+
+ if (buf->len && buf->buf[buf->len - 1] == ',')
+ strbuf_setlen(buf, buf->len - 1);
+ strbuf_trim_trailing_newline(*s);
+ strbuf_trim(*s);
+ }
+
+ tr2_cfg_count_patterns = s - tr2_cfg_patterns;
+ return tr2_cfg_count_patterns;
+}
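+
+/*
+ * For example (using the value suggested in trace2.h), the setting
+ * "core.*,remote.*.url" is split into the two patterns "core.*" and
+ * "remote.*.url".
+ */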
+
+void tr2_cfg_free_patterns(void)
+{
+ if (tr2_cfg_patterns)
+ strbuf_list_free(tr2_cfg_patterns);
+ tr2_cfg_count_patterns = 0;
+ tr2_cfg_loaded = 0;
+}
+
+struct tr2_cfg_data {
+ const char *file;
+ int line;
+};
+
+/*
+ * See if the given config key matches any of our patterns of interest.
+ */
+static int tr2_cfg_cb(const char *key, const char *value, void *d)
+{
+ struct strbuf **s;
+ struct tr2_cfg_data *data = (struct tr2_cfg_data *)d;
+
+ for (s = tr2_cfg_patterns; *s; s++) {
+ struct strbuf *buf = *s;
+ int wm = wildmatch(buf->buf, key, WM_CASEFOLD);
+ if (wm == WM_MATCH) {
+ trace2_def_param_fl(data->file, data->line, key, value);
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+void tr2_cfg_list_config_fl(const char *file, int line)
+{
+ struct tr2_cfg_data data = { file, line };
+
+ if (tr2_cfg_load_patterns() > 0)
+ read_early_config(tr2_cfg_cb, &data);
+}
+
+void tr2_cfg_set_fl(const char *file, int line, const char *key,
+ const char *value)
+{
+ struct tr2_cfg_data data = { file, line };
+
+ if (tr2_cfg_load_patterns() > 0)
+ tr2_cfg_cb(key, value, &data);
+}
--- /dev/null
+#ifndef TR2_CFG_H
+#define TR2_CFG_H
+
+/*
+ * Iterate over all config settings and emit 'def_param' events for the
+ * "interesting" ones to TRACE2.
+ */
+void tr2_cfg_list_config_fl(const char *file, int line);
+
+/*
+ * Emit a "def_param" event for the given key/value pair IF we consider
+ * the key to be "interesting".
+ */
+void tr2_cfg_set_fl(const char *file, int line, const char *key,
+ const char *value);
+
+void tr2_cfg_free_patterns(void);
+
+#endif /* TR2_CFG_H */
--- /dev/null
+#include "cache.h"
+#include "trace2/tr2_cmd_name.h"
+
+#define TR2_ENVVAR_PARENT_NAME "GIT_TR2_PARENT_NAME"
+
+static struct strbuf tr2cmdname_hierarchy = STRBUF_INIT;
+
+void tr2_cmd_name_append_hierarchy(const char *name)
+{
+ const char *parent_name = getenv(TR2_ENVVAR_PARENT_NAME);
+
+ strbuf_reset(&tr2cmdname_hierarchy);
+ if (parent_name && *parent_name) {
+ strbuf_addstr(&tr2cmdname_hierarchy, parent_name);
+ strbuf_addch(&tr2cmdname_hierarchy, '/');
+ }
+ strbuf_addstr(&tr2cmdname_hierarchy, name);
+
+ setenv(TR2_ENVVAR_PARENT_NAME, tr2cmdname_hierarchy.buf, 1);
+}
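+
+/*
+ * For example (hypothetical command names): a top-level "fetch" process
+ * exports GIT_TR2_PARENT_NAME=fetch, so a child that later reports the
+ * name "rev-list" computes and exports the hierarchy "fetch/rev-list".
+ */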
+
+const char *tr2_cmd_name_get_hierarchy(void)
+{
+ return tr2cmdname_hierarchy.buf;
+}
+
+void tr2_cmd_name_release(void)
+{
+ strbuf_release(&tr2cmdname_hierarchy);
+}
--- /dev/null
+#ifndef TR2_CMD_NAME_H
+#define TR2_CMD_NAME_H
+
+/*
+ * Append the current command name to the list being maintained
+ * in the environment.
+ *
+ * The hierarchy for a top-level git command is just the current
+ * command name. For a child git process, the hierarchy includes the
+ * names of the parent processes.
+ *
+ * The hierarchy for the current process will be exported to the
+ * environment and inherited by child processes.
+ */
+void tr2_cmd_name_append_hierarchy(const char *name);
+
+/*
+ * Get the command name hierarchy for the current process.
+ */
+const char *tr2_cmd_name_get_hierarchy(void);
+
+void tr2_cmd_name_release(void);
+
+#endif /* TR2_CMD_NAME_H */
--- /dev/null
+#include "cache.h"
+#include "trace2/tr2_dst.h"
+#include "trace2/tr2_sid.h"
+#include "trace2/tr2_sysenv.h"
+
+/*
+ * How many attempts we will make at creating an automatically-named trace file.
+ */
+#define MAX_AUTO_ATTEMPTS 10
+
+static int tr2_dst_want_warning(void)
+{
+ static int tr2env_dst_debug = -1;
+
+ if (tr2env_dst_debug == -1) {
+ const char *env_value = tr2_sysenv_get(TR2_SYSENV_DST_DEBUG);
+ if (!env_value || !*env_value)
+ tr2env_dst_debug = 0;
+ else
+ tr2env_dst_debug = atoi(env_value) > 0;
+ }
+
+ return tr2env_dst_debug;
+}
+
+void tr2_dst_trace_disable(struct tr2_dst *dst)
+{
+ if (dst->need_close)
+ close(dst->fd);
+ dst->fd = 0;
+ dst->initialized = 1;
+ dst->need_close = 0;
+}
+
+static int tr2_dst_try_auto_path(struct tr2_dst *dst, const char *tgt_prefix)
+{
+ int fd;
+ const char *last_slash, *sid = tr2_sid_get();
+ struct strbuf path = STRBUF_INIT;
+ size_t base_path_len;
+ unsigned attempt_count;
+
+ last_slash = strrchr(sid, '/');
+ if (last_slash)
+ sid = last_slash + 1;
+
+ strbuf_addstr(&path, tgt_prefix);
+ if (!is_dir_sep(path.buf[path.len - 1]))
+ strbuf_addch(&path, '/');
+ strbuf_addstr(&path, sid);
+ base_path_len = path.len;
+
+ for (attempt_count = 0; attempt_count < MAX_AUTO_ATTEMPTS; attempt_count++) {
+ if (attempt_count > 0) {
+ strbuf_setlen(&path, base_path_len);
+ strbuf_addf(&path, ".%d", attempt_count);
+ }
+
+ fd = open(path.buf, O_WRONLY | O_CREAT | O_EXCL, 0666);
+ if (fd != -1)
+ break;
+ }
+
+ if (fd == -1) {
+ if (tr2_dst_want_warning())
+ warning("trace2: could not open '%.*s' for '%s' tracing: %s",
+ (int) base_path_len, path.buf,
+ tr2_sysenv_display_name(dst->sysenv_var),
+ strerror(errno));
+
+ tr2_dst_trace_disable(dst);
+ strbuf_release(&path);
+ return 0;
+ }
+
+ strbuf_release(&path);
+
+ dst->fd = fd;
+ dst->need_close = 1;
+ dst->initialized = 1;
+
+ return dst->fd;
+}
+
+static int tr2_dst_try_path(struct tr2_dst *dst, const char *tgt_value)
+{
+ int fd = open(tgt_value, O_WRONLY | O_APPEND | O_CREAT, 0666);
+ if (fd == -1) {
+ if (tr2_dst_want_warning())
+ warning("trace2: could not open '%s' for '%s' tracing: %s",
+ tgt_value,
+ tr2_sysenv_display_name(dst->sysenv_var),
+ strerror(errno));
+
+ tr2_dst_trace_disable(dst);
+ return 0;
+ }
+
+ dst->fd = fd;
+ dst->need_close = 1;
+ dst->initialized = 1;
+
+ return dst->fd;
+}
+
+#ifndef NO_UNIX_SOCKETS
+#define PREFIX_AF_UNIX "af_unix:"
+#define PREFIX_AF_UNIX_STREAM "af_unix:stream:"
+#define PREFIX_AF_UNIX_DGRAM "af_unix:dgram:"
+
+static int tr2_dst_try_uds_connect(const char *path, int sock_type, int *out_fd)
+{
+ int fd;
+ struct sockaddr_un sa;
+
+ fd = socket(AF_UNIX, sock_type, 0);
+ if (fd == -1)
+ return errno;
+
+ sa.sun_family = AF_UNIX;
+ strlcpy(sa.sun_path, path, sizeof(sa.sun_path));
+
+ if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) == -1) {
+ int e = errno;
+ close(fd);
+ return e;
+ }
+
+ *out_fd = fd;
+ return 0;
+}
+
+#define TR2_DST_UDS_TRY_STREAM (1 << 0)
+#define TR2_DST_UDS_TRY_DGRAM (1 << 1)
+
+static int tr2_dst_try_unix_domain_socket(struct tr2_dst *dst,
+ const char *tgt_value)
+{
+ unsigned int uds_try = 0;
+ int fd;
+ int e;
+ const char *path = NULL;
+
+ /*
+ * Allow "af_unix:[<type>:]<absolute_path>"
+ *
+ * Trace2 always writes complete individual messages (without
+ * chunking), so we can talk to either DGRAM or STREAM type sockets.
+ *
+ * Allow the user to explicitly request the socket type.
+ *
+ * If they omit the socket type, try one and then the other.
+ */
+
+ if (skip_prefix(tgt_value, PREFIX_AF_UNIX_STREAM, &path))
+ uds_try |= TR2_DST_UDS_TRY_STREAM;
+
+ else if (skip_prefix(tgt_value, PREFIX_AF_UNIX_DGRAM, &path))
+ uds_try |= TR2_DST_UDS_TRY_DGRAM;
+
+ else if (skip_prefix(tgt_value, PREFIX_AF_UNIX, &path))
+ uds_try |= TR2_DST_UDS_TRY_STREAM | TR2_DST_UDS_TRY_DGRAM;
+
+ if (!path || !*path) {
+ if (tr2_dst_want_warning())
+ warning("trace2: invalid AF_UNIX value '%s' for '%s' tracing",
+ tgt_value,
+ tr2_sysenv_display_name(dst->sysenv_var));
+
+ tr2_dst_trace_disable(dst);
+ return 0;
+ }
+
+ if (!is_absolute_path(path) ||
+ strlen(path) >= sizeof(((struct sockaddr_un *)0)->sun_path)) {
+ if (tr2_dst_want_warning())
+ warning("trace2: invalid AF_UNIX path '%s' for '%s' tracing",
+ path, tr2_sysenv_display_name(dst->sysenv_var));
+
+ tr2_dst_trace_disable(dst);
+ return 0;
+ }
+
+ if (uds_try & TR2_DST_UDS_TRY_STREAM) {
+ e = tr2_dst_try_uds_connect(path, SOCK_STREAM, &fd);
+ if (!e)
+ goto connected;
+ if (e != EPROTOTYPE)
+ goto error;
+ }
+ if (uds_try & TR2_DST_UDS_TRY_DGRAM) {
+ e = tr2_dst_try_uds_connect(path, SOCK_DGRAM, &fd);
+ if (!e)
+ goto connected;
+ }
+
+error:
+ if (tr2_dst_want_warning())
+ warning("trace2: could not connect to socket '%s' for '%s' tracing: %s",
+ path, tr2_sysenv_display_name(dst->sysenv_var),
+ strerror(e));
+
+ tr2_dst_trace_disable(dst);
+ return 0;
+
+connected:
+ dst->fd = fd;
+ dst->need_close = 1;
+ dst->initialized = 1;
+
+ return dst->fd;
+}
+#endif
+
+static void tr2_dst_malformed_warning(struct tr2_dst *dst,
+ const char *tgt_value)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ strbuf_addf(&buf, "trace2: unknown value for '%s': '%s'",
+ tr2_sysenv_display_name(dst->sysenv_var), tgt_value);
+ warning("%s", buf.buf);
+
+ strbuf_release(&buf);
+}
+
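+/*
+ * Examples of accepted values for a target variable (e.g. GIT_TR2_PERF
+ * or trace2.perftarget), matching the cases handled below:
+ *
+ *     "0", "false"             -> disabled
+ *     "1", "true"              -> write to stderr
+ *     "2" .. "9"               -> write to that (already open) fd
+ *     "/path/to/file"          -> append to that file
+ *     "/path/to/existing/dir"  -> auto-named per-SID file in that directory
+ *     "af_unix:/path/to/sock"  -> Unix domain socket (where supported)
+ */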
+int tr2_dst_get_trace_fd(struct tr2_dst *dst)
+{
+ const char *tgt_value;
+
+ /* don't open twice */
+ if (dst->initialized)
+ return dst->fd;
+
+ dst->initialized = 1;
+
+ tgt_value = tr2_sysenv_get(dst->sysenv_var);
+
+ if (!tgt_value || !strcmp(tgt_value, "") || !strcmp(tgt_value, "0") ||
+ !strcasecmp(tgt_value, "false")) {
+ dst->fd = 0;
+ return dst->fd;
+ }
+
+ if (!strcmp(tgt_value, "1") || !strcasecmp(tgt_value, "true")) {
+ dst->fd = STDERR_FILENO;
+ return dst->fd;
+ }
+
+ if (strlen(tgt_value) == 1 && isdigit(*tgt_value)) {
+ dst->fd = atoi(tgt_value);
+ return dst->fd;
+ }
+
+ if (is_absolute_path(tgt_value)) {
+ if (is_directory(tgt_value))
+ return tr2_dst_try_auto_path(dst, tgt_value);
+ else
+ return tr2_dst_try_path(dst, tgt_value);
+ }
+
+#ifndef NO_UNIX_SOCKETS
+ if (starts_with(tgt_value, PREFIX_AF_UNIX))
+ return tr2_dst_try_unix_domain_socket(dst, tgt_value);
+#endif
+
+ /* Always warn about malformed values. */
+ tr2_dst_malformed_warning(dst, tgt_value);
+ tr2_dst_trace_disable(dst);
+ return 0;
+}
+
+int tr2_dst_trace_want(struct tr2_dst *dst)
+{
+ return !!tr2_dst_get_trace_fd(dst);
+}
+
+void tr2_dst_write_line(struct tr2_dst *dst, struct strbuf *buf_line)
+{
+ int fd = tr2_dst_get_trace_fd(dst);
+
+ strbuf_complete_line(buf_line); /* ensure final NL on buffer */
+
+ /*
+ * We do not use write_in_full() because we do not want
+ * a short-write to try again. We are using O_APPEND mode
+ * files and the kernel handles the atomic seek+write. If
+ * another thread or git process is concurrently writing to
+ * this fd or file, our remainder-write may not be contiguous
+ * with our initial write of this message. And that will
+ * confuse readers. So just don't bother.
+ *
+ * It is assumed that TRACE2 messages are short enough that
+ * the system can write them in 1 attempt and we won't see
+ * a short-write.
+ *
+ * If we get an IO error, just close the trace dst.
+ */
+ if (write(fd, buf_line->buf, buf_line->len) >= 0)
+ return;
+
+ if (tr2_dst_want_warning())
+ warning("unable to write trace to '%s': %s",
+ tr2_sysenv_display_name(dst->sysenv_var),
+ strerror(errno));
+ tr2_dst_trace_disable(dst);
+}
--- /dev/null
+#ifndef TR2_DST_H
+#define TR2_DST_H
+
+struct strbuf;
+#include "trace2/tr2_sysenv.h"
+
+struct tr2_dst {
+ enum tr2_sysenv_variable sysenv_var;
+ int fd;
+ unsigned int initialized : 1;
+ unsigned int need_close : 1;
+};
+
+/*
+ * Disable TRACE2 on the destination. In TRACE2 a destination (DST)
+ * wraps a file descriptor; it is associated with a TARGET which
+ * defines the formatting.
+ */
+void tr2_dst_trace_disable(struct tr2_dst *dst);
+
+/*
+ * Return the file descriptor for the DST.
+ * If 0, the dst is closed or disabled.
+ */
+int tr2_dst_get_trace_fd(struct tr2_dst *dst);
+
+/*
+ * Return true if the DST is opened for writing.
+ */
+int tr2_dst_trace_want(struct tr2_dst *dst);
+
+/*
+ * Write a single line/message to the trace file.
+ */
+void tr2_dst_write_line(struct tr2_dst *dst, struct strbuf *buf_line);
+
+#endif /* TR2_DST_H */
--- /dev/null
+#include "cache.h"
+#include "trace2/tr2_tbuf.h"
+#include "trace2/tr2_sid.h"
+
+#define TR2_ENVVAR_PARENT_SID "GIT_TR2_PARENT_SID"
+
+static struct strbuf tr2sid_buf = STRBUF_INIT;
+static int tr2sid_nr_git_parents;
+
+/*
+ * Compute the final component of the SID representing the current process.
+ * This should uniquely identify the process and be a valid filename (to
+ * allow writing trace2 data to per-process files). It should also be fixed
+ * length for possible use as a database key.
+ *
+ * "<yyyymmdd>T<hhmmss>.<fraction>Z-<host>-<process>"
+ *
+ * where <host> is a 9 character string:
+ * "H<first_8_chars_of_sha1_of_hostname>"
+ * "Localhost" when no hostname.
+ *
+ * where <process> is a 9 character string containing the least significant
+ * 32 bits in the process-id.
+ * "P<pid>"
+ * (This is an arbitrary choice. On most systems pid_t is a 32 bit value,
+ * so the limit doesn't matter. On larger systems, a truncated value is fine
+ * for our purposes here.)
+ */
+static void tr2_sid_append_my_sid_component(void)
+{
+ const struct git_hash_algo *algo = &hash_algos[GIT_HASH_SHA1];
+ struct tr2_tbuf tb_now;
+ git_hash_ctx ctx;
+ pid_t pid = getpid();
+ unsigned char hash[GIT_MAX_RAWSZ + 1];
+ char hex[GIT_MAX_HEXSZ + 1];
+ char hostname[HOST_NAME_MAX + 1];
+
+ tr2_tbuf_utc_datetime(&tb_now);
+ strbuf_addstr(&tr2sid_buf, tb_now.buf);
+
+ strbuf_addch(&tr2sid_buf, '-');
+ if (xgethostname(hostname, sizeof(hostname)))
+ strbuf_add(&tr2sid_buf, "Localhost", 9);
+ else {
+ algo->init_fn(&ctx);
+ algo->update_fn(&ctx, hostname, strlen(hostname));
+ algo->final_fn(hash, &ctx);
+ hash_to_hex_algop_r(hex, hash, algo);
+ strbuf_addch(&tr2sid_buf, 'H');
+ strbuf_add(&tr2sid_buf, hex, 8);
+ }
+
+ strbuf_addf(&tr2sid_buf, "-P%08"PRIx32, (uint32_t)pid);
+}
+
+/*
+ * Compute a "unique" session id (SID) for the current process. This allows
+ * all events from this process to have a single label (much like a PID).
+ *
+ * Export this into our environment so that all child processes inherit it.
+ *
+ * If we were started by another git instance, use our parent's SID as a
+ * prefix. (This lets us track parent/child relationships even if there
+ * is an intermediate shell process.)
+ *
+ * Additionally, count the number of nested git processes.
+ */
+static void tr2_sid_compute(void)
+{
+ const char *parent_sid;
+
+ if (tr2sid_buf.len)
+ return;
+
+ parent_sid = getenv(TR2_ENVVAR_PARENT_SID);
+ if (parent_sid && *parent_sid) {
+ const char *p;
+ for (p = parent_sid; *p; p++)
+ if (*p == '/')
+ tr2sid_nr_git_parents++;
+
+ strbuf_addstr(&tr2sid_buf, parent_sid);
+ strbuf_addch(&tr2sid_buf, '/');
+ tr2sid_nr_git_parents++;
+ }
+
+ tr2_sid_append_my_sid_component();
+
+ setenv(TR2_ENVVAR_PARENT_SID, tr2sid_buf.buf, 1);
+}
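+
+/*
+ * Illustrative (made-up) SID values: a top-level process might get
+ *
+ *     20190401T123456.123456Z-Hdeadbeef-P00001234
+ *
+ * and a git child it spawns would prefix its own component with that,
+ * e.g.:
+ *
+ *     20190401T123456.123456Z-Hdeadbeef-P00001234/20190401T123457.000001Z-Hdeadbeef-P00004321
+ */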
+
+const char *tr2_sid_get(void)
+{
+ if (!tr2sid_buf.len)
+ tr2_sid_compute();
+
+ return tr2sid_buf.buf;
+}
+
+int tr2_sid_depth(void)
+{
+ if (!tr2sid_buf.len)
+ tr2_sid_compute();
+
+ return tr2sid_nr_git_parents;
+}
+
+void tr2_sid_release(void)
+{
+ strbuf_release(&tr2sid_buf);
+}
--- /dev/null
+#ifndef TR2_SID_H
+#define TR2_SID_H
+
+/*
+ * Get our session id. Compute if necessary.
+ */
+const char *tr2_sid_get(void);
+
+/*
+ * Get our process depth. A top-level git process invoked from the
+ * command line will have depth=0. A child git process will have
+ * depth=1 and so on.
+ */
+int tr2_sid_depth(void);
+
+void tr2_sid_release(void);
+
+#endif /* TR2_SID_H */
--- /dev/null
+#include "cache.h"
+#include "config.h"
+#include "dir.h"
+#include "tr2_sysenv.h"
+
+/*
+ * Each entry represents a trace2 setting.
+ * See Documentation/technical/api-trace2.txt
+ */
+struct tr2_sysenv_entry {
+ const char *env_var_name;
+ const char *git_config_name;
+
+ char *value;
+ unsigned int getenv_called : 1;
+};
+
+/*
+ * This table must match "enum tr2_sysenv_variable" in tr2_sysenv.h.
+ *
+ * The strings in this table are constant and must match the published
+ * config and environment variable names as described in the documentation.
+ *
+ * We do not define entries for the GIT_TR2_PARENT_* environment
+ * variables because they are transient and used to pass information
+ * from parent to child git processes, rather than settings.
+ */
+/* clang-format off */
+static struct tr2_sysenv_entry tr2_sysenv_settings[] = {
+ [TR2_SYSENV_CFG_PARAM] = { "GIT_TR2_CONFIG_PARAMS",
+ "trace2.configparams" },
+
+ [TR2_SYSENV_DST_DEBUG] = { "GIT_TR2_DST_DEBUG",
+ "trace2.destinationdebug" },
+
+ [TR2_SYSENV_NORMAL] = { "GIT_TR2",
+ "trace2.normaltarget" },
+ [TR2_SYSENV_NORMAL_BRIEF] = { "GIT_TR2_BRIEF",
+ "trace2.normalbrief" },
+
+ [TR2_SYSENV_EVENT] = { "GIT_TR2_EVENT",
+ "trace2.eventtarget" },
+ [TR2_SYSENV_EVENT_BRIEF] = { "GIT_TR2_EVENT_BRIEF",
+ "trace2.eventbrief" },
+ [TR2_SYSENV_EVENT_NESTING] = { "GIT_TR2_EVENT_NESTING",
+ "trace2.eventnesting" },
+
+ [TR2_SYSENV_PERF] = { "GIT_TR2_PERF",
+ "trace2.perftarget" },
+ [TR2_SYSENV_PERF_BRIEF] = { "GIT_TR2_PERF_BRIEF",
+ "trace2.perfbrief" },
+};
+/* clang-format on */
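+
+/*
+ * Example of the precedence implied by this table and tr2_sysenv_get()
+ * below: if "trace2.perftarget" is set in the system config but the
+ * user also exports GIT_TR2_PERF in the environment, the environment
+ * value wins.
+ */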
+
+static int tr2_sysenv_cb(const char *key, const char *value, void *d)
+{
+ int k;
+
+ if (!starts_with(key, "trace2."))
+ return 0;
+
+ for (k = 0; k < ARRAY_SIZE(tr2_sysenv_settings); k++) {
+ if (!strcmp(key, tr2_sysenv_settings[k].git_config_name)) {
+ free(tr2_sysenv_settings[k].value);
+ tr2_sysenv_settings[k].value = xstrdup(value);
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Load Trace2 settings from the system config (usually "/etc/gitconfig"
+ * unless we were built with a runtime-prefix). These are intended to
+ * define the default values for Trace2 as requested by the administrator.
+ *
+ * Then override with the Trace2 settings from the global config.
+ */
+void tr2_sysenv_load(void)
+{
+ if (ARRAY_SIZE(tr2_sysenv_settings) != TR2_SYSENV_MUST_BE_LAST)
+ BUG("tr2_sysenv_settings size is wrong");
+
+ read_very_early_config(tr2_sysenv_cb, NULL);
+}
+
+/*
+ * Return the value for the requested Trace2 setting from these sources:
+ * the system config, the global config, and the environment.
+ */
+const char *tr2_sysenv_get(enum tr2_sysenv_variable var)
+{
+ if (var >= TR2_SYSENV_MUST_BE_LAST)
+ BUG("tr2_sysenv_get invalid var '%d'", var);
+
+ if (!tr2_sysenv_settings[var].getenv_called) {
+ const char *v = getenv(tr2_sysenv_settings[var].env_var_name);
+ if (v && *v) {
+ free(tr2_sysenv_settings[var].value);
+ tr2_sysenv_settings[var].value = xstrdup(v);
+ }
+ tr2_sysenv_settings[var].getenv_called = 1;
+ }
+
+ return tr2_sysenv_settings[var].value;
+}
+
+/*
+ * Return a friendly name for this setting that is suitable for printing
+ * in error messages.
+ */
+const char *tr2_sysenv_display_name(enum tr2_sysenv_variable var)
+{
+ if (var >= TR2_SYSENV_MUST_BE_LAST)
+ BUG("tr2_sysenv_get invalid var '%d'", var);
+
+ return tr2_sysenv_settings[var].env_var_name;
+}
+
+void tr2_sysenv_release(void)
+{
+ int k;
+
+ for (k = 0; k < ARRAY_SIZE(tr2_sysenv_settings); k++)
+ free(tr2_sysenv_settings[k].value);
+}
--- /dev/null
+#ifndef TR2_SYSENV_H
+#define TR2_SYSENV_H
+
+/*
+ * The Trace2 settings that can be loaded from /etc/gitconfig
+ * and/or user environment variables.
+ *
+ * Note that this set does not contain any of the transient
+ * environment variables used to pass information from parent
+ * to child git processes, such as "GIT_TR2_PARENT_SID".
+ */
+enum tr2_sysenv_variable {
+ TR2_SYSENV_CFG_PARAM = 0,
+
+ TR2_SYSENV_DST_DEBUG,
+
+ TR2_SYSENV_NORMAL,
+ TR2_SYSENV_NORMAL_BRIEF,
+
+ TR2_SYSENV_EVENT,
+ TR2_SYSENV_EVENT_BRIEF,
+ TR2_SYSENV_EVENT_NESTING,
+
+ TR2_SYSENV_PERF,
+ TR2_SYSENV_PERF_BRIEF,
+
+ TR2_SYSENV_MUST_BE_LAST
+};
+
+void tr2_sysenv_load(void);
+
+const char *tr2_sysenv_get(enum tr2_sysenv_variable);
+const char *tr2_sysenv_display_name(enum tr2_sysenv_variable var);
+void tr2_sysenv_release(void);
+
+#endif /* TR2_SYSENV_H */
--- /dev/null
+#include "cache.h"
+#include "tr2_tbuf.h"
+
+void tr2_tbuf_local_time(struct tr2_tbuf *tb)
+{
+ struct timeval tv;
+ struct tm tm;
+ time_t secs;
+
+ gettimeofday(&tv, NULL);
+ secs = tv.tv_sec;
+ localtime_r(&secs, &tm);
+
+ xsnprintf(tb->buf, sizeof(tb->buf), "%02d:%02d:%02d.%06ld", tm.tm_hour,
+ tm.tm_min, tm.tm_sec, (long)tv.tv_usec);
+}
+
+void tr2_tbuf_utc_datetime_extended(struct tr2_tbuf *tb)
+{
+ struct timeval tv;
+ struct tm tm;
+ time_t secs;
+
+ gettimeofday(&tv, NULL);
+ secs = tv.tv_sec;
+ gmtime_r(&secs, &tm);
+
+ xsnprintf(tb->buf, sizeof(tb->buf),
+ "%4d-%02d-%02dT%02d:%02d:%02d.%06ldZ", tm.tm_year + 1900,
+ tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec,
+ (long)tv.tv_usec);
+}
+
+void tr2_tbuf_utc_datetime(struct tr2_tbuf *tb)
+{
+ struct timeval tv;
+ struct tm tm;
+ time_t secs;
+
+ gettimeofday(&tv, NULL);
+ secs = tv.tv_sec;
+ gmtime_r(&secs, &tm);
+
+ xsnprintf(tb->buf, sizeof(tb->buf), "%4d%02d%02dT%02d%02d%02d.%06ldZ",
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
+ tm.tm_min, tm.tm_sec, (long)tv.tv_usec);
+}
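+
+/*
+ * Sample output of the three formatters above (illustrative values):
+ *
+ *     tr2_tbuf_local_time()             "13:45:17.123456"
+ *     tr2_tbuf_utc_datetime_extended()  "2019-04-01T13:45:17.123456Z"
+ *     tr2_tbuf_utc_datetime()           "20190401T134517.123456Z"
+ */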
--- /dev/null
+#ifndef TR2_TBUF_H
+#define TR2_TBUF_H
+
+/*
+ * A simple wrapper around a fixed buffer to avoid C syntax
+ * quirks and the need to pass around an additional size_t
+ * argument.
+ */
+struct tr2_tbuf {
+ char buf[32];
+};
+
+/*
+ * Fill buffer with formatted local time string.
+ */
+void tr2_tbuf_local_time(struct tr2_tbuf *tb);
+
+/*
+ * Fill buffer with formatted UTC datetime string.
+ */
+void tr2_tbuf_utc_datetime_extended(struct tr2_tbuf *tb);
+void tr2_tbuf_utc_datetime(struct tr2_tbuf *tb);
+
+#endif /* TR2_TBUF_H */
--- /dev/null
+#ifndef TR2_TGT_H
+#define TR2_TGT_H
+
+struct child_process;
+struct repository;
+struct json_writer;
+
+/*
+ * Function prototypes for a TRACE2 "target" vtable.
+ */
+
+typedef int(tr2_tgt_init_t)(void);
+typedef void(tr2_tgt_term_t)(void);
+
+typedef void(tr2_tgt_evt_version_fl_t)(const char *file, int line);
+
+typedef void(tr2_tgt_evt_start_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ const char **argv);
+typedef void(tr2_tgt_evt_exit_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute, int code);
+typedef void(tr2_tgt_evt_signal_t)(uint64_t us_elapsed_absolute, int signo);
+typedef void(tr2_tgt_evt_atexit_t)(uint64_t us_elapsed_absolute, int code);
+
+typedef void(tr2_tgt_evt_error_va_fl_t)(const char *file, int line,
+ const char *fmt, va_list ap);
+
+typedef void(tr2_tgt_evt_command_path_fl_t)(const char *file, int line,
+ const char *command_path);
+typedef void(tr2_tgt_evt_command_name_fl_t)(const char *file, int line,
+ const char *name,
+ const char *hierarchy);
+typedef void(tr2_tgt_evt_command_mode_fl_t)(const char *file, int line,
+ const char *mode);
+
+typedef void(tr2_tgt_evt_alias_fl_t)(const char *file, int line,
+ const char *alias, const char **argv);
+
+typedef void(tr2_tgt_evt_child_start_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ const struct child_process *cmd);
+typedef void(tr2_tgt_evt_child_exit_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute, int cid,
+ int pid, int code,
+ uint64_t us_elapsed_child);
+
+typedef void(tr2_tgt_evt_thread_start_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute);
+typedef void(tr2_tgt_evt_thread_exit_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_thread);
+
+typedef void(tr2_tgt_evt_exec_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute, int exec_id,
+ const char *exe, const char **argv);
+typedef void(tr2_tgt_evt_exec_result_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ int exec_id, int code);
+
+typedef void(tr2_tgt_evt_param_fl_t)(const char *file, int line,
+ const char *param, const char *value);
+
+typedef void(tr2_tgt_evt_repo_fl_t)(const char *file, int line,
+ const struct repository *repo);
+
+typedef void(tr2_tgt_evt_region_enter_printf_va_fl_t)(
+ const char *file, int line, uint64_t us_elapsed_absolute,
+ const char *category, const char *label, const struct repository *repo,
+ const char *fmt, va_list ap);
+typedef void(tr2_tgt_evt_region_leave_printf_va_fl_t)(
+ const char *file, int line, uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region, const char *category, const char *label,
+ const struct repository *repo, const char *fmt, va_list ap);
+
+typedef void(tr2_tgt_evt_data_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region,
+ const char *category,
+ const struct repository *repo,
+ const char *key, const char *value);
+typedef void(tr2_tgt_evt_data_json_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region,
+ const char *category,
+ const struct repository *repo,
+ const char *key,
+ const struct json_writer *value);
+
+typedef void(tr2_tgt_evt_printf_va_fl_t)(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ const char *fmt, va_list ap);
+
+/*
+ * "vtable" for a TRACE2 target. Use NULL if a target does not want
+ * to emit that message.
+ */
+/* clang-format off */
+struct tr2_tgt {
+ struct tr2_dst *pdst;
+
+ tr2_tgt_init_t *pfn_init;
+ tr2_tgt_term_t *pfn_term;
+
+ tr2_tgt_evt_version_fl_t *pfn_version_fl;
+ tr2_tgt_evt_start_fl_t *pfn_start_fl;
+ tr2_tgt_evt_exit_fl_t *pfn_exit_fl;
+ tr2_tgt_evt_signal_t *pfn_signal;
+ tr2_tgt_evt_atexit_t *pfn_atexit;
+ tr2_tgt_evt_error_va_fl_t *pfn_error_va_fl;
+ tr2_tgt_evt_command_path_fl_t *pfn_command_path_fl;
+ tr2_tgt_evt_command_name_fl_t *pfn_command_name_fl;
+ tr2_tgt_evt_command_mode_fl_t *pfn_command_mode_fl;
+ tr2_tgt_evt_alias_fl_t *pfn_alias_fl;
+ tr2_tgt_evt_child_start_fl_t *pfn_child_start_fl;
+ tr2_tgt_evt_child_exit_fl_t *pfn_child_exit_fl;
+ tr2_tgt_evt_thread_start_fl_t *pfn_thread_start_fl;
+ tr2_tgt_evt_thread_exit_fl_t *pfn_thread_exit_fl;
+ tr2_tgt_evt_exec_fl_t *pfn_exec_fl;
+ tr2_tgt_evt_exec_result_fl_t *pfn_exec_result_fl;
+ tr2_tgt_evt_param_fl_t *pfn_param_fl;
+ tr2_tgt_evt_repo_fl_t *pfn_repo_fl;
+ tr2_tgt_evt_region_enter_printf_va_fl_t *pfn_region_enter_printf_va_fl;
+ tr2_tgt_evt_region_leave_printf_va_fl_t *pfn_region_leave_printf_va_fl;
+ tr2_tgt_evt_data_fl_t *pfn_data_fl;
+ tr2_tgt_evt_data_json_fl_t *pfn_data_json_fl;
+ tr2_tgt_evt_printf_va_fl_t *pfn_printf_va_fl;
+};
+/* clang-format on */
+
+extern struct tr2_tgt tr2_tgt_event;
+extern struct tr2_tgt tr2_tgt_normal;
+extern struct tr2_tgt tr2_tgt_perf;
+
+#endif /* TR2_TGT_H */
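
Every slot in the vtable may be NULL, so whoever dispatches an event has to test the pointer first. The actual dispatch loop lives in trace2.c and is not part of this hunk; the sketch below only illustrates the intended pattern, and the array name tr2_tgt_builtins and the helper emit_version_to_active_targets are hypothetical:

    /* Illustrative only; the list and helper below are not from this patch. */
    static struct tr2_tgt *tr2_tgt_builtins[] = {
            &tr2_tgt_event, &tr2_tgt_normal, &tr2_tgt_perf, NULL
    };

    static void emit_version_to_active_targets(const char *file, int line)
    {
            struct tr2_tgt **t;

            for (t = tr2_tgt_builtins; *t; t++)
                    if ((*t)->pfn_version_fl) /* target may opt out with NULL */
                            (*t)->pfn_version_fl(file, line);
    }
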
--- /dev/null
+#include "cache.h"
+#include "config.h"
+#include "json-writer.h"
+#include "run-command.h"
+#include "version.h"
+#include "trace2/tr2_dst.h"
+#include "trace2/tr2_tbuf.h"
+#include "trace2/tr2_sid.h"
+#include "trace2/tr2_sysenv.h"
+#include "trace2/tr2_tgt.h"
+#include "trace2/tr2_tls.h"
+
+static struct tr2_dst tr2dst_event = { TR2_SYSENV_EVENT, 0, 0, 0 };
+
+/*
+ * The version number of the JSON data generated by the EVENT target
+ * in this source file. Update this if you make a significant change
+ * to the JSON fields or message structure. You probably do not need
+ * to update this if you just add another call to one of the existing
+ * TRACE2 API methods.
+ */
+#define TR2_EVENT_VERSION "1"
+
+/*
+ * Region nesting limit for messages written to the event target.
+ *
+ * The "region_enter" and "region_leave" messages (especially recursive
+ * messages such as those produced while walking the worktree or index)
+ * are primarily intended for the performance target during debugging.
+ *
+ * Some of the outer-most messages, however, may be of interest to the
+ * event target. Use the TR2_SYSENV_EVENT_NESTING setting to increase
+ * region details in the event target.
+ */
+static int tr2env_event_max_nesting_levels = 2;
+
+/*
+ * Use the TR2_SYSENV_EVENT_BRIEF to omit the <time>, <file>, and
+ * <line> fields from most events.
+ */
+static int tr2env_event_be_brief;
+
+static int fn_init(void)
+{
+ int want = tr2_dst_trace_want(&tr2dst_event);
+ int max_nesting;
+ int want_brief;
+ const char *nesting;
+ const char *brief;
+
+ if (!want)
+ return want;
+
+ nesting = tr2_sysenv_get(TR2_SYSENV_EVENT_NESTING);
+ if (nesting && *nesting && ((max_nesting = atoi(nesting)) > 0))
+ tr2env_event_max_nesting_levels = max_nesting;
+
+ brief = tr2_sysenv_get(TR2_SYSENV_EVENT_BRIEF);
+ if (brief && *brief &&
+ ((want_brief = git_parse_maybe_bool(brief)) != -1))
+ tr2env_event_be_brief = want_brief;
+
+ return want;
+}
+
+static void fn_term(void)
+{
+ tr2_dst_trace_disable(&tr2dst_event);
+}
+
+/*
+ * Append common key-value pairs to the currently open JSON object.
+ * "event:"<event_name>"
+ * "sid":"<sid>"
+ * "thread":"<thread_name>"
+ * "time":"<time>"
+ * "file":"<filename>"
+ * "line":<line_number>
+ * "repo":<repo_id>
+ */
+static void event_fmt_prepare(const char *event_name, const char *file,
+ int line, const struct repository *repo,
+ struct json_writer *jw)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ struct tr2_tbuf tb_now;
+
+ jw_object_string(jw, "event", event_name);
+ jw_object_string(jw, "sid", tr2_sid_get());
+ jw_object_string(jw, "thread", ctx->thread_name.buf);
+
+ /*
+ * In brief mode, only emit <time> on these 2 event types.
+ */
+ if (!tr2env_event_be_brief || !strcmp(event_name, "version") ||
+ !strcmp(event_name, "atexit")) {
+ tr2_tbuf_utc_datetime_extended(&tb_now);
+ jw_object_string(jw, "time", tb_now.buf);
+ }
+
+ if (!tr2env_event_be_brief && file && *file) {
+ jw_object_string(jw, "file", file);
+ jw_object_intmax(jw, "line", line);
+ }
+
+ if (repo)
+ jw_object_intmax(jw, "repo", repo->trace2_repo_id);
+}
+
+static void fn_version_fl(const char *file, int line)
+{
+ const char *event_name = "version";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_string(&jw, "evt", TR2_EVENT_VERSION);
+ jw_object_string(&jw, "exe", git_version_string);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
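
A "version" record written by this function is one JSON line: the common fields added by event_fmt_prepare() followed by "evt" and "exe". Schematically (placeholders, not captured output):

    {"event":"version","sid":"<sid>","thread":"main","time":"<YYYY-MM-DDThh:mm:ss.ffffffZ>","file":"<file>","line":<line>,"evt":"1","exe":"<git-version-string>"}
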
+
+static void fn_start_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, const char **argv)
+{
+ const char *event_name = "start";
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_abs = (double)us_elapsed_absolute / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_double(&jw, "t_abs", 6, t_abs);
+ jw_object_inline_begin_array(&jw, "argv");
+ jw_array_argv(&jw, argv);
+ jw_end(&jw);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_exit_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ int code)
+{
+ const char *event_name = "exit";
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_abs = (double)us_elapsed_absolute / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_double(&jw, "t_abs", 6, t_abs);
+ jw_object_intmax(&jw, "code", code);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_signal(uint64_t us_elapsed_absolute, int signo)
+{
+ const char *event_name = "signal";
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_abs = (double)us_elapsed_absolute / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, __FILE__, __LINE__, NULL, &jw);
+ jw_object_double(&jw, "t_abs", 6, t_abs);
+ jw_object_intmax(&jw, "signo", signo);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_atexit(uint64_t us_elapsed_absolute, int code)
+{
+ const char *event_name = "atexit";
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_abs = (double)us_elapsed_absolute / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, __FILE__, __LINE__, NULL, &jw);
+ jw_object_double(&jw, "t_abs", 6, t_abs);
+ jw_object_intmax(&jw, "code", code);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void maybe_add_string_va(struct json_writer *jw, const char *field_name,
+ const char *fmt, va_list ap)
+{
+ if (fmt && *fmt) {
+ va_list copy_ap;
+ struct strbuf buf = STRBUF_INIT;
+
+ va_copy(copy_ap, ap);
+ strbuf_vaddf(&buf, fmt, copy_ap);
+ va_end(copy_ap);
+
+ jw_object_string(jw, field_name, buf.buf);
+ strbuf_release(&buf);
+ return;
+ }
+}
+
+static void fn_error_va_fl(const char *file, int line, const char *fmt,
+ va_list ap)
+{
+ const char *event_name = "error";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ maybe_add_string_va(&jw, "msg", fmt, ap);
+ /*
+ * Also emit the format string as a field in case
+ * post-processors want to aggregate common error
+ * messages by type without argument fields (such
+ * as pathnames or branch names) cluttering it up.
+ */
+ if (fmt && *fmt)
+ jw_object_string(&jw, "fmt", fmt);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_command_path_fl(const char *file, int line, const char *pathname)
+{
+ const char *event_name = "cmd_path";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_string(&jw, "path", pathname);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_command_name_fl(const char *file, int line, const char *name,
+ const char *hierarchy)
+{
+ const char *event_name = "cmd_name";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_string(&jw, "name", name);
+ if (hierarchy && *hierarchy)
+ jw_object_string(&jw, "hierarchy", hierarchy);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_command_mode_fl(const char *file, int line, const char *mode)
+{
+ const char *event_name = "cmd_mode";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_string(&jw, "name", mode);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_alias_fl(const char *file, int line, const char *alias,
+ const char **argv)
+{
+ const char *event_name = "alias";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_string(&jw, "alias", alias);
+ jw_object_inline_begin_array(&jw, "argv");
+ jw_array_argv(&jw, argv);
+ jw_end(&jw);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_child_start_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ const struct child_process *cmd)
+{
+ const char *event_name = "child_start";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_intmax(&jw, "child_id", cmd->trace2_child_id);
+ if (cmd->trace2_hook_name) {
+ jw_object_string(&jw, "child_class", "hook");
+ jw_object_string(&jw, "hook_name", cmd->trace2_hook_name);
+ } else {
+ const char *child_class =
+ cmd->trace2_child_class ? cmd->trace2_child_class : "?";
+ jw_object_string(&jw, "child_class", child_class);
+ }
+ if (cmd->dir)
+ jw_object_string(&jw, "cd", cmd->dir);
+ jw_object_bool(&jw, "use_shell", cmd->use_shell);
+ jw_object_inline_begin_array(&jw, "argv");
+ if (cmd->git_cmd)
+ jw_array_string(&jw, "git");
+ jw_array_argv(&jw, cmd->argv);
+ jw_end(&jw);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
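
For a hook child, for example, the resulting record is shaped like the template below (again placeholders, not captured output). Non-hook children carry "child_class" taken from cmd->trace2_child_class (or "?") instead of "hook_name", a "cd" field is added only when cmd->dir is set, and "git" is prepended to "argv" only for git_cmd children:

    {"event":"child_start","sid":"<sid>","thread":"main","time":"<time>","file":"<file>","line":<line>,"child_id":<n>,"child_class":"hook","hook_name":"<hook-name>","use_shell":false,"argv":["<argv0>","..."]}
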
+
+static void fn_child_exit_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, int cid, int pid,
+ int code, uint64_t us_elapsed_child)
+{
+ const char *event_name = "child_exit";
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_rel = (double)us_elapsed_child / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_intmax(&jw, "child_id", cid);
+ jw_object_intmax(&jw, "pid", pid);
+ jw_object_intmax(&jw, "code", code);
+ jw_object_double(&jw, "t_rel", 6, t_rel);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+
+ jw_release(&jw);
+}
+
+static void fn_thread_start_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute)
+{
+ const char *event_name = "thread_start";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_thread_exit_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_thread)
+{
+ const char *event_name = "thread_exit";
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_rel = (double)us_elapsed_thread / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_double(&jw, "t_rel", 6, t_rel);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_exec_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ int exec_id, const char *exe, const char **argv)
+{
+ const char *event_name = "exec";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_intmax(&jw, "exec_id", exec_id);
+ if (exe)
+ jw_object_string(&jw, "exe", exe);
+ jw_object_inline_begin_array(&jw, "argv");
+ jw_array_argv(&jw, argv);
+ jw_end(&jw);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_exec_result_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, int exec_id,
+ int code)
+{
+ const char *event_name = "exec_result";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_intmax(&jw, "exec_id", exec_id);
+ jw_object_intmax(&jw, "code", code);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_param_fl(const char *file, int line, const char *param,
+ const char *value)
+{
+ const char *event_name = "def_param";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, NULL, &jw);
+ jw_object_string(&jw, "param", param);
+ jw_object_string(&jw, "value", value);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_repo_fl(const char *file, int line,
+ const struct repository *repo)
+{
+ const char *event_name = "def_repo";
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, repo, &jw);
+ jw_object_string(&jw, "worktree", repo->worktree);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+}
+
+static void fn_region_enter_printf_va_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ const char *category,
+ const char *label,
+ const struct repository *repo,
+ const char *fmt, va_list ap)
+{
+ const char *event_name = "region_enter";
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ if (ctx->nr_open_regions <= tr2env_event_max_nesting_levels) {
+ struct json_writer jw = JSON_WRITER_INIT;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, repo, &jw);
+ jw_object_intmax(&jw, "nesting", ctx->nr_open_regions);
+ if (category)
+ jw_object_string(&jw, "category", category);
+ if (label)
+ jw_object_string(&jw, "label", label);
+ maybe_add_string_va(&jw, "msg", fmt, ap);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+ }
+}
+
+static void fn_region_leave_printf_va_fl(
+ const char *file, int line, uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region, const char *category, const char *label,
+ const struct repository *repo, const char *fmt, va_list ap)
+{
+ const char *event_name = "region_leave";
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ if (ctx->nr_open_regions <= tr2env_event_max_nesting_levels) {
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_rel = (double)us_elapsed_region / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, repo, &jw);
+ jw_object_double(&jw, "t_rel", 6, t_rel);
+ jw_object_intmax(&jw, "nesting", ctx->nr_open_regions);
+ if (category)
+ jw_object_string(&jw, "category", category);
+ if (label)
+ jw_object_string(&jw, "label", label);
+ maybe_add_string_va(&jw, "msg", fmt, ap);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+ }
+}
+
+static void fn_data_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region, const char *category,
+ const struct repository *repo, const char *key,
+ const char *value)
+{
+ const char *event_name = "data";
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ if (ctx->nr_open_regions <= tr2env_event_max_nesting_levels) {
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_abs = (double)us_elapsed_absolute / 1000000.0;
+ double t_rel = (double)us_elapsed_region / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, repo, &jw);
+ jw_object_double(&jw, "t_abs", 6, t_abs);
+ jw_object_double(&jw, "t_rel", 6, t_rel);
+ jw_object_intmax(&jw, "nesting", ctx->nr_open_regions);
+ jw_object_string(&jw, "category", category);
+ jw_object_string(&jw, "key", key);
+ jw_object_string(&jw, "value", value);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+ }
+}
+
+static void fn_data_json_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region, const char *category,
+ const struct repository *repo, const char *key,
+ const struct json_writer *value)
+{
+ const char *event_name = "data_json";
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ if (ctx->nr_open_regions <= tr2env_event_max_nesting_levels) {
+ struct json_writer jw = JSON_WRITER_INIT;
+ double t_abs = (double)us_elapsed_absolute / 1000000.0;
+ double t_rel = (double)us_elapsed_region / 1000000.0;
+
+ jw_object_begin(&jw, 0);
+ event_fmt_prepare(event_name, file, line, repo, &jw);
+ jw_object_double(&jw, "t_abs", 6, t_abs);
+ jw_object_double(&jw, "t_rel", 6, t_rel);
+ jw_object_intmax(&jw, "nesting", ctx->nr_open_regions);
+ jw_object_string(&jw, "category", category);
+ jw_object_string(&jw, "key", key);
+ jw_object_sub_jw(&jw, "value", value);
+ jw_end(&jw);
+
+ tr2_dst_write_line(&tr2dst_event, &jw.json);
+ jw_release(&jw);
+ }
+}
+
+struct tr2_tgt tr2_tgt_event = {
+ &tr2dst_event,
+
+ fn_init,
+ fn_term,
+
+ fn_version_fl,
+ fn_start_fl,
+ fn_exit_fl,
+ fn_signal,
+ fn_atexit,
+ fn_error_va_fl,
+ fn_command_path_fl,
+ fn_command_name_fl,
+ fn_command_mode_fl,
+ fn_alias_fl,
+ fn_child_start_fl,
+ fn_child_exit_fl,
+ fn_thread_start_fl,
+ fn_thread_exit_fl,
+ fn_exec_fl,
+ fn_exec_result_fl,
+ fn_param_fl,
+ fn_repo_fl,
+ fn_region_enter_printf_va_fl,
+ fn_region_leave_printf_va_fl,
+ fn_data_fl,
+ fn_data_json_fl,
+ NULL, /* printf */
+};
--- /dev/null
+#include "cache.h"
+#include "config.h"
+#include "run-command.h"
+#include "quote.h"
+#include "version.h"
+#include "trace2/tr2_dst.h"
+#include "trace2/tr2_sysenv.h"
+#include "trace2/tr2_tbuf.h"
+#include "trace2/tr2_tgt.h"
+#include "trace2/tr2_tls.h"
+
+static struct tr2_dst tr2dst_normal = { TR2_SYSENV_NORMAL, 0, 0, 0 };
+
+/*
+ * Use the TR2_SYSENV_NORMAL_BRIEF setting to omit the "<time> <file>:<line>"
+ * fields from each line written to the builtin normal target.
+ *
+ * Unit tests may want to use this to help with testing.
+ */
+static int tr2env_normal_be_brief;
+
+#define TR2FMT_NORMAL_FL_WIDTH (50)
+
+static int fn_init(void)
+{
+ int want = tr2_dst_trace_want(&tr2dst_normal);
+ int want_brief;
+ const char *brief;
+
+ if (!want)
+ return want;
+
+ brief = tr2_sysenv_get(TR2_SYSENV_NORMAL_BRIEF);
+ if (brief && *brief &&
+ ((want_brief = git_parse_maybe_bool(brief)) != -1))
+ tr2env_normal_be_brief = want_brief;
+
+ return want;
+}
+
+static void fn_term(void)
+{
+ tr2_dst_trace_disable(&tr2dst_normal);
+}
+
+static void normal_fmt_prepare(const char *file, int line, struct strbuf *buf)
+{
+ strbuf_setlen(buf, 0);
+
+ if (!tr2env_normal_be_brief) {
+ struct tr2_tbuf tb_now;
+
+ tr2_tbuf_local_time(&tb_now);
+ strbuf_addstr(buf, tb_now.buf);
+ strbuf_addch(buf, ' ');
+
+ if (file && *file)
+ strbuf_addf(buf, "%s:%d ", file, line);
+ while (buf->len < TR2FMT_NORMAL_FL_WIDTH)
+ strbuf_addch(buf, ' ');
+ }
+}
+
+static void normal_io_write_fl(const char *file, int line,
+ const struct strbuf *buf_payload)
+{
+ struct strbuf buf_line = STRBUF_INIT;
+
+ normal_fmt_prepare(file, line, &buf_line);
+ strbuf_addbuf(&buf_line, buf_payload);
+ tr2_dst_write_line(&tr2dst_normal, &buf_line);
+ strbuf_release(&buf_line);
+}
+
+static void fn_version_fl(const char *file, int line)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "version %s", git_version_string);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
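
With the prefix built by normal_fmt_prepare(), a line from this target therefore looks roughly like the template below: local time, "<file>:<line>", padding out to TR2FMT_NORMAL_FL_WIDTH columns, then the payload. In brief mode the prefix is omitted and only the payload is written.

    <local-time> <file>:<line>                        version <git-version-string>
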
+
+static void fn_start_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, const char **argv)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, "start ");
+ sq_quote_argv_pretty(&buf_payload, argv);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_exit_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ int code)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+ double elapsed = (double)us_elapsed_absolute / 1000000.0;
+
+ strbuf_addf(&buf_payload, "exit elapsed:%.6f code:%d", elapsed, code);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_signal(uint64_t us_elapsed_absolute, int signo)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+ double elapsed = (double)us_elapsed_absolute / 1000000.0;
+
+ strbuf_addf(&buf_payload, "signal elapsed:%.6f code:%d", elapsed,
+ signo);
+ normal_io_write_fl(__FILE__, __LINE__, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_atexit(uint64_t us_elapsed_absolute, int code)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+ double elapsed = (double)us_elapsed_absolute / 1000000.0;
+
+ strbuf_addf(&buf_payload, "atexit elapsed:%.6f code:%d", elapsed, code);
+ normal_io_write_fl(__FILE__, __LINE__, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void maybe_append_string_va(struct strbuf *buf, const char *fmt,
+ va_list ap)
+{
+ if (fmt && *fmt) {
+ va_list copy_ap;
+
+ va_copy(copy_ap, ap);
+ strbuf_vaddf(buf, fmt, copy_ap);
+ va_end(copy_ap);
+ return;
+ }
+}
+
+static void fn_error_va_fl(const char *file, int line, const char *fmt,
+ va_list ap)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, "error ");
+ maybe_append_string_va(&buf_payload, fmt, ap);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_command_path_fl(const char *file, int line, const char *pathname)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "cmd_path %s", pathname);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_command_name_fl(const char *file, int line, const char *name,
+ const char *hierarchy)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "cmd_name %s", name);
+ if (hierarchy && *hierarchy)
+ strbuf_addf(&buf_payload, " (%s)", hierarchy);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_command_mode_fl(const char *file, int line, const char *mode)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "cmd_mode %s", mode);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_alias_fl(const char *file, int line, const char *alias,
+ const char **argv)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "alias %s ->", alias);
+ sq_quote_argv_pretty(&buf_payload, argv);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_child_start_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ const struct child_process *cmd)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "child_start[%d] ", cmd->trace2_child_id);
+
+ if (cmd->dir) {
+ strbuf_addstr(&buf_payload, " cd");
+ sq_quote_buf_pretty(&buf_payload, cmd->dir);
+ strbuf_addstr(&buf_payload, "; ");
+ }
+
+ /*
+ * TODO if (cmd->env) { Consider dumping changes to environment. }
+ * See trace_add_env() in run-command.c as used by original trace.c
+ */
+
+ if (cmd->git_cmd)
+ strbuf_addstr(&buf_payload, "git");
+ sq_quote_argv_pretty(&buf_payload, cmd->argv);
+
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_child_exit_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, int cid, int pid,
+ int code, uint64_t us_elapsed_child)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+ double elapsed = (double)us_elapsed_child / 1000000.0;
+
+ strbuf_addf(&buf_payload, "child_exit[%d] pid:%d code:%d elapsed:%.6f",
+ cid, pid, code, elapsed);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_exec_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ int exec_id, const char *exe, const char **argv)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "exec[%d] ", exec_id);
+ if (exe)
+ strbuf_addstr(&buf_payload, exe);
+ sq_quote_argv_pretty(&buf_payload, argv);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_exec_result_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, int exec_id,
+ int code)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "exec_result[%d] code:%d", exec_id, code);
+ if (code > 0)
+ strbuf_addf(&buf_payload, " err:%s", strerror(code));
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_param_fl(const char *file, int line, const char *param,
+ const char *value)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "def_param %s=%s", param, value);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_repo_fl(const char *file, int line,
+ const struct repository *repo)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, "worktree ");
+ sq_quote_buf_pretty(&buf_payload, repo->worktree);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_printf_va_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, const char *fmt,
+ va_list ap)
+{
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ maybe_append_string_va(&buf_payload, fmt, ap);
+ normal_io_write_fl(file, line, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+struct tr2_tgt tr2_tgt_normal = {
+ &tr2dst_normal,
+
+ fn_init,
+ fn_term,
+
+ fn_version_fl,
+ fn_start_fl,
+ fn_exit_fl,
+ fn_signal,
+ fn_atexit,
+ fn_error_va_fl,
+ fn_command_path_fl,
+ fn_command_name_fl,
+ fn_command_mode_fl,
+ fn_alias_fl,
+ fn_child_start_fl,
+ fn_child_exit_fl,
+ NULL, /* thread_start */
+ NULL, /* thread_exit */
+ fn_exec_fl,
+ fn_exec_result_fl,
+ fn_param_fl,
+ fn_repo_fl,
+ NULL, /* region_enter */
+ NULL, /* region_leave */
+ NULL, /* data */
+ NULL, /* data_json */
+ fn_printf_va_fl,
+};
--- /dev/null
+#include "cache.h"
+#include "config.h"
+#include "run-command.h"
+#include "quote.h"
+#include "version.h"
+#include "json-writer.h"
+#include "trace2/tr2_dst.h"
+#include "trace2/tr2_sid.h"
+#include "trace2/tr2_sysenv.h"
+#include "trace2/tr2_tbuf.h"
+#include "trace2/tr2_tgt.h"
+#include "trace2/tr2_tls.h"
+
+static struct tr2_dst tr2dst_perf = { TR2_SYSENV_PERF, 0, 0, 0 };
+
+/*
+ * Use TR2_SYSENV_PERF_BRIEF to omit the "<time> <file>:<line>"
+ * fields from each line written to the builtin performance target.
+ *
+ * Unit tests may want to use this to help with testing.
+ */
+static int tr2env_perf_be_brief;
+
+#define TR2FMT_PERF_FL_WIDTH (50)
+#define TR2FMT_PERF_MAX_EVENT_NAME (12)
+#define TR2FMT_PERF_REPO_WIDTH (4)
+#define TR2FMT_PERF_CATEGORY_WIDTH (10)
+
+#define TR2_DOTS_BUFFER_SIZE (100)
+#define TR2_INDENT (2)
+#define TR2_INDENT_LENGTH(ctx) (((ctx)->nr_open_regions - 1) * TR2_INDENT)
+
+static struct strbuf dots = STRBUF_INIT;
+
+static int fn_init(void)
+{
+ int want = tr2_dst_trace_want(&tr2dst_perf);
+ int want_brief;
+ const char *brief;
+
+ if (!want)
+ return want;
+
+ strbuf_addchars(&dots, '.', TR2_DOTS_BUFFER_SIZE);
+
+ brief = tr2_sysenv_get(TR2_SYSENV_PERF_BRIEF);
+ if (brief && *brief &&
+ ((want_brief = git_parse_maybe_bool(brief)) != -1))
+ tr2env_perf_be_brief = want_brief;
+
+ return want;
+}
+
+static void fn_term(void)
+{
+ tr2_dst_trace_disable(&tr2dst_perf);
+
+ strbuf_release(&dots);
+}
+
+/*
+ * Format trace line prefix in human-readable classic format for
+ * the performance target:
+ * "[<time> [<file>:<line>] <bar>] <nr_parents> <bar>
+ * <thread_name> <bar> <event_name> <bar> [<repo>] <bar>
+ * [<elapsed_absolute>] [<elapsed_relative>] <bar>
+ * [<category>] <bar> [<dots>] "
+ */
+static void perf_fmt_prepare(const char *event_name,
+ struct tr2tls_thread_ctx *ctx, const char *file,
+ int line, const struct repository *repo,
+ uint64_t *p_us_elapsed_absolute,
+ uint64_t *p_us_elapsed_relative,
+ const char *category, struct strbuf *buf)
+{
+ int len;
+
+ strbuf_setlen(buf, 0);
+
+ if (!tr2env_perf_be_brief) {
+ struct tr2_tbuf tb_now;
+
+ tr2_tbuf_local_time(&tb_now);
+ strbuf_addstr(buf, tb_now.buf);
+ strbuf_addch(buf, ' ');
+
+ if (file && *file)
+ strbuf_addf(buf, "%s:%d ", file, line);
+ while (buf->len < TR2FMT_PERF_FL_WIDTH)
+ strbuf_addch(buf, ' ');
+
+ strbuf_addstr(buf, "| ");
+ }
+
+ strbuf_addf(buf, "d%d | ", tr2_sid_depth());
+ strbuf_addf(buf, "%-*s | %-*s | ", TR2_MAX_THREAD_NAME,
+ ctx->thread_name.buf, TR2FMT_PERF_MAX_EVENT_NAME,
+ event_name);
+
+ len = buf->len + TR2FMT_PERF_REPO_WIDTH;
+ if (repo)
+ strbuf_addf(buf, "r%d ", repo->trace2_repo_id);
+ while (buf->len < len)
+ strbuf_addch(buf, ' ');
+ strbuf_addstr(buf, "| ");
+
+ if (p_us_elapsed_absolute)
+ strbuf_addf(buf, "%9.6f | ",
+ ((double)(*p_us_elapsed_absolute)) / 1000000.0);
+ else
+ strbuf_addf(buf, "%9s | ", " ");
+
+ if (p_us_elapsed_relative)
+ strbuf_addf(buf, "%9.6f | ",
+ ((double)(*p_us_elapsed_relative)) / 1000000.0);
+ else
+ strbuf_addf(buf, "%9s | ", " ");
+
+ strbuf_addf(buf, "%-*s | ", TR2FMT_PERF_CATEGORY_WIDTH,
+ (category ? category : ""));
+
+ if (ctx->nr_open_regions > 0) {
+ int len_indent = TR2_INDENT_LENGTH(ctx);
+ while (len_indent > dots.len) {
+ strbuf_addbuf(buf, &dots);
+ len_indent -= dots.len;
+ }
+ strbuf_addf(buf, "%.*s", len_indent, dots.buf);
+ }
+}
+
+static void perf_io_write_fl(const char *file, int line, const char *event_name,
+ const struct repository *repo,
+ uint64_t *p_us_elapsed_absolute,
+ uint64_t *p_us_elapsed_relative,
+ const char *category,
+ const struct strbuf *buf_payload)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+ struct strbuf buf_line = STRBUF_INIT;
+
+ perf_fmt_prepare(event_name, ctx, file, line, repo,
+ p_us_elapsed_absolute, p_us_elapsed_relative, category,
+ &buf_line);
+ strbuf_addbuf(&buf_line, buf_payload);
+ tr2_dst_write_line(&tr2dst_perf, &buf_line);
+ strbuf_release(&buf_line);
+}
+
+static void fn_version_fl(const char *file, int line)
+{
+ const char *event_name = "version";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, git_version_string);
+
+ perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_start_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, const char **argv)
+{
+ const char *event_name = "start";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ sq_quote_argv_pretty(&buf_payload, argv);
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_exit_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ int code)
+{
+ const char *event_name = "exit";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "code:%d", code);
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_signal(uint64_t us_elapsed_absolute, int signo)
+{
+ const char *event_name = "signal";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "signo:%d", signo);
+
+ perf_io_write_fl(__FILE__, __LINE__, event_name, NULL,
+ &us_elapsed_absolute, NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_atexit(uint64_t us_elapsed_absolute, int code)
+{
+ const char *event_name = "atexit";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "code:%d", code);
+
+ perf_io_write_fl(__FILE__, __LINE__, event_name, NULL,
+ &us_elapsed_absolute, NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void maybe_append_string_va(struct strbuf *buf, const char *fmt,
+ va_list ap)
+{
+ if (fmt && *fmt) {
+ va_list copy_ap;
+
+ va_copy(copy_ap, ap);
+ strbuf_vaddf(buf, fmt, copy_ap);
+ va_end(copy_ap);
+ return;
+ }
+}
+
+static void fn_error_va_fl(const char *file, int line, const char *fmt,
+ va_list ap)
+{
+ const char *event_name = "error";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ maybe_append_string_va(&buf_payload, fmt, ap);
+
+ perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_command_path_fl(const char *file, int line, const char *pathname)
+{
+ const char *event_name = "cmd_path";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, pathname);
+
+ perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_command_name_fl(const char *file, int line, const char *name,
+ const char *hierarchy)
+{
+ const char *event_name = "cmd_name";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, name);
+ if (hierarchy && *hierarchy)
+ strbuf_addf(&buf_payload, " (%s)", hierarchy);
+
+ perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_command_mode_fl(const char *file, int line, const char *mode)
+{
+ const char *event_name = "cmd_mode";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, mode);
+
+ perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_alias_fl(const char *file, int line, const char *alias,
+ const char **argv)
+{
+ const char *event_name = "alias";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "alias:%s argv:", alias);
+ sq_quote_argv_pretty(&buf_payload, argv);
+
+ perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_child_start_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ const struct child_process *cmd)
+{
+ const char *event_name = "child_start";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ if (cmd->trace2_hook_name) {
+ strbuf_addf(&buf_payload, "[ch%d] class:hook hook:%s",
+ cmd->trace2_child_id, cmd->trace2_hook_name);
+ } else {
+ const char *child_class =
+ cmd->trace2_child_class ? cmd->trace2_child_class : "?";
+ strbuf_addf(&buf_payload, "[ch%d] class:%s",
+ cmd->trace2_child_id, child_class);
+ }
+
+ if (cmd->dir) {
+ strbuf_addstr(&buf_payload, " cd:");
+ sq_quote_buf_pretty(&buf_payload, cmd->dir);
+ }
+
+ strbuf_addstr(&buf_payload, " argv:");
+ if (cmd->git_cmd)
+ strbuf_addstr(&buf_payload, " git");
+ sq_quote_argv_pretty(&buf_payload, cmd->argv);
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_child_exit_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, int cid, int pid,
+ int code, uint64_t us_elapsed_child)
+{
+ const char *event_name = "child_exit";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "[ch%d] pid:%d code:%d", cid, pid, code);
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ &us_elapsed_child, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_thread_start_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute)
+{
+ const char *event_name = "thread_start";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_thread_exit_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_thread)
+{
+ const char *event_name = "thread_exit";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ &us_elapsed_thread, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_exec_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ int exec_id, const char *exe, const char **argv)
+{
+ const char *event_name = "exec";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "id:%d ", exec_id);
+ strbuf_addstr(&buf_payload, "argv:");
+ if (exe)
+ strbuf_addf(&buf_payload, " %s", exe);
+ sq_quote_argv_pretty(&buf_payload, argv);
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_exec_result_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, int exec_id,
+ int code)
+{
+ const char *event_name = "exec_result";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "id:%d code:%d", exec_id, code);
+ if (code > 0)
+ strbuf_addf(&buf_payload, " err:%s", strerror(code));
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_param_fl(const char *file, int line, const char *param,
+ const char *value)
+{
+ const char *event_name = "def_param";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "%s:%s", param, value);
+
+ perf_io_write_fl(file, line, event_name, NULL, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_repo_fl(const char *file, int line,
+ const struct repository *repo)
+{
+ const char *event_name = "def_repo";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addstr(&buf_payload, "worktree:");
+ sq_quote_buf_pretty(&buf_payload, repo->worktree);
+
+ perf_io_write_fl(file, line, event_name, repo, NULL, NULL, NULL,
+ &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_region_enter_printf_va_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ const char *category,
+ const char *label,
+ const struct repository *repo,
+ const char *fmt, va_list ap)
+{
+ const char *event_name = "region_enter";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ if (label)
+ strbuf_addf(&buf_payload, "label:%s ", label);
+ maybe_append_string_va(&buf_payload, fmt, ap);
+
+ perf_io_write_fl(file, line, event_name, repo, &us_elapsed_absolute,
+ NULL, category, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_region_leave_printf_va_fl(
+ const char *file, int line, uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region, const char *category, const char *label,
+ const struct repository *repo, const char *fmt, va_list ap)
+{
+ const char *event_name = "region_leave";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ if (label)
+ strbuf_addf(&buf_payload, "label:%s ", label);
+ maybe_append_string_va(&buf_payload, fmt, ap);
+
+ perf_io_write_fl(file, line, event_name, repo, &us_elapsed_absolute,
+ &us_elapsed_region, category, &buf_payload);
+ strbuf_release(&buf_payload);
+}
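
Rendered through perf_fmt_prepare(), an enter/leave pair for a region therefore produces two columnar lines of roughly the shape below (the timings, the "index" category, and the "do_read_index" label are illustrative; column widths are trimmed here for readability). Note that only "region_leave" fills the relative-elapsed column:

    <local-time> <file>:<line>          | d0 | main       | region_enter | r1 |  0.001234 |           | index      | label:do_read_index
    <local-time> <file>:<line>          | d0 | main       | region_leave | r1 |  0.002345 |  0.001111 | index      | label:do_read_index
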
+
+static void fn_data_fl(const char *file, int line, uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region, const char *category,
+ const struct repository *repo, const char *key,
+ const char *value)
+{
+ const char *event_name = "data";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "%s:%s", key, value);
+
+ perf_io_write_fl(file, line, event_name, repo, &us_elapsed_absolute,
+ &us_elapsed_region, category, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_data_json_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute,
+ uint64_t us_elapsed_region, const char *category,
+ const struct repository *repo, const char *key,
+ const struct json_writer *value)
+{
+ const char *event_name = "data_json";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ strbuf_addf(&buf_payload, "%s:%s", key, value->json.buf);
+
+ perf_io_write_fl(file, line, event_name, repo, &us_elapsed_absolute,
+ &us_elapsed_region, category, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+static void fn_printf_va_fl(const char *file, int line,
+ uint64_t us_elapsed_absolute, const char *fmt,
+ va_list ap)
+{
+ const char *event_name = "printf";
+ struct strbuf buf_payload = STRBUF_INIT;
+
+ maybe_append_string_va(&buf_payload, fmt, ap);
+
+ perf_io_write_fl(file, line, event_name, NULL, &us_elapsed_absolute,
+ NULL, NULL, &buf_payload);
+ strbuf_release(&buf_payload);
+}
+
+struct tr2_tgt tr2_tgt_perf = {
+ &tr2dst_perf,
+
+ fn_init,
+ fn_term,
+
+ fn_version_fl,
+ fn_start_fl,
+ fn_exit_fl,
+ fn_signal,
+ fn_atexit,
+ fn_error_va_fl,
+ fn_command_path_fl,
+ fn_command_name_fl,
+ fn_command_mode_fl,
+ fn_alias_fl,
+ fn_child_start_fl,
+ fn_child_exit_fl,
+ fn_thread_start_fl,
+ fn_thread_exit_fl,
+ fn_exec_fl,
+ fn_exec_result_fl,
+ fn_param_fl,
+ fn_repo_fl,
+ fn_region_enter_printf_va_fl,
+ fn_region_leave_printf_va_fl,
+ fn_data_fl,
+ fn_data_json_fl,
+ fn_printf_va_fl,
+};
--- /dev/null
+#include "cache.h"
+#include "thread-utils.h"
+#include "trace2/tr2_tls.h"
+
+/*
+ * Initial size of the thread stack for nested regions.
+ * This is used to store nested region start times. Note that
+ * this stack is per-thread and not per-trace-key.
+ */
+#define TR2_REGION_NESTING_INITIAL_SIZE (100)
+
+static struct tr2tls_thread_ctx *tr2tls_thread_main;
+static uint64_t tr2tls_us_start_process;
+
+static pthread_mutex_t tr2tls_mutex;
+static pthread_key_t tr2tls_key;
+
+static int tr2_next_thread_id; /* modify under lock */
+
+void tr2tls_start_process_clock(void)
+{
+ if (tr2tls_us_start_process)
+ return;
+
+ /*
+ * Keep the absolute start time of the process (i.e. the main
+ * process) in a fixed variable since other threads need to
+ * access it. This allows them to do so without taking a lock on
+ * the main thread's array data (which may be reallocated).
+ */
+ tr2tls_us_start_process = getnanotime() / 1000;
+}
+
+struct tr2tls_thread_ctx *tr2tls_create_self(const char *thread_name,
+ uint64_t us_thread_start)
+{
+ struct tr2tls_thread_ctx *ctx = xcalloc(1, sizeof(*ctx));
+
+ /*
+ * Implicitly "tr2tls_push_self()" to capture the thread's start
+ * time in array_us_start[0]. For the main thread this gives us the
+ * application run time.
+ */
+ ctx->alloc = TR2_REGION_NESTING_INITIAL_SIZE;
+ ctx->array_us_start = (uint64_t *)xcalloc(ctx->alloc, sizeof(uint64_t));
+ ctx->array_us_start[ctx->nr_open_regions++] = us_thread_start;
+
+ ctx->thread_id = tr2tls_locked_increment(&tr2_next_thread_id);
+
+ strbuf_init(&ctx->thread_name, 0);
+ if (ctx->thread_id)
+ strbuf_addf(&ctx->thread_name, "th%02d:", ctx->thread_id);
+ strbuf_addstr(&ctx->thread_name, thread_name);
+ if (ctx->thread_name.len > TR2_MAX_THREAD_NAME)
+ strbuf_setlen(&ctx->thread_name, TR2_MAX_THREAD_NAME);
+
+ pthread_setspecific(tr2tls_key, ctx);
+
+ return ctx;
+}
+
+struct tr2tls_thread_ctx *tr2tls_get_self(void)
+{
+ struct tr2tls_thread_ctx *ctx = pthread_getspecific(tr2tls_key);
+
+ /*
+ * If the thread-proc did not call trace2_thread_start(), we won't
+ * have any TLS data associated with the current thread. Fix it
+ * here and silently continue.
+ */
+ if (!ctx)
+ ctx = tr2tls_create_self("unknown", getnanotime() / 1000);
+
+ return ctx;
+}
+
+int tr2tls_is_main_thread(void)
+{
+ struct tr2tls_thread_ctx *ctx = pthread_getspecific(tr2tls_key);
+
+ return ctx == tr2tls_thread_main;
+}
+
+void tr2tls_unset_self(void)
+{
+ struct tr2tls_thread_ctx *ctx;
+
+ ctx = tr2tls_get_self();
+
+ pthread_setspecific(tr2tls_key, NULL);
+
+ free(ctx->array_us_start);
+ free(ctx);
+}
+
+void tr2tls_push_self(uint64_t us_now)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+
+ ALLOC_GROW(ctx->array_us_start, ctx->nr_open_regions + 1, ctx->alloc);
+ ctx->array_us_start[ctx->nr_open_regions++] = us_now;
+}
+
+void tr2tls_pop_self(void)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+
+ if (!ctx->nr_open_regions)
+ BUG("no open regions in thread '%s'", ctx->thread_name.buf);
+
+ ctx->nr_open_regions--;
+}
+
+void tr2tls_pop_unwind_self(void)
+{
+ struct tr2tls_thread_ctx *ctx = tr2tls_get_self();
+
+ while (ctx->nr_open_regions > 1)
+ tr2tls_pop_self();
+}
+
+uint64_t tr2tls_region_elasped_self(uint64_t us)
+{
+ struct tr2tls_thread_ctx *ctx;
+ uint64_t us_start;
+
+ ctx = tr2tls_get_self();
+ if (!ctx->nr_open_regions)
+ return 0;
+
+ us_start = ctx->array_us_start[ctx->nr_open_regions - 1];
+
+ return us - us_start;
+}
+
+uint64_t tr2tls_absolute_elapsed(uint64_t us)
+{
+ if (!tr2tls_thread_main)
+ return 0;
+
+ return us - tr2tls_us_start_process;
+}
+
+void tr2tls_init(void)
+{
+ tr2tls_start_process_clock();
+
+ pthread_key_create(&tr2tls_key, NULL);
+ init_recursive_mutex(&tr2tls_mutex);
+
+ tr2tls_thread_main =
+ tr2tls_create_self("main", tr2tls_us_start_process);
+}
+
+void tr2tls_release(void)
+{
+ tr2tls_unset_self();
+ tr2tls_thread_main = NULL;
+
+ pthread_mutex_destroy(&tr2tls_mutex);
+ pthread_key_delete(tr2tls_key);
+}
+
+int tr2tls_locked_increment(int *p)
+{
+ int current_value;
+
+ pthread_mutex_lock(&tr2tls_mutex);
+ current_value = *p;
+ *p = current_value + 1;
+ pthread_mutex_unlock(&tr2tls_mutex);
+
+ return current_value;
+}
--- /dev/null
+#ifndef TR2_TLS_H
+#define TR2_TLS_H
+
+#include "strbuf.h"
+
+/*
+ * Arbitrary limit on thread name length, for column alignment.
+ */
+#define TR2_MAX_THREAD_NAME (24)
+
+struct tr2tls_thread_ctx {
+ struct strbuf thread_name;
+ uint64_t *array_us_start;
+ int alloc;
+ int nr_open_regions; /* plays role of "nr" in ALLOC_GROW */
+ int thread_id;
+};
+
+/*
+ * Create TLS data for the current thread. This gives us a place to
+ * put per-thread data, such as thread start time, function nesting
+ * and a per-thread label for our messages.
+ *
+ * We assume the first thread is "main". Other threads are given
+ * non-zero thread-ids to help distinguish messages from concurrent
+ * threads.
+ *
+ * Truncate the thread name if necessary to help with column alignment
+ * in printf-style messages.
+ *
+ * In this and all following functions the term "self" refers to the
+ * current thread.
+ */
+struct tr2tls_thread_ctx *tr2tls_create_self(const char *thread_name,
+ uint64_t us_thread_start);
+
+/*
+ * Get our TLS data.
+ */
+struct tr2tls_thread_ctx *tr2tls_get_self(void);
+
+/*
+ * return true if the current thread is the main thread.
+ */
+int tr2tls_is_main_thread(void);
+
+/*
+ * Free our TLS data.
+ */
+void tr2tls_unset_self(void);
+
+/*
+ * Begin a new nested region and remember the start time.
+ */
+void tr2tls_push_self(uint64_t us_now);
+
+/*
+ * End the innermost nested region.
+ */
+void tr2tls_pop_self(void);
+
+/*
+ * Pop any extra (above the first) open regions on the current
+ * thread and discard. During a thread-exit, we should only
+ * have region[0] that was pushed in trace2_thread_start() if
+ * the thread exits normally.
+ */
+void tr2tls_pop_unwind_self(void);
+
+/*
+ * Compute the elapsed time between when the innermost region in the
+ * current thread was started and the given time (usually now).
+ */
+uint64_t tr2tls_region_elasped_self(uint64_t us);
+
+/*
+ * Compute the elapsed time between when the main thread started
+ * and the given time (usually now). This is assumed to
+ * be the absolute run time of the process.
+ */
+uint64_t tr2tls_absolute_elapsed(uint64_t us);
+
+/*
+ * Initialize the tr2 TLS system.
+ */
+void tr2tls_init(void);
+
+/*
+ * Free all tr2 TLS resources.
+ */
+void tr2tls_release(void);
+
+/*
+ * Protected increment of an integer.
+ */
+int tr2tls_locked_increment(int *p);
+
+/*
+ * Capture the process start time and do nothing else.
+ */
+void tr2tls_start_process_clock(void);
+
+#endif /* TR2_TLS_H */
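
Taken together, the intended per-thread pattern is: create the context when the thread starts, push/pop around each nested region, and free the context when the thread exits. The sketch below shows only the region half of that pattern; the real call sites are the trace2 region wrappers (not shown here), and getnanotime()/1000 is used to obtain microseconds exactly as tr2_tls.c does above:

    #include "cache.h"
    #include "trace2/tr2_tls.h"

    static void timed_region_sketch(void)
    {
            uint64_t us_now = getnanotime() / 1000;
            uint64_t us_elapsed;

            tr2tls_push_self(us_now); /* remember the region start time */

            /* ... the work being measured ... */

            us_now = getnanotime() / 1000;
            us_elapsed = tr2tls_region_elasped_self(us_now);
            tr2tls_pop_self(); /* close the innermost region */

            (void)us_elapsed; /* a trace2 target would format and emit this */
    }
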
const struct trailer_info *info,
const struct process_trailer_options *opts)
{
+ size_t origlen = out->len;
size_t i;
/* If we want the whole block untouched, we can take the fast path. */
- if (!opts->only_trailers && !opts->unfold) {
+ if (!opts->only_trailers && !opts->unfold && !opts->filter && !opts->separator) {
strbuf_add(out, info->trailer_start,
info->trailer_end - info->trailer_start);
return;
struct strbuf val = STRBUF_INIT;
parse_trailer(&tok, &val, NULL, trailer, separator_pos);
- if (opts->unfold)
- unfold_value(&val);
-
- strbuf_addf(out, "%s: %s\n", tok.buf, val.buf);
+ if (!opts->filter || opts->filter(&tok, opts->filter_data)) {
+ if (opts->unfold)
+ unfold_value(&val);
+
+ if (opts->separator && out->len != origlen)
+ strbuf_addbuf(out, opts->separator);
+ if (!opts->value_only)
+ strbuf_addf(out, "%s: ", tok.buf);
+ strbuf_addbuf(out, &val);
+ if (!opts->separator)
+ strbuf_addch(out, '\n');
+ }
strbuf_release(&tok);
strbuf_release(&val);
} else if (!opts->only_trailers) {
+ if (opts->separator && out->len != origlen) {
+ strbuf_addbuf(out, opts->separator);
+ }
strbuf_addstr(out, trailer);
+ if (opts->separator) {
+ strbuf_rtrim(out);
+ }
}
}
int only_input;
int unfold;
int no_divider;
+ int value_only;
+ const struct strbuf *separator;
+ int (*filter)(const struct strbuf *, void *);
+ void *filter_data;
};
#define PROCESS_TRAILER_OPTIONS_INIT {0}
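
The new fields let a caller select, unfold, and join trailers in one pass instead of post-processing the formatted block. A hedged sketch of such a caller, collecting only "Signed-off-by" values as a comma-separated list (match_sob and collect_sob are illustrative, and format_trailers_from_commit() is assumed to be the existing entry point that reaches the code above):

    static int match_sob(const struct strbuf *tok, void *data)
    {
            return !strcasecmp(tok->buf, "Signed-off-by");
    }

    static void collect_sob(struct strbuf *out, const char *msg)
    {
            struct strbuf sep = STRBUF_INIT;
            struct process_trailer_options opts = PROCESS_TRAILER_OPTIONS_INIT;

            strbuf_addstr(&sep, ", ");
            opts.only_trailers = 1;
            opts.unfold = 1;
            opts.value_only = 1;
            opts.separator = &sep;
            opts.filter = match_sob;

            format_trailers_from_commit(out, msg, &opts);
            strbuf_release(&sep);
    }
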
argv_array_pushf(&helper->env_array, "%s=%s",
GIT_DIR_ENVIRONMENT, get_git_dir());
+ helper->trace2_child_class = helper->args.argv[0]; /* "remote-<name>" */
+
code = start_command(helper);
if (code < 0 && errno == ENOENT)
die(_("unable to find remote helper for '%s'"), data->name);
return 0;
}
+static void die_if_server_options(struct transport *transport)
+{
+ if (!transport->server_options || !transport->server_options->nr)
+ return;
+ advise(_("see protocol.version in 'git help config' for more details"));
+ die(_("server options require protocol version 2 or later"));
+}
+
/*
* Obtains the protocol version from the transport and writes it to
* transport->data->version, first connecting if not already connected.
break;
case protocol_v1:
case protocol_v0:
+ die_if_server_options(transport);
get_remote_heads(&reader, &refs,
for_push ? REF_NORMAL : 0,
&data->extra_have,
int ret = 0;
struct git_transport_data *data = transport->data;
struct ref *refs = NULL;
- char *dest = xstrdup(transport->url);
struct fetch_pack_args args;
struct ref *refs_tmp = NULL;
switch (data->version) {
case protocol_v2:
- refs = fetch_pack(&args, data->fd, data->conn,
+ refs = fetch_pack(&args, data->fd,
refs_tmp ? refs_tmp : transport->remote_refs,
- dest, to_fetch, nr_heads, &data->shallow,
+ to_fetch, nr_heads, &data->shallow,
&transport->pack_lockfile, data->version);
break;
case protocol_v1:
case protocol_v0:
- refs = fetch_pack(&args, data->fd, data->conn,
+ die_if_server_options(transport);
+ refs = fetch_pack(&args, data->fd,
refs_tmp ? refs_tmp : transport->remote_refs,
- dest, to_fetch, nr_heads, &data->shallow,
+ to_fetch, nr_heads, &data->shallow,
&transport->pack_lockfile, data->version);
break;
case protocol_unknown_version:
free_refs(refs_tmp);
free_refs(refs);
- free(dest);
return ret;
}
proc.argv = argv;
proc.in = -1;
+ proc.trace2_hook_name = "pre-push";
if (start_command(&proc)) {
finish_command(&proc);
#define TRANSPORT_PUSH_OPTIONS (1<<14)
#define TRANSPORT_RECURSE_SUBMODULES_ONLY (1<<15)
-extern int transport_summary_width(const struct ref *refs);
+int transport_summary_width(const struct ref *refs);
/* Returns a transport suitable for the url */
struct transport *transport_get(struct remote *, const char *);
int verbose, int porcelain, unsigned int *reject_reasons);
typedef void alternate_ref_fn(const struct object_id *oid, void *);
-extern void for_each_alternate_ref(alternate_ref_fn, void *);
+void for_each_alternate_ref(alternate_ref_fn, void *);
#endif
struct tree_desc *t, struct tree_desc *tp,
int imin)
{
- unsigned mode;
+ unsigned short mode;
const char *path;
const struct object_id *oid;
int pathlen;
struct object_id oid;
};
-static int find_tree_entry(struct tree_desc *t, const char *name, struct object_id *result, unsigned *mode)
+static int find_tree_entry(struct tree_desc *t, const char *name, struct object_id *result, unsigned short *mode)
{
int namelen = strlen(name);
while (t->size) {
return -1;
}
-int get_tree_entry(const struct object_id *tree_oid, const char *name, struct object_id *oid, unsigned *mode)
+int get_tree_entry(const struct object_id *tree_oid, const char *name, struct object_id *oid, unsigned short *mode)
{
int retval;
void *tree;
* See the code for enum get_oid_result for a description of
* the return values.
*/
-enum get_oid_result get_tree_entry_follow_symlinks(struct object_id *tree_oid, const char *name, struct object_id *result, struct strbuf *result_path, unsigned *mode)
+enum get_oid_result get_tree_entry_follow_symlinks(struct object_id *tree_oid, const char *name, struct object_id *result, struct strbuf *result_path, unsigned short *mode)
{
int retval = MISSING_OBJECT;
struct dir_state *parents = NULL;
unsigned int size;
};
-static inline const struct object_id *tree_entry_extract(struct tree_desc *desc, const char **pathp, unsigned int *modep)
+static inline const struct object_id *tree_entry_extract(struct tree_desc *desc, const char **pathp, unsigned short *modep)
{
*pathp = desc->entry.path;
*modep = desc->entry.mode;
typedef int (*traverse_callback_t)(int n, unsigned long mask, unsigned long dirmask, struct name_entry *entry, struct traverse_info *);
int traverse_trees(struct index_state *istate, int n, struct tree_desc *t, struct traverse_info *info);
-enum get_oid_result get_tree_entry_follow_symlinks(struct object_id *tree_oid, const char *name, struct object_id *result, struct strbuf *result_path, unsigned *mode);
+enum get_oid_result get_tree_entry_follow_symlinks(struct object_id *tree_oid, const char *name, struct object_id *result, struct strbuf *result_path, unsigned short *mode);
struct traverse_info {
const char *traverse_path;
int show_all_errors;
};
-int get_tree_entry(const struct object_id *, const char *, struct object_id *, unsigned *);
-extern char *make_traverse_path(char *path, const struct traverse_info *info, const struct name_entry *n);
-extern void setup_traverse_info(struct traverse_info *info, const char *base);
+int get_tree_entry(const struct object_id *, const char *, struct object_id *, unsigned short *);
+char *make_traverse_path(char *path, const struct traverse_info *info, const struct name_entry *n);
+void setup_traverse_info(struct traverse_info *info, const char *base);
static inline int traverse_path_len(const struct traverse_info *info, const struct name_entry *n)
{
{ 0x0E34, 0x0E3A },
{ 0x0E47, 0x0E4E },
{ 0x0EB1, 0x0EB1 },
-{ 0x0EB4, 0x0EB9 },
-{ 0x0EBB, 0x0EBC },
+{ 0x0EB4, 0x0EBC },
{ 0x0EC8, 0x0ECD },
{ 0x0F18, 0x0F19 },
{ 0x0F35, 0x0F35 },
{ 0xA980, 0xA982 },
{ 0xA9B3, 0xA9B3 },
{ 0xA9B6, 0xA9B9 },
-{ 0xA9BC, 0xA9BC },
+{ 0xA9BC, 0xA9BD },
{ 0xA9E5, 0xA9E5 },
{ 0xAA29, 0xAA2E },
{ 0xAA31, 0xAA32 },
{ 0x11727, 0x1172B },
{ 0x1182F, 0x11837 },
{ 0x11839, 0x1183A },
+{ 0x119D4, 0x119D7 },
+{ 0x119DA, 0x119DB },
+{ 0x119E0, 0x119E0 },
{ 0x11A01, 0x11A0A },
{ 0x11A33, 0x11A38 },
{ 0x11A3B, 0x11A3E },
{ 0x11D95, 0x11D95 },
{ 0x11D97, 0x11D97 },
{ 0x11EF3, 0x11EF4 },
+{ 0x13430, 0x13438 },
{ 0x16AF0, 0x16AF4 },
{ 0x16B30, 0x16B36 },
+{ 0x16F4F, 0x16F4F },
{ 0x16F8F, 0x16F92 },
{ 0x1BC9D, 0x1BC9E },
{ 0x1BCA0, 0x1BCA3 },
{ 0x1E01B, 0x1E021 },
{ 0x1E023, 0x1E024 },
{ 0x1E026, 0x1E02A },
+{ 0x1E130, 0x1E136 },
+{ 0x1E2EC, 0x1E2EF },
{ 0x1E8D0, 0x1E8D6 },
{ 0x1E944, 0x1E94A },
{ 0xE0001, 0xE0001 },
{ 0xFE68, 0xFE6B },
{ 0xFF01, 0xFF60 },
{ 0xFFE0, 0xFFE6 },
-{ 0x16FE0, 0x16FE1 },
-{ 0x17000, 0x187F1 },
+{ 0x16FE0, 0x16FE3 },
+{ 0x17000, 0x187F7 },
{ 0x18800, 0x18AF2 },
{ 0x1B000, 0x1B11E },
+{ 0x1B150, 0x1B152 },
+{ 0x1B164, 0x1B167 },
{ 0x1B170, 0x1B2FB },
{ 0x1F004, 0x1F004 },
{ 0x1F0CF, 0x1F0CF },
{ 0x1F680, 0x1F6C5 },
{ 0x1F6CC, 0x1F6CC },
{ 0x1F6D0, 0x1F6D2 },
+{ 0x1F6D5, 0x1F6D5 },
{ 0x1F6EB, 0x1F6EC },
-{ 0x1F6F4, 0x1F6F9 },
-{ 0x1F910, 0x1F93E },
-{ 0x1F940, 0x1F970 },
+{ 0x1F6F4, 0x1F6FA },
+{ 0x1F7E0, 0x1F7EB },
+{ 0x1F90D, 0x1F971 },
{ 0x1F973, 0x1F976 },
-{ 0x1F97A, 0x1F97A },
-{ 0x1F97C, 0x1F9A2 },
-{ 0x1F9B0, 0x1F9B9 },
-{ 0x1F9C0, 0x1F9C2 },
-{ 0x1F9D0, 0x1F9FF },
+{ 0x1F97A, 0x1F9A2 },
+{ 0x1F9A5, 0x1F9AA },
+{ 0x1F9AE, 0x1F9CA },
+{ 0x1F9CD, 0x1F9FF },
+{ 0x1FA70, 0x1FA73 },
+{ 0x1FA78, 0x1FA7A },
+{ 0x1FA80, 0x1FA82 },
+{ 0x1FA90, 0x1FA95 },
{ 0x20000, 0x2FFFD },
{ 0x30000, 0x3FFFD }
};
enum unpack_trees_error_types e,
const char *path)
{
+ if (o->quiet)
+ return -1;
+
if (!o->show_all_errors)
return error(ERRORMSG(o, e), super_prefixed(path));
flags |= SUBMODULE_MOVE_HEAD_FORCE;
if (submodule_move_head(ce->name, old_id, new_id, flags))
- return o->gently ? -1 :
- add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name);
+ return add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name);
return 0;
}
}
}
-/*
- * Unlink the last component and schedule the leading directories for
- * removal, such that empty directories get removed.
- */
-static void unlink_entry(const struct cache_entry *ce)
-{
- const struct submodule *sub = submodule_from_ce(ce);
- if (sub) {
- /* state.force is set at the caller. */
- submodule_move_head(ce->name, "HEAD", NULL,
- SUBMODULE_MOVE_HEAD_FORCE);
- }
- if (!check_leading_path(ce->name, ce_namelen(ce)))
- return;
- if (remove_or_warn(ce->ce_mode, ce->name))
- return;
- schedule_dir_for_removal(ce->name, ce_namelen(ce));
-}
-
static struct progress *get_progress(struct unpack_trees_options *o)
{
unsigned cnt = 0, total = 0;
unlink_entry(ce);
}
}
- remove_marked_cache_entries(index);
+ remove_marked_cache_entries(index, 0);
remove_scheduled_dirs();
if (should_update_submodules() && o->update && !o->dry_run)
* below.
*/
struct oid_array to_fetch = OID_ARRAY_INIT;
- int fetch_if_missing_store = fetch_if_missing;
- fetch_if_missing = 0;
for (i = 0; i < index->cache_nr; i++) {
struct cache_entry *ce = index->cache[i];
- if ((ce->ce_flags & CE_UPDATE) &&
- !S_ISGITLINK(ce->ce_mode)) {
- if (!has_object_file(&ce->oid))
- oid_array_append(&to_fetch, &ce->oid);
- }
+
+ if (!(ce->ce_flags & CE_UPDATE) ||
+ S_ISGITLINK(ce->ce_mode))
+ continue;
+ if (!oid_object_info_extended(the_repository, &ce->oid,
+ NULL,
+ OBJECT_INFO_FOR_PREFETCH))
+ continue;
+ oid_array_append(&to_fetch, &ce->oid);
}
if (to_fetch.nr)
fetch_objects(repository_format_partial_clone,
to_fetch.oid, to_fetch.nr);
- fetch_if_missing = fetch_if_missing_store;
oid_array_clear(&to_fetch);
}
for (i = 0; i < index->cache_nr; i++) {
* instead of ODB since we already know what these trees contain.
*/
static int traverse_by_cache_tree(int pos, int nr_entries, int nr_names,
- struct name_entry *names,
struct traverse_info *info)
{
struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
* unprocessed entries before 'pos'.
*/
bottom = o->cache_bottom;
- ret = traverse_by_cache_tree(pos, nr_entries, n, names, info);
+ ret = traverse_by_cache_tree(pos, nr_entries, n, info);
o->cache_bottom = bottom;
return ret;
}
static int unpack_failed(struct unpack_trees_options *o, const char *message)
{
discard_index(&o->result);
- if (!o->gently && !o->exiting_early) {
+ if (!o->quiet && !o->exiting_early) {
if (message)
return error("%s", message);
return -1;
WRITE_TREE_SILENT |
WRITE_TREE_REPAIR);
}
+
+ o->result.updated_workdir = 1;
discard_index(o->dst_index);
*o->dst_index = o->result;
} else {
static int reject_merge(const struct cache_entry *ce,
struct unpack_trees_options *o)
{
- return o->gently ? -1 :
- add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
+ return add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
}
static int same(const struct cache_entry *a, const struct cache_entry *b)
int r = check_submodule_move_head(ce,
"HEAD", oid_to_hex(&ce->oid), o);
if (r)
- return o->gently ? -1 :
- add_rejected_path(o, error_type, ce->name);
+ return add_rejected_path(o, error_type, ce->name);
return 0;
}
}
if (errno == ENOENT)
return 0;
- return o->gently ? -1 :
- add_rejected_path(o, error_type, ce->name);
+ return add_rejected_path(o, error_type, ce->name);
}
int verify_uptodate(const struct cache_entry *ce,
*/
static int verify_clean_submodule(const char *old_sha1,
const struct cache_entry *ce,
- enum unpack_trees_error_types error_type,
struct unpack_trees_options *o)
{
if (!submodule_from_ce(ce))
}
static int verify_clean_subdirectory(const struct cache_entry *ce,
- enum unpack_trees_error_types error_type,
struct unpack_trees_options *o)
{
/*
if (!sub_head && oideq(&oid, &ce->oid))
return 0;
return verify_clean_submodule(sub_head ? NULL : oid_to_hex(&oid),
- ce, error_type, o);
+ ce, o);
}
/*
d.exclude_per_dir = o->dir->exclude_per_dir;
i = read_directory(&d, o->src_index, pathbuf, namelen+1, NULL);
if (i)
- return o->gently ? -1 :
- add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);
+ return add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);
free(pathbuf);
return cnt;
}
* files that are in "foo/" we would lose
* them.
*/
- if (verify_clean_subdirectory(ce, error_type, o) < 0)
+ if (verify_clean_subdirectory(ce, o) < 0)
return -1;
return 0;
}
return 0;
}
- return o->gently ? -1 :
- add_rejected_path(o, error_type, name);
+ return add_rejected_path(o, error_type, name);
}
/*
return error("Cannot do a bind merge of %d trees",
o->merge_size);
if (a && old)
- return o->gently ? -1 :
+ return o->quiet ? -1 :
error(ERRORMSG(o, ERROR_BIND_OVERLAP),
super_prefixed(a->name),
super_prefixed(old->name));
if (o->update && S_ISGITLINK(old->ce_mode) &&
should_update_submodules() && !verify_uptodate(old, o))
update |= CE_UPDATE;
- add_entry(o, old, update, 0);
+ add_entry(o, old, update, CE_STAGEMASK);
return 0;
}
return merged_entry(a, old, o);
diff_index_cached,
debug_unpack,
skip_sparse_checkout,
- gently,
+ quiet,
exiting_early,
show_all_errors,
dry_run;
return 1;
}
-static void check_non_tip(struct object_array *want_obj)
+static void check_non_tip(struct object_array *want_obj,
+ struct packet_writer *writer)
{
int i;
/* Pick one of them (we know there is at least one) */
for (i = 0; i < want_obj->nr; i++) {
struct object *o = want_obj->objects[i].item;
- if (!is_our_ref(o))
+ if (!is_our_ref(o)) {
+ packet_writer_error(writer,
+ "upload-pack: not our ref %s",
+ oid_to_hex(&o->oid));
die("git upload-pack: not our ref %s",
oid_to_hex(&o->oid));
+ }
}
}
if (skip_prefix(line, "deepen-not ", &arg)) {
char *ref = NULL;
struct object_id oid;
- if (expand_ref(arg, strlen(arg), &oid, &ref) != 1)
+ if (expand_ref(the_repository, arg, strlen(arg), &oid, &ref) != 1)
die("git upload-pack: ambiguous deepen-not: %s", line);
string_list_append(deepen_not, ref);
free(ref);
* by another process that handled the initial request.
*/
if (has_non_tip)
- check_non_tip(want_obj);
+ check_non_tip(want_obj, &writer);
if (!use_sideband && daemon_mode)
no_progress = 1;
allow_ref_in_want = git_config_bool(var, value);
} else if (!strcmp("uploadpack.allowsidebandall", var)) {
allow_sideband_all = git_config_bool(var, value);
+ } else if (!strcmp("core.precomposeunicode", var)) {
+ precomposed_unicode = git_config_bool(var, value);
}
if (current_config_scope() != CONFIG_SCOPE_REPO) {
struct repository;
struct argv_array;
struct packet_reader;
-extern int upload_pack_v2(struct repository *r, struct argv_array *keys,
- struct packet_reader *request);
+int upload_pack_v2(struct repository *r, struct argv_array *keys,
+ struct packet_reader *request);
struct strbuf;
-extern int upload_pack_advertise(struct repository *r,
- struct strbuf *value);
+int upload_pack_advertise(struct repository *r,
+ struct strbuf *value);
#endif /* UPLOAD_PACK_H */
struct strbuf;
-extern int is_url(const char *url);
-extern int is_urlschemechar(int first_flag, int ch);
-extern char *url_decode(const char *url);
-extern char *url_decode_mem(const char *url, int len);
-extern char *url_decode_parameter_name(const char **query);
-extern char *url_decode_parameter_value(const char **query);
+int is_url(const char *url);
+int is_urlschemechar(int first_flag, int ch);
+char *url_decode(const char *url);
+char *url_decode_mem(const char *url, int len);
+char *url_decode_parameter_name(const char **query);
+char *url_decode_parameter_value(const char **query);
-extern void end_url_with_slash(struct strbuf *buf, const char *url);
-extern void str_end_url_with_slash(const char *url, char **dest);
+void end_url_with_slash(struct strbuf *buf, const char *url);
+void str_end_url_with_slash(const char *url, char **dest);
#endif /* URL_H */
* '?...' and '#...' portion; will always be >= 1 */
};
-extern char *url_normalize(const char *, struct url_info *);
+char *url_normalize(const char *, struct url_info *);
struct urlmatch_item {
size_t hostmatch_len;
int (*cascade_fn)(const char *var, const char *value, void *cb);
};
-extern int urlmatch_config_entry(const char *var, const char *value, void *cb);
+int urlmatch_config_entry(const char *var, const char *value, void *cb);
#endif /* URL_MATCH_H */
static NORETURN void usage_builtin(const char *err, va_list params)
{
vreportf("usage: ", err, params);
+
+ /*
+ * When we detect a usage error *before* the command dispatch in
+ * cmd_main(), we don't know what verb to report. Force it to this
+ * to facilitate post-processing.
+ */
+ trace2_cmd_name("_usage_");
+
+ /*
+ * Currently, the (err, params) are usually just the static usage
+ * string, which isn't very useful here. Typically, the call site
+ * manually calls fprintf(stderr,...) with the actual detailed
+ * syntax error before calling usage().
+ *
+ * TODO It would be nice to update the call sites to pass both
+ * the static usage string and the detailed error message.
+ */
+
exit(129);
}
static NORETURN void die_builtin(const char *err, va_list params)
{
+ /*
+ * We call this trace2 function first and expect it to va_copy 'params'
+ * before using it (because an 'ap' can only be walked once).
+ */
+ trace2_cmd_error_va(err, params);
+
vreportf("fatal: ", err, params);
+
exit(128);
}
static void error_builtin(const char *err, va_list params)
{
+ /*
+ * We call this trace2 function first and expect it to va_copy 'params'
+ * before using it (because an 'ap' can only be walked once).
+ */
+ trace2_cmd_error_va(err, params);
+
vreportf("error: ", err, params);
}
int utf8_fprintf(FILE *, const char *, ...);
extern const char utf8_bom[];
-extern int skip_utf8_bom(char **, size_t);
+int skip_utf8_bom(char **, size_t);
void strbuf_add_wrapped_text(struct strbuf *buf,
const char *text, int indent, int indent2, int width);
#ifndef VARINT_H
#define VARINT_H
-extern int encode_varint(uintmax_t, unsigned char *);
-extern uintmax_t decode_varint(const unsigned char **);
+int encode_varint(uintmax_t, unsigned char *);
+uintmax_t decode_varint(const unsigned char **);
#endif /* VARINT_H */
#define SLIDING_VIEW_INIT(input, len) { (input), 0, 0, (len), STRBUF_INIT }
-extern int move_window(struct sliding_view *view, off_t off, size_t width);
+int move_window(struct sliding_view *view, off_t off, size_t width);
#endif
struct line_buffer;
struct sliding_view;
-extern int svndiff0_apply(struct line_buffer *delta, off_t delta_len,
- struct sliding_view *preimage, FILE *postimage);
+int svndiff0_apply(struct line_buffer *delta, off_t delta_len,
+ struct sliding_view *preimage, FILE *postimage);
#endif
struct worktree *worktree = NULL;
struct strbuf path = STRBUF_INIT;
struct strbuf worktree_path = STRBUF_INIT;
- int is_bare = 0;
strbuf_add_absolute_path(&worktree_path, get_git_common_dir());
- is_bare = !strbuf_strip_suffix(&worktree_path, "/.git");
- if (is_bare)
+ if (!strbuf_strip_suffix(&worktree_path, "/.git"))
strbuf_strip_suffix(&worktree_path, "/.");
strbuf_addf(&path, "%s/HEAD", get_git_common_dir());
worktree = xcalloc(1, sizeof(*worktree));
worktree->path = strbuf_detach(&worktree_path, NULL);
- worktree->is_bare = is_bare;
+ /*
+ * NEEDSWORK: If this function is called from a secondary worktree and
+ * config.worktree is present, is_bare_repository_cfg will reflect the
+ * contents of config.worktree, not the contents of the main worktree.
+ * This means that worktree->is_bare may be set to 0 even if the main
+ * worktree is configured to be bare.
+ */
+ worktree->is_bare = (is_bare_repository_cfg == 1) ||
+ is_bare_repository();
add_head_info(worktree);
strbuf_release(&path);
DIR *dir;
struct dirent *d;
int ret = 0;
- struct repository_format format;
+ struct repository_format format = REPOSITORY_FORMAT_INIT;
submodule_gitdir = git_pathdup_submodule(path, "%s", "");
if (!submodule_gitdir)
read_repository_format(&format, sb.buf);
if (format.version != 0) {
strbuf_release(&sb);
+ clear_repository_format(&format);
return 1;
}
+ clear_repository_format(&format);
/* Replace config by worktrees. */
strbuf_setlen(&sb, sb.len - strlen("config"));
* The caller is responsible for freeing the memory from the returned
* worktree(s).
*/
-extern struct worktree **get_worktrees(unsigned flags);
+struct worktree **get_worktrees(unsigned flags);
/*
* Returns 1 if linked worktrees exist, 0 otherwise.
*/
-extern int submodule_uses_worktrees(const char *path);
+int submodule_uses_worktrees(const char *path);
/*
* Return git dir of the worktree. Note that the path may be relative.
* If wt is NULL, git dir of current worktree is returned.
*/
-extern const char *get_worktree_git_dir(const struct worktree *wt);
+const char *get_worktree_git_dir(const struct worktree *wt);
/*
* Search a worktree that can be unambiguously identified by
* "arg". "prefix" must not be NULL.
*/
-extern struct worktree *find_worktree(struct worktree **list,
- const char *prefix,
- const char *arg);
+struct worktree *find_worktree(struct worktree **list,
+ const char *prefix,
+ const char *arg);
/*
* Return true if the given worktree is the main one.
*/
-extern int is_main_worktree(const struct worktree *wt);
+int is_main_worktree(const struct worktree *wt);
/*
* Return the reason string if the given worktree is locked or NULL
* otherwise.
*/
-extern const char *worktree_lock_reason(struct worktree *wt);
+const char *worktree_lock_reason(struct worktree *wt);
#define WT_VALIDATE_WORKTREE_MISSING_OK (1 << 0)
* Return zero if the worktree is in good condition. Error message is
* returned if "errmsg" is not NULL.
*/
-extern int validate_worktree(const struct worktree *wt,
- struct strbuf *errmsg,
- unsigned flags);
+int validate_worktree(const struct worktree *wt,
+ struct strbuf *errmsg,
+ unsigned flags);
/*
* Update worktrees/xxx/gitdir with the new path.
*/
-extern void update_worktree_location(struct worktree *wt,
- const char *path_);
+void update_worktree_location(struct worktree *wt,
+ const char *path_);
/*
* Free up the memory for worktree(s)
*/
-extern void free_worktrees(struct worktree **);
+void free_worktrees(struct worktree **);
/*
* Check if a per-worktree symref points to a ref in the main worktree
* or any linked worktree, and return the worktree that holds the ref,
* or NULL otherwise. The result may be destroyed by the next call.
*/
-extern const struct worktree *find_shared_symref(const char *symref,
- const char *target);
+const struct worktree *find_shared_symref(const char *symref,
+ const char *target);
/*
* Similar to head_ref() for all HEADs _except_ one from the current
* Similar to git_path() but can produce paths for a specified
* worktree instead of current one
*/
-extern const char *worktree_git_path(const struct worktree *wt,
- const char *fmt, ...)
+const char *worktree_git_path(const struct worktree *wt,
+ const char *fmt, ...)
__attribute__((format (printf, 2, 3)));
/*
#include "utf8.h"
#include "worktree.h"
#include "lockfile.h"
+#include "sequencer.h"
static const char cut_line[] =
"------------------------ >8 ------------------------\n";
void wt_status_collect(struct wt_status *s)
{
+ trace2_region_enter("status", "worktrees", s->repo);
wt_status_collect_changes_worktree(s);
- if (s->is_initial)
+ trace2_region_leave("status", "worktrees", s->repo);
+
+ if (s->is_initial) {
+ trace2_region_enter("status", "initial", s->repo);
wt_status_collect_changes_initial(s);
- else
+ trace2_region_leave("status", "initial", s->repo);
+ } else {
+ trace2_region_enter("status", "index", s->repo);
wt_status_collect_changes_index(s);
+ trace2_region_leave("status", "index", s->repo);
+ }
+
+ trace2_region_enter("status", "untracked", s->repo);
wt_status_collect_untracked(s);
+ trace2_region_leave("status", "untracked", s->repo);
wt_status_get_state(s->repo, &s->state, s->branch && !strcmp(s->branch, "HEAD"));
if (s->state.merge_in_progress && !has_unmerged(s))
return len;
}
-void wt_status_add_cut_line(FILE *fp)
+void wt_status_append_cut_line(struct strbuf *buf)
{
const char *explanation = _("Do not modify or remove the line above.\nEverything below it will be ignored.");
+
+ strbuf_commented_addf(buf, "%s", cut_line);
+ strbuf_add_commented_lines(buf, explanation, strlen(explanation));
+}
+
+void wt_status_add_cut_line(FILE *fp)
+{
struct strbuf buf = STRBUF_INIT;
- fprintf(fp, "%c %s", comment_line_char, cut_line);
- strbuf_add_commented_lines(&buf, explanation, strlen(explanation));
+ wt_status_append_cut_line(&buf);
fputs(buf.buf, fp);
strbuf_release(&buf);
}
static void show_cherry_pick_in_progress(struct wt_status *s,
const char *color)
{
- status_printf_ln(s, color, _("You are currently cherry-picking commit %s."),
- find_unique_abbrev(&s->state.cherry_pick_head_oid, DEFAULT_ABBREV));
+ if (is_null_oid(&s->state.cherry_pick_head_oid))
+ status_printf_ln(s, color,
+ _("Cherry-pick currently in progress."));
+ else
+ status_printf_ln(s, color,
+ _("You are currently cherry-picking commit %s."),
+ find_unique_abbrev(&s->state.cherry_pick_head_oid,
+ DEFAULT_ABBREV));
+
if (s->hints) {
if (has_unmerged(s))
status_printf_ln(s, color,
_(" (fix conflicts and run \"git cherry-pick --continue\")"));
+ else if (is_null_oid(&s->state.cherry_pick_head_oid))
+ status_printf_ln(s, color,
+ _(" (run \"git cherry-pick --continue\" to continue)"));
else
status_printf_ln(s, color,
_(" (all conflicts fixed: run \"git cherry-pick --continue\")"));
static void show_revert_in_progress(struct wt_status *s,
const char *color)
{
- status_printf_ln(s, color, _("You are currently reverting commit %s."),
- find_unique_abbrev(&s->state.revert_head_oid, DEFAULT_ABBREV));
+ if (is_null_oid(&s->state.revert_head_oid))
+ status_printf_ln(s, color,
+ _("Revert currently in progress."));
+ else
+ status_printf_ln(s, color,
+ _("You are currently reverting commit %s."),
+ find_unique_abbrev(&s->state.revert_head_oid,
+ DEFAULT_ABBREV));
if (s->hints) {
if (has_unmerged(s))
status_printf_ln(s, color,
_(" (fix conflicts and run \"git revert --continue\")"));
+ else if (is_null_oid(&s->state.revert_head_oid))
+ status_printf_ln(s, color,
+ _(" (run \"git revert --continue\" to continue)"));
else
status_printf_ln(s, color,
_(" (all conflicts fixed: run \"git revert --continue\")"));
{
struct stat st;
struct object_id oid;
+ enum replay_action action;
if (!stat(git_path_merge_head(r), &st)) {
wt_status_check_rebase(NULL, state);
state->revert_in_progress = 1;
oidcpy(&state->revert_head_oid, &oid);
}
-
+ if (!sequencer_get_last_command(r, &action)) {
+ if (action == REPLAY_PICK) {
+ state->cherry_pick_in_progress = 1;
+ oidcpy(&state->cherry_pick_head_oid, &null_oid);
+ } else {
+ state->revert_in_progress = 1;
+ oidcpy(&state->revert_head_oid, &null_oid);
+ }
+ }
if (get_detached_from)
wt_status_get_detached_from(r, state);
}
color_fprintf(s->fp, branch_color_local, "%s", branch_name);
sti = stat_tracking_info(branch, &num_ours, &num_theirs, &base,
- s->ahead_behind_flags);
+ 0, s->ahead_behind_flags);
if (sti < 0) {
if (!base)
goto conclude;
branch = branch_get(branch_name);
base = NULL;
ab_info = stat_tracking_info(branch, &nr_ahead, &nr_behind,
- &base, s->ahead_behind_flags);
+ &base, 0, s->ahead_behind_flags);
if (base) {
base = shorten_unambiguous_ref(base, 0);
fprintf(s->fp, "# branch.upstream %s%c", base, eol);
void wt_status_print(struct wt_status *s)
{
+ trace2_data_intmax("status", s->repo, "count/changed", s->change.nr);
+ trace2_data_intmax("status", s->repo, "count/untracked",
+ s->untracked.nr);
+ trace2_data_intmax("status", s->repo, "count/ignored", s->ignored.nr);
+
+ trace2_region_enter("status", "print", s->repo);
+
switch (s->status_format) {
case STATUS_FORMAT_SHORT:
wt_shortstatus_print(s);
wt_longstatus_print(s);
break;
}
+
+ trace2_region_leave("status", "print", s->repo);
}
/**
};
size_t wt_status_locate_end(const char *s, size_t len);
+void wt_status_append_cut_line(struct strbuf *buf);
void wt_status_add_cut_line(FILE *fp);
void wt_status_prepare(struct repository *r, struct wt_status *s);
void wt_status_print(struct wt_status *s);
git_xmerge_style = XDL_MERGE_DIFF3;
else if (!strcmp(value, "merge"))
git_xmerge_style = 0;
+ /*
+ * Please update _git_checkout() in
+ * git-completion.bash when you add new merge config
+ */
else
die("unknown style '%s' given for '%s'",
value, var);
void read_mmblob(mmfile_t *ptr, const struct object_id *oid);
int buffer_is_binary(const char *ptr, unsigned long size);
-extern void xdiff_set_find_func(xdemitconf_t *xecfg, const char *line, int cflags);
-extern void xdiff_clear_find_func(xdemitconf_t *xecfg);
-extern int git_xmerge_config(const char *var, const char *value, void *cb);
+void xdiff_set_find_func(xdemitconf_t *xecfg, const char *line, int cflags);
+void xdiff_clear_find_func(xdemitconf_t *xecfg);
+int git_xmerge_config(const char *var, const char *value, void *cb);
extern int git_xmerge_style;
/*
* The `flags` given as XDF_WHITESPACE_FLAGS determine how white spaces
* are treated for the comparison.
*/
-extern int xdiff_compare_lines(const char *l1, long s1,
- const char *l2, long s2, long flags);
+int xdiff_compare_lines(const char *l1, long s1,
+ const char *l2, long s2, long flags);
/*
* Returns a hash of the string s of length len.
* The `flags` given as XDF_WHITESPACE_FLAGS determine how white spaces
* are treated for the hash.
*/
-extern unsigned long xdiff_hash_string(const char *s, size_t len, long flags);
+unsigned long xdiff_hash_string(const char *s, size_t len, long flags);
#endif
} bdiffparam_t;
-#define xdl_malloc(x) malloc(x)
+#define xdl_malloc(x) xmalloc(x)
#define xdl_free(ptr) free(ptr)
-#define xdl_realloc(ptr,x) realloc(ptr,x)
+#define xdl_realloc(ptr,x) xrealloc(ptr,x)
void *xdl_mmfile_first(mmfile_t *mmf, long *size);
long xdl_mmfile_size(mmfile_t *mmf);
#if !defined(XINCLUDE_H)
#define XINCLUDE_H
-#include <ctype.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-#include <limits.h>
-
+#include "git-compat-util.h"
#include "xmacros.h"
#include "xdiff.h"
#include "xtypes.h"